xref: /dpdk/lib/ipsec/esp_outb.c (revision c99d26197c535ecda727fb1d641c2bbd27f95374)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2020 Intel Corporation
 */

#include <rte_ipsec.h>
#include <rte_esp.h>
#include <rte_ip.h>
#include <rte_errno.h>
#include <rte_cryptodev.h>

#include "sa.h"
#include "ipsec_sqn.h"
#include "crypto.h"
#include "iph.h"
#include "misc.h"
#include "pad.h"

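/*
 * type of the helper functions that prepare the packet itself
 * (tunnel vs transport case), so that the rest of the outbound path
 * can be shared; implemented by outb_tun_pkt_prepare() and
 * outb_trs_pkt_prepare(), see below.
 */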
typedef int32_t (*esp_outb_prepare_t)(struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
	union sym_op_data *icv, uint8_t sqh_len);

/*
 * helper function to fill crypto_sym op for cipher+auth algorithms.
 * used by outb_cop_prepare(), see below.
 */
static inline void
sop_ciph_auth_prepare(struct rte_crypto_sym_op *sop,
	const struct rte_ipsec_sa *sa, const union sym_op_data *icv,
	uint32_t pofs, uint32_t plen)
{
	sop->cipher.data.offset = sa->ctp.cipher.offset + pofs;
	sop->cipher.data.length = sa->ctp.cipher.length + plen;
	sop->auth.data.offset = sa->ctp.auth.offset + pofs;
	sop->auth.data.length = sa->ctp.auth.length + plen;
	sop->auth.digest.data = icv->va;
	sop->auth.digest.phys_addr = icv->pa;
}

/*
 * helper function to fill crypto_sym op for AEAD algorithms.
 * used by outb_cop_prepare(), see below.
 */
static inline void
sop_aead_prepare(struct rte_crypto_sym_op *sop,
	const struct rte_ipsec_sa *sa, const union sym_op_data *icv,
	uint32_t pofs, uint32_t plen)
{
	sop->aead.data.offset = sa->ctp.cipher.offset + pofs;
	sop->aead.data.length = sa->ctp.cipher.length + plen;
	sop->aead.digest.data = icv->va;
	sop->aead.digest.phys_addr = icv->pa;
	sop->aead.aad.data = icv->va + sa->icv_len;
	sop->aead.aad.phys_addr = icv->pa + sa->icv_len;
}

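/*
 * note: the icv union points into the mbuf tail area reserved by the
 * *_pkt_prepare() functions below: the digest is written at icv->va and,
 * for AEAD algorithms, the AAD scratch area sits right after it
 * (beyond the packet data, in the tailroom).
 */
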
/*
 * setup crypto op and crypto sym op for ESP outbound packet.
 */
static inline void
outb_cop_prepare(struct rte_crypto_op *cop,
	const struct rte_ipsec_sa *sa, const uint64_t ivp[IPSEC_MAX_IV_QWORD],
	const union sym_op_data *icv, uint32_t hlen, uint32_t plen)
{
	struct rte_crypto_sym_op *sop;
	struct aead_gcm_iv *gcm;
	struct aead_ccm_iv *ccm;
	struct aead_chacha20_poly1305_iv *chacha20_poly1305;
	struct aesctr_cnt_blk *ctr;
	uint32_t algo;

	algo = sa->algo_type;

	/* fill sym op fields */
	sop = cop->sym;

	switch (algo) {
	case ALGO_TYPE_AES_CBC:
		/* Cipher-Auth (AES-CBC *) case */
	case ALGO_TYPE_3DES_CBC:
		/* Cipher-Auth (3DES-CBC *) case */
	case ALGO_TYPE_NULL:
		/* NULL case */
		sop_ciph_auth_prepare(sop, sa, icv, hlen, plen);
		break;
	case ALGO_TYPE_AES_GMAC:
		/* GMAC case */
		sop_ciph_auth_prepare(sop, sa, icv, hlen, plen);

		/* fill AAD IV (located inside crypto op) */
		gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
			sa->iv_ofs);
		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_GCM:
		/* AEAD (AES_GCM) case */
		sop_aead_prepare(sop, sa, icv, hlen, plen);

		/* fill AAD IV (located inside crypto op) */
		gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
			sa->iv_ofs);
		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_CCM:
		/* AEAD (AES_CCM) case */
		sop_aead_prepare(sop, sa, icv, hlen, plen);

		/* fill AAD IV (located inside crypto op) */
		ccm = rte_crypto_op_ctod_offset(cop, struct aead_ccm_iv *,
			sa->iv_ofs);
		aead_ccm_iv_fill(ccm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_CHACHA20_POLY1305:
		/* AEAD (CHACHA20_POLY) case */
		sop_aead_prepare(sop, sa, icv, hlen, plen);

		/* fill AAD IV (located inside crypto op) */
		chacha20_poly1305 = rte_crypto_op_ctod_offset(cop,
			struct aead_chacha20_poly1305_iv *,
			sa->iv_ofs);
		aead_chacha20_poly1305_iv_fill(chacha20_poly1305,
					       ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_CTR:
		/* Cipher-Auth (AES-CTR *) case */
		sop_ciph_auth_prepare(sop, sa, icv, hlen, plen);

		/* fill CTR block (located inside crypto op) */
		ctr = rte_crypto_op_ctod_offset(cop, struct aesctr_cnt_blk *,
			sa->iv_ofs);
		aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
		break;
	}
}

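/*
 * note: for GMAC, GCM, CCM, CHACHA20-POLY1305 and CTR the per-operation
 * IV/counter block is built inside the crypto op private area, at the
 * sa->iv_ofs offset agreed with the crypto session at SA setup time.
 */
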
/*
 * setup/update packet data and metadata for ESP outbound tunnel case.
 */
static inline int32_t
outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
	union sym_op_data *icv, uint8_t sqh_len)
{
	uint32_t clen, hlen, l2len, pdlen, pdofs, plen, tlen;
	struct rte_mbuf *ml;
	struct rte_esp_hdr *esph;
	struct rte_esp_tail *espt;
	char *ph, *pt;
	uint64_t *iv;

	/* calculate extra header space required */
	hlen = sa->hdr_len + sa->iv_len + sizeof(*esph);

	/* size of ipsec protected data */
	l2len = mb->l2_len;
	plen = mb->pkt_len - l2len;

	/* number of bytes to encrypt */
	clen = plen + sizeof(*espt);
	clen = RTE_ALIGN_CEIL(clen, sa->pad_align);

	/* pad length + esp tail */
	pdlen = clen - plen;
	tlen = pdlen + sa->icv_len + sqh_len;

	/* do append and prepend */
	ml = rte_pktmbuf_lastseg(mb);
	if (tlen + sa->aad_len > rte_pktmbuf_tailroom(ml))
		return -ENOSPC;

	/* prepend header */
	ph = rte_pktmbuf_prepend(mb, hlen - l2len);
	if (ph == NULL)
		return -ENOSPC;

	/* append tail */
	pdofs = ml->data_len;
	ml->data_len += tlen;
	mb->pkt_len += tlen;
	pt = rte_pktmbuf_mtod_offset(ml, typeof(pt), pdofs);

	/* update pkt l2/l3 len */
	mb->tx_offload = (mb->tx_offload & sa->tx_offload.msk) |
		sa->tx_offload.val;

	/* copy tunnel pkt header */
	rte_memcpy(ph, sa->hdr, sa->hdr_len);

	/* update original and new ip header fields */
	update_tun_outb_l3hdr(sa, ph + sa->hdr_l3_off, ph + hlen,
			mb->pkt_len - sqh_len, sa->hdr_l3_off, sqn_low16(sqc));

	/* update spi, seqn and iv */
	esph = (struct rte_esp_hdr *)(ph + sa->hdr_len);
	iv = (uint64_t *)(esph + 1);
	copy_iv(iv, ivp, sa->iv_len);

	esph->spi = sa->spi;
	esph->seq = sqn_low32(sqc);

	/* offset for ICV */
	pdofs += pdlen + sa->sqh_len;

	/* pad length */
	pdlen -= sizeof(*espt);

	/* copy padding data */
	rte_memcpy(pt, esp_pad_bytes, pdlen);

	/* update esp trailer */
	espt = (struct rte_esp_tail *)(pt + pdlen);
	espt->pad_len = pdlen;
	espt->next_proto = sa->proto;

	/* set icv va/pa value(s) */
	icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
	icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);

	return clen;
}

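/*
 * resulting tunnel mode packet layout (single-segment sketch):
 *
 *	[ tunnel hdr (sa->hdr) ][ esp hdr ][ iv ][ original l3/l4 packet ]
 *	[ padding ][ esp tail ][ sqn.hi space (ESN only) ][ icv ]
 *
 * the original L2 header is dropped and replaced by the SA's prebuilt
 * tunnel header; the sqn.hi space is filled by outb_pkt_xprepare() below,
 * and the AAD scratch area (AEAD only) lives past the packet data, in the
 * mbuf tailroom right behind the icv.
 */
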
/*
 * for pure cryptodev (lookaside none) depending on SA settings,
 * we might have to write some extra data to the packet.
 */
static inline void
outb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const union sym_op_data *icv)
{
	uint32_t *psqh;
	struct aead_gcm_aad *gaad;
	struct aead_ccm_aad *caad;
	struct aead_chacha20_poly1305_aad *chacha20_poly1305_aad;

	/* insert SQN.hi between ESP trailer and ICV */
	if (sa->sqh_len != 0) {
		psqh = (uint32_t *)(icv->va - sa->sqh_len);
		psqh[0] = sqn_hi32(sqc);
	}

	/*
	 * fill AAD fields, if any (aad fields are placed after icv).
	 * supported AEAD algorithms: AES-GCM, AES-CCM and CHACHA20-POLY1305.
	 */
	switch (sa->algo_type) {
	case ALGO_TYPE_AES_GCM:
		if (sa->aad_len != 0) {
			gaad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);
			aead_gcm_aad_fill(gaad, sa->spi, sqc, IS_ESN(sa));
		}
		break;
	case ALGO_TYPE_AES_CCM:
		if (sa->aad_len != 0) {
			caad = (struct aead_ccm_aad *)(icv->va + sa->icv_len);
			aead_ccm_aad_fill(caad, sa->spi, sqc, IS_ESN(sa));
		}
		break;
	case ALGO_TYPE_CHACHA20_POLY1305:
		if (sa->aad_len != 0) {
			chacha20_poly1305_aad =
				(struct aead_chacha20_poly1305_aad *)
				(icv->va + sa->icv_len);
			aead_chacha20_poly1305_aad_fill(chacha20_poly1305_aad,
				sa->spi, sqc, IS_ESN(sa));
		}
		break;
	default:
		break;
	}
}

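/*
 * note: the SQN.hi word written above is a temporary part of the packet:
 * it only has to be covered by the digest computation and is removed
 * again by esp_outb_sqh_process() once the crypto operation completes.
 */
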
/*
 * setup/update packets and crypto ops for ESP outbound tunnel case.
 */
uint16_t
esp_outb_tun_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	struct rte_crypto_op *cop[], uint16_t num)
{
	int32_t rc;
	uint32_t i, k, n;
	uint64_t sqn;
	rte_be64_t sqc;
	struct rte_ipsec_sa *sa;
	struct rte_cryptodev_sym_session *cs;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];
	uint32_t dr[num];

	sa = ss->sa;
	cs = ss->crypto.ses;

	n = num;
	sqn = esn_outb_update_sqn(sa, &n);
	if (n != num)
		rte_errno = EOVERFLOW;

	k = 0;
	for (i = 0; i != n; i++) {

		sqc = rte_cpu_to_be_64(sqn + i);
		gen_iv(iv, sqc);

		/* try to update the packet itself */
		rc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv,
					  sa->sqh_len);
		/* success, setup crypto op */
		if (rc >= 0) {
			outb_pkt_xprepare(sa, sqc, &icv);
			lksd_none_cop_prepare(cop[k], cs, mb[i]);
			outb_cop_prepare(cop[k], sa, iv, &icv, 0, rc);
			k++;
		/* failure, put packet into the death-row */
		} else {
			dr[i - k] = i;
			rte_errno = -rc;
		}
	}

	/* copy not prepared mbufs beyond good ones */
	if (k != n && k != 0)
		move_bad_mbufs(mb, dr, n, n - k);

	return k;
}

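/*
 * a minimal usage sketch of this lookaside (none) path; illustrative only:
 * BURST, n, dev_id and qid belong to the application, not to this library.
 *
 *	struct rte_mbuf *mb[BURST];
 *	struct rte_crypto_op *cop[BURST];
 *	uint16_t k, n;
 *
 *	... gather n outbound packets into mb[] and allocate n crypto ops
 *	... into cop[] (e.g. with rte_crypto_op_bulk_alloc())
 *
 *	k = rte_ipsec_pkt_crypto_prepare(ss, mb, cop, n);
 *	k = rte_cryptodev_enqueue_burst(dev_id, qid, cop, k);
 *
 * rte_ipsec_pkt_crypto_prepare() dispatches to esp_outb_tun_prepare() or
 * esp_outb_trs_prepare() in this file, according to the session setup.
 */
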
/*
 * setup/update packet data and metadata for ESP outbound transport case.
 */
static inline int32_t
outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
	union sym_op_data *icv, uint8_t sqh_len)
{
	uint8_t np;
	uint32_t clen, hlen, pdlen, pdofs, plen, tlen, uhlen;
	struct rte_mbuf *ml;
	struct rte_esp_hdr *esph;
	struct rte_esp_tail *espt;
	char *ph, *pt;
	uint64_t *iv;
	uint32_t l2len, l3len;

	l2len = mb->l2_len;
	l3len = mb->l3_len;

	uhlen = l2len + l3len;
	plen = mb->pkt_len - uhlen;

	/* calculate extra header space required */
	hlen = sa->iv_len + sizeof(*esph);

	/* number of bytes to encrypt */
	clen = plen + sizeof(*espt);
	clen = RTE_ALIGN_CEIL(clen, sa->pad_align);

	/* pad length + esp tail */
	pdlen = clen - plen;
	tlen = pdlen + sa->icv_len + sqh_len;

	/* do append and insert */
	ml = rte_pktmbuf_lastseg(mb);
	if (tlen + sa->aad_len > rte_pktmbuf_tailroom(ml))
		return -ENOSPC;

	/* prepend space for ESP header */
	ph = rte_pktmbuf_prepend(mb, hlen);
	if (ph == NULL)
		return -ENOSPC;

	/* append tail */
	pdofs = ml->data_len;
	ml->data_len += tlen;
	mb->pkt_len += tlen;
	pt = rte_pktmbuf_mtod_offset(ml, typeof(pt), pdofs);

	/* shift L2/L3 headers */
	insert_esph(ph, ph + hlen, uhlen);

	/* update ip header fields */
	np = update_trs_l3hdr(sa, ph + l2len, mb->pkt_len - sqh_len, l2len,
			l3len, IPPROTO_ESP);

	/* update spi, seqn and iv */
	esph = (struct rte_esp_hdr *)(ph + uhlen);
	iv = (uint64_t *)(esph + 1);
	copy_iv(iv, ivp, sa->iv_len);

	esph->spi = sa->spi;
	esph->seq = sqn_low32(sqc);

	/* offset for ICV */
	pdofs += pdlen + sa->sqh_len;

	/* pad length */
	pdlen -= sizeof(*espt);

	/* copy padding data */
	rte_memcpy(pt, esp_pad_bytes, pdlen);

	/* update esp trailer */
	espt = (struct rte_esp_tail *)(pt + pdlen);
	espt->pad_len = pdlen;
	espt->next_proto = np;

	/* set icv va/pa value(s) */
	icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
	icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);

	return clen;
}

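/*
 * transport mode layout produced above (single-segment sketch):
 *
 *	before: [ l2 ][ l3 ][ l4 payload ]
 *	after:  [ l2 ][ l3 ][ esp hdr ][ iv ][ l4 payload ]
 *		[ padding ][ esp tail ][ sqn.hi space (ESN only) ][ icv ]
 *
 * insert_esph() shifts the original L2/L3 headers to the new packet start,
 * opening a gap for the ESP header and IV right behind the IP header.
 */
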
/*
 * setup/update packets and crypto ops for ESP outbound transport case.
 */
uint16_t
esp_outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	struct rte_crypto_op *cop[], uint16_t num)
{
	int32_t rc;
	uint32_t i, k, n, l2, l3;
	uint64_t sqn;
	rte_be64_t sqc;
	struct rte_ipsec_sa *sa;
	struct rte_cryptodev_sym_session *cs;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];
	uint32_t dr[num];

	sa = ss->sa;
	cs = ss->crypto.ses;

	n = num;
	sqn = esn_outb_update_sqn(sa, &n);
	if (n != num)
		rte_errno = EOVERFLOW;

	k = 0;
	for (i = 0; i != n; i++) {

		l2 = mb[i]->l2_len;
		l3 = mb[i]->l3_len;

		sqc = rte_cpu_to_be_64(sqn + i);
		gen_iv(iv, sqc);

		/* try to update the packet itself */
		rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], &icv,
				  sa->sqh_len);
		/* success, setup crypto op */
		if (rc >= 0) {
			outb_pkt_xprepare(sa, sqc, &icv);
			lksd_none_cop_prepare(cop[k], cs, mb[i]);
			outb_cop_prepare(cop[k], sa, iv, &icv, l2 + l3, rc);
			k++;
		/* failure, put packet into the death-row */
		} else {
			dr[i - k] = i;
			rte_errno = -rc;
		}
	}

	/* copy not prepared mbufs beyond good ones */
	if (k != n && k != 0)
		move_bad_mbufs(mb, dr, n, n - k);

	return k;
}

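/*
 * for the CPU crypto path: convert the raw IV into the algorithm specific
 * format expected by the crypto backend (in place, inside the per-packet
 * iv buffer), adjust the data offset and return the data length to be
 * processed.
 */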
static inline uint32_t
outb_cpu_crypto_prepare(const struct rte_ipsec_sa *sa, uint32_t *pofs,
	uint32_t plen, void *iv)
{
	uint64_t *ivp = iv;
	struct aead_gcm_iv *gcm;
	struct aead_ccm_iv *ccm;
	struct aead_chacha20_poly1305_iv *chacha20_poly1305;
	struct aesctr_cnt_blk *ctr;
	uint32_t clen;

	switch (sa->algo_type) {
	case ALGO_TYPE_AES_GCM:
		gcm = iv;
		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_CCM:
		ccm = iv;
		aead_ccm_iv_fill(ccm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_CHACHA20_POLY1305:
		chacha20_poly1305 = iv;
		aead_chacha20_poly1305_iv_fill(chacha20_poly1305,
					       ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_CTR:
		ctr = iv;
		aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
		break;
	}

	*pofs += sa->ctp.auth.offset;
	clen = plen + sa->ctp.auth.length;
	return clen;
}

static uint16_t
cpu_outb_pkt_prepare(const struct rte_ipsec_session *ss,
		struct rte_mbuf *mb[], uint16_t num,
		esp_outb_prepare_t prepare, uint32_t cofs_mask)
{
	int32_t rc;
	uint64_t sqn;
	rte_be64_t sqc;
	struct rte_ipsec_sa *sa;
	uint32_t i, k, n;
	uint32_t l2, l3;
	union sym_op_data icv;
	struct rte_crypto_va_iova_ptr iv[num];
	struct rte_crypto_va_iova_ptr aad[num];
	struct rte_crypto_va_iova_ptr dgst[num];
	uint32_t dr[num];
	uint32_t l4ofs[num];
	uint32_t clen[num];
	uint64_t ivbuf[num][IPSEC_MAX_IV_QWORD];

	sa = ss->sa;

	n = num;
	sqn = esn_outb_update_sqn(sa, &n);
	if (n != num)
		rte_errno = EOVERFLOW;

	for (i = 0, k = 0; i != n; i++) {

		l2 = mb[i]->l2_len;
		l3 = mb[i]->l3_len;

		/* calculate ESP header offset */
		l4ofs[k] = (l2 + l3) & cofs_mask;

		sqc = rte_cpu_to_be_64(sqn + i);
		gen_iv(ivbuf[k], sqc);

		/* try to update the packet itself */
		rc = prepare(sa, sqc, ivbuf[k], mb[i], &icv, sa->sqh_len);

		/* success, proceed with preparations */
		if (rc >= 0) {

			outb_pkt_xprepare(sa, sqc, &icv);

			/* get encrypted data offset and length */
			clen[k] = outb_cpu_crypto_prepare(sa, l4ofs + k, rc,
				ivbuf[k]);

			/* fill iv, digest and aad */
			iv[k].va = ivbuf[k];
			aad[k].va = icv.va + sa->icv_len;
			dgst[k++].va = icv.va;
		} else {
			dr[i - k] = i;
			rte_errno = -rc;
		}
	}

	/* copy not prepared mbufs beyond good ones */
	if (k != n && k != 0)
		move_bad_mbufs(mb, dr, n, n - k);

	/* convert mbufs to iovecs and do actual crypto/auth processing */
	if (k != 0)
		cpu_crypto_bulk(ss, sa->cofs, mb, iv, aad, dgst,
			l4ofs, clen, k);
	return k;
}

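/*
 * thin wrappers around cpu_outb_pkt_prepare() below: cofs_mask == 0 forces
 * the per-packet ESP header offset to zero for tunnel mode (the outer
 * header length is fixed and already accounted for in the SA crypto
 * offsets), while UINT32_MAX keeps the per-packet L2/L3 header lengths
 * for transport mode.
 */
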
uint16_t
cpu_outb_tun_pkt_prepare(const struct rte_ipsec_session *ss,
		struct rte_mbuf *mb[], uint16_t num)
{
	return cpu_outb_pkt_prepare(ss, mb, num, outb_tun_pkt_prepare, 0);
}

uint16_t
cpu_outb_trs_pkt_prepare(const struct rte_ipsec_session *ss,
		struct rte_mbuf *mb[], uint16_t num)
{
	return cpu_outb_pkt_prepare(ss, mb, num, outb_trs_pkt_prepare,
		UINT32_MAX);
}

/*
 * process outbound packets for SA with ESN support,
 * for algorithms that require SQN.hibits to be implicitly included
 * into digest computation.
 * In that case we have to move ICV bytes back to their proper place.
 */
uint16_t
esp_outb_sqh_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	uint16_t num)
{
	uint32_t i, k, icv_len, *icv;
	struct rte_mbuf *ml;
	struct rte_ipsec_sa *sa;
	uint32_t dr[num];

	sa = ss->sa;

	k = 0;
	icv_len = sa->icv_len;

	for (i = 0; i != num; i++) {
		if ((mb[i]->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) == 0) {
			ml = rte_pktmbuf_lastseg(mb[i]);
			/* remove high-order 32 bits of esn from packet len */
			mb[i]->pkt_len -= sa->sqh_len;
			ml->data_len -= sa->sqh_len;
			icv = rte_pktmbuf_mtod_offset(ml, void *,
				ml->data_len - icv_len);
			remove_sqh(icv, icv_len);
			k++;
		} else
			dr[i - k] = i;
	}

	/* handle unprocessed mbufs */
	if (k != num) {
		rte_errno = EBADMSG;
		if (k != 0)
			move_bad_mbufs(mb, dr, num, num - k);
	}

	return k;
}

/*
 * prepare packets for inline ipsec processing:
 * set ol_flags and attach metadata.
 */
static inline void
inline_outb_mbuf_prepare(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	uint32_t i, ol_flags;

	ol_flags = ss->security.ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA;
	for (i = 0; i != num; i++) {

		mb[i]->ol_flags |= PKT_TX_SEC_OFFLOAD;
		if (ol_flags != 0)
			rte_security_set_pkt_metadata(ss->security.ctx,
				ss->security.ses, mb[i], NULL);
	}
}

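/*
 * note: PKT_TX_SEC_OFFLOAD requests security offload processing on
 * transmit; the per-packet security metadata is attached only when the
 * device asks for it via RTE_SECURITY_TX_OLOAD_NEED_MDATA.
 */
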
/*
 * process group of ESP outbound tunnel packets destined for
 * INLINE_CRYPTO type of device.
 */
uint16_t
inline_outb_tun_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	int32_t rc;
	uint32_t i, k, n;
	uint64_t sqn;
	rte_be64_t sqc;
	struct rte_ipsec_sa *sa;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];
	uint32_t dr[num];

	sa = ss->sa;

	n = num;
	sqn = esn_outb_update_sqn(sa, &n);
	if (n != num)
		rte_errno = EOVERFLOW;

	k = 0;
	for (i = 0; i != n; i++) {

		sqc = rte_cpu_to_be_64(sqn + i);
		gen_iv(iv, sqc);

		/* try to update the packet itself */
		rc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv, 0);

		k += (rc >= 0);

		/* failure, put packet into the death-row */
		if (rc < 0) {
			dr[i - k] = i;
			rte_errno = -rc;
		}
	}

	/* copy not processed mbufs beyond good ones */
	if (k != n && k != 0)
		move_bad_mbufs(mb, dr, n, n - k);

	inline_outb_mbuf_prepare(ss, mb, k);
	return k;
}

/*
 * process group of ESP outbound transport packets destined for
 * INLINE_CRYPTO type of device.
 */
uint16_t
inline_outb_trs_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	int32_t rc;
	uint32_t i, k, n;
	uint64_t sqn;
	rte_be64_t sqc;
	struct rte_ipsec_sa *sa;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];
	uint32_t dr[num];

	sa = ss->sa;

	n = num;
	sqn = esn_outb_update_sqn(sa, &n);
	if (n != num)
		rte_errno = EOVERFLOW;

	k = 0;
	for (i = 0; i != n; i++) {

		sqc = rte_cpu_to_be_64(sqn + i);
		gen_iv(iv, sqc);

		/* try to update the packet itself */
		rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], &icv, 0);

		k += (rc >= 0);

		/* failure, put packet into the death-row */
		if (rc < 0) {
			dr[i - k] = i;
			rte_errno = -rc;
		}
	}

	/* copy not processed mbufs beyond good ones */
	if (k != n && k != 0)
		move_bad_mbufs(mb, dr, n, n - k);

	inline_outb_mbuf_prepare(ss, mb, k);
	return k;
}

/*
 * outbound for RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
 * actual processing is done by HW/PMD, just set flags and metadata.
 */
uint16_t
inline_proto_outb_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	inline_outb_mbuf_prepare(ss, mb, num);
	return num;
}