xref: /dpdk/lib/ipsec/esp_outb.c (revision aae98b8c6690ccc49d7a1536a1b1ee1264de49a7)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018-2020 Intel Corporation
3  */
4 
5 #include <rte_ipsec.h>
6 #include <rte_esp.h>
7 #include <rte_udp.h>
8 #include <rte_errno.h>
9 #include <rte_cryptodev.h>
10 
11 #include "sa.h"
12 #include "ipsec_sqn.h"
13 #include "crypto.h"
14 #include "iph.h"
15 #include "misc.h"
16 #include "pad.h"
17 
18 typedef int32_t (*esp_outb_prepare_t)(struct rte_ipsec_sa *sa, rte_be64_t sqc,
19 	const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
20 	union sym_op_data *icv, uint8_t sqh_len, uint8_t tso);
21 
22 /*
23  * helper function to fill crypto_sym op for cipher+auth algorithms.
24  * used by outb_cop_prepare(), see below.
25  */
26 static inline void
27 sop_ciph_auth_prepare(struct rte_crypto_sym_op *sop,
28 	const struct rte_ipsec_sa *sa, const union sym_op_data *icv,
29 	uint32_t pofs, uint32_t plen)
30 {
31 	sop->cipher.data.offset = sa->ctp.cipher.offset + pofs;
32 	sop->cipher.data.length = sa->ctp.cipher.length + plen;
33 	sop->auth.data.offset = sa->ctp.auth.offset + pofs;
34 	sop->auth.data.length = sa->ctp.auth.length + plen;
35 	sop->auth.digest.data = icv->va;
36 	sop->auth.digest.phys_addr = icv->pa;
37 }
38 
39 /*
40  * helper function to fill crypto_sym op for AEAD algorithms.
41  * used by outb_cop_prepare(), see below.
42  */
43 static inline void
44 sop_aead_prepare(struct rte_crypto_sym_op *sop,
45 	const struct rte_ipsec_sa *sa, const union sym_op_data *icv,
46 	uint32_t pofs, uint32_t plen)
47 {
48 	sop->aead.data.offset = sa->ctp.cipher.offset + pofs;
49 	sop->aead.data.length = sa->ctp.cipher.length + plen;
50 	sop->aead.digest.data = icv->va;
51 	sop->aead.digest.phys_addr = icv->pa;
52 	sop->aead.aad.data = icv->va + sa->icv_len;
53 	sop->aead.aad.phys_addr = icv->pa + sa->icv_len;
54 }
55 
56 /*
57  * setup crypto op and crypto sym op for ESP outbound packet.
58  */
59 static inline void
60 outb_cop_prepare(struct rte_crypto_op *cop,
61 	const struct rte_ipsec_sa *sa, const uint64_t ivp[IPSEC_MAX_IV_QWORD],
62 	const union sym_op_data *icv, uint32_t hlen, uint32_t plen)
63 {
64 	struct rte_crypto_sym_op *sop;
65 	struct aead_gcm_iv *gcm;
66 	struct aead_ccm_iv *ccm;
67 	struct aead_chacha20_poly1305_iv *chacha20_poly1305;
68 	struct aesctr_cnt_blk *ctr;
69 	uint32_t algo;
70 
71 	algo = sa->algo_type;
72 
73 	/* fill sym op fields */
74 	sop = cop->sym;
75 
76 	switch (algo) {
77 	case ALGO_TYPE_AES_CBC:
78 		/* Cipher-Auth (AES-CBC *) case */
79 	case ALGO_TYPE_3DES_CBC:
80 		/* Cipher-Auth (3DES-CBC *) case */
81 	case ALGO_TYPE_NULL:
82 		/* NULL case */
83 		sop_ciph_auth_prepare(sop, sa, icv, hlen, plen);
84 		break;
85 	case ALGO_TYPE_AES_GMAC:
86 		/* GMAC case */
87 		sop_ciph_auth_prepare(sop, sa, icv, hlen, plen);
88 
89 		/* fill AAD IV (located inside crypto op) */
90 		gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
91 			sa->iv_ofs);
92 		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
93 		break;
94 	case ALGO_TYPE_AES_GCM:
95 		/* AEAD (AES_GCM) case */
96 		sop_aead_prepare(sop, sa, icv, hlen, plen);
97 
98 		/* fill AAD IV (located inside crypto op) */
99 		gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
100 			sa->iv_ofs);
101 		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
102 		break;
103 	case ALGO_TYPE_AES_CCM:
104 		/* AEAD (AES_CCM) case */
105 		sop_aead_prepare(sop, sa, icv, hlen, plen);
106 
107 		/* fill AAD IV (located inside crypto op) */
108 		ccm = rte_crypto_op_ctod_offset(cop, struct aead_ccm_iv *,
109 			sa->iv_ofs);
110 		aead_ccm_iv_fill(ccm, ivp[0], sa->salt);
111 		break;
112 	case ALGO_TYPE_CHACHA20_POLY1305:
113 		/* AEAD (CHACHA20_POLY) case */
114 		sop_aead_prepare(sop, sa, icv, hlen, plen);
115 
116 		/* fill AAD IV (located inside crypto op) */
117 		chacha20_poly1305 = rte_crypto_op_ctod_offset(cop,
118 			struct aead_chacha20_poly1305_iv *,
119 			sa->iv_ofs);
120 		aead_chacha20_poly1305_iv_fill(chacha20_poly1305,
121 					       ivp[0], sa->salt);
122 		break;
123 	case ALGO_TYPE_AES_CTR:
124 		/* Cipher-Auth (AES-CTR *) case */
125 		sop_ciph_auth_prepare(sop, sa, icv, hlen, plen);
126 
127 		/* fill CTR block (located inside crypto op) */
128 		ctr = rte_crypto_op_ctod_offset(cop, struct aesctr_cnt_blk *,
129 			sa->iv_ofs);
130 		aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
131 		break;
132 	}
133 }
134 
135 /*
136  * setup/update packet data and metadata for ESP outbound tunnel case.
137  */
138 static inline int32_t
139 outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
140 	const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
141 	union sym_op_data *icv, uint8_t sqh_len, uint8_t tso)
142 {
143 	uint32_t clen, hlen, l2len, pdlen, pdofs, plen, tlen;
144 	struct rte_mbuf *ml;
145 	struct rte_esp_hdr *esph;
146 	struct rte_esp_tail *espt;
147 	char *ph, *pt;
148 	uint64_t *iv;
149 
150 	/* calculate extra header space required */
151 	hlen = sa->hdr_len + sa->iv_len + sizeof(*esph);
152 
153 	/* size of ipsec protected data */
154 	l2len = mb->l2_len;
155 	plen = mb->pkt_len - l2len;
156 
157 	/* number of bytes to encrypt */
158 	clen = plen + sizeof(*espt);
159 
160 	if (!tso) {
161 		clen = RTE_ALIGN_CEIL(clen, sa->pad_align);
162 		/* pad length + esp tail */
163 		pdlen = clen - plen;
164 		tlen = pdlen + sa->icv_len + sqh_len;
165 	} else {
166 		/* We don't need to pad/align the packet or reserve room
167 		 * for the ICV when using TSO offload
168 		 */
169 		pdlen = clen - plen;
170 		tlen = pdlen + sqh_len;
171 	}
172 
173 	/* do append and prepend */
174 	ml = rte_pktmbuf_lastseg(mb);
175 	if (tlen + sa->aad_len > rte_pktmbuf_tailroom(ml))
176 		return -ENOSPC;
177 
178 	/* prepend header */
179 	ph = rte_pktmbuf_prepend(mb, hlen - l2len);
180 	if (ph == NULL)
181 		return -ENOSPC;
182 
183 	/* append tail */
184 	pdofs = ml->data_len;
185 	ml->data_len += tlen;
186 	mb->pkt_len += tlen;
187 	pt = rte_pktmbuf_mtod_offset(ml, typeof(pt), pdofs);
188 
189 	/* update pkt l2/l3 len */
190 	mb->tx_offload = (mb->tx_offload & sa->tx_offload.msk) |
191 		sa->tx_offload.val;
192 
193 	/* copy tunnel pkt header */
194 	rte_memcpy(ph, sa->hdr, sa->hdr_len);
195 
196 	/* if UDP encap is enabled update the dgram_len */
197 	if (sa->type & RTE_IPSEC_SATP_NATT_ENABLE) {
198 		struct rte_udp_hdr *udph = (struct rte_udp_hdr *)
199 			(ph + sa->hdr_len - sizeof(struct rte_udp_hdr));
200 		udph->dgram_len = rte_cpu_to_be_16(mb->pkt_len - sqh_len -
201 				sa->hdr_len + sizeof(struct rte_udp_hdr));
202 	}
203 
204 	/* update original and new ip header fields */
205 	update_tun_outb_l3hdr(sa, ph + sa->hdr_l3_off, ph + hlen,
206 			mb->pkt_len - sqh_len, sa->hdr_l3_off, sqn_low16(sqc));
207 
208 	/* update spi, seqn and iv */
209 	esph = (struct rte_esp_hdr *)(ph + sa->hdr_len);
210 	iv = (uint64_t *)(esph + 1);
211 	copy_iv(iv, ivp, sa->iv_len);
212 
213 	esph->spi = sa->spi;
214 	esph->seq = sqn_low32(sqc);
215 
216 	/* offset for ICV */
217 	pdofs += pdlen + sa->sqh_len;
218 
219 	/* pad length */
220 	pdlen -= sizeof(*espt);
221 
222 	RTE_ASSERT(pdlen <= sizeof(esp_pad_bytes));
223 
224 	/* copy padding data */
225 	rte_memcpy(pt, esp_pad_bytes, RTE_MIN(pdlen, sizeof(esp_pad_bytes)));
226 
227 	/* update esp trailer */
228 	espt = (struct rte_esp_tail *)(pt + pdlen);
229 	espt->pad_len = pdlen;
230 	espt->next_proto = sa->proto;
231 
232 	/* set icv va/pa value(s) */
233 	icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
234 	icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);
235 
236 	return clen;
237 }
238 
239 /*
240  * for pure cryptodev (lookaside none) depending on SA settings,
241  * we might have to write some extra data to the packet.
242  */
243 static inline void
244 outb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,
245 	const union sym_op_data *icv)
246 {
247 	uint32_t *psqh;
248 	struct aead_gcm_aad *gaad;
249 	struct aead_ccm_aad *caad;
250 	struct aead_chacha20_poly1305_aad *chacha20_poly1305_aad;
251 
252 	/* insert SQN.hi between ESP trailer and ICV */
253 	if (sa->sqh_len != 0) {
254 		psqh = (uint32_t *)(icv->va - sa->sqh_len);
255 		psqh[0] = sqn_hi32(sqc);
256 	}
257 
258 	/*
259 	 * fill AAD fields, if any (AAD is placed right after the ICV).
260 	 * Supported AEAD algorithms: AES-GCM, AES-CCM and CHACHA20-POLY1305.
261 	 */
262 	switch (sa->algo_type) {
263 	case ALGO_TYPE_AES_GCM:
264 		if (sa->aad_len != 0) {
265 			gaad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);
266 			aead_gcm_aad_fill(gaad, sa->spi, sqc, IS_ESN(sa));
267 		}
268 		break;
269 	case ALGO_TYPE_AES_CCM:
270 		if (sa->aad_len != 0) {
271 			caad = (struct aead_ccm_aad *)(icv->va + sa->icv_len);
272 			aead_ccm_aad_fill(caad, sa->spi, sqc, IS_ESN(sa));
273 		}
274 		break;
275 	case ALGO_TYPE_CHACHA20_POLY1305:
276 		if (sa->aad_len != 0) {
277 			chacha20_poly1305_aad = (struct aead_chacha20_poly1305_aad *)
278 				(icv->va + sa->icv_len);
279 			aead_chacha20_poly1305_aad_fill(chacha20_poly1305_aad,
280 				sa->spi, sqc, IS_ESN(sa));
281 		}
282 		break;
283 	default:
284 		break;
285 	}
286 }
287 
288 /*
289  * setup/update packets and crypto ops for ESP outbound tunnel case.
290  */
291 static inline uint16_t
292 esp_outb_tun_prepare_helper(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
293 	struct rte_crypto_op *cop[], uint16_t n, uint64_t sqn)
294 {
295 	int32_t rc;
296 	uint32_t i, k;
297 	rte_be64_t sqc;
298 	struct rte_ipsec_sa *sa;
299 	struct rte_cryptodev_sym_session *cs;
300 	union sym_op_data icv;
301 	uint64_t iv[IPSEC_MAX_IV_QWORD];
302 	uint32_t dr[n];
303 
304 	sa = ss->sa;
305 	cs = ss->crypto.ses;
306 
307 	k = 0;
308 	for (i = 0; i != n; i++) {
309 
310 		sqc = rte_cpu_to_be_64(sqn + i);
311 		gen_iv(iv, sqc);
312 
313 		/* try to update the packet itself */
314 		rc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv,
315 					  sa->sqh_len, 0);
316 		/* success, setup crypto op */
317 		if (rc >= 0) {
318 			outb_pkt_xprepare(sa, sqc, &icv);
319 			lksd_none_cop_prepare(cop[k], cs, mb[i]);
320 			outb_cop_prepare(cop[k], sa, iv, &icv, 0, rc);
321 			k++;
322 		/* failure, put packet into the death-row */
323 		} else {
324 			dr[i - k] = i;
325 			rte_errno = -rc;
326 		}
327 	}
328 
329 	/* copy not prepared mbufs beyond good ones */
330 	if (k != n && k != 0)
331 		move_bad_mbufs(mb, dr, n, n - k);
332 
333 	return k;
334 }
335 
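/*
 * setup/update packets and crypto ops for ESP outbound tunnel case,
 * using the sequence number counter maintained inside the SA.
 */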
336 uint16_t
337 esp_outb_tun_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
338 	struct rte_crypto_op *cop[], uint16_t num)
339 {
340 	uint64_t sqn;
341 	uint32_t n;
342 
343 	n = num;
344 	sqn = esn_outb_update_sqn(ss->sa, &n);
345 	if (n != num)
346 		rte_errno = EOVERFLOW;
347 
348 	return esp_outb_tun_prepare_helper(ss, mb, cop, n, sqn);
349 }
350 
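/*
 * same as esp_outb_tun_prepare(), but the sequence number is
 * supplied by the caller through rte_ipsec_state.
 */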
351 uint16_t
352 esp_outb_tun_prepare_stateless(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
353 	struct rte_crypto_op *cop[], uint16_t num, struct rte_ipsec_state *state)
354 {
355 	uint64_t sqn = state->sqn;
356 
357 	return esp_outb_tun_prepare_helper(ss, mb, cop, num, sqn);
358 }
359 
360 /*
361  * setup/update packet data and metadata for ESP outbound transport case.
362  */
363 static inline int32_t
364 outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
365 	const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
366 	union sym_op_data *icv, uint8_t sqh_len, uint8_t tso)
367 {
368 	uint8_t np;
369 	uint32_t clen, hlen, pdlen, pdofs, plen, tlen, uhlen;
370 	struct rte_mbuf *ml;
371 	struct rte_esp_hdr *esph;
372 	struct rte_esp_tail *espt;
373 	char *ph, *pt;
374 	uint64_t *iv;
375 	uint32_t l2len, l3len;
376 
377 	l2len = mb->l2_len;
378 	l3len = mb->l3_len;
379 
380 	uhlen = l2len + l3len;
381 	plen = mb->pkt_len - uhlen;
382 
383 	/* calculate extra header space required */
384 	hlen = sa->iv_len + sizeof(*esph);
385 
386 	/* number of bytes to encrypt */
387 	clen = plen + sizeof(*espt);
388 
389 	if (!tso) {
390 		clen = RTE_ALIGN_CEIL(clen, sa->pad_align);
391 		/* pad length + esp tail */
392 		pdlen = clen - plen;
393 		tlen = pdlen + sa->icv_len + sqh_len;
394 	} else {
395 		/* We don't need to pad/align the packet or reserve room
396 		 * for the ICV when using TSO offload
397 		 */
398 		pdlen = clen - plen;
399 		tlen = pdlen + sqh_len;
400 	}
401 
402 	/* do append and insert */
403 	ml = rte_pktmbuf_lastseg(mb);
404 	if (tlen + sa->aad_len > rte_pktmbuf_tailroom(ml))
405 		return -ENOSPC;
406 
407 	/* prepend space for ESP header */
408 	ph = rte_pktmbuf_prepend(mb, hlen);
409 	if (ph == NULL)
410 		return -ENOSPC;
411 
412 	/* append tail */
413 	pdofs = ml->data_len;
414 	ml->data_len += tlen;
415 	mb->pkt_len += tlen;
416 	pt = rte_pktmbuf_mtod_offset(ml, typeof(pt), pdofs);
417 
418 	/* shift L2/L3 headers */
419 	insert_esph(ph, ph + hlen, uhlen);
420 
421 	/* update ip header fields */
422 	np = update_trs_l3hdr(sa, ph + l2len, mb->pkt_len - sqh_len, l2len,
423 			l3len, IPPROTO_ESP);
424 
425 	/* update spi, seqn and iv */
426 	esph = (struct rte_esp_hdr *)(ph + uhlen);
427 	iv = (uint64_t *)(esph + 1);
428 	copy_iv(iv, ivp, sa->iv_len);
429 
430 	esph->spi = sa->spi;
431 	esph->seq = sqn_low32(sqc);
432 
433 	/* offset for ICV */
434 	pdofs += pdlen + sa->sqh_len;
435 
436 	/* pad length */
437 	pdlen -= sizeof(*espt);
438 
439 	RTE_ASSERT(pdlen <= sizeof(esp_pad_bytes));
440 
441 	/* copy padding data */
442 	rte_memcpy(pt, esp_pad_bytes, RTE_MIN(pdlen, sizeof(esp_pad_bytes)));
443 
444 	/* update esp trailer */
445 	espt = (struct rte_esp_tail *)(pt + pdlen);
446 	espt->pad_len = pdlen;
447 	espt->next_proto = np;
448 
449 	/* set icv va/pa value(s) */
450 	icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
451 	icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);
452 
453 	return clen;
454 }
455 
456 /*
457  * setup/update packets and crypto ops for ESP outbound transport case.
458  */
459 uint16_t
460 esp_outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
461 	struct rte_crypto_op *cop[], uint16_t num)
462 {
463 	int32_t rc;
464 	uint32_t i, k, n, l2, l3;
465 	uint64_t sqn;
466 	rte_be64_t sqc;
467 	struct rte_ipsec_sa *sa;
468 	struct rte_cryptodev_sym_session *cs;
469 	union sym_op_data icv;
470 	uint64_t iv[IPSEC_MAX_IV_QWORD];
471 	uint32_t dr[num];
472 
473 	sa = ss->sa;
474 	cs = ss->crypto.ses;
475 
476 	n = num;
477 	sqn = esn_outb_update_sqn(sa, &n);
478 	if (n != num)
479 		rte_errno = EOVERFLOW;
480 
481 	k = 0;
482 	for (i = 0; i != n; i++) {
483 
484 		l2 = mb[i]->l2_len;
485 		l3 = mb[i]->l3_len;
486 
487 		sqc = rte_cpu_to_be_64(sqn + i);
488 		gen_iv(iv, sqc);
489 
490 		/* try to update the packet itself */
491 		rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], &icv,
492 				  sa->sqh_len, 0);
493 		/* success, setup crypto op */
494 		if (rc >= 0) {
495 			outb_pkt_xprepare(sa, sqc, &icv);
496 			lksd_none_cop_prepare(cop[k], cs, mb[i]);
497 			outb_cop_prepare(cop[k], sa, iv, &icv, l2 + l3, rc);
498 			k++;
499 		/* failure, put packet into the death-row */
500 		} else {
501 			dr[i - k] = i;
502 			rte_errno = -rc;
503 		}
504 	}
505 
506 	/* copy not prepared mbufs beyond good ones */
507 	if (k != n && k != 0)
508 		move_bad_mbufs(mb, dr, n, n - k);
509 
510 	return k;
511 }
512 
513 
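/*
 * helper for the CPU (synchronous) crypto path:
 * fill the per-packet IV and return the length of the data to process,
 * advancing *pofs to where that data starts.
 */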
514 static inline uint32_t
515 outb_cpu_crypto_prepare(const struct rte_ipsec_sa *sa, uint32_t *pofs,
516 	uint32_t plen, void *iv)
517 {
518 	uint64_t *ivp = iv;
519 	struct aead_gcm_iv *gcm;
520 	struct aead_ccm_iv *ccm;
521 	struct aead_chacha20_poly1305_iv *chacha20_poly1305;
522 	struct aesctr_cnt_blk *ctr;
523 	uint32_t clen;
524 
525 	switch (sa->algo_type) {
526 	case ALGO_TYPE_AES_GCM:
527 		gcm = iv;
528 		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
529 		break;
530 	case ALGO_TYPE_AES_CCM:
531 		ccm = iv;
532 		aead_ccm_iv_fill(ccm, ivp[0], sa->salt);
533 		break;
534 	case ALGO_TYPE_CHACHA20_POLY1305:
535 		chacha20_poly1305 = iv;
536 		aead_chacha20_poly1305_iv_fill(chacha20_poly1305,
537 					       ivp[0], sa->salt);
538 		break;
539 	case ALGO_TYPE_AES_CTR:
540 		ctr = iv;
541 		aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
542 		break;
543 	}
544 
545 	*pofs += sa->ctp.auth.offset;
546 	clen = plen + sa->ctp.auth.length;
547 	return clen;
548 }
549 
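/*
 * setup/update packets and invoke synchronous (CPU) crypto
 * for ESP outbound case, common for tunnel and transport modes.
 */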
550 static inline uint16_t
551 cpu_outb_pkt_prepare_helper(const struct rte_ipsec_session *ss,
552 		struct rte_mbuf *mb[], uint16_t n, esp_outb_prepare_t prepare,
553 		uint32_t cofs_mask, uint64_t sqn)
554 {
555 	int32_t rc;
556 	rte_be64_t sqc;
557 	struct rte_ipsec_sa *sa;
558 	uint32_t i, k;
559 	uint32_t l2, l3;
560 	union sym_op_data icv;
561 	struct rte_crypto_va_iova_ptr iv[n];
562 	struct rte_crypto_va_iova_ptr aad[n];
563 	struct rte_crypto_va_iova_ptr dgst[n];
564 	uint32_t dr[n];
565 	uint32_t l4ofs[n];
566 	uint32_t clen[n];
567 	uint64_t ivbuf[n][IPSEC_MAX_IV_QWORD];
568 
569 	sa = ss->sa;
570 
571 	for (i = 0, k = 0; i != n; i++) {
572 
573 		l2 = mb[i]->l2_len;
574 		l3 = mb[i]->l3_len;
575 
576 		/* calculate ESP header offset */
577 		l4ofs[k] = (l2 + l3) & cofs_mask;
578 
579 		sqc = rte_cpu_to_be_64(sqn + i);
580 		gen_iv(ivbuf[k], sqc);
581 
582 		/* try to update the packet itself */
583 		rc = prepare(sa, sqc, ivbuf[k], mb[i], &icv, sa->sqh_len, 0);
584 
585 		/* success, proceed with preparations */
586 		if (rc >= 0) {
587 
588 			outb_pkt_xprepare(sa, sqc, &icv);
589 
590 			/* get encrypted data offset and length */
591 			clen[k] = outb_cpu_crypto_prepare(sa, l4ofs + k, rc,
592 				ivbuf[k]);
593 
594 			/* fill iv, digest and aad */
595 			iv[k].va = ivbuf[k];
596 			aad[k].va = icv.va + sa->icv_len;
597 			dgst[k++].va = icv.va;
598 		} else {
599 			dr[i - k] = i;
600 			rte_errno = -rc;
601 		}
602 	}
603 
604 	/* copy not prepared mbufs beyond good ones */
605 	if (k != n && k != 0)
606 		move_bad_mbufs(mb, dr, n, n - k);
607 
608 	/* convert mbufs to iovecs and do actual crypto/auth processing */
609 	if (k != 0)
610 		cpu_crypto_bulk(ss, sa->cofs, mb, iv, aad, dgst,
611 			l4ofs, clen, k);
612 	return k;
613 }
614 
615 uint16_t
616 cpu_outb_tun_pkt_prepare(const struct rte_ipsec_session *ss,
617 		struct rte_mbuf *mb[], uint16_t num)
618 {
619 	uint64_t sqn;
620 	uint32_t n;
621 
622 	n = num;
623 	sqn = esn_outb_update_sqn(ss->sa, &n);
624 	if (n != num)
625 		rte_errno = EOVERFLOW;
626 
627 	return cpu_outb_pkt_prepare_helper(ss, mb, n, outb_tun_pkt_prepare, 0, sqn);
628 }
629 
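/*
 * same as cpu_outb_tun_pkt_prepare(), but the sequence number is
 * supplied by the caller through rte_ipsec_state.
 */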
630 uint16_t
631 cpu_outb_tun_pkt_prepare_stateless(const struct rte_ipsec_session *ss,
632 		struct rte_mbuf *mb[], uint16_t num, struct rte_ipsec_state *state)
633 {
634 	uint64_t sqn = state->sqn;
635 
636 	return cpu_outb_pkt_prepare_helper(ss, mb, num, outb_tun_pkt_prepare, 0, sqn);
637 }
638 
639 uint16_t
640 cpu_outb_trs_pkt_prepare(const struct rte_ipsec_session *ss,
641 		struct rte_mbuf *mb[], uint16_t num)
642 {
643 	uint64_t sqn;
644 	uint32_t n;
645 
646 	n = num;
647 	sqn = esn_outb_update_sqn(ss->sa, &n);
648 	if (n != num)
649 		rte_errno = EOVERFLOW;
650 
651 	return cpu_outb_pkt_prepare_helper(ss, mb, n, outb_trs_pkt_prepare,
652 		UINT32_MAX, sqn);
653 }
654 
655 /*
656  * process outbound packets for SA with ESN support,
657  * for algorithms that require SQN.hibits to be implicitly included
658  * into digest computation.
659  * In that case we have to move ICV bytes back to their proper place.
660  */
661 uint16_t
662 esp_outb_sqh_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
663 	uint16_t num)
664 {
665 	uint32_t i, k, icv_len, *icv, bytes;
666 	struct rte_mbuf *ml;
667 	struct rte_ipsec_sa *sa;
668 	uint32_t dr[num];
669 
670 	sa = ss->sa;
671 
672 	k = 0;
673 	icv_len = sa->icv_len;
674 	bytes = 0;
675 
676 	for (i = 0; i != num; i++) {
677 		if ((mb[i]->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED) == 0) {
678 			ml = rte_pktmbuf_lastseg(mb[i]);
679 			/* remove high-order 32 bits of esn from packet len */
680 			mb[i]->pkt_len -= sa->sqh_len;
681 			ml->data_len -= sa->sqh_len;
682 			icv = rte_pktmbuf_mtod_offset(ml, void *,
683 				ml->data_len - icv_len);
684 			remove_sqh(icv, icv_len);
685 			bytes += mb[i]->pkt_len;
686 			k++;
687 		} else
688 			dr[i - k] = i;
689 	}
690 	sa->statistics.count += k;
691 	sa->statistics.bytes += bytes;
692 
693 	/* handle unprocessed mbufs */
694 	if (k != num) {
695 		rte_errno = EBADMSG;
696 		if (k != 0)
697 			move_bad_mbufs(mb, dr, num, num - k);
698 	}
699 
700 	return k;
701 }
702 
703 /*
704  * prepare packets for inline ipsec processing:
705  * set ol_flags and attach metadata.
706  */
707 static inline void
708 inline_outb_mbuf_prepare(const struct rte_ipsec_session *ss,
709 	struct rte_mbuf *mb[], uint16_t num)
710 {
711 	uint32_t i, ol_flags, bytes;
712 
713 	ol_flags = ss->security.ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA;
714 	bytes = 0;
715 	for (i = 0; i != num; i++) {
716 
717 		mb[i]->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
718 		bytes += mb[i]->pkt_len;
719 		if (ol_flags != 0)
720 			rte_security_set_pkt_metadata(ss->security.ctx,
721 				ss->security.ses, mb[i], NULL);
722 	}
723 	ss->sa->statistics.count += num;
724 	ss->sa->statistics.bytes += bytes;
725 }
726 
727 
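/*
 * number of sequence numbers a packet will consume:
 * one per TSO segment, or just one for a non-TSO packet.
 */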
728 static inline int
729 esn_outb_nb_segments(struct rte_mbuf *m)
730 {
731 	if (m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG)) {
732 		uint16_t pkt_l3len = m->pkt_len - m->l2_len;
733 		uint16_t segments =
734 			(m->tso_segsz > 0 && pkt_l3len > m->tso_segsz) ?
735 			(pkt_l3len + m->tso_segsz - 1) / m->tso_segsz : 1;
736 		return segments;
737 	}
738 	return 1; /* no TSO */
739 }
740 
741 /* Compute how many packets can be sent before overflow occurs */
742 static inline uint16_t
743 esn_outb_nb_valid_packets(uint16_t num, uint32_t n_sqn, uint16_t nb_segs[])
744 {
745 	uint16_t i;
746 	uint32_t seg_cnt = 0;
747 	for (i = 0; i < num && seg_cnt < n_sqn; i++)
748 		seg_cnt += nb_segs[i];
749 	return i - 1;
750 }
751 
752 /*
753  * process group of ESP outbound tunnel packets destined for
754  * INLINE_CRYPTO type of device.
755  */
756 uint16_t
757 inline_outb_tun_pkt_process(const struct rte_ipsec_session *ss,
758 	struct rte_mbuf *mb[], uint16_t num)
759 {
760 	int32_t rc;
761 	uint32_t i, k, nb_segs_total, n_sqn;
762 	uint64_t sqn;
763 	rte_be64_t sqc;
764 	struct rte_ipsec_sa *sa;
765 	union sym_op_data icv;
766 	uint64_t iv[IPSEC_MAX_IV_QWORD];
767 	uint32_t dr[num];
768 	uint16_t nb_segs[num];
769 
770 	sa = ss->sa;
771 	nb_segs_total = 0;
772 	/* Calculate number of segments */
773 	for (i = 0; i != num; i++) {
774 		nb_segs[i] = esn_outb_nb_segments(mb[i]);
775 		nb_segs_total += nb_segs[i];
776 	}
777 
778 	n_sqn = nb_segs_total;
779 	sqn = esn_outb_update_sqn(sa, &n_sqn);
780 	if (n_sqn != nb_segs_total) {
781 		rte_errno = EOVERFLOW;
782 		/* if there are segmented packets, find out how many can be
783 		 * sent before overflow occurs
784 		 */
785 		if (nb_segs_total > num) /* at least one multi-segment packet */
786 			num = esn_outb_nb_valid_packets(num, n_sqn, nb_segs);
787 		else
788 			num = n_sqn; /* no segmented packets */
789 	}
790 
791 	k = 0;
792 	for (i = 0; i != num; i++) {
793 
794 		sqc = rte_cpu_to_be_64(sqn);
795 		gen_iv(iv, sqc);
796 		sqn += nb_segs[i];
797 
798 		/* try to update the packet itself */
799 		rc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv, 0,
800 			(mb[i]->ol_flags &
801 			(RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG)) != 0);
802 
803 		k += (rc >= 0);
804 
805 		/* failure, put packet into the death-row */
806 		if (rc < 0) {
807 			dr[i - k] = i;
808 			rte_errno = -rc;
809 		}
810 	}
811 
812 	/* copy not processed mbufs beyond good ones */
813 	if (k != num && k != 0)
814 		move_bad_mbufs(mb, dr, num, num - k);
815 
816 	inline_outb_mbuf_prepare(ss, mb, k);
817 	return k;
818 }
819 
820 /*
821  * process group of ESP outbound transport packets destined for
822  * INLINE_CRYPTO type of device.
823  */
824 uint16_t
825 inline_outb_trs_pkt_process(const struct rte_ipsec_session *ss,
826 	struct rte_mbuf *mb[], uint16_t num)
827 {
828 	int32_t rc;
829 	uint32_t i, k, nb_segs_total, n_sqn;
830 	uint64_t sqn;
831 	rte_be64_t sqc;
832 	struct rte_ipsec_sa *sa;
833 	union sym_op_data icv;
834 	uint64_t iv[IPSEC_MAX_IV_QWORD];
835 	uint32_t dr[num];
836 	uint16_t nb_segs[num];
837 
838 	sa = ss->sa;
839 	nb_segs_total = 0;
840 	/* Calculate number of segments */
841 	for (i = 0; i != num; i++) {
842 		nb_segs[i] = esn_outb_nb_segments(mb[i]);
843 		nb_segs_total += nb_segs[i];
844 	}
845 
846 	n_sqn = nb_segs_total;
847 	sqn = esn_outb_update_sqn(sa, &n_sqn);
848 	if (n_sqn != nb_segs_total) {
849 		rte_errno = EOVERFLOW;
850 		/* if there are segmented packets, find out how many can be
851 		 * sent before overflow occurs
852 		 */
853 		if (nb_segs_total > num) /* at least one multi-segment packet */
854 			num = esn_outb_nb_valid_packets(num, n_sqn, nb_segs);
855 		else
856 			num = n_sqn; /* no segmented packets */
857 	}
858 
859 	k = 0;
860 	for (i = 0; i != num; i++) {
861 
862 		sqc = rte_cpu_to_be_64(sqn);
863 		gen_iv(iv, sqc);
864 		sqn += nb_segs[i];
865 
866 		/* try to update the packet itself */
867 		rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], &icv, 0,
868 			(mb[i]->ol_flags &
869 			(RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG)) != 0);
870 
871 		k += (rc >= 0);
872 
873 		/* failure, put packet into the death-row */
874 		if (rc < 0) {
875 			dr[i - k] = i;
876 			rte_errno = -rc;
877 		}
878 	}
879 
880 	/* copy not processed mbufs beyond good ones */
881 	if (k != num && k != 0)
882 		move_bad_mbufs(mb, dr, num, num - k);
883 
884 	inline_outb_mbuf_prepare(ss, mb, k);
885 	return k;
886 }
887 
888 /*
889  * outbound for RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
890  * actual processing is done by HW/PMD, just set flags and metadata.
891  */
892 uint16_t
893 inline_proto_outb_pkt_process(const struct rte_ipsec_session *ss,
894 	struct rte_mbuf *mb[], uint16_t num)
895 {
896 	inline_outb_mbuf_prepare(ss, mb, num);
897 	return num;
898 }
899