/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2020 Intel Corporation
 */

#include <rte_ipsec.h>
#include <rte_esp.h>
#include <rte_ip.h>
#include <rte_udp.h>
#include <rte_errno.h>
#include <rte_cryptodev.h>

#include "sa.h"
#include "ipsec_sqn.h"
#include "crypto.h"
#include "iph.h"
#include "misc.h"
#include "pad.h"

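/*
 * Prototype for the per-packet "prepare" helpers below
 * (outb_tun_pkt_prepare/outb_trs_pkt_prepare): on success they return
 * the number of bytes to be encrypted and fill icv with the VA/PA of
 * the ICV location inside the packet, on failure they return negative
 * errno (e.g. -ENOSPC when the mbuf lacks head/tail room).
 */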
typedef int32_t (*esp_outb_prepare_t)(struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
	union sym_op_data *icv, uint8_t sqh_len);

/*
 * helper function to fill crypto_sym op for cipher+auth algorithms.
 * used by outb_cop_prepare(), see below.
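 * sa->ctp holds cipher/auth data offsets and lengths pre-computed at
 * SA setup time; only the per-packet values (pofs, plen) are added here.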
 */
static inline void
sop_ciph_auth_prepare(struct rte_crypto_sym_op *sop,
	const struct rte_ipsec_sa *sa, const union sym_op_data *icv,
	uint32_t pofs, uint32_t plen)
{
	sop->cipher.data.offset = sa->ctp.cipher.offset + pofs;
	sop->cipher.data.length = sa->ctp.cipher.length + plen;
	sop->auth.data.offset = sa->ctp.auth.offset + pofs;
	sop->auth.data.length = sa->ctp.auth.length + plen;
	sop->auth.digest.data = icv->va;
	sop->auth.digest.phys_addr = icv->pa;
}

/*
 * helper function to fill crypto_sym op for aead algorithms.
 * used by outb_cop_prepare(), see below.
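 * note that the per-packet AAD buffer is expected to reside right after
 * the ICV in the mbuf tailroom (filled by outb_pkt_xprepare() below).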
 */
static inline void
sop_aead_prepare(struct rte_crypto_sym_op *sop,
	const struct rte_ipsec_sa *sa, const union sym_op_data *icv,
	uint32_t pofs, uint32_t plen)
{
	sop->aead.data.offset = sa->ctp.cipher.offset + pofs;
	sop->aead.data.length = sa->ctp.cipher.length + plen;
	sop->aead.digest.data = icv->va;
	sop->aead.digest.phys_addr = icv->pa;
	sop->aead.aad.data = icv->va + sa->icv_len;
	sop->aead.aad.phys_addr = icv->pa + sa->icv_len;
}

/*
 * setup crypto op and crypto sym op for ESP outbound packet.
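 * For algorithms that need it, the algorithm-specific IV/counter block
 * is written into the crypto op at offset sa->iv_ofs.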
 */
static inline void
outb_cop_prepare(struct rte_crypto_op *cop,
	const struct rte_ipsec_sa *sa, const uint64_t ivp[IPSEC_MAX_IV_QWORD],
	const union sym_op_data *icv, uint32_t hlen, uint32_t plen)
{
	struct rte_crypto_sym_op *sop;
	struct aead_gcm_iv *gcm;
	struct aead_ccm_iv *ccm;
	struct aead_chacha20_poly1305_iv *chacha20_poly1305;
	struct aesctr_cnt_blk *ctr;
	uint32_t algo;

	algo = sa->algo_type;

	/* fill sym op fields */
	sop = cop->sym;

	switch (algo) {
	case ALGO_TYPE_AES_CBC:
		/* Cipher-Auth (AES-CBC *) case */
	case ALGO_TYPE_3DES_CBC:
		/* Cipher-Auth (3DES-CBC *) case */
	case ALGO_TYPE_NULL:
		/* NULL case */
		sop_ciph_auth_prepare(sop, sa, icv, hlen, plen);
		break;
	case ALGO_TYPE_AES_GMAC:
		/* GMAC case */
		sop_ciph_auth_prepare(sop, sa, icv, hlen, plen);

		/* fill AAD IV (located inside crypto op) */
		gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
			sa->iv_ofs);
		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_GCM:
		/* AEAD (AES_GCM) case */
		sop_aead_prepare(sop, sa, icv, hlen, plen);

		/* fill AAD IV (located inside crypto op) */
		gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
			sa->iv_ofs);
		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_CCM:
		/* AEAD (AES_CCM) case */
		sop_aead_prepare(sop, sa, icv, hlen, plen);

		/* fill AAD IV (located inside crypto op) */
		ccm = rte_crypto_op_ctod_offset(cop, struct aead_ccm_iv *,
			sa->iv_ofs);
		aead_ccm_iv_fill(ccm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_CHACHA20_POLY1305:
		/* AEAD (CHACHA20_POLY) case */
		sop_aead_prepare(sop, sa, icv, hlen, plen);

		/* fill AAD IV (located inside crypto op) */
		chacha20_poly1305 = rte_crypto_op_ctod_offset(cop,
			struct aead_chacha20_poly1305_iv *,
			sa->iv_ofs);
		aead_chacha20_poly1305_iv_fill(chacha20_poly1305,
					       ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_CTR:
		/* Cipher-Auth (AES-CTR *) case */
		sop_ciph_auth_prepare(sop, sa, icv, hlen, plen);

		/* fill CTR block (located inside crypto op) */
		ctr = rte_crypto_op_ctod_offset(cop, struct aesctr_cnt_blk *,
			sa->iv_ofs);
		aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
		break;
	}
}

/*
 * setup/update packet data and metadata for ESP outbound tunnel case.
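 *
 * Resulting packet layout (tunnel mode):
 *   tunnel hdr (sa->hdr, incl. UDP hdr for NAT-T) | esp hdr | iv |
 *   original l3 packet | padding | esp tail | [sqn.hi] | icv
 * The AAD buffer (if any) lives in the tailroom right after the ICV
 * and is not accounted in pkt_len.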
 */
static inline int32_t
outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
	union sym_op_data *icv, uint8_t sqh_len)
{
	uint32_t clen, hlen, l2len, pdlen, pdofs, plen, tlen;
	struct rte_mbuf *ml;
	struct rte_esp_hdr *esph;
	struct rte_esp_tail *espt;
	char *ph, *pt;
	uint64_t *iv;

	/* calculate extra header space required */
	hlen = sa->hdr_len + sa->iv_len + sizeof(*esph);

	/* size of ipsec protected data */
	l2len = mb->l2_len;
	plen = mb->pkt_len - l2len;

	/* number of bytes to encrypt */
	clen = plen + sizeof(*espt);
	clen = RTE_ALIGN_CEIL(clen, sa->pad_align);

	/* pad length + esp tail */
	pdlen = clen - plen;
	tlen = pdlen + sa->icv_len + sqh_len;

	/* do append and prepend */
	ml = rte_pktmbuf_lastseg(mb);
	if (tlen + sa->aad_len > rte_pktmbuf_tailroom(ml))
		return -ENOSPC;

	/* prepend header */
	ph = rte_pktmbuf_prepend(mb, hlen - l2len);
	if (ph == NULL)
		return -ENOSPC;

	/* append tail */
	pdofs = ml->data_len;
	ml->data_len += tlen;
	mb->pkt_len += tlen;
	pt = rte_pktmbuf_mtod_offset(ml, typeof(pt), pdofs);

	/* update pkt l2/l3 len */
	mb->tx_offload = (mb->tx_offload & sa->tx_offload.msk) |
		sa->tx_offload.val;

	/* copy tunnel pkt header */
	rte_memcpy(ph, sa->hdr, sa->hdr_len);

	/* if UDP encap is enabled update the dgram_len */
	if (sa->type & RTE_IPSEC_SATP_NATT_ENABLE) {
		struct rte_udp_hdr *udph = (struct rte_udp_hdr *)
			(ph + sa->hdr_len - sizeof(struct rte_udp_hdr));
		udph->dgram_len = rte_cpu_to_be_16(mb->pkt_len - sqh_len -
			sa->hdr_len + sizeof(struct rte_udp_hdr));
	}
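
	/*
	 * Note: the UDP header (NAT-T) occupies the last bytes of sa->hdr,
	 * right before the ESP header. The sqh_len bytes (high-order ESN
	 * bits temporarily placed in front of the ICV for digest
	 * computation) are excluded from the length fields here and below;
	 * they are stripped later by esp_outb_sqh_process().
	 */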

	/* update original and new ip header fields */
	update_tun_outb_l3hdr(sa, ph + sa->hdr_l3_off, ph + hlen,
			mb->pkt_len - sqh_len, sa->hdr_l3_off, sqn_low16(sqc));

	/* update spi, seqn and iv */
	esph = (struct rte_esp_hdr *)(ph + sa->hdr_len);
	iv = (uint64_t *)(esph + 1);
	copy_iv(iv, ivp, sa->iv_len);

	esph->spi = sa->spi;
	esph->seq = sqn_low32(sqc);

	/* offset for ICV */
	pdofs += pdlen + sa->sqh_len;

	/* pad length */
	pdlen -= sizeof(*espt);

	/* copy padding data */
	rte_memcpy(pt, esp_pad_bytes, pdlen);

	/* update esp trailer */
	espt = (struct rte_esp_tail *)(pt + pdlen);
	espt->pad_len = pdlen;
	espt->next_proto = sa->proto;

	/* set icv va/pa value(s) */
	icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
	icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);

	return clen;
}

/*
 * for pure cryptodev (lookaside none) depending on SA settings,
 * we might have to write some extra data to the packet.
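 * That is: the high-order ESN bits (SQN.hi) just before the ICV and,
 * for AEAD algorithms, the per-packet AAD right after the ICV.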
 */
static inline void
outb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const union sym_op_data *icv)
{
	uint32_t *psqh;
	struct aead_gcm_aad *gaad;
	struct aead_ccm_aad *caad;
	struct aead_chacha20_poly1305_aad *chacha20_poly1305_aad;

	/* insert SQN.hi between ESP trailer and ICV */
	if (sa->sqh_len != 0) {
		psqh = (uint32_t *)(icv->va - sa->sqh_len);
		psqh[0] = sqn_hi32(sqc);
	}

	/*
	 * fill the per-packet AAD, if any (the AAD buffer is placed right
	 * after the ICV); supported AEAD algorithms are AES-GCM, AES-CCM
	 * and CHACHA20-POLY1305.
	 */
	switch (sa->algo_type) {
	case ALGO_TYPE_AES_GCM:
		if (sa->aad_len != 0) {
			gaad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);
			aead_gcm_aad_fill(gaad, sa->spi, sqc, IS_ESN(sa));
		}
		break;
	case ALGO_TYPE_AES_CCM:
		if (sa->aad_len != 0) {
			caad = (struct aead_ccm_aad *)(icv->va + sa->icv_len);
			aead_ccm_aad_fill(caad, sa->spi, sqc, IS_ESN(sa));
		}
		break;
	case ALGO_TYPE_CHACHA20_POLY1305:
		if (sa->aad_len != 0) {
			chacha20_poly1305_aad =
				(struct aead_chacha20_poly1305_aad *)
				(icv->va + sa->icv_len);
			aead_chacha20_poly1305_aad_fill(chacha20_poly1305_aad,
				sa->spi, sqc, IS_ESN(sa));
		}
		break;
	default:
		break;
	}
}

/*
 * setup/update packets and crypto ops for ESP outbound tunnel case.
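 * A block of n sequence numbers is reserved up front via
 * esn_outb_update_sqn() (n can be reduced and rte_errno set to
 * EOVERFLOW when not enough sequence numbers are left); packets that
 * cannot be prepared are collected in dr[] and moved past the k
 * successfully prepared ones by move_bad_mbufs().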
 */
uint16_t
esp_outb_tun_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	struct rte_crypto_op *cop[], uint16_t num)
{
	int32_t rc;
	uint32_t i, k, n;
	uint64_t sqn;
	rte_be64_t sqc;
	struct rte_ipsec_sa *sa;
	struct rte_cryptodev_sym_session *cs;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];
	uint32_t dr[num];

	sa = ss->sa;
	cs = ss->crypto.ses;

	n = num;
	sqn = esn_outb_update_sqn(sa, &n);
	if (n != num)
		rte_errno = EOVERFLOW;

	k = 0;
	for (i = 0; i != n; i++) {

		sqc = rte_cpu_to_be_64(sqn + i);
		gen_iv(iv, sqc);

		/* try to update the packet itself */
		rc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv,
					  sa->sqh_len);
		/* success, setup crypto op */
		if (rc >= 0) {
			outb_pkt_xprepare(sa, sqc, &icv);
			lksd_none_cop_prepare(cop[k], cs, mb[i]);
			outb_cop_prepare(cop[k], sa, iv, &icv, 0, rc);
			k++;
		/* failure, put packet into the death-row */
		} else {
			dr[i - k] = i;
			rte_errno = -rc;
		}
	}

	/* copy not prepared mbufs beyond good ones */
	if (k != n && k != 0)
		move_bad_mbufs(mb, dr, n, n - k);

	return k;
}

/*
 * setup/update packet data and metadata for ESP outbound transport case.
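 *
 * Resulting packet layout (transport mode):
 *   l2 hdr | l3 hdr | esp hdr | iv | l4 payload |
 *   padding | esp tail | [sqn.hi] | icv
 * The original L2/L3 headers are shifted towards the packet start by
 * insert_esph() to make room for the ESP header and IV.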
 */
static inline int32_t
outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
	union sym_op_data *icv, uint8_t sqh_len)
{
	uint8_t np;
	uint32_t clen, hlen, pdlen, pdofs, plen, tlen, uhlen;
	struct rte_mbuf *ml;
	struct rte_esp_hdr *esph;
	struct rte_esp_tail *espt;
	char *ph, *pt;
	uint64_t *iv;
	uint32_t l2len, l3len;

	l2len = mb->l2_len;
	l3len = mb->l3_len;

	uhlen = l2len + l3len;
	plen = mb->pkt_len - uhlen;

	/* calculate extra header space required */
	hlen = sa->iv_len + sizeof(*esph);

	/* number of bytes to encrypt */
	clen = plen + sizeof(*espt);
	clen = RTE_ALIGN_CEIL(clen, sa->pad_align);

	/* pad length + esp tail */
	pdlen = clen - plen;
	tlen = pdlen + sa->icv_len + sqh_len;

	/* do append and insert */
	ml = rte_pktmbuf_lastseg(mb);
	if (tlen + sa->aad_len > rte_pktmbuf_tailroom(ml))
		return -ENOSPC;

	/* prepend space for ESP header */
	ph = rte_pktmbuf_prepend(mb, hlen);
	if (ph == NULL)
		return -ENOSPC;

	/* append tail */
	pdofs = ml->data_len;
	ml->data_len += tlen;
	mb->pkt_len += tlen;
	pt = rte_pktmbuf_mtod_offset(ml, typeof(pt), pdofs);

	/* shift L2/L3 headers */
	insert_esph(ph, ph + hlen, uhlen);

	/* update ip header fields */
	np = update_trs_l3hdr(sa, ph + l2len, mb->pkt_len - sqh_len, l2len,
			l3len, IPPROTO_ESP);

	/* update spi, seqn and iv */
	esph = (struct rte_esp_hdr *)(ph + uhlen);
	iv = (uint64_t *)(esph + 1);
	copy_iv(iv, ivp, sa->iv_len);

	esph->spi = sa->spi;
	esph->seq = sqn_low32(sqc);

	/* offset for ICV */
	pdofs += pdlen + sa->sqh_len;

	/* pad length */
	pdlen -= sizeof(*espt);

	/* copy padding data */
	rte_memcpy(pt, esp_pad_bytes, pdlen);

	/* update esp trailer */
	espt = (struct rte_esp_tail *)(pt + pdlen);
	espt->pad_len = pdlen;
	espt->next_proto = np;

	/* set icv va/pa value(s) */
	icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
	icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);

	return clen;
}

/*
 * setup/update packets and crypto ops for ESP outbound transport case.
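 * Same flow as esp_outb_tun_prepare() above, except that the ESP header
 * follows the original L2/L3 headers, so their combined length is
 * passed to outb_cop_prepare() as the extra data offset.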
 */
uint16_t
esp_outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	struct rte_crypto_op *cop[], uint16_t num)
{
	int32_t rc;
	uint32_t i, k, n, l2, l3;
	uint64_t sqn;
	rte_be64_t sqc;
	struct rte_ipsec_sa *sa;
	struct rte_cryptodev_sym_session *cs;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];
	uint32_t dr[num];

	sa = ss->sa;
	cs = ss->crypto.ses;

	n = num;
	sqn = esn_outb_update_sqn(sa, &n);
	if (n != num)
		rte_errno = EOVERFLOW;

	k = 0;
	for (i = 0; i != n; i++) {

		l2 = mb[i]->l2_len;
		l3 = mb[i]->l3_len;

		sqc = rte_cpu_to_be_64(sqn + i);
		gen_iv(iv, sqc);

		/* try to update the packet itself */
		rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], &icv,
				  sa->sqh_len);
		/* success, setup crypto op */
		if (rc >= 0) {
			outb_pkt_xprepare(sa, sqc, &icv);
			lksd_none_cop_prepare(cop[k], cs, mb[i]);
			outb_cop_prepare(cop[k], sa, iv, &icv, l2 + l3, rc);
			k++;
		/* failure, put packet into the death-row */
		} else {
			dr[i - k] = i;
			rte_errno = -rc;
		}
	}

	/* copy not prepared mbufs beyond good ones */
	if (k != n && k != 0)
		move_bad_mbufs(mb, dr, n, n - k);

	return k;
}

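/*
 * Helper for the CPU_CRYPTO (synchronous) path: writes the algorithm
 * specific IV/counter block into the caller supplied buffer, adjusts
 * *pofs by the pre-computed auth offset and returns the length of the
 * data to be processed.
 */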
static inline uint32_t
outb_cpu_crypto_prepare(const struct rte_ipsec_sa *sa, uint32_t *pofs,
	uint32_t plen, void *iv)
{
	uint64_t *ivp = iv;
	struct aead_gcm_iv *gcm;
	struct aead_ccm_iv *ccm;
	struct aead_chacha20_poly1305_iv *chacha20_poly1305;
	struct aesctr_cnt_blk *ctr;
	uint32_t clen;

	switch (sa->algo_type) {
	case ALGO_TYPE_AES_GCM:
		gcm = iv;
		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_CCM:
		ccm = iv;
		aead_ccm_iv_fill(ccm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_CHACHA20_POLY1305:
		chacha20_poly1305 = iv;
		aead_chacha20_poly1305_iv_fill(chacha20_poly1305,
					       ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_CTR:
		ctr = iv;
		aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
		break;
	}

	*pofs += sa->ctp.auth.offset;
	clen = plen + sa->ctp.auth.length;
	return clen;
}

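/*
 * Setup packets and per-packet IV/AAD/digest pointers for
 * RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO processing, then hand the whole
 * batch to cpu_crypto_bulk() in one call.
 * cofs_mask selects whether the per-packet L2+L3 length is used as the
 * crypto offset (UINT32_MAX for transport) or forced to zero (tunnel).
 */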
static uint16_t
cpu_outb_pkt_prepare(const struct rte_ipsec_session *ss,
		struct rte_mbuf *mb[], uint16_t num,
		esp_outb_prepare_t prepare, uint32_t cofs_mask)
{
	int32_t rc;
	uint64_t sqn;
	rte_be64_t sqc;
	struct rte_ipsec_sa *sa;
	uint32_t i, k, n;
	uint32_t l2, l3;
	union sym_op_data icv;
	struct rte_crypto_va_iova_ptr iv[num];
	struct rte_crypto_va_iova_ptr aad[num];
	struct rte_crypto_va_iova_ptr dgst[num];
	uint32_t dr[num];
	uint32_t l4ofs[num];
	uint32_t clen[num];
	uint64_t ivbuf[num][IPSEC_MAX_IV_QWORD];

	sa = ss->sa;

	n = num;
	sqn = esn_outb_update_sqn(sa, &n);
	if (n != num)
		rte_errno = EOVERFLOW;

	for (i = 0, k = 0; i != n; i++) {

		l2 = mb[i]->l2_len;
		l3 = mb[i]->l3_len;

		/* calculate ESP header offset */
		l4ofs[k] = (l2 + l3) & cofs_mask;

		sqc = rte_cpu_to_be_64(sqn + i);
		gen_iv(ivbuf[k], sqc);

		/* try to update the packet itself */
		rc = prepare(sa, sqc, ivbuf[k], mb[i], &icv, sa->sqh_len);

		/* success, proceed with preparations */
		if (rc >= 0) {

			outb_pkt_xprepare(sa, sqc, &icv);

			/* get encrypted data offset and length */
			clen[k] = outb_cpu_crypto_prepare(sa, l4ofs + k, rc,
				ivbuf[k]);

			/* fill iv, digest and aad */
			iv[k].va = ivbuf[k];
			aad[k].va = icv.va + sa->icv_len;
			dgst[k++].va = icv.va;
		} else {
			dr[i - k] = i;
			rte_errno = -rc;
		}
	}

	/* copy not prepared mbufs beyond good ones */
	if (k != n && k != 0)
		move_bad_mbufs(mb, dr, n, n - k);

	/* convert mbufs to iovecs and do actual crypto/auth processing */
	if (k != 0)
		cpu_crypto_bulk(ss, sa->cofs, mb, iv, aad, dgst,
			l4ofs, clen, k);
	return k;
}

uint16_t
cpu_outb_tun_pkt_prepare(const struct rte_ipsec_session *ss,
		struct rte_mbuf *mb[], uint16_t num)
{
	return cpu_outb_pkt_prepare(ss, mb, num, outb_tun_pkt_prepare, 0);
}

uint16_t
cpu_outb_trs_pkt_prepare(const struct rte_ipsec_session *ss,
		struct rte_mbuf *mb[], uint16_t num)
{
	return cpu_outb_pkt_prepare(ss, mb, num, outb_trs_pkt_prepare,
		UINT32_MAX);
}

/*
 * process outbound packets for SA with ESN support,
 * for algorithms that require SQN.hi bits to be implicitly included
 * into the digest computation.
 * In that case we have to move the ICV bytes back to their proper place.
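 * Packets for which the crypto op failed (PKT_RX_SEC_OFFLOAD_FAILED is
 * set) are moved past the good ones and rte_errno is set to EBADMSG.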
 */
uint16_t
esp_outb_sqh_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	uint16_t num)
{
	uint32_t i, k, icv_len, *icv, bytes;
	struct rte_mbuf *ml;
	struct rte_ipsec_sa *sa;
	uint32_t dr[num];

	sa = ss->sa;

	k = 0;
	icv_len = sa->icv_len;
	bytes = 0;

	for (i = 0; i != num; i++) {
		if ((mb[i]->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) == 0) {
			ml = rte_pktmbuf_lastseg(mb[i]);
			/* remove high-order 32 bits of esn from packet len */
			mb[i]->pkt_len -= sa->sqh_len;
			ml->data_len -= sa->sqh_len;
			icv = rte_pktmbuf_mtod_offset(ml, void *,
				ml->data_len - icv_len);
			remove_sqh(icv, icv_len);
			bytes += mb[i]->pkt_len;
			k++;
		} else
			dr[i - k] = i;
	}
	sa->statistics.count += k;
	sa->statistics.bytes += bytes;

	/* handle unprocessed mbufs */
	if (k != num) {
		rte_errno = EBADMSG;
		if (k != 0)
			move_bad_mbufs(mb, dr, num, num - k);
	}

	return k;
}

/*
 * prepare packets for inline ipsec processing:
 * set ol_flags and attach metadata.
 */
static inline void
inline_outb_mbuf_prepare(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	uint32_t i, ol_flags, bytes;

	ol_flags = ss->security.ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA;
	bytes = 0;
	for (i = 0; i != num; i++) {

		mb[i]->ol_flags |= PKT_TX_SEC_OFFLOAD;
		bytes += mb[i]->pkt_len;
		if (ol_flags != 0)
			rte_security_set_pkt_metadata(ss->security.ctx,
				ss->security.ses, mb[i], NULL);
	}
	ss->sa->statistics.count += num;
	ss->sa->statistics.bytes += bytes;
}

/*
 * process group of ESP outbound tunnel packets destined for
 * INLINE_CRYPTO type of device.
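 * Unlike the lookaside path, no crypto ops are generated and no room is
 * reserved for SQN.hi (sqh_len == 0): the packets are only updated and
 * flagged, encryption and ICV generation happen in HW on transmit.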
 */
uint16_t
inline_outb_tun_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	int32_t rc;
	uint32_t i, k, n;
	uint64_t sqn;
	rte_be64_t sqc;
	struct rte_ipsec_sa *sa;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];
	uint32_t dr[num];

	sa = ss->sa;

	n = num;
	sqn = esn_outb_update_sqn(sa, &n);
	if (n != num)
		rte_errno = EOVERFLOW;

	k = 0;
	for (i = 0; i != n; i++) {

		sqc = rte_cpu_to_be_64(sqn + i);
		gen_iv(iv, sqc);

		/* try to update the packet itself */
		rc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv, 0);

		k += (rc >= 0);

		/* failure, put packet into the death-row */
		if (rc < 0) {
			dr[i - k] = i;
			rte_errno = -rc;
		}
	}

	/* copy not processed mbufs beyond good ones */
	if (k != n && k != 0)
		move_bad_mbufs(mb, dr, n, n - k);

	inline_outb_mbuf_prepare(ss, mb, k);
	return k;
}

/*
 * process group of ESP outbound transport packets destined for
 * INLINE_CRYPTO type of device.
 */
uint16_t
inline_outb_trs_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	int32_t rc;
	uint32_t i, k, n;
	uint64_t sqn;
	rte_be64_t sqc;
	struct rte_ipsec_sa *sa;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];
	uint32_t dr[num];

	sa = ss->sa;

	n = num;
	sqn = esn_outb_update_sqn(sa, &n);
	if (n != num)
		rte_errno = EOVERFLOW;

	k = 0;
	for (i = 0; i != n; i++) {

		sqc = rte_cpu_to_be_64(sqn + i);
		gen_iv(iv, sqc);

		/* try to update the packet itself */
		rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], &icv, 0);

		k += (rc >= 0);

		/* failure, put packet into the death-row */
		if (rc < 0) {
			dr[i - k] = i;
			rte_errno = -rc;
		}
	}

	/* copy not processed mbufs beyond good ones */
	if (k != n && k != 0)
		move_bad_mbufs(mb, dr, n, n - k);

	inline_outb_mbuf_prepare(ss, mb, k);
	return k;
}

/*
 * outbound for RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
 * actual processing is done by HW/PMD, just set flags and metadata.
 */
uint16_t
inline_proto_outb_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	inline_outb_mbuf_prepare(ss, mb, num);
	return num;
}