/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2020 Intel Corporation
 */

#include <rte_ipsec.h>
#include <rte_esp.h>
#include <rte_udp.h>
#include <rte_errno.h>
#include <rte_cryptodev.h>

#include "sa.h"
#include "ipsec_sqn.h"
#include "crypto.h"
#include "iph.h"
#include "misc.h"
#include "pad.h"

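/*
 * prototype shared by outb_tun_pkt_prepare() and outb_trs_pkt_prepare();
 * it allows cpu_outb_pkt_prepare() to drive tunnel and transport modes
 * through the same loop.
 */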
typedef int32_t (*esp_outb_prepare_t)(struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
	union sym_op_data *icv, uint8_t sqh_len, uint8_t tso);

/*
 * helper function to fill crypto_sym op for cipher+auth algorithms.
 * used by outb_cop_prepare(), see below.
 */
static inline void
sop_ciph_auth_prepare(struct rte_crypto_sym_op *sop,
	const struct rte_ipsec_sa *sa, const union sym_op_data *icv,
	uint32_t pofs, uint32_t plen)
{
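	/*
	 * sa->ctp holds the SA's precomputed cipher/auth data offsets and
	 * lengths; the per-packet protocol offset (pofs) and payload length
	 * (plen) are added on top of them.
	 */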
	sop->cipher.data.offset = sa->ctp.cipher.offset + pofs;
	sop->cipher.data.length = sa->ctp.cipher.length + plen;
	sop->auth.data.offset = sa->ctp.auth.offset + pofs;
	sop->auth.data.length = sa->ctp.auth.length + plen;
	sop->auth.digest.data = icv->va;
	sop->auth.digest.phys_addr = icv->pa;
}

/*
 * helper function to fill crypto_sym op for AEAD algorithms.
 * used by outb_cop_prepare(), see below.
 */
static inline void
sop_aead_prepare(struct rte_crypto_sym_op *sop,
	const struct rte_ipsec_sa *sa, const union sym_op_data *icv,
	uint32_t pofs, uint32_t plen)
{
	sop->aead.data.offset = sa->ctp.cipher.offset + pofs;
	sop->aead.data.length = sa->ctp.cipher.length + plen;
	sop->aead.digest.data = icv->va;
	sop->aead.digest.phys_addr = icv->pa;
	sop->aead.aad.data = icv->va + sa->icv_len;
	sop->aead.aad.phys_addr = icv->pa + sa->icv_len;
}

/*
 * setup crypto op and crypto sym op for ESP outbound packet.
 */
static inline void
outb_cop_prepare(struct rte_crypto_op *cop,
	const struct rte_ipsec_sa *sa, const uint64_t ivp[IPSEC_MAX_IV_QWORD],
	const union sym_op_data *icv, uint32_t hlen, uint32_t plen)
{
	struct rte_crypto_sym_op *sop;
	struct aead_gcm_iv *gcm;
	struct aead_ccm_iv *ccm;
	struct aead_chacha20_poly1305_iv *chacha20_poly1305;
	struct aesctr_cnt_blk *ctr;
	uint32_t algo;

	algo = sa->algo_type;

	/* fill sym op fields */
	sop = cop->sym;

	switch (algo) {
	case ALGO_TYPE_AES_CBC:
		/* Cipher-Auth (AES-CBC *) case */
	case ALGO_TYPE_3DES_CBC:
		/* Cipher-Auth (3DES-CBC *) case */
	case ALGO_TYPE_NULL:
		/* NULL case */
		sop_ciph_auth_prepare(sop, sa, icv, hlen, plen);
		break;
	case ALGO_TYPE_AES_GMAC:
		/* GMAC case */
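		/*
		 * GMAC provides authentication only (the cipher part is
		 * effectively empty), but the GCM-format IV below is still
		 * required for digest computation.
		 */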
		sop_ciph_auth_prepare(sop, sa, icv, hlen, plen);

		/* fill AAD IV (located inside crypto op) */
		gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
			sa->iv_ofs);
		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_GCM:
		/* AEAD (AES_GCM) case */
		sop_aead_prepare(sop, sa, icv, hlen, plen);

		/* fill AAD IV (located inside crypto op) */
		gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
			sa->iv_ofs);
		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_CCM:
		/* AEAD (AES_CCM) case */
		sop_aead_prepare(sop, sa, icv, hlen, plen);

		/* fill AAD IV (located inside crypto op) */
		ccm = rte_crypto_op_ctod_offset(cop, struct aead_ccm_iv *,
			sa->iv_ofs);
		aead_ccm_iv_fill(ccm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_CHACHA20_POLY1305:
		/* AEAD (CHACHA20_POLY) case */
		sop_aead_prepare(sop, sa, icv, hlen, plen);

		/* fill AAD IV (located inside crypto op) */
		chacha20_poly1305 = rte_crypto_op_ctod_offset(cop,
			struct aead_chacha20_poly1305_iv *,
			sa->iv_ofs);
		aead_chacha20_poly1305_iv_fill(chacha20_poly1305,
					       ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_CTR:
		/* Cipher-Auth (AES-CTR *) case */
		sop_ciph_auth_prepare(sop, sa, icv, hlen, plen);

		/* fill CTR block (located inside crypto op) */
		ctr = rte_crypto_op_ctod_offset(cop, struct aesctr_cnt_blk *,
			sa->iv_ofs);
		aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
		break;
	}
}

/*
 * setup/update packet data and metadata for ESP outbound tunnel case.
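 * Resulting packet layout (sketch; optional pieces depend on the SA):
 * [tunnel hdr][UDP (NAT-T)][ESP hdr][IV][orig IP pkt][pad][ESP tail][SQN.hi (ESN)][ICV]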
 */
static inline int32_t
outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
	union sym_op_data *icv, uint8_t sqh_len, uint8_t tso)
{
	uint32_t clen, hlen, l2len, pdlen, pdofs, plen, tlen;
	struct rte_mbuf *ml;
	struct rte_esp_hdr *esph;
	struct rte_esp_tail *espt;
	char *ph, *pt;
	uint64_t *iv;

	/* calculate extra header space required */
	hlen = sa->hdr_len + sa->iv_len + sizeof(*esph);

	/* size of ipsec protected data */
	l2len = mb->l2_len;
	plen = mb->pkt_len - l2len;

	/* number of bytes to encrypt */
	clen = plen + sizeof(*espt);

	if (!tso) {
		clen = RTE_ALIGN_CEIL(clen, sa->pad_align);
		/* pad length + esp tail */
		pdlen = clen - plen;
		tlen = pdlen + sa->icv_len + sqh_len;
	} else {
		/* we don't need to pad/align the packet or reserve room for
		 * the ICV when using TSO offload
		 */
		pdlen = clen - plen;
		tlen = pdlen + sqh_len;
	}

	/* do append and prepend */
	ml = rte_pktmbuf_lastseg(mb);
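	/* the tail bytes plus the AAD scratch area (if any) must fit into
	 * the tailroom of the last segment
	 */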
	if (tlen + sa->aad_len > rte_pktmbuf_tailroom(ml))
		return -ENOSPC;

	/* prepend header */
	ph = rte_pktmbuf_prepend(mb, hlen - l2len);
	if (ph == NULL)
		return -ENOSPC;

	/* append tail */
	pdofs = ml->data_len;
	ml->data_len += tlen;
	mb->pkt_len += tlen;
	pt = rte_pktmbuf_mtod_offset(ml, typeof(pt), pdofs);

	/* update pkt l2/l3 len */
	mb->tx_offload = (mb->tx_offload & sa->tx_offload.msk) |
		sa->tx_offload.val;

	/* copy tunnel pkt header */
	rte_memcpy(ph, sa->hdr, sa->hdr_len);

	/* if UDP encap is enabled update the dgram_len */
	if (sa->type & RTE_IPSEC_SATP_NATT_ENABLE) {
		struct rte_udp_hdr *udph = (struct rte_udp_hdr *)
			(ph + sa->hdr_len - sizeof(struct rte_udp_hdr));
		udph->dgram_len = rte_cpu_to_be_16(mb->pkt_len - sqh_len -
				sa->hdr_l3_off - sa->hdr_len);
	}

	/* update original and new ip header fields */
	update_tun_outb_l3hdr(sa, ph + sa->hdr_l3_off, ph + hlen,
			mb->pkt_len - sqh_len, sa->hdr_l3_off, sqn_low16(sqc));

	/* update spi, seqn and iv */
	esph = (struct rte_esp_hdr *)(ph + sa->hdr_len);
	iv = (uint64_t *)(esph + 1);
	copy_iv(iv, ivp, sa->iv_len);

	esph->spi = sa->spi;
	esph->seq = sqn_low32(sqc);

	/* offset for ICV */
	pdofs += pdlen + sa->sqh_len;

	/* pad length */
	pdlen -= sizeof(*espt);

	/* copy padding data */
	rte_memcpy(pt, esp_pad_bytes, pdlen);

	/* update esp trailer */
	espt = (struct rte_esp_tail *)(pt + pdlen);
	espt->pad_len = pdlen;
	espt->next_proto = sa->proto;

	/* set icv va/pa value(s) */
	icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
	icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);

	return clen;
}

/*
 * for pure cryptodev (lookaside none) depending on SA settings,
 * we might have to write some extra data to the packet.
 */
static inline void
outb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const union sym_op_data *icv)
{
	uint32_t *psqh;
	struct aead_gcm_aad *gaad;
	struct aead_ccm_aad *caad;
	struct aead_chacha20_poly1305_aad *chacha20_poly1305_aad;

	/* insert SQN.hi between ESP trailer and ICV */
	if (sa->sqh_len != 0) {
		psqh = (uint32_t *)(icv->va - sa->sqh_len);
		psqh[0] = sqn_hi32(sqc);
	}

	/*
	 * fill the AAD field, if any (the AAD is built in the tailroom right
	 * after the ICV); supported AEAD algorithms are AES-GCM, AES-CCM and
	 * CHACHA20-POLY1305.
	 */
	switch (sa->algo_type) {
	case ALGO_TYPE_AES_GCM:
		if (sa->aad_len != 0) {
			gaad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);
			aead_gcm_aad_fill(gaad, sa->spi, sqc, IS_ESN(sa));
		}
		break;
	case ALGO_TYPE_AES_CCM:
		if (sa->aad_len != 0) {
			caad = (struct aead_ccm_aad *)(icv->va + sa->icv_len);
			aead_ccm_aad_fill(caad, sa->spi, sqc, IS_ESN(sa));
		}
		break;
	case ALGO_TYPE_CHACHA20_POLY1305:
		if (sa->aad_len != 0) {
			chacha20_poly1305_aad =
				(struct aead_chacha20_poly1305_aad *)
				(icv->va + sa->icv_len);
			aead_chacha20_poly1305_aad_fill(chacha20_poly1305_aad,
				sa->spi, sqc, IS_ESN(sa));
		}
		break;
	default:
		break;
	}
}

/*
 * setup/update packets and crypto ops for ESP outbound tunnel case.
 */
uint16_t
esp_outb_tun_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	struct rte_crypto_op *cop[], uint16_t num)
{
	int32_t rc;
	uint32_t i, k, n;
	uint64_t sqn;
	rte_be64_t sqc;
	struct rte_ipsec_sa *sa;
	struct rte_cryptodev_sym_session *cs;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];
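	/* indexes of packets that failed preparation ("death row") */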
	uint32_t dr[num];

	sa = ss->sa;
	cs = ss->crypto.ses;

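	/* reserve sequence numbers for all packets to be processed;
	 * on SQN space overflow n is reduced to the number that still fits
	 */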
	n = num;
	sqn = esn_outb_update_sqn(sa, &n);
	if (n != num)
		rte_errno = EOVERFLOW;

	k = 0;
	for (i = 0; i != n; i++) {

		sqc = rte_cpu_to_be_64(sqn + i);
		gen_iv(iv, sqc);

		/* try to update the packet itself */
		rc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv,
					  sa->sqh_len, 0);
		/* success, setup crypto op */
		if (rc >= 0) {
			outb_pkt_xprepare(sa, sqc, &icv);
			lksd_none_cop_prepare(cop[k], cs, mb[i]);
			outb_cop_prepare(cop[k], sa, iv, &icv, 0, rc);
			k++;
		/* failure, put packet into the death-row */
		} else {
			dr[i - k] = i;
			rte_errno = -rc;
		}
	}

	/* copy not prepared mbufs beyond good ones */
	if (k != n && k != 0)
		move_bad_mbufs(mb, dr, n, n - k);

	return k;
}

/*
 * setup/update packet data and metadata for ESP outbound transport case.
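 * Resulting packet layout (sketch):
 * [l2][l3][ESP hdr][IV][l4 + payload][pad][ESP tail][SQN.hi (ESN)][ICV]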
 */
static inline int32_t
outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
	const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
	union sym_op_data *icv, uint8_t sqh_len, uint8_t tso)
{
	uint8_t np;
	uint32_t clen, hlen, pdlen, pdofs, plen, tlen, uhlen;
	struct rte_mbuf *ml;
	struct rte_esp_hdr *esph;
	struct rte_esp_tail *espt;
	char *ph, *pt;
	uint64_t *iv;
	uint32_t l2len, l3len;

	l2len = mb->l2_len;
	l3len = mb->l3_len;

	uhlen = l2len + l3len;
	plen = mb->pkt_len - uhlen;

	/* calculate extra header space required */
	hlen = sa->iv_len + sizeof(*esph);

	/* number of bytes to encrypt */
	clen = plen + sizeof(*espt);

	if (!tso) {
		clen = RTE_ALIGN_CEIL(clen, sa->pad_align);
		/* pad length + esp tail */
		pdlen = clen - plen;
		tlen = pdlen + sa->icv_len + sqh_len;
	} else {
		/* we don't need to pad/align the packet or reserve room for
		 * the ICV when using TSO offload
		 */
		pdlen = clen - plen;
		tlen = pdlen + sqh_len;
	}

	/* do append and insert */
	ml = rte_pktmbuf_lastseg(mb);
	if (tlen + sa->aad_len > rte_pktmbuf_tailroom(ml))
		return -ENOSPC;

	/* prepend space for ESP header */
	ph = rte_pktmbuf_prepend(mb, hlen);
	if (ph == NULL)
		return -ENOSPC;

	/* append tail */
	pdofs = ml->data_len;
	ml->data_len += tlen;
	mb->pkt_len += tlen;
	pt = rte_pktmbuf_mtod_offset(ml, typeof(pt), pdofs);

	/* shift L2/L3 headers */
	insert_esph(ph, ph + hlen, uhlen);

	/* update ip header fields */
	np = update_trs_l3hdr(sa, ph + l2len, mb->pkt_len - sqh_len, l2len,
			l3len, IPPROTO_ESP);

	/* update spi, seqn and iv */
	esph = (struct rte_esp_hdr *)(ph + uhlen);
	iv = (uint64_t *)(esph + 1);
	copy_iv(iv, ivp, sa->iv_len);

	esph->spi = sa->spi;
	esph->seq = sqn_low32(sqc);

	/* offset for ICV */
	pdofs += pdlen + sa->sqh_len;

	/* pad length */
	pdlen -= sizeof(*espt);

	/* copy padding data */
	rte_memcpy(pt, esp_pad_bytes, pdlen);

	/* update esp trailer */
	espt = (struct rte_esp_tail *)(pt + pdlen);
	espt->pad_len = pdlen;
	espt->next_proto = np;

	/* set icv va/pa value(s) */
	icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
	icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);

	return clen;
}

/*
 * setup/update packets and crypto ops for ESP outbound transport case.
 */
uint16_t
esp_outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	struct rte_crypto_op *cop[], uint16_t num)
{
	int32_t rc;
	uint32_t i, k, n, l2, l3;
	uint64_t sqn;
	rte_be64_t sqc;
	struct rte_ipsec_sa *sa;
	struct rte_cryptodev_sym_session *cs;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];
	uint32_t dr[num];

	sa = ss->sa;
	cs = ss->crypto.ses;

	n = num;
	sqn = esn_outb_update_sqn(sa, &n);
	if (n != num)
		rte_errno = EOVERFLOW;

	k = 0;
	for (i = 0; i != n; i++) {

		l2 = mb[i]->l2_len;
		l3 = mb[i]->l3_len;

		sqc = rte_cpu_to_be_64(sqn + i);
		gen_iv(iv, sqc);

		/* try to update the packet itself */
		rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], &icv,
				  sa->sqh_len, 0);
		/* success, setup crypto op */
		if (rc >= 0) {
			outb_pkt_xprepare(sa, sqc, &icv);
			lksd_none_cop_prepare(cop[k], cs, mb[i]);
			outb_cop_prepare(cop[k], sa, iv, &icv, l2 + l3, rc);
			k++;
		/* failure, put packet into the death-row */
		} else {
			dr[i - k] = i;
			rte_errno = -rc;
		}
	}

	/* copy not prepared mbufs beyond good ones */
	if (k != n && k != 0)
		move_bad_mbufs(mb, dr, n, n - k);

	return k;
}


static inline uint32_t
outb_cpu_crypto_prepare(const struct rte_ipsec_sa *sa, uint32_t *pofs,
	uint32_t plen, void *iv)
{
	uint64_t *ivp = iv;
	struct aead_gcm_iv *gcm;
	struct aead_ccm_iv *ccm;
	struct aead_chacha20_poly1305_iv *chacha20_poly1305;
	struct aesctr_cnt_blk *ctr;
	uint32_t clen;

	switch (sa->algo_type) {
	case ALGO_TYPE_AES_GCM:
		gcm = iv;
		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_CCM:
		ccm = iv;
		aead_ccm_iv_fill(ccm, ivp[0], sa->salt);
		break;
	case ALGO_TYPE_CHACHA20_POLY1305:
		chacha20_poly1305 = iv;
		aead_chacha20_poly1305_iv_fill(chacha20_poly1305,
					       ivp[0], sa->salt);
		break;
	case ALGO_TYPE_AES_CTR:
		ctr = iv;
		aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
		break;
	}

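	/* the region handed to CPU crypto starts at the SA's auth offset
	 * and covers the SA's auth length plus the payload
	 */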
	*pofs += sa->ctp.auth.offset;
	clen = plen + sa->ctp.auth.length;
	return clen;
}

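/*
 * common part of CPU_CRYPTO tunnel/transport preparation:
 * 'prepare' is either outb_tun_pkt_prepare() or outb_trs_pkt_prepare(),
 * 'cofs_mask' is 0 for tunnel mode (the ESP header offset is taken as 0)
 * and UINT32_MAX for transport mode (the offset keeps the L2/L3 header
 * lengths).
 */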
static uint16_t
cpu_outb_pkt_prepare(const struct rte_ipsec_session *ss,
		struct rte_mbuf *mb[], uint16_t num,
		esp_outb_prepare_t prepare, uint32_t cofs_mask)
{
	int32_t rc;
	uint64_t sqn;
	rte_be64_t sqc;
	struct rte_ipsec_sa *sa;
	uint32_t i, k, n;
	uint32_t l2, l3;
	union sym_op_data icv;
	struct rte_crypto_va_iova_ptr iv[num];
	struct rte_crypto_va_iova_ptr aad[num];
	struct rte_crypto_va_iova_ptr dgst[num];
	uint32_t dr[num];
	uint32_t l4ofs[num];
	uint32_t clen[num];
	uint64_t ivbuf[num][IPSEC_MAX_IV_QWORD];

	sa = ss->sa;

	n = num;
	sqn = esn_outb_update_sqn(sa, &n);
	if (n != num)
		rte_errno = EOVERFLOW;

	for (i = 0, k = 0; i != n; i++) {

		l2 = mb[i]->l2_len;
		l3 = mb[i]->l3_len;

		/* calculate ESP header offset */
		l4ofs[k] = (l2 + l3) & cofs_mask;

		sqc = rte_cpu_to_be_64(sqn + i);
		gen_iv(ivbuf[k], sqc);

		/* try to update the packet itself */
		rc = prepare(sa, sqc, ivbuf[k], mb[i], &icv, sa->sqh_len, 0);

		/* success, proceed with preparations */
		if (rc >= 0) {

			outb_pkt_xprepare(sa, sqc, &icv);

			/* get encrypted data offset and length */
			clen[k] = outb_cpu_crypto_prepare(sa, l4ofs + k, rc,
				ivbuf[k]);

			/* fill iv, digest and aad */
			iv[k].va = ivbuf[k];
			aad[k].va = icv.va + sa->icv_len;
			dgst[k++].va = icv.va;
		} else {
			dr[i - k] = i;
			rte_errno = -rc;
		}
	}

	/* copy not prepared mbufs beyond good ones */
	if (k != n && k != 0)
		move_bad_mbufs(mb, dr, n, n - k);

	/* convert mbufs to iovecs and do actual crypto/auth processing */
	if (k != 0)
		cpu_crypto_bulk(ss, sa->cofs, mb, iv, aad, dgst,
			l4ofs, clen, k);
	return k;
}

uint16_t
cpu_outb_tun_pkt_prepare(const struct rte_ipsec_session *ss,
		struct rte_mbuf *mb[], uint16_t num)
{
	return cpu_outb_pkt_prepare(ss, mb, num, outb_tun_pkt_prepare, 0);
}

uint16_t
cpu_outb_trs_pkt_prepare(const struct rte_ipsec_session *ss,
		struct rte_mbuf *mb[], uint16_t num)
{
	return cpu_outb_pkt_prepare(ss, mb, num, outb_trs_pkt_prepare,
		UINT32_MAX);
}

/*
 * process outbound packets for SA with ESN support,
 * for algorithms that require SQN.hibits to be implicitly included
 * into digest computation.
 * In that case we have to move ICV bytes back to their proper place.
 */
uint16_t
esp_outb_sqh_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
	uint16_t num)
{
	uint32_t i, k, icv_len, *icv, bytes;
	struct rte_mbuf *ml;
	struct rte_ipsec_sa *sa;
	uint32_t dr[num];

	sa = ss->sa;

	k = 0;
	icv_len = sa->icv_len;
	bytes = 0;

	for (i = 0; i != num; i++) {
		if ((mb[i]->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED) == 0) {
			ml = rte_pktmbuf_lastseg(mb[i]);
			/* remove high-order 32 bits of esn from packet len */
			mb[i]->pkt_len -= sa->sqh_len;
			ml->data_len -= sa->sqh_len;
			icv = rte_pktmbuf_mtod_offset(ml, void *,
				ml->data_len - icv_len);
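			/* shift the ICV down over the removed SQN.hi bytes,
			 * back to the place the peer expects it
			 */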
			remove_sqh(icv, icv_len);
			bytes += mb[i]->pkt_len;
			k++;
		} else
			dr[i - k] = i;
	}
	sa->statistics.count += k;
	sa->statistics.bytes += bytes;

	/* handle unprocessed mbufs */
	if (k != num) {
		rte_errno = EBADMSG;
		if (k != 0)
			move_bad_mbufs(mb, dr, num, num - k);
	}

	return k;
}

/*
 * prepare packets for inline ipsec processing:
 * set ol_flags and attach metadata.
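 * metadata is attached only when the PMD advertises
 * RTE_SECURITY_TX_OLOAD_NEED_MDATA.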
 */
static inline void
inline_outb_mbuf_prepare(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	uint32_t i, ol_flags, bytes;

	ol_flags = ss->security.ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA;
	bytes = 0;
	for (i = 0; i != num; i++) {

		mb[i]->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
		bytes += mb[i]->pkt_len;
		if (ol_flags != 0)
			rte_security_set_pkt_metadata(ss->security.ctx,
				ss->security.ses, mb[i], NULL);
	}
	ss->sa->statistics.count += num;
	ss->sa->statistics.bytes += bytes;
}


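/*
 * number of sequence numbers an mbuf will consume:
 * with TSO the HW/PMD emits one ESP packet per segment,
 * and each of them needs its own SQN.
 */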
static inline int
esn_outb_nb_segments(struct rte_mbuf *m)
{
	if (m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG)) {
		uint16_t pkt_l3len = m->pkt_len - m->l2_len;
		uint16_t segments =
			(m->tso_segsz > 0 && pkt_l3len > m->tso_segsz) ?
			(pkt_l3len + m->tso_segsz - 1) / m->tso_segsz : 1;
		return segments;
	}
	return 1; /* no TSO */
}

/* Compute how many packets can be sent before overflow occurs */
static inline uint16_t
esn_outb_nb_valid_packets(uint16_t num, uint32_t n_sqn, uint16_t nb_segs[])
{
	uint16_t i;
	uint32_t seg_cnt = 0;
	for (i = 0; i < num && seg_cnt < n_sqn; i++)
		seg_cnt += nb_segs[i];
	return i - 1;
}

/*
 * process group of ESP outbound tunnel packets destined for
 * INLINE_CRYPTO type of device.
 */
uint16_t
inline_outb_tun_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	int32_t rc;
	uint32_t i, k, nb_segs_total, n_sqn;
	uint64_t sqn;
	rte_be64_t sqc;
	struct rte_ipsec_sa *sa;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];
	uint32_t dr[num];
	uint16_t nb_segs[num];

	sa = ss->sa;
	nb_segs_total = 0;
	/* Calculate number of segments */
	for (i = 0; i != num; i++) {
		nb_segs[i] = esn_outb_nb_segments(mb[i]);
		nb_segs_total += nb_segs[i];
	}

	n_sqn = nb_segs_total;
	sqn = esn_outb_update_sqn(sa, &n_sqn);
	if (n_sqn != nb_segs_total) {
		rte_errno = EOVERFLOW;
		/* if there are segmented packets find out how many can be
		 * sent until overflow occurs
		 */
		if (nb_segs_total > num) /* at least one multi-segment pkt */
			num = esn_outb_nb_valid_packets(num, n_sqn, nb_segs);
		else
			num = n_sqn; /* no segmented packets */
	}

	k = 0;
	for (i = 0; i != num; i++) {

		sqc = rte_cpu_to_be_64(sqn);
		gen_iv(iv, sqc);
		sqn += nb_segs[i];

		/* try to update the packet itself */
		rc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv, 0,
			(mb[i]->ol_flags &
			(RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG)) != 0);

		k += (rc >= 0);

		/* failure, put packet into the death-row */
		if (rc < 0) {
			dr[i - k] = i;
			rte_errno = -rc;
		}
	}

	/* copy not processed mbufs beyond good ones */
	if (k != num && k != 0)
		move_bad_mbufs(mb, dr, num, num - k);

	inline_outb_mbuf_prepare(ss, mb, k);
	return k;
}

/*
 * process group of ESP outbound transport packets destined for
 * INLINE_CRYPTO type of device.
 */
uint16_t
inline_outb_trs_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	int32_t rc;
	uint32_t i, k, nb_segs_total, n_sqn;
	uint64_t sqn;
	rte_be64_t sqc;
	struct rte_ipsec_sa *sa;
	union sym_op_data icv;
	uint64_t iv[IPSEC_MAX_IV_QWORD];
	uint32_t dr[num];
	uint16_t nb_segs[num];

	sa = ss->sa;
	nb_segs_total = 0;
	/* Calculate number of segments */
	for (i = 0; i != num; i++) {
		nb_segs[i] = esn_outb_nb_segments(mb[i]);
		nb_segs_total += nb_segs[i];
	}

	n_sqn = nb_segs_total;
	sqn = esn_outb_update_sqn(sa, &n_sqn);
	if (n_sqn != nb_segs_total) {
		rte_errno = EOVERFLOW;
		/* if there are segmented packets find out how many can be
		 * sent until overflow occurs
		 */
		if (nb_segs_total > num) /* at least one multi-segment pkt */
			num = esn_outb_nb_valid_packets(num, n_sqn, nb_segs);
		else
			num = n_sqn; /* no segmented packets */
	}

	k = 0;
	for (i = 0; i != num; i++) {

		sqc = rte_cpu_to_be_64(sqn);
		gen_iv(iv, sqc);
		sqn += nb_segs[i];

		/* try to update the packet itself */
		rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], &icv, 0,
			(mb[i]->ol_flags &
			(RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG)) != 0);

		k += (rc >= 0);

		/* failure, put packet into the death-row */
		if (rc < 0) {
			dr[i - k] = i;
			rte_errno = -rc;
		}
	}

	/* copy not processed mbufs beyond good ones */
	if (k != num && k != 0)
		move_bad_mbufs(mb, dr, num, num - k);

	inline_outb_mbuf_prepare(ss, mb, k);
	return k;
}

/*
 * outbound for RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
 * actual processing is done by HW/PMD, just set flags and metadata.
 */
uint16_t
inline_proto_outb_pkt_process(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], uint16_t num)
{
	inline_outb_mbuf_prepare(ss, mb, num);
	return num;
}