xref: /dpdk/lib/ipsec/esp_outb.c (revision f8dbaebbf1c9efcbb2e2354b341ed62175466a57)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018-2020 Intel Corporation
3  */
4 
5 #include <rte_ipsec.h>
6 #include <rte_esp.h>
7 #include <rte_ip.h>
8 #include <rte_udp.h>
9 #include <rte_errno.h>
10 #include <rte_cryptodev.h>
11 
12 #include "sa.h"
13 #include "ipsec_sqn.h"
14 #include "crypto.h"
15 #include "iph.h"
16 #include "misc.h"
17 #include "pad.h"
18 
19 typedef int32_t (*esp_outb_prepare_t)(struct rte_ipsec_sa *sa, rte_be64_t sqc,
20 	const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
21 	union sym_op_data *icv, uint8_t sqh_len, uint8_t tso);
22 
23 /*
24  * helper function to fill crypto_sym op for cipher+auth algorithms.
25  * used by outb_cop_prepare(), see below.
26  */
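/*
 * Note: sa->ctp holds the cipher/auth data offsets and lengths that were
 * precomputed when the SA was initialized (see sa.c); only the per-packet
 * protocol offset (pofs) and payload length (plen) are added at run time.
 */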
27 static inline void
28 sop_ciph_auth_prepare(struct rte_crypto_sym_op *sop,
29 	const struct rte_ipsec_sa *sa, const union sym_op_data *icv,
30 	uint32_t pofs, uint32_t plen)
31 {
32 	sop->cipher.data.offset = sa->ctp.cipher.offset + pofs;
33 	sop->cipher.data.length = sa->ctp.cipher.length + plen;
34 	sop->auth.data.offset = sa->ctp.auth.offset + pofs;
35 	sop->auth.data.length = sa->ctp.auth.length + plen;
36 	sop->auth.digest.data = icv->va;
37 	sop->auth.digest.phys_addr = icv->pa;
38 }
39 
40 /*
41  * helper function to fill crypto_sym op for AEAD algorithms.
42  * used by outb_cop_prepare(), see below.
43  */
44 static inline void
45 sop_aead_prepare(struct rte_crypto_sym_op *sop,
46 	const struct rte_ipsec_sa *sa, const union sym_op_data *icv,
47 	uint32_t pofs, uint32_t plen)
48 {
49 	sop->aead.data.offset = sa->ctp.cipher.offset + pofs;
50 	sop->aead.data.length = sa->ctp.cipher.length + plen;
51 	sop->aead.digest.data = icv->va;
52 	sop->aead.digest.phys_addr = icv->pa;
53 	sop->aead.aad.data = icv->va + sa->icv_len;
54 	sop->aead.aad.phys_addr = icv->pa + sa->icv_len;
55 }
56 
57 /*
58  * setup crypto op and crypto sym op for ESP outbound packet.
59  */
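/*
 * The per-packet IV / counter block is written inside the crypto op at
 * sa->iv_ofs, while the digest (and, for AEAD, the AAD) pointers refer to
 * the packet tailroom set up by outb_tun_pkt_prepare()/outb_trs_pkt_prepare().
 */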
60 static inline void
61 outb_cop_prepare(struct rte_crypto_op *cop,
62 	const struct rte_ipsec_sa *sa, const uint64_t ivp[IPSEC_MAX_IV_QWORD],
63 	const union sym_op_data *icv, uint32_t hlen, uint32_t plen)
64 {
65 	struct rte_crypto_sym_op *sop;
66 	struct aead_gcm_iv *gcm;
67 	struct aead_ccm_iv *ccm;
68 	struct aead_chacha20_poly1305_iv *chacha20_poly1305;
69 	struct aesctr_cnt_blk *ctr;
70 	uint32_t algo;
71 
72 	algo = sa->algo_type;
73 
74 	/* fill sym op fields */
75 	sop = cop->sym;
76 
77 	switch (algo) {
78 	case ALGO_TYPE_AES_CBC:
79 		/* Cipher-Auth (AES-CBC *) case */
80 	case ALGO_TYPE_3DES_CBC:
81 		/* Cipher-Auth (3DES-CBC *) case */
82 	case ALGO_TYPE_NULL:
83 		/* NULL case */
84 		sop_ciph_auth_prepare(sop, sa, icv, hlen, plen);
85 		break;
86 	case ALGO_TYPE_AES_GMAC:
87 		/* GMAC case */
88 		sop_ciph_auth_prepare(sop, sa, icv, hlen, plen);
89 
90 		/* fill AAD IV (located inside crypto op) */
91 		gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
92 			sa->iv_ofs);
93 		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
94 		break;
95 	case ALGO_TYPE_AES_GCM:
96 		/* AEAD (AES_GCM) case */
97 		sop_aead_prepare(sop, sa, icv, hlen, plen);
98 
99 		/* fill AAD IV (located inside crypto op) */
100 		gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *,
101 			sa->iv_ofs);
102 		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
103 		break;
104 	case ALGO_TYPE_AES_CCM:
105 		/* AEAD (AES_CCM) case */
106 		sop_aead_prepare(sop, sa, icv, hlen, plen);
107 
108 		/* fill AAD IV (located inside crypto op) */
109 		ccm = rte_crypto_op_ctod_offset(cop, struct aead_ccm_iv *,
110 			sa->iv_ofs);
111 		aead_ccm_iv_fill(ccm, ivp[0], sa->salt);
112 		break;
113 	case ALGO_TYPE_CHACHA20_POLY1305:
114 		/* AEAD (CHACHA20_POLY1305) case */
115 		sop_aead_prepare(sop, sa, icv, hlen, plen);
116 
117 		/* fill AAD IV (located inside crypto op) */
118 		chacha20_poly1305 = rte_crypto_op_ctod_offset(cop,
119 			struct aead_chacha20_poly1305_iv *,
120 			sa->iv_ofs);
121 		aead_chacha20_poly1305_iv_fill(chacha20_poly1305,
122 					       ivp[0], sa->salt);
123 		break;
124 	case ALGO_TYPE_AES_CTR:
125 		/* Cipher-Auth (AES-CTR *) case */
126 		sop_ciph_auth_prepare(sop, sa, icv, hlen, plen);
127 
128 		/* fill CTR block (located inside crypto op) */
129 		ctr = rte_crypto_op_ctod_offset(cop, struct aesctr_cnt_blk *,
130 			sa->iv_ofs);
131 		aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
132 		break;
133 	}
134 }
135 
136 /*
137  * setup/update packet data and metadata for ESP outbound tunnel case.
138  */
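/*
 * Resulting mbuf data layout (illustrative sketch, non-TSO tunnel case;
 * the UDP header is present only when NAT-T encapsulation is enabled):
 *
 *	sa->hdr (new L2 + outer IP [+ UDP]) | ESP header | IV |
 *	original IP packet | padding | ESP trailer | [SQN.hi] | ICV
 *
 * icv->va/icv->pa are set to the ICV location; for AEAD algorithms the AAD
 * buffer is formatted right after the ICV by outb_pkt_xprepare().
 */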
139 static inline int32_t
140 outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
141 	const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
142 	union sym_op_data *icv, uint8_t sqh_len, uint8_t tso)
143 {
144 	uint32_t clen, hlen, l2len, pdlen, pdofs, plen, tlen;
145 	struct rte_mbuf *ml;
146 	struct rte_esp_hdr *esph;
147 	struct rte_esp_tail *espt;
148 	char *ph, *pt;
149 	uint64_t *iv;
150 
151 	/* calculate extra header space required */
152 	hlen = sa->hdr_len + sa->iv_len + sizeof(*esph);
153 
154 	/* size of ipsec protected data */
155 	l2len = mb->l2_len;
156 	plen = mb->pkt_len - l2len;
157 
158 	/* number of bytes to encrypt */
159 	clen = plen + sizeof(*espt);
160 
161 	if (!tso) {
162 		clen = RTE_ALIGN_CEIL(clen, sa->pad_align);
163 		/* pad length + esp tail */
164 		pdlen = clen - plen;
165 		tlen = pdlen + sa->icv_len + sqh_len;
166 	} else {
167 		/* We don't need to pad/align the packet or reserve room
168 		 * for the ICV when using TSO offload
169 		 */
170 		pdlen = clen - plen;
171 		tlen = pdlen + sqh_len;
172 	}
173 
174 	/* do append and prepend */
175 	ml = rte_pktmbuf_lastseg(mb);
176 	if (tlen + sa->aad_len > rte_pktmbuf_tailroom(ml))
177 		return -ENOSPC;
178 
179 	/* prepend header */
180 	ph = rte_pktmbuf_prepend(mb, hlen - l2len);
181 	if (ph == NULL)
182 		return -ENOSPC;
183 
184 	/* append tail */
185 	pdofs = ml->data_len;
186 	ml->data_len += tlen;
187 	mb->pkt_len += tlen;
188 	pt = rte_pktmbuf_mtod_offset(ml, typeof(pt), pdofs);
189 
190 	/* update pkt l2/l3 len */
191 	mb->tx_offload = (mb->tx_offload & sa->tx_offload.msk) |
192 		sa->tx_offload.val;
193 
194 	/* copy tunnel pkt header */
195 	rte_memcpy(ph, sa->hdr, sa->hdr_len);
196 
197 	/* if UDP encap is enabled update the dgram_len */
198 	if (sa->type & RTE_IPSEC_SATP_NATT_ENABLE) {
199 		struct rte_udp_hdr *udph = (struct rte_udp_hdr *)
200 				(ph - sizeof(struct rte_udp_hdr));
201 		udph->dgram_len = rte_cpu_to_be_16(mb->pkt_len - sqh_len -
202 				sa->hdr_l3_off - sa->hdr_len);
203 	}
204 
205 	/* update original and new ip header fields */
206 	update_tun_outb_l3hdr(sa, ph + sa->hdr_l3_off, ph + hlen,
207 			mb->pkt_len - sqh_len, sa->hdr_l3_off, sqn_low16(sqc));
208 
209 	/* update spi, seqn and iv */
210 	esph = (struct rte_esp_hdr *)(ph + sa->hdr_len);
211 	iv = (uint64_t *)(esph + 1);
212 	copy_iv(iv, ivp, sa->iv_len);
213 
214 	esph->spi = sa->spi;
215 	esph->seq = sqn_low32(sqc);
216 
217 	/* offset for ICV */
218 	pdofs += pdlen + sa->sqh_len;
219 
220 	/* pad length */
221 	pdlen -= sizeof(*espt);
222 
223 	/* copy padding data */
224 	rte_memcpy(pt, esp_pad_bytes, pdlen);
225 
226 	/* update esp trailer */
227 	espt = (struct rte_esp_tail *)(pt + pdlen);
228 	espt->pad_len = pdlen;
229 	espt->next_proto = sa->proto;
230 
231 	/* set icv va/pa value(s) */
232 	icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
233 	icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);
234 
235 	return clen;
236 }
237 
238 /*
239  * for pure cryptodev (lookaside none), depending on SA settings,
240  * we might have to write some extra data to the packet.
241  */
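/*
 * For ESN the high 32 bits of the sequence number are not transmitted, but
 * still have to be covered by the ICV computation. They are therefore
 * temporarily placed between the ESP trailer and the ICV (sqh_len bytes)
 * and stripped again by esp_outb_sqh_process() after the crypto operation.
 */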
242 static inline void
243 outb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,
244 	const union sym_op_data *icv)
245 {
246 	uint32_t *psqh;
247 	struct aead_gcm_aad *gaad;
248 	struct aead_ccm_aad *caad;
249 	struct aead_chacha20_poly1305_aad *chacha20_poly1305_aad;
250 
251 	/* insert SQN.hi between ESP trailer and ICV */
252 	if (sa->sqh_len != 0) {
253 		psqh = (uint32_t *)(icv->va - sa->sqh_len);
254 		psqh[0] = sqn_hi32(sqc);
255 	}
256 
257 	/*
258 	 * fill IV and AAD fields, if any (AAD is placed right after the ICV);
259 	 * AAD is needed only for the AEAD algorithms: AES-GCM, AES-CCM and
260 	 * CHACHA20-POLY1305.
260 	 */
261 	switch (sa->algo_type) {
262 	case ALGO_TYPE_AES_GCM:
263 		if (sa->aad_len != 0) {
264 			gaad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);
265 			aead_gcm_aad_fill(gaad, sa->spi, sqc, IS_ESN(sa));
266 		}
267 		break;
268 	case ALGO_TYPE_AES_CCM:
269 		if (sa->aad_len != 0) {
270 			caad = (struct aead_ccm_aad *)(icv->va + sa->icv_len);
271 			aead_ccm_aad_fill(caad, sa->spi, sqc, IS_ESN(sa));
272 		}
273 		break;
274 	case ALGO_TYPE_CHACHA20_POLY1305:
275 		if (sa->aad_len != 0) {
276 			chacha20_poly1305_aad = (struct aead_chacha20_poly1305_aad *)
277 				(icv->va + sa->icv_len);
278 			aead_chacha20_poly1305_aad_fill(chacha20_poly1305_aad,
279 				sa->spi, sqc, IS_ESN(sa));
280 		}
281 		break;
282 	default:
283 		break;
284 	}
285 }
286 
287 /*
288  * setup/update packets and crypto ops for ESP outbound tunnel case.
289  */
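/*
 * Minimal usage sketch (assumes a configured lookaside-none session 'ss',
 * a cryptodev 'dev_id'/'qp' and mb[]/cop[] arrays of size 'n'); this
 * function is normally reached through rte_ipsec_pkt_crypto_prepare():
 *
 *	k = rte_ipsec_pkt_crypto_prepare(&ss, mb, cop, n);
 *	rte_cryptodev_enqueue_burst(dev_id, qp, cop, k);
 *
 * On return the first k crypto ops are ready for enqueue; mbufs that could
 * not be prepared are moved past the good ones and rte_errno is set.
 */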
290 uint16_t
291 esp_outb_tun_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
292 	struct rte_crypto_op *cop[], uint16_t num)
293 {
294 	int32_t rc;
295 	uint32_t i, k, n;
296 	uint64_t sqn;
297 	rte_be64_t sqc;
298 	struct rte_ipsec_sa *sa;
299 	struct rte_cryptodev_sym_session *cs;
300 	union sym_op_data icv;
301 	uint64_t iv[IPSEC_MAX_IV_QWORD];
302 	uint32_t dr[num];
303 
304 	sa = ss->sa;
305 	cs = ss->crypto.ses;
306 
307 	n = num;
308 	sqn = esn_outb_update_sqn(sa, &n);
309 	if (n != num)
310 		rte_errno = EOVERFLOW;
311 
312 	k = 0;
313 	for (i = 0; i != n; i++) {
314 
315 		sqc = rte_cpu_to_be_64(sqn + i);
316 		gen_iv(iv, sqc);
317 
318 		/* try to update the packet itself */
319 		rc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv,
320 					  sa->sqh_len, 0);
321 		/* success, setup crypto op */
322 		if (rc >= 0) {
323 			outb_pkt_xprepare(sa, sqc, &icv);
324 			lksd_none_cop_prepare(cop[k], cs, mb[i]);
325 			outb_cop_prepare(cop[k], sa, iv, &icv, 0, rc);
326 			k++;
327 		/* failure, put packet into the death-row */
328 		} else {
329 			dr[i - k] = i;
330 			rte_errno = -rc;
331 		}
332 	}
333 
334 	/* copy not prepared mbufs beyond good ones */
335 	if (k != n && k != 0)
336 		move_bad_mbufs(mb, dr, n, n - k);
337 
338 	return k;
339 }
340 
341 /*
342  * setup/update packet data and metadata for ESP outbound transport case.
343  */
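/*
 * Resulting mbuf data layout (illustrative sketch, non-TSO transport case):
 *
 *	L2 | L3 (next proto rewritten to ESP) | ESP header | IV |
 *	original L4 payload | padding | ESP trailer | [SQN.hi] | ICV
 *
 * insert_esph() shifts the existing L2/L3 headers to the new data start to
 * make room for the ESP header and IV; update_trs_l3hdr() returns the
 * original next proto, which is stored in the ESP trailer.
 */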
344 static inline int32_t
345 outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
346 	const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
347 	union sym_op_data *icv, uint8_t sqh_len, uint8_t tso)
348 {
349 	uint8_t np;
350 	uint32_t clen, hlen, pdlen, pdofs, plen, tlen, uhlen;
351 	struct rte_mbuf *ml;
352 	struct rte_esp_hdr *esph;
353 	struct rte_esp_tail *espt;
354 	char *ph, *pt;
355 	uint64_t *iv;
356 	uint32_t l2len, l3len;
357 
358 	l2len = mb->l2_len;
359 	l3len = mb->l3_len;
360 
361 	uhlen = l2len + l3len;
362 	plen = mb->pkt_len - uhlen;
363 
364 	/* calculate extra header space required */
365 	hlen = sa->iv_len + sizeof(*esph);
366 
367 	/* number of bytes to encrypt */
368 	clen = plen + sizeof(*espt);
369 
370 	if (!tso) {
371 		clen = RTE_ALIGN_CEIL(clen, sa->pad_align);
372 		/* pad length + esp tail */
373 		pdlen = clen - plen;
374 		tlen = pdlen + sa->icv_len + sqh_len;
375 	} else {
376 		/* We don't need to pad/align the packet or reserve room
377 		 * for the ICV when using TSO offload
378 		 */
379 		pdlen = clen - plen;
380 		tlen = pdlen + sqh_len;
381 	}
382 
383 	/* do append and insert */
384 	ml = rte_pktmbuf_lastseg(mb);
385 	if (tlen + sa->aad_len > rte_pktmbuf_tailroom(ml))
386 		return -ENOSPC;
387 
388 	/* prepend space for ESP header */
389 	ph = rte_pktmbuf_prepend(mb, hlen);
390 	if (ph == NULL)
391 		return -ENOSPC;
392 
393 	/* append tail */
394 	pdofs = ml->data_len;
395 	ml->data_len += tlen;
396 	mb->pkt_len += tlen;
397 	pt = rte_pktmbuf_mtod_offset(ml, typeof(pt), pdofs);
398 
399 	/* shift L2/L3 headers */
400 	insert_esph(ph, ph + hlen, uhlen);
401 
402 	/* update ip header fields */
403 	np = update_trs_l3hdr(sa, ph + l2len, mb->pkt_len - sqh_len, l2len,
404 			l3len, IPPROTO_ESP);
405 
406 	/* update spi, seqn and iv */
407 	esph = (struct rte_esp_hdr *)(ph + uhlen);
408 	iv = (uint64_t *)(esph + 1);
409 	copy_iv(iv, ivp, sa->iv_len);
410 
411 	esph->spi = sa->spi;
412 	esph->seq = sqn_low32(sqc);
413 
414 	/* offset for ICV */
415 	pdofs += pdlen + sa->sqh_len;
416 
417 	/* pad length */
418 	pdlen -= sizeof(*espt);
419 
420 	/* copy padding data */
421 	rte_memcpy(pt, esp_pad_bytes, pdlen);
422 
423 	/* update esp trailer */
424 	espt = (struct rte_esp_tail *)(pt + pdlen);
425 	espt->pad_len = pdlen;
426 	espt->next_proto = np;
427 
428 	/* set icv va/pa value(s) */
429 	icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
430 	icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);
431 
432 	return clen;
433 }
434 
435 /*
436  * setup/update packets and crypto ops for ESP outbound transport case.
437  */
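/*
 * Differs from the tunnel case above only in that the per-packet L2+L3
 * header length is passed to outb_cop_prepare() as the protocol offset,
 * since in transport mode the original IP header stays in front of ESP.
 */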
438 uint16_t
439 esp_outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
440 	struct rte_crypto_op *cop[], uint16_t num)
441 {
442 	int32_t rc;
443 	uint32_t i, k, n, l2, l3;
444 	uint64_t sqn;
445 	rte_be64_t sqc;
446 	struct rte_ipsec_sa *sa;
447 	struct rte_cryptodev_sym_session *cs;
448 	union sym_op_data icv;
449 	uint64_t iv[IPSEC_MAX_IV_QWORD];
450 	uint32_t dr[num];
451 
452 	sa = ss->sa;
453 	cs = ss->crypto.ses;
454 
455 	n = num;
456 	sqn = esn_outb_update_sqn(sa, &n);
457 	if (n != num)
458 		rte_errno = EOVERFLOW;
459 
460 	k = 0;
461 	for (i = 0; i != n; i++) {
462 
463 		l2 = mb[i]->l2_len;
464 		l3 = mb[i]->l3_len;
465 
466 		sqc = rte_cpu_to_be_64(sqn + i);
467 		gen_iv(iv, sqc);
468 
469 		/* try to update the packet itself */
470 		rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], &icv,
471 				  sa->sqh_len, 0);
472 		/* success, setup crypto op */
473 		if (rc >= 0) {
474 			outb_pkt_xprepare(sa, sqc, &icv);
475 			lksd_none_cop_prepare(cop[k], cs, mb[i]);
476 			outb_cop_prepare(cop[k], sa, iv, &icv, l2 + l3, rc);
477 			k++;
478 		/* failure, put packet into the death-row */
479 		} else {
480 			dr[i - k] = i;
481 			rte_errno = -rc;
482 		}
483 	}
484 
485 	/* copy not prepared mbufs beyond good ones */
486 	if (k != n && k != 0)
487 		move_bad_mbufs(mb, dr, n, n - k);
488 
489 	return k;
490 }
491 
492 
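/*
 * helper for the CPU_CRYPTO path: format the per-packet IV/counter block
 * in the caller provided buffer, adjust *pofs by the auth data offset and
 * return the length of the data to be processed.
 */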
493 static inline uint32_t
494 outb_cpu_crypto_prepare(const struct rte_ipsec_sa *sa, uint32_t *pofs,
495 	uint32_t plen, void *iv)
496 {
497 	uint64_t *ivp = iv;
498 	struct aead_gcm_iv *gcm;
499 	struct aead_ccm_iv *ccm;
500 	struct aead_chacha20_poly1305_iv *chacha20_poly1305;
501 	struct aesctr_cnt_blk *ctr;
502 	uint32_t clen;
503 
504 	switch (sa->algo_type) {
505 	case ALGO_TYPE_AES_GCM:
506 		gcm = iv;
507 		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
508 		break;
509 	case ALGO_TYPE_AES_CCM:
510 		ccm = iv;
511 		aead_ccm_iv_fill(ccm, ivp[0], sa->salt);
512 		break;
513 	case ALGO_TYPE_CHACHA20_POLY1305:
514 		chacha20_poly1305 = iv;
515 		aead_chacha20_poly1305_iv_fill(chacha20_poly1305,
516 					       ivp[0], sa->salt);
517 		break;
518 	case ALGO_TYPE_AES_CTR:
519 		ctr = iv;
520 		aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
521 		break;
522 	}
523 
524 	*pofs += sa->ctp.auth.offset;
525 	clen = plen + sa->ctp.auth.length;
526 	return clen;
527 }
528 
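/*
 * setup/update packets for ESP outbound processing with
 * RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO: packets are prepared the same way
 * as for lookaside crypto, then cpu_crypto_bulk() performs the actual
 * cipher/auth processing synchronously.
 */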
529 static uint16_t
530 cpu_outb_pkt_prepare(const struct rte_ipsec_session *ss,
531 		struct rte_mbuf *mb[], uint16_t num,
532 		esp_outb_prepare_t prepare, uint32_t cofs_mask)
533 {
534 	int32_t rc;
535 	uint64_t sqn;
536 	rte_be64_t sqc;
537 	struct rte_ipsec_sa *sa;
538 	uint32_t i, k, n;
539 	uint32_t l2, l3;
540 	union sym_op_data icv;
541 	struct rte_crypto_va_iova_ptr iv[num];
542 	struct rte_crypto_va_iova_ptr aad[num];
543 	struct rte_crypto_va_iova_ptr dgst[num];
544 	uint32_t dr[num];
545 	uint32_t l4ofs[num];
546 	uint32_t clen[num];
547 	uint64_t ivbuf[num][IPSEC_MAX_IV_QWORD];
548 
549 	sa = ss->sa;
550 
551 	n = num;
552 	sqn = esn_outb_update_sqn(sa, &n);
553 	if (n != num)
554 		rte_errno = EOVERFLOW;
555 
556 	for (i = 0, k = 0; i != n; i++) {
557 
558 		l2 = mb[i]->l2_len;
559 		l3 = mb[i]->l3_len;
560 
561 		/* calculate ESP header offset */
562 		l4ofs[k] = (l2 + l3) & cofs_mask;
563 
564 		sqc = rte_cpu_to_be_64(sqn + i);
565 		gen_iv(ivbuf[k], sqc);
566 
567 		/* try to update the packet itself */
568 		rc = prepare(sa, sqc, ivbuf[k], mb[i], &icv, sa->sqh_len, 0);
569 
570 		/* success, proceed with preparations */
571 		if (rc >= 0) {
572 
573 			outb_pkt_xprepare(sa, sqc, &icv);
574 
575 			/* get encrypted data offset and length */
576 			clen[k] = outb_cpu_crypto_prepare(sa, l4ofs + k, rc,
577 				ivbuf[k]);
578 
579 			/* fill iv, digest and aad */
580 			iv[k].va = ivbuf[k];
581 			aad[k].va = icv.va + sa->icv_len;
582 			dgst[k++].va = icv.va;
583 		} else {
584 			dr[i - k] = i;
585 			rte_errno = -rc;
586 		}
587 	}
588 
589 	/* copy not prepared mbufs beyond good ones */
590 	if (k != n && k != 0)
591 		move_bad_mbufs(mb, dr, n, n - k);
592 
593 	/* convert mbufs to iovecs and do actual crypto/auth processing */
594 	if (k != 0)
595 		cpu_crypto_bulk(ss, sa->cofs, mb, iv, aad, dgst,
596 			l4ofs, clen, k);
597 	return k;
598 }
599 
600 uint16_t
601 cpu_outb_tun_pkt_prepare(const struct rte_ipsec_session *ss,
602 		struct rte_mbuf *mb[], uint16_t num)
603 {
604 	return cpu_outb_pkt_prepare(ss, mb, num, outb_tun_pkt_prepare, 0);
605 }
606 
607 uint16_t
608 cpu_outb_trs_pkt_prepare(const struct rte_ipsec_session *ss,
609 		struct rte_mbuf *mb[], uint16_t num)
610 {
611 	return cpu_outb_pkt_prepare(ss, mb, num, outb_trs_pkt_prepare,
612 		UINT32_MAX);
613 }
614 
615 /*
616  * process outbound packets for SA with ESN support,
617  * for algorithms that require SQN.hibits to be implicitly included
618  * into digest computation.
619  * In that case we have to move ICV bytes back to their proper place.
620  */
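/*
 * i.e. the tail of each successfully processed packet changes from
 *	... | ESP trailer | SQN.hi | ICV
 * to
 *	... | ESP trailer | ICV
 * and pkt_len/data_len shrink by sa->sqh_len.
 */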
621 uint16_t
622 esp_outb_sqh_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
623 	uint16_t num)
624 {
625 	uint32_t i, k, icv_len, *icv, bytes;
626 	struct rte_mbuf *ml;
627 	struct rte_ipsec_sa *sa;
628 	uint32_t dr[num];
629 
630 	sa = ss->sa;
631 
632 	k = 0;
633 	icv_len = sa->icv_len;
634 	bytes = 0;
635 
636 	for (i = 0; i != num; i++) {
637 		if ((mb[i]->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED) == 0) {
638 			ml = rte_pktmbuf_lastseg(mb[i]);
639 			/* remove high-order 32 bits of esn from packet len */
640 			mb[i]->pkt_len -= sa->sqh_len;
641 			ml->data_len -= sa->sqh_len;
642 			icv = rte_pktmbuf_mtod_offset(ml, void *,
643 				ml->data_len - icv_len);
644 			remove_sqh(icv, icv_len);
645 			bytes += mb[i]->pkt_len;
646 			k++;
647 		} else
648 			dr[i - k] = i;
649 	}
650 	sa->statistics.count += k;
651 	sa->statistics.bytes += bytes;
652 
653 	/* handle unprocessed mbufs */
654 	if (k != num) {
655 		rte_errno = EBADMSG;
656 		if (k != 0)
657 			move_bad_mbufs(mb, dr, num, num - k);
658 	}
659 
660 	return k;
661 }
662 
663 /*
664  * prepare packets for inline ipsec processing:
665  * set ol_flags and attach metadata.
666  */
667 static inline void
668 inline_outb_mbuf_prepare(const struct rte_ipsec_session *ss,
669 	struct rte_mbuf *mb[], uint16_t num)
670 {
671 	uint32_t i, ol_flags, bytes;
672 
673 	ol_flags = ss->security.ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA;
674 	bytes = 0;
675 	for (i = 0; i != num; i++) {
676 
677 		mb[i]->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
678 		bytes += mb[i]->pkt_len;
679 		if (ol_flags != 0)
680 			rte_security_set_pkt_metadata(ss->security.ctx,
681 				ss->security.ses, mb[i], NULL);
682 	}
683 	ss->sa->statistics.count += num;
684 	ss->sa->statistics.bytes += bytes;
685 }
686 
687 
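/*
 * Calculate the number of TSO segments a packet will be split into;
 * each resulting segment consumes its own ESP sequence number.
 */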
688 static inline int
689 esn_outb_nb_segments(struct rte_mbuf *m)
690 {
691 	if (m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG)) {
692 		uint16_t pkt_l3len = m->pkt_len - m->l2_len;
693 		uint16_t segments =
694 			(m->tso_segsz > 0 && pkt_l3len > m->tso_segsz) ?
695 			(pkt_l3len + m->tso_segsz - 1) / m->tso_segsz : 1;
696 		return segments;
697 	}
698 	return 1; /* no TSO */
699 }
700 
701 /* Compute how many packets can be sent before overflow occurs */
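/*
 * e.g. with n_sqn = 3 and nb_segs = {2, 2, 1} the loop stops after
 * accumulating the second packet (seg_cnt = 4 >= 3), so only the first
 * packet (2 segments) is reported as sendable.
 */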
702 static inline uint16_t
703 esn_outb_nb_valid_packets(uint16_t num, uint32_t n_sqn, uint16_t nb_segs[])
704 {
705 	uint16_t i;
706 	uint32_t seg_cnt = 0;
707 	for (i = 0; i < num && seg_cnt < n_sqn; i++)
708 		seg_cnt += nb_segs[i];
709 	return i - 1;
710 }
711 
712 /*
713  * process group of ESP outbound tunnel packets destined for
714  * INLINE_CRYPTO type of device.
715  */
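/*
 * Note that sqh_len is passed as 0 and the ICV is computed by the
 * inline-crypto capable device; for TSO packets the sequence number is
 * advanced by the number of segments the device will produce.
 */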
716 uint16_t
717 inline_outb_tun_pkt_process(const struct rte_ipsec_session *ss,
718 	struct rte_mbuf *mb[], uint16_t num)
719 {
720 	int32_t rc;
721 	uint32_t i, k, nb_segs_total, n_sqn;
722 	uint64_t sqn;
723 	rte_be64_t sqc;
724 	struct rte_ipsec_sa *sa;
725 	union sym_op_data icv;
726 	uint64_t iv[IPSEC_MAX_IV_QWORD];
727 	uint32_t dr[num];
728 	uint16_t nb_segs[num];
729 
730 	sa = ss->sa;
731 	nb_segs_total = 0;
732 	/* Calculate number of segments */
733 	for (i = 0; i != num; i++) {
734 		nb_segs[i] = esn_outb_nb_segments(mb[i]);
735 		nb_segs_total += nb_segs[i];
736 	}
737 
738 	n_sqn = nb_segs_total;
739 	sqn = esn_outb_update_sqn(sa, &n_sqn);
740 	if (n_sqn != nb_segs_total) {
741 		rte_errno = EOVERFLOW;
742 		/* if there are segmented packets, find out how many can be
743 		 * sent until overflow occurs
744 		 */
745 		if (nb_segs_total > num) /* at least one segmented packet */
746 			num = esn_outb_nb_valid_packets(num, n_sqn, nb_segs);
747 		else
748 			num = n_sqn; /* no segmented packets */
749 	}
750 
751 	k = 0;
752 	for (i = 0; i != num; i++) {
753 
754 		sqc = rte_cpu_to_be_64(sqn);
755 		gen_iv(iv, sqc);
756 		sqn += nb_segs[i];
757 
758 		/* try to update the packet itself */
759 		rc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv, 0,
760 			(mb[i]->ol_flags &
761 			(RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG)) != 0);
762 
763 		k += (rc >= 0);
764 
765 		/* failure, put packet into the death-row */
766 		if (rc < 0) {
767 			dr[i - k] = i;
768 			rte_errno = -rc;
769 		}
770 	}
771 
772 	/* copy not processed mbufs beyond good ones */
773 	if (k != num && k != 0)
774 		move_bad_mbufs(mb, dr, num, num - k);
775 
776 	inline_outb_mbuf_prepare(ss, mb, k);
777 	return k;
778 }
779 
780 /*
781  * process group of ESP outbound transport packets destined for
782  * INLINE_CRYPTO type of device.
783  */
784 uint16_t
785 inline_outb_trs_pkt_process(const struct rte_ipsec_session *ss,
786 	struct rte_mbuf *mb[], uint16_t num)
787 {
788 	int32_t rc;
789 	uint32_t i, k, nb_segs_total, n_sqn;
790 	uint64_t sqn;
791 	rte_be64_t sqc;
792 	struct rte_ipsec_sa *sa;
793 	union sym_op_data icv;
794 	uint64_t iv[IPSEC_MAX_IV_QWORD];
795 	uint32_t dr[num];
796 	uint16_t nb_segs[num];
797 
798 	sa = ss->sa;
799 	nb_segs_total = 0;
800 	/* Calculate number of segments */
801 	for (i = 0; i != num; i++) {
802 		nb_segs[i] = esn_outb_nb_segments(mb[i]);
803 		nb_segs_total += nb_segs[i];
804 	}
805 
806 	n_sqn = nb_segs_total;
807 	sqn = esn_outb_update_sqn(sa, &n_sqn);
808 	if (n_sqn != nb_segs_total) {
809 		rte_errno = EOVERFLOW;
810 		/* if there are segmented packets, find out how many can be
811 		 * sent until overflow occurs
812 		 */
813 		if (nb_segs_total > num) /* at least one segmented packet */
814 			num = esn_outb_nb_valid_packets(num, n_sqn, nb_segs);
815 		else
816 			num = n_sqn; /* no segmented packets */
817 	}
818 
819 	k = 0;
820 	for (i = 0; i != num; i++) {
821 
822 		sqc = rte_cpu_to_be_64(sqn);
823 		gen_iv(iv, sqc);
824 		sqn += nb_segs[i];
825 
826 		/* try to update the packet itself */
827 		rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], &icv, 0,
828 			(mb[i]->ol_flags &
829 			(RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG)) != 0);
830 
831 		k += (rc >= 0);
832 
833 		/* failure, put packet into the death-row */
834 		if (rc < 0) {
835 			dr[i - k] = i;
836 			rte_errno = -rc;
837 		}
838 	}
839 
840 	/* copy not processed mbufs beyond good ones */
841 	if (k != num && k != 0)
842 		move_bad_mbufs(mb, dr, num, num - k);
843 
844 	inline_outb_mbuf_prepare(ss, mb, k);
845 	return k;
846 }
847 
848 /*
849  * outbound for RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
850  * actual processing is done by HW/PMD, just set flags and metadata.
851  */
852 uint16_t
853 inline_proto_outb_pkt_process(const struct rte_ipsec_session *ss,
854 	struct rte_mbuf *mb[], uint16_t num)
855 {
856 	inline_outb_mbuf_prepare(ss, mb, num);
857 	return num;
858 }
859