/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2022 Intel Corporation
 */
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#include <arpa/inet.h>

#include <rte_common.h>
#include <rte_ip.h>
#include <rte_tailq.h>
#include <rte_eal_memconfig.h>
#include <rte_ring.h>
#include <rte_mbuf.h>
#include <rte_cryptodev.h>
#include <rte_ipsec.h>

#include "rte_swx_ipsec.h"

#ifndef RTE_SWX_IPSEC_HUGE_PAGES_DISABLE

#include <rte_malloc.h>

static void *
env_calloc(size_t size, size_t alignment, int numa_node)
{
	return rte_zmalloc_socket(NULL, size, alignment, numa_node);
}

static void
env_free(void *start, size_t size __rte_unused)
{
	rte_free(start);
}

#else

#include <numa.h>

static void *
env_calloc(size_t size, size_t alignment __rte_unused, int numa_node)
{
	void *start;

	if (numa_available() == -1)
		return NULL;

	start = numa_alloc_onnode(size, numa_node);
	if (!start)
		return NULL;

	memset(start, 0, size);
	return start;
}

static void
env_free(void *start, size_t size)
{
	if ((numa_available() == -1) || !start)
		return;

	numa_free(start, size);
}

#endif

#ifndef RTE_SWX_IPSEC_POOL_CACHE_SIZE
#define RTE_SWX_IPSEC_POOL_CACHE_SIZE 256
#endif

/* The crypto device session mempool has its size set to the number of SAs. The mempool API
 * requires the mempool size to be at least 1.5 times the size of the mempool cache.
 */
#define N_SA_MIN (RTE_SWX_IPSEC_POOL_CACHE_SIZE * 1.5)

struct ipsec_sa {
	struct rte_ipsec_session s;
	int valid;
};

struct ipsec_pkts_in {
	struct rte_mbuf *pkts[RTE_SWX_IPSEC_BURST_SIZE_MAX];
	struct ipsec_sa *sa[RTE_SWX_IPSEC_BURST_SIZE_MAX];
	struct rte_ipsec_group groups[RTE_SWX_IPSEC_BURST_SIZE_MAX];
	struct rte_crypto_op *group_cops[RTE_SWX_IPSEC_BURST_SIZE_MAX];
	struct rte_crypto_op *cops[RTE_SWX_IPSEC_BURST_SIZE_MAX];
	uint32_t n_cops;
};

struct ipsec_pkts_out {
	struct rte_crypto_op *cops[RTE_SWX_IPSEC_BURST_SIZE_MAX];
	struct rte_mbuf *group_pkts[RTE_SWX_IPSEC_BURST_SIZE_MAX];
	struct rte_ipsec_group groups[RTE_SWX_IPSEC_BURST_SIZE_MAX];
	struct rte_mbuf *pkts[RTE_SWX_IPSEC_BURST_SIZE_MAX];
	uint32_t n_pkts;
};

struct rte_swx_ipsec {
	/*
	 * Parameters.
	 */

	/* IPsec instance name. */
	char name[RTE_SWX_IPSEC_NAME_SIZE];

	/* Input packet queue. */
	struct rte_ring *ring_in;

	/* Output packet queue. */
	struct rte_ring *ring_out;

	/* Crypto device ID. */
	uint8_t dev_id;

	/* Crypto device queue pair ID. */
	uint16_t qp_id;

	/* Burst sizes. */
	struct rte_swx_ipsec_burst_size bsz;

	/* SA table size. */
	size_t n_sa_max;

	/*
	 * Internals.
	 */
	/* Crypto device buffer pool for sessions. */
	struct rte_mempool *mp_session;

	/* Pre-crypto packets. */
	struct ipsec_pkts_in in;

	/* Post-crypto packets. */
	struct ipsec_pkts_out out;

	/* Crypto device enqueue threshold. */
	uint32_t crypto_wr_threshold;

	/* Packets currently under crypto device processing. */
	uint32_t n_pkts_crypto;

	/* List of free SADB positions. */
	uint32_t *sa_free_id;

	/* Number of elements in the SADB list of free positions. */
	size_t n_sa_free_id;

	/* Allocated memory total size in bytes. */
	size_t total_size;

	/* Flag for registration to the global list of instances. */
	int registered;

	/*
	 * Table memory.
	 */
	uint8_t memory[] __rte_cache_aligned;
};

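/* The SADB is stored at the front of the instance table memory. The SA ID is reduced modulo
 * n_sa_max through masking, which relies on n_sa_max being a power of two (enforced at creation
 * time through rte_align64pow2()).
 */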
static inline struct ipsec_sa *
ipsec_sa_get(struct rte_swx_ipsec *ipsec, uint32_t sa_id)
{
	struct ipsec_sa *sadb = (struct ipsec_sa *)ipsec->memory;

	return &sadb[sa_id & (ipsec->n_sa_max - 1)];
}

/* Global list of instances. */
TAILQ_HEAD(rte_swx_ipsec_list, rte_tailq_entry);

static struct rte_tailq_elem rte_swx_ipsec_tailq = {
	.name = "RTE_SWX_IPSEC",
};

EAL_REGISTER_TAILQ(rte_swx_ipsec_tailq)

struct rte_swx_ipsec *
rte_swx_ipsec_find(const char *name)
{
	struct rte_swx_ipsec_list *ipsec_list;
	struct rte_tailq_entry *te = NULL;

	if (!name ||
	    !name[0] ||
	    (strnlen(name, RTE_SWX_IPSEC_NAME_SIZE) == RTE_SWX_IPSEC_NAME_SIZE))
		return NULL;

	ipsec_list = RTE_TAILQ_CAST(rte_swx_ipsec_tailq.head, rte_swx_ipsec_list);

	rte_mcfg_tailq_read_lock();

	TAILQ_FOREACH(te, ipsec_list, next) {
		struct rte_swx_ipsec *ipsec = (struct rte_swx_ipsec *)te->data;

		if (!strncmp(name, ipsec->name, sizeof(ipsec->name))) {
			rte_mcfg_tailq_read_unlock();
			return ipsec;
		}
	}

	rte_mcfg_tailq_read_unlock();
	return NULL;
}

static int
ipsec_register(struct rte_swx_ipsec *ipsec)
{
	struct rte_swx_ipsec_list *ipsec_list;
	struct rte_tailq_entry *te = NULL;

	ipsec_list = RTE_TAILQ_CAST(rte_swx_ipsec_tailq.head, rte_swx_ipsec_list);

	rte_mcfg_tailq_write_lock();

	TAILQ_FOREACH(te, ipsec_list, next) {
		struct rte_swx_ipsec *elem = (struct rte_swx_ipsec *)te->data;

		if (!strncmp(ipsec->name, elem->name, sizeof(ipsec->name))) {
			rte_mcfg_tailq_write_unlock();
			return -EEXIST;
		}
	}

	te = calloc(1, sizeof(struct rte_tailq_entry));
	if (!te) {
		rte_mcfg_tailq_write_unlock();
		return -ENOMEM;
	}

	te->data = (void *)ipsec;
	TAILQ_INSERT_TAIL(ipsec_list, te, next);
	rte_mcfg_tailq_write_unlock();
	return 0;
}

static void
ipsec_unregister(struct rte_swx_ipsec *ipsec)
{
	struct rte_swx_ipsec_list *ipsec_list;
	struct rte_tailq_entry *te = NULL;

	ipsec_list = RTE_TAILQ_CAST(rte_swx_ipsec_tailq.head, rte_swx_ipsec_list);

	rte_mcfg_tailq_write_lock();

	TAILQ_FOREACH(te, ipsec_list, next) {
		if (te->data == (void *)ipsec) {
			TAILQ_REMOVE(ipsec_list, te, next);
			rte_mcfg_tailq_write_unlock();
			free(te);
			return;
		}
	}

	rte_mcfg_tailq_write_unlock();
}

static void
ipsec_session_free(struct rte_swx_ipsec *ipsec, struct rte_ipsec_session *s);

void
rte_swx_ipsec_free(struct rte_swx_ipsec *ipsec)
{
	size_t i;

	if (!ipsec)
		return;

	/* Remove the current instance from the global list. */
	if (ipsec->registered)
		ipsec_unregister(ipsec);

	/* SADB. */
	for (i = 0; i < ipsec->n_sa_max; i++) {
		struct ipsec_sa *sa = ipsec_sa_get(ipsec, i);

		if (!sa->valid)
			continue;

		/* SA session. */
		ipsec_session_free(ipsec, &sa->s);
	}

	/* Crypto device buffer pools. */
	rte_mempool_free(ipsec->mp_session);

	/* IPsec object memory. */
	env_free(ipsec, ipsec->total_size);
}

int
rte_swx_ipsec_create(struct rte_swx_ipsec **ipsec_out,
		     const char *name,
		     struct rte_swx_ipsec_params *params,
		     int numa_node)
{
	char resource_name[RTE_SWX_IPSEC_NAME_SIZE];
	struct rte_swx_ipsec *ipsec = NULL;
	struct rte_ring *ring_in, *ring_out;
	struct rte_cryptodev_info dev_info;
	size_t n_sa_max, sadb_offset, sadb_size, sa_free_id_offset, sa_free_id_size, total_size, i;
	uint32_t dev_session_size;
	int dev_id, status = 0;

	/* Check input parameters. */
	if (!ipsec_out ||
	    !name ||
	    !name[0] ||
	    (strnlen((name), RTE_SWX_IPSEC_NAME_SIZE) == RTE_SWX_IPSEC_NAME_SIZE) ||
	    !params ||
	    (params->bsz.ring_rd > RTE_SWX_IPSEC_BURST_SIZE_MAX) ||
	    (params->bsz.ring_wr > RTE_SWX_IPSEC_BURST_SIZE_MAX) ||
	    (params->bsz.crypto_wr > RTE_SWX_IPSEC_BURST_SIZE_MAX) ||
	    (params->bsz.crypto_rd > RTE_SWX_IPSEC_BURST_SIZE_MAX) ||
	    !params->n_sa_max) {
		status = -EINVAL;
		goto error;
	}

	ring_in = rte_ring_lookup(params->ring_in_name);
	if (!ring_in) {
		status = -EINVAL;
		goto error;
	}

	ring_out = rte_ring_lookup(params->ring_out_name);
	if (!ring_out) {
		status = -EINVAL;
		goto error;
	}

	dev_id = rte_cryptodev_get_dev_id(params->crypto_dev_name);
	if (dev_id == -1) {
		status = -EINVAL;
		goto error;
	}

	rte_cryptodev_info_get(dev_id, &dev_info);
	if (params->crypto_dev_queue_pair_id >= dev_info.max_nb_queue_pairs) {
		status = -EINVAL;
		goto error;
	}

	/* Memory allocation. */
	n_sa_max = rte_align64pow2(RTE_MAX(params->n_sa_max, N_SA_MIN));

	sadb_offset = sizeof(struct rte_swx_ipsec);
	sadb_size = RTE_CACHE_LINE_ROUNDUP(n_sa_max * sizeof(struct ipsec_sa));

	sa_free_id_offset = sadb_offset + sadb_size;
	sa_free_id_size = RTE_CACHE_LINE_ROUNDUP(n_sa_max * sizeof(uint32_t));

	total_size = sa_free_id_offset + sa_free_id_size;
	ipsec = env_calloc(total_size, RTE_CACHE_LINE_SIZE, numa_node);
	if (!ipsec) {
		status = -ENOMEM;
		goto error;
	}

	/* Initialization. */
	strcpy(ipsec->name, name);
	ipsec->ring_in = ring_in;
	ipsec->ring_out = ring_out;
	ipsec->dev_id = (uint8_t)dev_id;
	ipsec->qp_id = params->crypto_dev_queue_pair_id;
	memcpy(&ipsec->bsz, &params->bsz, sizeof(struct rte_swx_ipsec_burst_size));
	ipsec->n_sa_max = n_sa_max;

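	/* Bursts are enqueued to the crypto device directly (bypassing the staging buffer) only
	 * when the staging buffer is empty and the burst holds at least 3/4 of the crypto write
	 * burst size.
	 */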
	ipsec->crypto_wr_threshold = params->bsz.crypto_wr * 3 / 4;

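	/* The free SADB positions are managed as a LIFO stack that initially holds all the
	 * positions, with SA ID 0 on top.
	 */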
	ipsec->sa_free_id = (uint32_t *)&ipsec->memory[sa_free_id_offset];
	for (i = 0; i < n_sa_max; i++)
		ipsec->sa_free_id[i] = n_sa_max - 1 - i;
	ipsec->n_sa_free_id = n_sa_max;

	ipsec->total_size = total_size;

	/* Crypto device memory pools. */
	dev_session_size = rte_cryptodev_sym_get_private_session_size((uint8_t)dev_id);

	snprintf(resource_name, sizeof(resource_name), "%s_mp", name);
	ipsec->mp_session = rte_cryptodev_sym_session_pool_create(resource_name,
		n_sa_max, /* number of pool elements */
		dev_session_size, /* pool element size */
		RTE_SWX_IPSEC_POOL_CACHE_SIZE, /* pool cache size */
		0, /* pool element private data size */
		numa_node);
	if (!ipsec->mp_session) {
		status = -ENOMEM;
		goto error;
	}

	/* Add the current instance to the global list. */
	status = ipsec_register(ipsec);
	if (status)
		goto error;

	ipsec->registered = 1;

	*ipsec_out = ipsec;
	return 0;

error:
	rte_swx_ipsec_free(ipsec);
	return status;
}

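/* Group consecutive input packets that share the same SA. The packets are not sorted first, so
 * packets with the same SA that are not adjacent within the burst end up in different groups.
 * The caller must provide at least one packet (n_pkts >= 1).
 */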
static inline int
ipsec_sa_group(struct rte_swx_ipsec *ipsec, int n_pkts)
{
	struct ipsec_sa *sa;
	struct rte_ipsec_group *g;
	int n_groups, n_pkts_in_group, i;

	sa = ipsec->in.sa[0];

	g = &ipsec->in.groups[0];
	g->id.ptr = sa;
	g->m = &ipsec->in.pkts[0];
	n_pkts_in_group = 1;
	n_groups = 1;

	for (i = 1; i < n_pkts; i++) {
		struct ipsec_sa *sa_new = ipsec->in.sa[i];

		/* Same SA => Add the current pkt to the same group. */
		if (sa_new == sa) {
			n_pkts_in_group++;
			continue;
		}

		/* Different SA => Close the current group & add the current pkt to a new group. */
		g->cnt = n_pkts_in_group;
		sa = sa_new;

		g++;
		g->id.ptr = sa;
		g->m = &ipsec->in.pkts[i];
		n_pkts_in_group = 1;
		n_groups++;
	}

	/* Close the last group. */
	g->cnt = n_pkts_in_group;

	return n_groups;
}

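/* Stage the crypto ops of the current group into the in.cops buffer and flush this buffer to the
 * crypto device queue pair each time it fills up to a complete burst (bsz.crypto_wr). The packets
 * of any crypto ops refused by the device are dropped.
 */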
static inline void
ipsec_crypto_enqueue(struct rte_swx_ipsec *ipsec, uint16_t n_cops)
{
	struct rte_crypto_op **dst0 = ipsec->in.cops, **dst;
	struct rte_crypto_op **src = ipsec->in.group_cops;

	uint32_t n_pkts_crypto = ipsec->n_pkts_crypto;
	uint32_t n_dst = ipsec->in.n_cops;
	uint32_t n_dst_max = ipsec->bsz.crypto_wr;
	uint32_t n_dst_avail = n_dst_max - n_dst;
	uint32_t n_src = n_cops;
	uint32_t i;

	dst = &dst0[n_dst];

	/* Shortcut: If no elements in DST and enough elements in SRC, then simply use SRC directly
	 * instead of moving the SRC to DST first and then using DST.
	 */
	if (!n_dst && n_src >= ipsec->crypto_wr_threshold) {
		uint16_t n_ok;

		n_ok = rte_cryptodev_enqueue_burst(ipsec->dev_id, ipsec->qp_id, src, n_src);
		ipsec->n_pkts_crypto = n_pkts_crypto + n_ok;

		for (i = n_ok; i < n_src; i++) {
			struct rte_crypto_op *cop = src[i];
			struct rte_mbuf *m = cop->sym->m_src;

			rte_pktmbuf_free(m);
		}

		return;
	}

	/* Move from SRC to DST. Every time DST gets full, send burst from DST. */
	for ( ; n_src >= n_dst_avail; ) {
		uint32_t n_ok;

		/* Move from SRC to DST. */
		for (i = 0; i < n_dst_avail; i++)
			*dst++ = *src++;

		n_src -= n_dst_avail;

		/* DST full: send burst from DST. */
		n_ok = rte_cryptodev_enqueue_burst(ipsec->dev_id, ipsec->qp_id, dst0, n_dst_max);
		n_pkts_crypto += n_ok;

		for (i = n_ok ; i < n_dst_max; i++) {
			struct rte_crypto_op *cop = dst0[i];
			struct rte_mbuf *m = cop->sym->m_src;

			rte_pktmbuf_free(m);
		}

		/* Next iteration. */
		dst = dst0;
		n_dst = 0;
		n_dst_avail = n_dst_max;
	}

	ipsec->n_pkts_crypto = n_pkts_crypto;

	/* Move from SRC to DST. Not enough elements in SRC to get DST full. */
	for (i = 0; i < n_src; i++)
		*dst++ = *src++;

	n_dst += n_src;

	ipsec->in.n_cops = n_dst;
}

/**
 * Packet buffer anatomy:
 *
 * +----------+---------+--------------------------------------------------------------------------+
 * | Offset   | Size    | Description                                                              |
 * | (Byte #) | (Bytes) |                                                                          |
 * +==========+=========+==========================================================================+
 * | 0        | 128     | Meta-data: struct rte_mbuf.                                              |
 * |          |         | The buf_addr field points to the start of the packet section.           |
 * +----------+---------+--------------------------------------------------------------------------+
 * | 128      | 128     | Meta-data: struct ipsec_mbuf (see below).                                |
 * +----------+---------+--------------------------------------------------------------------------+
 * | 256      |         | Packet section.                                                          |
 * |          |         | The first packet byte is placed at the offset indicated by the struct   |
 * |          |         | rte_mbuf::data_off field relative to the start of the packet section.   |
 * +----------+---------+--------------------------------------------------------------------------+
 */
struct ipsec_mbuf {
	struct ipsec_sa *sa;
	struct rte_crypto_op cop;
	struct rte_crypto_sym_op sym_cop;
	uint8_t buffer[32]; /* The crypto IV is placed here. */
};
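
/* In the pre-crypto stage, rte_mbuf_to_priv() returns a pointer to this structure, as it lives in
 * the mbuf private data area placed right after struct rte_mbuf (see the buffer anatomy above).
 * The mbuf pool feeding the input ring must therefore reserve at least sizeof(struct ipsec_mbuf)
 * bytes of private data per buffer.
 */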

/* Offset from the start of the struct ipsec_mbuf::cop where the crypto IV will be placed. */
#define IV_OFFSET (sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op))

#define META_LENGTH sizeof(struct rte_swx_ipsec_input_packet_metadata)

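/* Pre-crypto stage: dequeue a burst of packets from the input ring, resolve the SA of each packet
 * from the SA ID carried by the meta-data header prepended to the packet, strip this meta-data,
 * group consecutive packets by SA, prepare one crypto op per packet and enqueue the crypto ops to
 * the crypto device.
 */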
static inline void
rte_swx_ipsec_pre_crypto(struct rte_swx_ipsec *ipsec)
{
	int n_pkts, n_groups, i;

	/* Read packets from the input ring. */
	n_pkts = rte_ring_sc_dequeue_burst(ipsec->ring_in,
					   (void **)ipsec->in.pkts,
					   ipsec->bsz.ring_rd,
					   NULL);
	if (!n_pkts)
		return;

	/* Get the SA for each packet. */
	for (i = 0; i < n_pkts; i++) {
		struct rte_mbuf *m = ipsec->in.pkts[i];
		struct rte_swx_ipsec_input_packet_metadata *meta;
		struct rte_ipv4_hdr *ipv4_hdr;
		uint32_t sa_id;

		meta = rte_pktmbuf_mtod(m, struct rte_swx_ipsec_input_packet_metadata *);
		ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *, META_LENGTH);

		/* Read the SA ID from the IPsec meta-data placed at the front of the IP packet. */
		sa_id = ntohl(meta->sa_id);

		/* Consume the IPsec meta-data. */
		m->data_off += META_LENGTH;
		m->data_len -= META_LENGTH;
		m->pkt_len -= META_LENGTH;

		/* Set the fields required by the IPsec library. */
		m->l2_len = 0;
		m->l3_len = (ipv4_hdr->version_ihl >> 4 == 4) ?
			sizeof(struct rte_ipv4_hdr) :
			sizeof(struct rte_ipv6_hdr);

		/* Get the SA. */
		ipsec->in.sa[i] = ipsec_sa_get(ipsec, sa_id);
	}

	/* Group packets that share the same SA. */
	n_groups = ipsec_sa_group(ipsec, n_pkts);

	/* Write each group of packets sharing the same SA to the crypto device. */
	for (i = 0; i < n_groups; i++) {
		struct rte_ipsec_group *g = &ipsec->in.groups[i];
		struct ipsec_sa *sa = g->id.ptr;
		struct rte_ipsec_session *s = &sa->s;
		uint32_t j;
		uint16_t n_pkts_ok;

		/* Prepare the crypto ops for the current group. */
		for (j = 0; j < g->cnt; j++) {
			struct rte_mbuf *m = g->m[j];
			struct ipsec_mbuf *priv = rte_mbuf_to_priv(m);

			priv->sa = sa;
			ipsec->in.group_cops[j] = &priv->cop;
		}

		n_pkts_ok = rte_ipsec_pkt_crypto_prepare(s, g->m, ipsec->in.group_cops, g->cnt);

		for (j = n_pkts_ok; j < g->cnt; j++) {
			struct rte_mbuf *m = g->m[j];

			rte_pktmbuf_free(m);
		}

		/* Write the crypto ops of the current group to the crypto device. */
		ipsec_crypto_enqueue(ipsec, n_pkts_ok);
	}
}

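/* Stage the post-crypto packets of the current group into the out.pkts buffer and flush this
 * buffer to the output ring each time it fills up to a complete burst (bsz.ring_wr). The packets
 * that do not fit into the output ring are dropped.
 */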
static inline void
ipsec_ring_enqueue(struct rte_swx_ipsec *ipsec, struct rte_ipsec_group *g, uint32_t n_pkts)
{
	struct rte_mbuf **dst0 = ipsec->out.pkts, **dst;
	struct rte_mbuf **src = g->m;

	uint32_t n_dst = ipsec->out.n_pkts;
	uint32_t n_dst_max = ipsec->bsz.ring_wr;
	uint32_t n_dst_avail = n_dst_max - n_dst;
	uint32_t n_src = n_pkts;
	uint32_t i;

	dst = &dst0[n_dst];

	/* Move from SRC to DST. Every time DST gets full, send burst from DST. */
	for ( ; n_src >= n_dst_avail; ) {
		uint32_t n_ok;

		/* Move from SRC to DST. */
		for (i = 0; i < n_dst_avail; i++)
			*dst++ = *src++;

		n_src -= n_dst_avail;

		/* DST full: send burst from DST. */
		n_ok = rte_ring_sp_enqueue_burst(ipsec->ring_out, (void **)dst0, n_dst_max, NULL);

		for (i = n_ok ; i < n_dst_max; i++) {
			struct rte_mbuf *m = dst0[i];

			rte_pktmbuf_free(m);
		}

		/* Next iteration. */
		dst = dst0;
		n_dst = 0;
		n_dst_avail = n_dst_max;
	}

	/* Move from SRC to DST. Not enough elements in SRC to get DST full. */
	for (i = 0; i < n_src; i++)
		*dst++ = *src++;

	n_dst += n_src;

	ipsec->out.n_pkts = n_dst;
}

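/* Post-crypto stage: dequeue completed crypto ops from the crypto device, group them by SA
 * session, apply the IPsec library post-processing to each group and enqueue the resulting
 * packets to the output ring.
 */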
static inline void
rte_swx_ipsec_post_crypto(struct rte_swx_ipsec *ipsec)
{
	uint32_t n_pkts_crypto = ipsec->n_pkts_crypto, n_pkts, ng, i;

	/* Read the crypto ops from the crypto device. */
	if (!n_pkts_crypto)
		return;

	n_pkts = rte_cryptodev_dequeue_burst(ipsec->dev_id,
					     ipsec->qp_id,
					     ipsec->out.cops,
					     ipsec->bsz.crypto_rd);
	if (!n_pkts)
		return;

	ipsec->n_pkts_crypto = n_pkts_crypto - n_pkts;

	/* Group packets that share the same SA. */
	ng = rte_ipsec_pkt_crypto_group((const struct rte_crypto_op **)(uintptr_t)ipsec->out.cops,
					      ipsec->out.group_pkts,
					      ipsec->out.groups,
					      n_pkts);

	/* Perform post-crypto IPsec processing for each group of packets that share the same SA.
	 * Write each group of packets to the output ring.
	 */
	for (i = 0, n_pkts = 0; i < ng; i++) {
		struct rte_ipsec_group *g = &ipsec->out.groups[i];
		struct rte_ipsec_session *s = g->id.ptr;
		uint32_t n_pkts_ok, j;

		/* Perform post-crypto IPsec processing for the current group. */
		n_pkts_ok = rte_ipsec_pkt_process(s, g->m, g->cnt);

		for (j = n_pkts_ok; j < g->cnt; j++) {
			struct rte_mbuf *m = g->m[j];

			rte_pktmbuf_free(m);
		}

		/* Write the packets of the current group to the output ring. */
		ipsec_ring_enqueue(ipsec, g, n_pkts_ok);
	}
}

void
rte_swx_ipsec_run(struct rte_swx_ipsec *ipsec)
{
	rte_swx_ipsec_pre_crypto(ipsec);
	rte_swx_ipsec_post_crypto(ipsec);
}
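
/* Usage sketch (assuming a data plane thread that owns this IPsec instance and a hypothetical
 * stop flag):
 *
 *	while (!stop)
 *		rte_swx_ipsec_run(ipsec);
 *
 * Each call executes a single pre-crypto and post-crypto iteration, so the function must be
 * invoked repeatedly for packets to make forward progress.
 */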

/**
 * IPsec Control Plane API
 */
struct cipher_alg {
	const char *name;
	enum rte_crypto_cipher_algorithm alg;
	uint32_t iv_size;
	uint32_t block_size;
	uint32_t key_size;
};

struct auth_alg {
	const char *name;
	enum rte_crypto_auth_algorithm alg;
	uint32_t iv_size;
	uint32_t digest_size;
	uint32_t key_size;
};

struct aead_alg {
	const char *name;
	enum rte_crypto_aead_algorithm alg;
	uint32_t iv_size;
	uint32_t block_size;
	uint32_t digest_size;
	uint32_t key_size;
	uint32_t aad_size;
};

static struct cipher_alg cipher_algs[] = {
	[0] = {
		.name = "null",
		.alg = RTE_CRYPTO_CIPHER_NULL,
		.iv_size = 0,
		.block_size = 4,
		.key_size = 0,
	},

	[1] = {
		.name = "aes-cbc-128",
		.alg = RTE_CRYPTO_CIPHER_AES_CBC,
		.iv_size = 16,
		.block_size = 16,
		.key_size = 16,
	},

	[2] = {
		.name = "aes-cbc-192",
		.alg = RTE_CRYPTO_CIPHER_AES_CBC,
		.iv_size = 16,
		.block_size = 16,
		.key_size = 24,
	},

	[3] = {
		.name = "aes-cbc-256",
		.alg = RTE_CRYPTO_CIPHER_AES_CBC,
		.iv_size = 16,
		.block_size = 16,
		.key_size = 32,
	},

	[4] = {
		.name = "aes-ctr-128",
		.alg = RTE_CRYPTO_CIPHER_AES_CTR,
		.iv_size = 8,
		.block_size = 4,
		.key_size = 20,
	},

	[5] = {
		.name = "aes-ctr-192",
		.alg = RTE_CRYPTO_CIPHER_AES_CTR,
		.iv_size = 16,
		.block_size = 16,
		.key_size = 28,
	},

	[6] = {
		.name = "aes-ctr-256",
		.alg = RTE_CRYPTO_CIPHER_AES_CTR,
		.iv_size = 16,
		.block_size = 16,
		.key_size = 36,
	},

	[7] = {
		.name = "3des-cbc",
		.alg = RTE_CRYPTO_CIPHER_3DES_CBC,
		.iv_size = 8,
		.block_size = 8,
		.key_size = 24,
	},

	[8] = {
		.name = "des-cbc",
		.alg = RTE_CRYPTO_CIPHER_DES_CBC,
		.iv_size = 8,
		.block_size = 8,
		.key_size = 8,
	},
};

static struct auth_alg auth_algs[] = {
	[0] = {
		.name = "null",
		.alg = RTE_CRYPTO_AUTH_NULL,
		.iv_size = 0,
		.digest_size = 0,
		.key_size = 0,
	},

	[1] = {
		.name = "sha1-hmac",
		.alg = RTE_CRYPTO_AUTH_SHA1_HMAC,
		.iv_size = 0,
		.digest_size = 12,
		.key_size = 20,
	},

	[2] = {
		.name = "sha256-hmac",
		.alg = RTE_CRYPTO_AUTH_SHA256_HMAC,
		.iv_size = 0,
		.digest_size = 16,
		.key_size = 32,
	},

	[3] = {
		.name = "sha384-hmac",
		.alg = RTE_CRYPTO_AUTH_SHA384_HMAC,
		.iv_size = 0,
		.digest_size = 24,
		.key_size = 48,
	},

	[4] = {
		.name = "sha512-hmac",
		.alg = RTE_CRYPTO_AUTH_SHA512_HMAC,
		.iv_size = 0,
		.digest_size = 32,
		.key_size = 64,
	},

	[5] = {
		.name = "aes-gmac",
		.alg = RTE_CRYPTO_AUTH_AES_GMAC,
		.iv_size = 8,
		.digest_size = 16,
		.key_size = 20,
	},

	[6] = {
		.name = "aes-xcbc-mac-96",
		.alg = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
		.iv_size = 0,
		.digest_size = 12,
		.key_size = 16,
	},
};

static struct aead_alg aead_algs[] = {
	[0] = {
		.name = "aes-gcm-128",
		.alg = RTE_CRYPTO_AEAD_AES_GCM,
		.iv_size = 8,
		.block_size = 4,
		.key_size = 20,
		.digest_size = 16,
		.aad_size = 8,
	},

	[1] = {
		.name = "aes-gcm-192",
		.alg = RTE_CRYPTO_AEAD_AES_GCM,
		.iv_size = 8,
		.block_size = 4,
		.key_size = 28,
		.digest_size = 16,
		.aad_size = 8,
	},

	[2] = {
		.name = "aes-gcm-256",
		.alg = RTE_CRYPTO_AEAD_AES_GCM,
		.iv_size = 8,
		.block_size = 4,
		.key_size = 36,
		.digest_size = 16,
		.aad_size = 8,
	},

	[3] = {
		.name = "aes-ccm-128",
		.alg = RTE_CRYPTO_AEAD_AES_CCM,
		.iv_size = 8,
		.block_size = 4,
		.key_size = 20,
		.digest_size = 16,
		.aad_size = 8,
	},

	[4] = {
		.name = "aes-ccm-192",
		.alg = RTE_CRYPTO_AEAD_AES_CCM,
		.iv_size = 8,
		.block_size = 4,
		.key_size = 28,
		.digest_size = 16,
		.aad_size = 8,
	},

	[5] = {
		.name = "aes-ccm-256",
		.alg = RTE_CRYPTO_AEAD_AES_CCM,
		.iv_size = 8,
		.block_size = 4,
		.key_size = 36,
		.digest_size = 16,
		.aad_size = 8,
	},

	[6] = {
		.name = "chacha20-poly1305",
		.alg = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
		.iv_size = 12,
		.block_size = 64,
		.key_size = 36,
		.digest_size = 16,
		.aad_size = 8,
	},
};

static struct cipher_alg *
cipher_alg_find(const char *name)
{
	size_t i;

	for (i = 0; i < RTE_DIM(cipher_algs); i++) {
		struct cipher_alg *alg = &cipher_algs[i];

		if (!strcmp(name, alg->name))
			return alg;
	}

	return NULL;
}

static struct cipher_alg *
cipher_alg_find_by_id(enum rte_crypto_cipher_algorithm alg_id, uint32_t key_size)
{
	size_t i;

	for (i = 0; i < RTE_DIM(cipher_algs); i++) {
		struct cipher_alg *alg = &cipher_algs[i];

		if (alg->alg == alg_id && alg->key_size == key_size)
			return alg;
	}

	return NULL;
}

static struct auth_alg *
auth_alg_find(const char *name)
{
	size_t i;

	for (i = 0; i < RTE_DIM(auth_algs); i++) {
		struct auth_alg *alg = &auth_algs[i];

		if (!strcmp(name, alg->name))
			return alg;
	}

	return NULL;
}

static struct auth_alg *
auth_alg_find_by_id(enum rte_crypto_auth_algorithm alg_id, uint32_t key_size)
{
	size_t i;

	for (i = 0; i < RTE_DIM(auth_algs); i++) {
		struct auth_alg *alg = &auth_algs[i];

		if (alg->alg == alg_id && alg->key_size == key_size)
			return alg;
	}

	return NULL;
}

static struct aead_alg *
aead_alg_find(const char *name)
{
	size_t i;

	for (i = 0; i < RTE_DIM(aead_algs); i++) {
		struct aead_alg *alg = &aead_algs[i];

		if (!strcmp(name, alg->name))
			return alg;
	}

	return NULL;
}

static struct aead_alg *
aead_alg_find_by_id(enum rte_crypto_aead_algorithm alg_id, uint32_t key_size)
{
	size_t i;

	for (i = 0; i < RTE_DIM(aead_algs); i++) {
		struct aead_alg *alg = &aead_algs[i];

		if (alg->alg == alg_id && alg->key_size == key_size)
			return alg;
	}

	return NULL;
}

static int
char_to_hex(char c, uint8_t *val)
{
	if (c >= '0' && c <= '9') {
		*val = c - '0';
		return 0;
	}

	if (c >= 'A' && c <= 'F') {
		*val = c - 'A' + 10;
		return 0;
	}

	if (c >= 'a' && c <= 'f') {
		*val = c - 'a' + 10;
		return 0;
	}

	return -EINVAL;
}

static int
hex_string_parse(char *src, uint8_t *dst, uint32_t n_dst_bytes)
{
	uint32_t i;

	/* Check input arguments. */
	if (!src || !src[0] || !dst || !n_dst_bytes)
		return -EINVAL;

	/* Skip any leading "0x" or "0X" in the src string. */
	if ((src[0] == '0') && (src[1] == 'x' || src[1] == 'X'))
		src += 2;

	/* Convert each group of two hex characters in the src string to one byte in dst array. */
	for (i = 0; i < n_dst_bytes; i++) {
		uint8_t a, b;
		int status;

		status = char_to_hex(*src, &a);
		if (status)
			return status;
		src++;

		status = char_to_hex(*src, &b);
		if (status)
			return status;
		src++;

		dst[i] = a * 16 + b;
	}

	/* Check for the end of the src string. */
	if (*src)
		return -EINVAL;

	return 0;
}

static int
token_is_comment(const char *token)
{
	if ((token[0] == '#') ||
	    (token[0] == ';') ||
	    ((token[0] == '/') && (token[1] == '/')))
		return 1; /* TRUE. */

	return 0; /* FALSE. */
}

#define MAX_TOKENS 64

#define CHECK(condition, msg)          \
do {                                   \
	if (!(condition)) {            \
		if (errmsg)            \
			*errmsg = msg; \
		goto error;            \
	}                              \
} while (0)

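/* The SA parameters are specified as a single text string conforming to the following grammar,
 * as implemented by the parser below (note that the IPv4 tunnel addresses are read as numbers
 * through strtoul(), not as dotted-quad strings):
 *
 *	<sa> ::= encrypt | decrypt
 *		 cipher <name> [key <hex>] auth <name> [key <hex>] | aead <name> key <hex>
 *		 esp spi <uint32>
 *		 tunnel ipv4 srcaddr <uint32> dstaddr <uint32> |
 *		 tunnel ipv6 srcaddr <hex> dstaddr <hex> |
 *		 transport
 *
 * Example (the key value is illustrative only; for aes-gcm-128 the key is 16 key bytes plus
 * 4 salt bytes, i.e. 20 bytes in total):
 *
 *	encrypt aead aes-gcm-128 key 0x000102030405060708090a0b0c0d0e0f10111213
 *		esp spi 100 tunnel ipv4 srcaddr 0x0a000001 dstaddr 0x0a000002
 */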
struct rte_swx_ipsec_sa_params *
rte_swx_ipsec_sa_read(struct rte_swx_ipsec *ipsec __rte_unused,
		      const char *string,
		      int *is_blank_or_comment,
		      const char **errmsg)
{
	char *token_array[MAX_TOKENS], **t;
	struct rte_swx_ipsec_sa_params *p = NULL;
	char *s0 = NULL, *s;
	uint32_t n_tokens = 0;
	int blank_or_comment = 0;

	/* Check input arguments. */
	CHECK(string && string[0], "NULL input");

	/* Memory allocation. */
	s0 = strdup(string);
	p = calloc(1, sizeof(struct rte_swx_ipsec_sa_params));
	CHECK(s0 && p, "Not enough memory");

	/* Parse the string into tokens. */
	for (s = s0; ; ) {
		char *token;

		token = strtok_r(s, " \f\n\r\t\v", &s);
		if (!token || token_is_comment(token))
			break;

		CHECK(n_tokens < RTE_DIM(token_array), "Too many tokens");

		token_array[n_tokens] = token;
		n_tokens++;
	}

	t = token_array;
	if (!n_tokens) {
		blank_or_comment = 1;
		goto error;
	}

	/*
	 * Crypto operation.
	 */
	if (!strcmp(t[0], "encrypt"))
		p->encrypt = 1;
	else if (!strcmp(t[0], "decrypt"))
		p->encrypt = 0;
	else
		CHECK(0, "Missing \"encrypt\"/\"decrypt\" keyword");

	t++;
	n_tokens--;

	/*
	 * Crypto parameters.
	 */
	CHECK(n_tokens >= 2, "Not enough tokens");

	if (!strcmp(t[0], "cipher")) {
		struct cipher_alg *cipher_alg;
		struct auth_alg *auth_alg;
		uint32_t key_size;

		p->crypto.is_aead = 0;

		/* cipher. */
		cipher_alg = cipher_alg_find(t[1]);
		CHECK(cipher_alg, "Unsupported cipher algorithm");

		key_size = cipher_alg->key_size;
		p->crypto.cipher_auth.cipher.alg = cipher_alg->alg;
		p->crypto.cipher_auth.cipher.key_size = key_size;

		t += 2;
		n_tokens -= 2;

		if (key_size) {
			int status;

			CHECK(n_tokens >= 2, "Not enough tokens");
			CHECK(!strcmp(t[0], "key"), "Missing cipher \"key\" keyword");
			CHECK(key_size <= RTE_DIM(p->crypto.cipher_auth.cipher.key),
				"Cipher algorithm key too big");

			status = hex_string_parse(t[1], p->crypto.cipher_auth.cipher.key, key_size);
			CHECK(!status, "Cipher key invalid format");

			t += 2;
			n_tokens -= 2;
		}

		/* authentication. */
		CHECK(n_tokens >= 2, "Not enough tokens");
		CHECK(!strcmp(t[0], "auth"), "Missing \"auth\" keyword");

		auth_alg = auth_alg_find(t[1]);
		CHECK(auth_alg, "Unsupported authentication algorithm");

		key_size = auth_alg->key_size;
		p->crypto.cipher_auth.auth.alg = auth_alg->alg;
		p->crypto.cipher_auth.auth.key_size = key_size;

		t += 2;
		n_tokens -= 2;

		if (key_size) {
			int status;

			CHECK(n_tokens >= 2, "Not enough tokens");
			CHECK(!strcmp(t[0], "key"), "Missing authentication \"key\" keyword");
			CHECK(key_size <= RTE_DIM(p->crypto.cipher_auth.auth.key),
				"Authentication algorithm key too big");

			status = hex_string_parse(t[1], p->crypto.cipher_auth.auth.key, key_size);
			CHECK(!status, "Authentication key invalid format");

			t += 2;
			n_tokens -= 2;
		}
	} else if (!strcmp(t[0], "aead")) {
		struct aead_alg *alg;
		uint32_t key_size;
		int status;

		p->crypto.is_aead = 1;

		CHECK(n_tokens >= 4, "Not enough tokens");
		alg = aead_alg_find(t[1]);
		CHECK(alg, "Unsupported AEAD algorithm");

		key_size = alg->key_size;
		p->crypto.aead.alg = alg->alg;
		p->crypto.aead.key_size = key_size;

		CHECK(!strcmp(t[2], "key"), "Missing AEAD \"key\" keyword");
		CHECK(key_size <= RTE_DIM(p->crypto.aead.key),
			"AEAD algorithm key too big");

		status = hex_string_parse(t[3], p->crypto.aead.key, key_size);
		CHECK(!status, "AEAD key invalid format");

		t += 4;
		n_tokens -= 4;
	} else
		CHECK(0, "Missing \"cipher\"/\"aead\" keyword");

	/*
	 * Packet encapsulation parameters.
	 */
	CHECK(n_tokens >= 4, "Not enough tokens");
	CHECK(!strcmp(t[0], "esp"), "Missing \"esp\" keyword");
	CHECK(!strcmp(t[1], "spi"), "Missing \"spi\" keyword");

	p->encap.esp.spi = strtoul(t[2], &t[2], 0);
	CHECK(!t[2][0], "ESP SPI field invalid format");

	t += 3;
	n_tokens -= 3;

	if (!strcmp(t[0], "tunnel")) {
		p->encap.tunnel_mode = 1;

		CHECK(n_tokens >= 6, "Not enough tokens");

		if (!strcmp(t[1], "ipv4")) {
			uint32_t addr;

			p->encap.tunnel_ipv4 = 1;

			CHECK(!strcmp(t[2], "srcaddr"), "Missing \"srcaddr\" keyword");

			addr = strtoul(t[3], &t[3], 0);
			CHECK(!t[3][0], "Tunnel IPv4 source address invalid format");
			p->encap.tunnel.ipv4.src_addr.s_addr = htonl(addr);

			CHECK(!strcmp(t[4], "dstaddr"), "Missing \"dstaddr\" keyword");

			addr = strtoul(t[5], &t[5], 0);
			CHECK(!t[5][0], "Tunnel IPv4 destination address invalid format");
			p->encap.tunnel.ipv4.dst_addr.s_addr = htonl(addr);

			t += 6;
			n_tokens -= 6;
		} else if (!strcmp(t[1], "ipv6")) {
			int status;

			p->encap.tunnel_ipv4 = 0;

			CHECK(!strcmp(t[2], "srcaddr"), "Missing \"srcaddr\" keyword");

			status = hex_string_parse(t[3],
						  p->encap.tunnel.ipv6.src_addr.s6_addr,
						  16);
			CHECK(!status, "Tunnel IPv6 source address invalid format");

			CHECK(!strcmp(t[4], "dstaddr"), "Missing \"dstaddr\" keyword");

			status = hex_string_parse(t[5],
						  p->encap.tunnel.ipv6.dst_addr.s6_addr,
						  16);
			CHECK(!status, "Tunnel IPv6 destination address invalid format");

			t += 6;
			n_tokens -= 6;
		} else
			CHECK(0, "Missing \"ipv4\"/\"ipv6\" keyword");
	} else if (!strcmp(t[0], "transport")) {
		p->encap.tunnel_mode = 0;

		t++;
		n_tokens--;
	} else
		CHECK(0, "Missing \"tunnel\"/\"transport\" keyword");

	/*
	 * Any other parameters.
	 */
	CHECK(!n_tokens, "Unexpected trailing tokens");

	free(s0);
	return p;

error:
	free(p);
	free(s0);
	if (is_blank_or_comment)
		*is_blank_or_comment = blank_or_comment;
	return NULL;
}

static void
tunnel_ipv4_header_set(struct rte_ipv4_hdr *h, struct rte_swx_ipsec_sa_params *p)
{
	struct rte_ipv4_hdr ipv4_hdr = {
		.version_ihl = 0x45,
		.type_of_service = 0,
		.total_length = 0, /* Cannot be pre-computed. */
		.packet_id = 0,
		.fragment_offset = 0,
		.time_to_live = 64,
		.next_proto_id = IPPROTO_ESP,
		.hdr_checksum = 0, /* Cannot be pre-computed. */
		.src_addr = p->encap.tunnel.ipv4.src_addr.s_addr,
		.dst_addr = p->encap.tunnel.ipv4.dst_addr.s_addr,
	};

	memcpy(h, &ipv4_hdr, sizeof(ipv4_hdr));
}

static void
tunnel_ipv6_header_set(struct rte_ipv6_hdr *h, struct rte_swx_ipsec_sa_params *p)
{
	struct rte_ipv6_hdr ipv6_hdr = {
		.vtc_flow = 0x60000000,
		.payload_len = 0, /* Cannot be pre-computed. */
		.proto = IPPROTO_ESP,
		.hop_limits = 64,
		.src_addr = {0},
		.dst_addr = {0},
	};

	memcpy(h, &ipv6_hdr, sizeof(ipv6_hdr));
	memcpy(h->src_addr, p->encap.tunnel.ipv6.src_addr.s6_addr, 16);
	memcpy(h->dst_addr, p->encap.tunnel.ipv6.dst_addr.s6_addr, 16);
}

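/* For the AEAD and AES-CTR algorithms, the last 4 bytes of the user-configured key are used as
 * the implicit salt (hence the key sizes in the tables above, which are 4 bytes larger than the
 * nominal key sizes); the same convention applies to the AES-GMAC authentication key. For the
 * CBC ciphers, a random salt is generated instead.
 */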
/* IPsec library SA parameters. */
static struct rte_crypto_sym_xform *
crypto_xform_get(struct rte_swx_ipsec_sa_params *p,
		struct rte_crypto_sym_xform *xform,
		uint32_t *salt_out)
{
	if (p->crypto.is_aead) {
		struct aead_alg *alg;
		uint32_t key_size, salt, iv_length;

		alg = aead_alg_find_by_id(p->crypto.aead.alg, p->crypto.aead.key_size);
		if (!alg)
			return NULL;

		/* salt and salt-related key size adjustment. */
		key_size = p->crypto.aead.key_size - 4;
		memcpy(&salt, &p->crypto.aead.key[key_size], 4);

		/* IV length. */
		iv_length = 12;
		if (p->crypto.aead.alg == RTE_CRYPTO_AEAD_AES_CCM)
			iv_length = 11;

		/* xform. */
		xform[0].type = RTE_CRYPTO_SYM_XFORM_AEAD;
		xform[0].aead.op = p->encrypt ?
			RTE_CRYPTO_AEAD_OP_ENCRYPT :
			RTE_CRYPTO_AEAD_OP_DECRYPT;
		xform[0].aead.algo = p->crypto.aead.alg;
		xform[0].aead.key.data = p->crypto.aead.key;
		xform[0].aead.key.length = key_size;
		xform[0].aead.iv.offset = IV_OFFSET;
		xform[0].aead.iv.length = iv_length;
		xform[0].aead.digest_length = alg->digest_size;
		xform[0].aead.aad_length = alg->aad_size;
		xform[0].next = NULL;

		*salt_out = salt;
		return &xform[0];
	} else {
		struct cipher_alg *cipher_alg;
		struct auth_alg *auth_alg;
		uint32_t cipher_key_size, auth_key_size, salt, auth_iv_length;

		cipher_alg = cipher_alg_find_by_id(p->crypto.cipher_auth.cipher.alg,
						   p->crypto.cipher_auth.cipher.key_size);
		if (!cipher_alg)
			return NULL;

		auth_alg = auth_alg_find_by_id(p->crypto.cipher_auth.auth.alg,
					       p->crypto.cipher_auth.auth.key_size);
		if (!auth_alg)
			return NULL;

		/* salt and salt-related key size adjustment. */
		cipher_key_size = p->crypto.cipher_auth.cipher.key_size;
		auth_key_size = p->crypto.cipher_auth.auth.key_size;

		switch (p->crypto.cipher_auth.cipher.alg) {
		case RTE_CRYPTO_CIPHER_AES_CBC:
		case RTE_CRYPTO_CIPHER_3DES_CBC:
			salt = (uint32_t)rand();
			break;

		case RTE_CRYPTO_CIPHER_AES_CTR:
			cipher_key_size -= 4;
			memcpy(&salt, &p->crypto.cipher_auth.cipher.key[cipher_key_size], 4);
			break;

		default:
			salt = 0;
		}

		if (p->crypto.cipher_auth.auth.alg == RTE_CRYPTO_AUTH_AES_GMAC) {
			auth_key_size -= 4;
			memcpy(&salt, &p->crypto.cipher_auth.auth.key[auth_key_size], 4);
		}

		/* IV length. */
		auth_iv_length = cipher_alg->iv_size;
		if (p->crypto.cipher_auth.auth.alg == RTE_CRYPTO_AUTH_AES_GMAC)
			auth_iv_length = 12;

		/* xform. */
		if (p->encrypt) {
			xform[0].type = RTE_CRYPTO_SYM_XFORM_CIPHER;
			xform[0].cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
			xform[0].cipher.algo = p->crypto.cipher_auth.cipher.alg;
			xform[0].cipher.key.data = p->crypto.cipher_auth.cipher.key;
			xform[0].cipher.key.length = cipher_key_size;
			xform[0].cipher.iv.offset = IV_OFFSET;
			xform[0].cipher.iv.length = cipher_alg->iv_size;
			xform[0].cipher.dataunit_len = 0;
			xform[0].next = &xform[1];

			xform[1].type = RTE_CRYPTO_SYM_XFORM_AUTH;
			xform[1].auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
			xform[1].auth.algo = p->crypto.cipher_auth.auth.alg;
			xform[1].auth.key.data = p->crypto.cipher_auth.auth.key;
			xform[1].auth.key.length = auth_key_size;
			xform[1].auth.iv.offset = IV_OFFSET;
			xform[1].auth.iv.length = auth_iv_length;
			xform[1].auth.digest_length = auth_alg->digest_size;
			xform[1].next = NULL;
		} else {
			xform[0].type = RTE_CRYPTO_SYM_XFORM_AUTH;
			xform[0].auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
			xform[0].auth.algo = p->crypto.cipher_auth.auth.alg;
			xform[0].auth.key.data = p->crypto.cipher_auth.auth.key;
			xform[0].auth.key.length = auth_key_size;
			xform[0].auth.iv.offset = IV_OFFSET;
			xform[0].auth.iv.length = auth_iv_length;
			xform[0].auth.digest_length = auth_alg->digest_size;
			xform[0].next = &xform[1];

			xform[1].type = RTE_CRYPTO_SYM_XFORM_CIPHER;
			xform[1].cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
			xform[1].cipher.algo = p->crypto.cipher_auth.cipher.alg;
			xform[1].cipher.key.data = p->crypto.cipher_auth.cipher.key;
			xform[1].cipher.key.length = cipher_key_size;
			xform[1].cipher.iv.offset = IV_OFFSET;
			xform[1].cipher.iv.length = cipher_alg->iv_size;
			xform[1].cipher.dataunit_len = 0;
			xform[1].next = NULL;
		}

		*salt_out = salt;

		if (p->crypto.cipher_auth.auth.alg == RTE_CRYPTO_AUTH_AES_GMAC) {
			if (p->encrypt)
				return &xform[1];

			xform[0].next = NULL;
			return &xform[0];
		}

		return &xform[0];
	}
}

static void
ipsec_xform_get(struct rte_swx_ipsec_sa_params *p,
		struct rte_security_ipsec_xform *ipsec_xform,
		uint32_t salt)
{
	ipsec_xform->spi = p->encap.esp.spi;

	ipsec_xform->salt = salt;

	ipsec_xform->options.esn = 0;
	ipsec_xform->options.udp_encap = 0;
	ipsec_xform->options.copy_dscp = 1;
	ipsec_xform->options.copy_flabel = 0;
	ipsec_xform->options.copy_df = 0;
	ipsec_xform->options.dec_ttl = 0;
	ipsec_xform->options.ecn = 1;
	ipsec_xform->options.stats = 0;
	ipsec_xform->options.iv_gen_disable = 0;
	ipsec_xform->options.tunnel_hdr_verify = 0;
	ipsec_xform->options.udp_ports_verify = 0;
	ipsec_xform->options.ip_csum_enable = 0;
	ipsec_xform->options.l4_csum_enable = 0;
	ipsec_xform->options.ip_reassembly_en = 0;
	ipsec_xform->options.reserved_opts = 0;

	ipsec_xform->direction = p->encrypt ?
		RTE_SECURITY_IPSEC_SA_DIR_EGRESS :
		RTE_SECURITY_IPSEC_SA_DIR_INGRESS;

	ipsec_xform->proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP;

	ipsec_xform->mode = p->encap.tunnel_mode ?
		RTE_SECURITY_IPSEC_SA_MODE_TUNNEL :
		RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT;

	ipsec_xform->tunnel.type = p->encap.tunnel_ipv4 ?
		RTE_SECURITY_IPSEC_TUNNEL_IPV4 :
		RTE_SECURITY_IPSEC_TUNNEL_IPV6;

	if (p->encap.tunnel_mode) {
		if (p->encap.tunnel_ipv4) {
			ipsec_xform->tunnel.ipv4.src_ip = p->encap.tunnel.ipv4.src_addr;
			ipsec_xform->tunnel.ipv4.dst_ip = p->encap.tunnel.ipv4.dst_addr;
			ipsec_xform->tunnel.ipv4.dscp = 0;
			ipsec_xform->tunnel.ipv4.df = 0;
			ipsec_xform->tunnel.ipv4.ttl = 64;
		} else {
			ipsec_xform->tunnel.ipv6.src_addr = p->encap.tunnel.ipv6.src_addr;
			ipsec_xform->tunnel.ipv6.dst_addr = p->encap.tunnel.ipv6.dst_addr;
			ipsec_xform->tunnel.ipv6.dscp = 0;
			ipsec_xform->tunnel.ipv6.flabel = 0;
			ipsec_xform->tunnel.ipv6.hlimit = 64;
		}
	}

	ipsec_xform->life.packets_soft_limit = 0;
	ipsec_xform->life.bytes_soft_limit = 0;
	ipsec_xform->life.packets_hard_limit = 0;
	ipsec_xform->life.bytes_hard_limit = 0;

	ipsec_xform->replay_win_sz = 0;

	ipsec_xform->esn.value = 0;

	ipsec_xform->udp.dport = 0;
	ipsec_xform->udp.sport = 0;
}

static int
ipsec_sa_prm_get(struct rte_swx_ipsec_sa_params *p,
		 struct rte_ipsec_sa_prm *sa_prm,
		 struct rte_ipv4_hdr *ipv4_hdr,
		 struct rte_ipv6_hdr *ipv6_hdr,
		 struct rte_crypto_sym_xform *crypto_xform)
{
	uint32_t salt;

	memset(sa_prm, 0, sizeof(*sa_prm)); /* Better to be safe than sorry. */

	sa_prm->userdata = 0; /* Not used. */

	sa_prm->flags = 0; /* Flag RTE_IPSEC_SAFLAG_SQN_ATOM not enabled. */

	/*
	 * crypto_xform.
	 */
	sa_prm->crypto_xform = crypto_xform_get(p, crypto_xform, &salt);
	if (!sa_prm->crypto_xform)
		return -EINVAL;

	/*
	 * ipsec_xform.
	 */
	ipsec_xform_get(p, &sa_prm->ipsec_xform, salt);

	/*
	 * tunnel / transport.
	 *
	 * Currently, the input IP packet type is assumed to be IPv4. To support both IPv4 and IPv6,
	 * the input packet type should be added to the SA configuration parameters.
	 */
	if (p->encap.tunnel_mode) {
		if (p->encap.tunnel_ipv4) {
			sa_prm->tun.hdr_len = sizeof(struct rte_ipv4_hdr);
			sa_prm->tun.hdr_l3_off = 0;
			sa_prm->tun.next_proto = IPPROTO_IPIP; /* IPv4. */
			sa_prm->tun.hdr = ipv4_hdr;
		} else {
			sa_prm->tun.hdr_len = sizeof(struct rte_ipv6_hdr);
			sa_prm->tun.hdr_l3_off = 0;
			sa_prm->tun.next_proto = IPPROTO_IPIP; /* IPv4. */
			sa_prm->tun.hdr = ipv6_hdr;
		}
	} else {
		sa_prm->trs.proto = IPPROTO_IPIP; /* IPv4. */
	}

	return 0;
}

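/* The SA session is built out of three layers: the IPsec library SA (rte_ipsec_sa), the Cryptodev
 * library symmetric session allocated from the instance session mempool, and the IPsec library
 * session (rte_ipsec_session) that ties the two together with the RTE_SECURITY_ACTION_TYPE_NONE
 * type, i.e. plain crypto device processing.
 */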
static int
ipsec_session_create(struct rte_swx_ipsec *ipsec,
		     struct rte_swx_ipsec_sa_params *p,
		     struct rte_ipsec_session *s)
{
	struct rte_ipv4_hdr ipv4_hdr;
	struct rte_ipv6_hdr ipv6_hdr;
	struct rte_crypto_sym_xform crypto_xform[2];
	struct rte_ipsec_sa_prm sa_prm;
	struct rte_ipsec_sa *sa = NULL;
	struct rte_cryptodev_sym_session *crypto_session = NULL;
	int sa_size;
	int sa_valid = 0, status = 0;

	tunnel_ipv4_header_set(&ipv4_hdr, p);
	tunnel_ipv6_header_set(&ipv6_hdr, p);

	/* IPsec library SA setup. */
	status = ipsec_sa_prm_get(p, &sa_prm, &ipv4_hdr, &ipv6_hdr, crypto_xform);
	if (status)
		goto error;

	sa_size = rte_ipsec_sa_size(&sa_prm);
	if (sa_size < 0) {
		status = sa_size;
		goto error;
	}
	if (!sa_size) {
		status = -EINVAL;
		goto error;
	}

	sa = calloc(1, sa_size);
	if (!sa) {
		status = -ENOMEM;
		goto error;
	}

	sa_size = rte_ipsec_sa_init(sa, &sa_prm, sa_size);
	if (sa_size < 0) {
		status = sa_size;
		goto error;
	}
	if (!sa_size) {
		status = -EINVAL;
		goto error;
	}

	sa_valid = 1;

	/* Cryptodev library session setup. */
	crypto_session = rte_cryptodev_sym_session_create(ipsec->dev_id,
							  sa_prm.crypto_xform,
							  ipsec->mp_session);
	if (!crypto_session) {
		status = -ENOMEM;
		goto error;
	}

	/* IPsec library session setup. */
	s->sa = sa;
	s->type = RTE_SECURITY_ACTION_TYPE_NONE;
	s->crypto.ses = crypto_session;
	s->crypto.dev_id = ipsec->dev_id;
	s->pkt_func.prepare.async = NULL;
	s->pkt_func.process = NULL;

	return rte_ipsec_session_prepare(s);

error:
	/* sa. */
	if (sa_valid)
		rte_ipsec_sa_fini(sa);

	free(sa);

	/* crypto_session. */
	if (crypto_session)
		rte_cryptodev_sym_session_free(ipsec->dev_id, crypto_session);

	/* s. */
	memset(s, 0, sizeof(*s));

	return status;
}

static void
ipsec_session_free(struct rte_swx_ipsec *ipsec,
		   struct rte_ipsec_session *s)
{
	if (!s)
		return;

	/* IPsec library SA. */
	if (s->sa)
		rte_ipsec_sa_fini(s->sa);
	free(s->sa);

	/* Cryptodev library session. */
	if (s->crypto.ses)
		rte_cryptodev_sym_session_free(ipsec->dev_id, s->crypto.ses);

	/* IPsec library session. */
	memset(s, 0, sizeof(*s));
}

int
rte_swx_ipsec_sa_add(struct rte_swx_ipsec *ipsec,
		     struct rte_swx_ipsec_sa_params *sa_params,
		     uint32_t *id)
{
	struct ipsec_sa *sa;
	uint32_t sa_id;
	int status;

	/* Check the input parameters. */
	if (!ipsec || !sa_params || !id)
		return -EINVAL;

	/* Allocate a free SADB entry. */
	if (!ipsec->n_sa_free_id)
		return -ENOSPC;

	sa_id = ipsec->sa_free_id[ipsec->n_sa_free_id - 1];
	ipsec->n_sa_free_id--;

	/* Acquire the SA resources. */
	sa = ipsec_sa_get(ipsec, sa_id);

	status = ipsec_session_create(ipsec, sa_params, &sa->s);
	if (status) {
		/* Free the allocated SADB entry. */
		ipsec->sa_free_id[ipsec->n_sa_free_id] = sa_id;
		ipsec->n_sa_free_id++;

		return status;
	}

	/* Validate the new SA. */
	sa->valid = 1;
	*id = sa_id;

	return 0;
}

void
rte_swx_ipsec_sa_delete(struct rte_swx_ipsec *ipsec,
			uint32_t sa_id)
{
	struct ipsec_sa *sa;

	/* Check the input parameters. */
	if (!ipsec || (sa_id >= ipsec->n_sa_max))
		return;

	/* Release the SA resources. */
	sa = ipsec_sa_get(ipsec, sa_id);

	ipsec_session_free(ipsec, &sa->s);

	/* Free the SADB entry. */
	ipsec->sa_free_id[ipsec->n_sa_free_id] = sa_id;
	ipsec->n_sa_free_id++;

	/* Invalidate the SA. */
	sa->valid = 0;
}