/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2022 Intel Corporation
 */

#include <stdalign.h>
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#include <arpa/inet.h>

#include <rte_common.h>
#include <rte_random.h>
#include <rte_ip.h>
#include <rte_tailq.h>
#include <rte_eal_memconfig.h>
#include <rte_ring.h>
#include <rte_mbuf.h>
#include <rte_cryptodev.h>
#include <rte_ipsec.h>

#include "rte_swx_ipsec.h"

#ifndef RTE_SWX_IPSEC_HUGE_PAGES_DISABLE

#include <rte_malloc.h>

static void *
env_calloc(size_t size, size_t alignment, int numa_node)
{
	return rte_zmalloc_socket(NULL, size, alignment, numa_node);
}

static void
env_free(void *start, size_t size __rte_unused)
{
	rte_free(start);
}

#else

#include <numa.h>

static void *
env_calloc(size_t size, size_t alignment __rte_unused, int numa_node)
{
	void *start;

	if (numa_available() == -1)
		return NULL;

	start = numa_alloc_onnode(size, numa_node);
	if (!start)
		return NULL;

	memset(start, 0, size);
	return start;
}

static void
env_free(void *start, size_t size)
{
	if ((numa_available() == -1) || !start)
		return;

	numa_free(start, size);
}

#endif

#ifndef RTE_SWX_IPSEC_POOL_CACHE_SIZE
#define RTE_SWX_IPSEC_POOL_CACHE_SIZE 256
#endif

/* The crypto device session mempool has its size set to the number of SAs. The mempool API requires
 * the mempool size to be at least 1.5 times the size of the mempool cache.
 */
#define N_SA_MIN (RTE_SWX_IPSEC_POOL_CACHE_SIZE * 1.5)
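
/* Example: with the default cache size of 256, N_SA_MIN evaluates to 384, so an instance created
 * with params->n_sa_max = 100 still gets its SA table sized to rte_align64pow2(384) = 512 entries
 * (see rte_swx_ipsec_create() below).
 */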

struct ipsec_sa {
	struct rte_ipsec_session s;
	int valid;
};

struct ipsec_pkts_in {
	struct rte_mbuf *pkts[RTE_SWX_IPSEC_BURST_SIZE_MAX];
	struct ipsec_sa *sa[RTE_SWX_IPSEC_BURST_SIZE_MAX];
	struct rte_ipsec_group groups[RTE_SWX_IPSEC_BURST_SIZE_MAX];
	struct rte_crypto_op *group_cops[RTE_SWX_IPSEC_BURST_SIZE_MAX];
	struct rte_crypto_op *cops[RTE_SWX_IPSEC_BURST_SIZE_MAX];
	uint32_t n_cops;
};

struct ipsec_pkts_out {
	struct rte_crypto_op *cops[RTE_SWX_IPSEC_BURST_SIZE_MAX];
	struct rte_mbuf *group_pkts[RTE_SWX_IPSEC_BURST_SIZE_MAX];
	struct rte_ipsec_group groups[RTE_SWX_IPSEC_BURST_SIZE_MAX];
	struct rte_mbuf *pkts[RTE_SWX_IPSEC_BURST_SIZE_MAX];
	uint32_t n_pkts;
};

struct rte_swx_ipsec {
	/*
	 * Parameters.
	 */

	/* IPsec instance name. */
	char name[RTE_SWX_IPSEC_NAME_SIZE];

	/* Input packet queue. */
	struct rte_ring *ring_in;

	/* Output packet queue. */
	struct rte_ring *ring_out;

	/* Crypto device ID. */
	uint8_t dev_id;

	/* Crypto device queue pair ID. */
	uint16_t qp_id;

	/* Burst sizes. */
	struct rte_swx_ipsec_burst_size bsz;

	/* SA table size. */
	size_t n_sa_max;

	/*
	 * Internals.
	 */
	/* Crypto device buffer pool for sessions. */
	struct rte_mempool *mp_session;

	/* Pre-crypto packets. */
	struct ipsec_pkts_in in;

	/* Post-crypto packets. */
	struct ipsec_pkts_out out;

	/* Crypto device enqueue threshold. */
	uint32_t crypto_wr_threshold;

	/* Packets currently under crypto device processing. */
	uint32_t n_pkts_crypto;

	/* List of free SADB positions. */
	uint32_t *sa_free_id;

	/* Number of elements in the SADB list of free positions. */
	size_t n_sa_free_id;

	/* Allocated memory total size in bytes. */
	size_t total_size;

	/* Flag for registration to the global list of instances. */
	int registered;

	/*
	 * Table memory.
	 */
	alignas(RTE_CACHE_LINE_SIZE) uint8_t memory[];
};

static inline struct ipsec_sa *
ipsec_sa_get(struct rte_swx_ipsec *ipsec, uint32_t sa_id)
{
	struct ipsec_sa *sadb = (struct ipsec_sa *)ipsec->memory;

	return &sadb[sa_id & (ipsec->n_sa_max - 1)];
}
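
/* Since n_sa_max is always a power of two (see rte_swx_ipsec_create() below), the mask above is a
 * cheap modulo: e.g. with n_sa_max = 512, sa_id 513 maps to SADB entry 1. Valid IDs handed out by
 * rte_swx_ipsec_sa_add() are always in range, so the mask only guards against bad input.
 */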

/* Global list of instances. */
TAILQ_HEAD(rte_swx_ipsec_list, rte_tailq_entry);

static struct rte_tailq_elem rte_swx_ipsec_tailq = {
	.name = "RTE_SWX_IPSEC",
};

EAL_REGISTER_TAILQ(rte_swx_ipsec_tailq)

struct rte_swx_ipsec *
rte_swx_ipsec_find(const char *name)
{
	struct rte_swx_ipsec_list *ipsec_list;
	struct rte_tailq_entry *te = NULL;

	if (!name ||
	    !name[0] ||
	    (strnlen(name, RTE_SWX_IPSEC_NAME_SIZE) == RTE_SWX_IPSEC_NAME_SIZE))
		return NULL;

	ipsec_list = RTE_TAILQ_CAST(rte_swx_ipsec_tailq.head, rte_swx_ipsec_list);

	rte_mcfg_tailq_read_lock();

	TAILQ_FOREACH(te, ipsec_list, next) {
		struct rte_swx_ipsec *ipsec = (struct rte_swx_ipsec *)te->data;

		if (!strncmp(name, ipsec->name, sizeof(ipsec->name))) {
			rte_mcfg_tailq_read_unlock();
			return ipsec;
		}
	}

	rte_mcfg_tailq_read_unlock();
	return NULL;
}

static int
ipsec_register(struct rte_swx_ipsec *ipsec)
{
	struct rte_swx_ipsec_list *ipsec_list;
	struct rte_tailq_entry *te = NULL;

	ipsec_list = RTE_TAILQ_CAST(rte_swx_ipsec_tailq.head, rte_swx_ipsec_list);

	rte_mcfg_tailq_write_lock();

	TAILQ_FOREACH(te, ipsec_list, next) {
		struct rte_swx_ipsec *elem = (struct rte_swx_ipsec *)te->data;

		if (!strncmp(ipsec->name, elem->name, sizeof(ipsec->name))) {
			rte_mcfg_tailq_write_unlock();
			return -EEXIST;
		}
	}

	te = calloc(1, sizeof(struct rte_tailq_entry));
	if (!te) {
		rte_mcfg_tailq_write_unlock();
		return -ENOMEM;
	}

	te->data = (void *)ipsec;
	TAILQ_INSERT_TAIL(ipsec_list, te, next);
	rte_mcfg_tailq_write_unlock();
	return 0;
}

static void
ipsec_unregister(struct rte_swx_ipsec *ipsec)
{
	struct rte_swx_ipsec_list *ipsec_list;
	struct rte_tailq_entry *te = NULL;

	ipsec_list = RTE_TAILQ_CAST(rte_swx_ipsec_tailq.head, rte_swx_ipsec_list);

	rte_mcfg_tailq_write_lock();

	TAILQ_FOREACH(te, ipsec_list, next) {
		if (te->data == (void *)ipsec) {
			TAILQ_REMOVE(ipsec_list, te, next);
			rte_mcfg_tailq_write_unlock();
			free(te);
			return;
		}
	}

	rte_mcfg_tailq_write_unlock();
}

static void
ipsec_session_free(struct rte_swx_ipsec *ipsec, struct rte_ipsec_session *s);

void
rte_swx_ipsec_free(struct rte_swx_ipsec *ipsec)
{
	size_t i;

	if (!ipsec)
		return;

	/* Remove the current instance from the global list. */
	if (ipsec->registered)
		ipsec_unregister(ipsec);

	/* SADB. */
	for (i = 0; i < ipsec->n_sa_max; i++) {
		struct ipsec_sa *sa = ipsec_sa_get(ipsec, i);

		if (!sa->valid)
			continue;

		/* SA session. */
		ipsec_session_free(ipsec, &sa->s);
	}

	/* Crypto device session buffer pool. */
	rte_mempool_free(ipsec->mp_session);

	/* IPsec object memory. */
	env_free(ipsec, ipsec->total_size);
}

int
rte_swx_ipsec_create(struct rte_swx_ipsec **ipsec_out,
		     const char *name,
		     struct rte_swx_ipsec_params *params,
		     int numa_node)
{
	char resource_name[RTE_SWX_IPSEC_NAME_SIZE];
	struct rte_swx_ipsec *ipsec = NULL;
	struct rte_ring *ring_in, *ring_out;
	struct rte_cryptodev_info dev_info;
	size_t n_sa_max, sadb_offset, sadb_size, sa_free_id_offset, sa_free_id_size, total_size, i;
	uint32_t dev_session_size;
	int dev_id, status = 0;

	/* Check input parameters. */
	if (!ipsec_out ||
	    !name ||
	    !name[0] ||
	    (strnlen(name, RTE_SWX_IPSEC_NAME_SIZE) == RTE_SWX_IPSEC_NAME_SIZE) ||
	    !params ||
	    (params->bsz.ring_rd > RTE_SWX_IPSEC_BURST_SIZE_MAX) ||
	    (params->bsz.ring_wr > RTE_SWX_IPSEC_BURST_SIZE_MAX) ||
	    (params->bsz.crypto_wr > RTE_SWX_IPSEC_BURST_SIZE_MAX) ||
	    (params->bsz.crypto_rd > RTE_SWX_IPSEC_BURST_SIZE_MAX) ||
	    !params->n_sa_max) {
		status = -EINVAL;
		goto error;
	}

	ring_in = rte_ring_lookup(params->ring_in_name);
	if (!ring_in) {
		status = -EINVAL;
		goto error;
	}

	ring_out = rte_ring_lookup(params->ring_out_name);
	if (!ring_out) {
		status = -EINVAL;
		goto error;
	}

	dev_id = rte_cryptodev_get_dev_id(params->crypto_dev_name);
	if (dev_id == -1) {
		status = -EINVAL;
		goto error;
	}

	rte_cryptodev_info_get(dev_id, &dev_info);
	if (params->crypto_dev_queue_pair_id >= dev_info.max_nb_queue_pairs) {
		status = -EINVAL;
		goto error;
	}

	/* Memory allocation. */
	n_sa_max = rte_align64pow2(RTE_MAX(params->n_sa_max, N_SA_MIN));

	sadb_offset = sizeof(struct rte_swx_ipsec);
	sadb_size = RTE_CACHE_LINE_ROUNDUP(n_sa_max * sizeof(struct ipsec_sa));

	sa_free_id_offset = sadb_offset + sadb_size;
	sa_free_id_size = RTE_CACHE_LINE_ROUNDUP(n_sa_max * sizeof(uint32_t));

	total_size = sa_free_id_offset + sa_free_id_size;
	ipsec = env_calloc(total_size, RTE_CACHE_LINE_SIZE, numa_node);
	if (!ipsec) {
		status = -ENOMEM;
		goto error;
	}

	/* Initialization. */
	strcpy(ipsec->name, name);
	ipsec->ring_in = ring_in;
	ipsec->ring_out = ring_out;
	ipsec->dev_id = (uint8_t)dev_id;
	ipsec->qp_id = params->crypto_dev_queue_pair_id;
	memcpy(&ipsec->bsz, &params->bsz, sizeof(struct rte_swx_ipsec_burst_size));
	ipsec->n_sa_max = n_sa_max;

	ipsec->crypto_wr_threshold = params->bsz.crypto_wr * 3 / 4;

	ipsec->sa_free_id = (uint32_t *)&ipsec->memory[sadb_size]; /* Right after the SADB. */
	for (i = 0; i < n_sa_max; i++)
		ipsec->sa_free_id[i] = n_sa_max - 1 - i;
	ipsec->n_sa_free_id = n_sa_max;

	ipsec->total_size = total_size;

	/* Crypto device session memory pool. */
	dev_session_size = rte_cryptodev_sym_get_private_session_size((uint8_t)dev_id);

	snprintf(resource_name, sizeof(resource_name), "%s_mp", name);
	ipsec->mp_session = rte_cryptodev_sym_session_pool_create(resource_name,
		n_sa_max, /* number of pool elements */
		dev_session_size, /* pool element size */
		RTE_SWX_IPSEC_POOL_CACHE_SIZE, /* pool cache size */
		0, /* pool element private data size */
		numa_node);
	if (!ipsec->mp_session) {
		status = -ENOMEM;
		goto error;
	}

	/* Add the current instance to the global list. */
	status = ipsec_register(ipsec);
	if (status)
		goto error;

	ipsec->registered = 1;

	*ipsec_out = ipsec;
	return 0;

error:
	rte_swx_ipsec_free(ipsec);
	return status;
}
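
/* Creation usage sketch (illustrative only: the ring names, crypto device name and sizes below are
 * placeholders the application is assumed to have set up beforehand):
 *
 *	struct rte_swx_ipsec_params params = {
 *		.ring_in_name = "RING_IN",
 *		.ring_out_name = "RING_OUT",
 *		.crypto_dev_name = "crypto_aesni_mb0",
 *		.crypto_dev_queue_pair_id = 0,
 *		.bsz = {
 *			.ring_rd = 32,
 *			.ring_wr = 32,
 *			.crypto_wr = 32,
 *			.crypto_rd = 32,
 *		},
 *		.n_sa_max = 4096,
 *	};
 *	struct rte_swx_ipsec *ipsec;
 *	int status;
 *
 *	status = rte_swx_ipsec_create(&ipsec, "ipsec0", &params, SOCKET_ID_ANY);
 *	if (status)
 *		rte_panic("IPsec instance creation failed (%d)\n", status);
 */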

static inline int
ipsec_sa_group(struct rte_swx_ipsec *ipsec, int n_pkts)
{
	struct ipsec_sa *sa;
	struct rte_ipsec_group *g;
	int n_groups, n_pkts_in_group, i;

	sa = ipsec->in.sa[0];

	g = &ipsec->in.groups[0];
	g->id.ptr = sa;
	g->m = &ipsec->in.pkts[0];
	n_pkts_in_group = 1;
	n_groups = 1;

	for (i = 1; i < n_pkts; i++) {
		struct ipsec_sa *sa_new = ipsec->in.sa[i];

		/* Same SA => Add the current pkt to the same group. */
		if (sa_new == sa) {
			n_pkts_in_group++;
			continue;
		}

		/* Different SA => Close the current group & add the current pkt to a new group. */
		g->cnt = n_pkts_in_group;
		sa = sa_new;

		g++;
		g->id.ptr = sa;
		g->m = &ipsec->in.pkts[i];
		n_pkts_in_group = 1;
		n_groups++;
	}

	/* Close the last group. */
	g->cnt = n_pkts_in_group;

	return n_groups;
}
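
/* Example: a burst whose per-packet SAs are { A, A, B, B, B, A } is split into three consecutive
 * groups: (A, 2 pkts), (B, 3 pkts), (A, 1 pkt). Grouping is strictly positional, so the same SA
 * can show up in more than one group within the same burst.
 */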

static inline void
ipsec_crypto_enqueue(struct rte_swx_ipsec *ipsec, uint16_t n_cops)
{
	struct rte_crypto_op **dst0 = ipsec->in.cops, **dst;
	struct rte_crypto_op **src = ipsec->in.group_cops;

	uint32_t n_pkts_crypto = ipsec->n_pkts_crypto;
	uint32_t n_dst = ipsec->in.n_cops;
	uint32_t n_dst_max = ipsec->bsz.crypto_wr;
	uint32_t n_dst_avail = n_dst_max - n_dst;
	uint32_t n_src = n_cops;
	uint32_t i;

	dst = &dst0[n_dst];

	/* Shortcut: If no elements in DST and enough elements in SRC, then simply use SRC directly
	 * instead of moving the SRC to DST first and then using DST.
	 */
	if (!n_dst && n_src >= ipsec->crypto_wr_threshold) {
		uint16_t n_ok;

		n_ok = rte_cryptodev_enqueue_burst(ipsec->dev_id, ipsec->qp_id, src, n_src);
		ipsec->n_pkts_crypto = n_pkts_crypto + n_ok;

		for (i = n_ok; i < n_src; i++) {
			struct rte_crypto_op *cop = src[i];
			struct rte_mbuf *m = cop->sym->m_src;

			rte_pktmbuf_free(m);
		}

		return;
	}

	/* Move from SRC to DST. Every time DST gets full, send burst from DST. */
	for ( ; n_src >= n_dst_avail; ) {
		uint32_t n_ok;

		/* Move from SRC to DST. */
		for (i = 0; i < n_dst_avail; i++)
			*dst++ = *src++;

		n_src -= n_dst_avail;

		/* DST full: send burst from DST. */
		n_ok = rte_cryptodev_enqueue_burst(ipsec->dev_id, ipsec->qp_id, dst0, n_dst_max);
		n_pkts_crypto += n_ok;

		for (i = n_ok; i < n_dst_max; i++) {
			struct rte_crypto_op *cop = dst0[i];
			struct rte_mbuf *m = cop->sym->m_src;

			rte_pktmbuf_free(m);
		}

		/* Next iteration. */
		dst = dst0;
		n_dst = 0;
		n_dst_avail = n_dst_max;
	}

	ipsec->n_pkts_crypto = n_pkts_crypto;

	/* Move from SRC to DST. Not enough elements in SRC to get DST full. */
	for (i = 0; i < n_src; i++)
		*dst++ = *src++;

	n_dst += n_src;

	ipsec->in.n_cops = n_dst;
}
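
/* Example: with bsz.crypto_wr = 32, the enqueue threshold computed at creation time is
 * 32 * 3 / 4 = 24, so a group of at least 24 crypto ops arriving while the staging array is empty
 * is pushed to the crypto device directly, skipping the copy into the staging array.
 */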

/**
 * Packet buffer anatomy:
 *
 * +----------+---------+--------------------------------------------------------------------------+
 * | Offset   | Size    | Description                                                              |
 * | (Byte #) | (Bytes) |                                                                          |
 * +==========+=========+==========================================================================+
 * | 0        | 128     | Meta-data: struct rte_mbuf.                                              |
 * |          |         | The buf_addr field points to the start of the packet section.            |
 * +----------+---------+--------------------------------------------------------------------------+
 * | 128      | 128     | Meta-data: struct ipsec_mbuf (see below).                                |
 * +----------+---------+--------------------------------------------------------------------------+
 * | 256      |         | Packet section.                                                          |
 * |          |         | The first packet byte is placed at the offset indicated by the struct    |
 * |          |         | rte_mbuf::data_off field relative to the start of the packet section.    |
 * +----------+---------+--------------------------------------------------------------------------+
 */
struct ipsec_mbuf {
	struct ipsec_sa *sa;
	struct rte_crypto_op cop;
	struct rte_crypto_sym_op sym_cop;
	uint8_t buffer[32]; /* The crypto IV is placed here. */
};
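
/* The layout above constrains how the application creates its packet mempool: the mbuf private
 * area must be big enough to hold struct ipsec_mbuf. A minimal sketch, assuming the application
 * creates the pool itself (pool name, pool size and cache size below are placeholders):
 *
 *	struct rte_mempool *mp;
 *
 *	mp = rte_pktmbuf_pool_create("pkt_pool",
 *				     4096,                       // Number of mbufs.
 *				     256,                        // Per-lcore cache size.
 *				     sizeof(struct ipsec_mbuf),  // Private area (128 bytes above).
 *				     RTE_MBUF_DEFAULT_BUF_SIZE,  // Data room size.
 *				     SOCKET_ID_ANY);
 */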

/* Offset from the start of the struct ipsec_mbuf::cop where the crypto IV will be placed. */
#define IV_OFFSET (sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op))

#define META_LENGTH sizeof(struct rte_swx_ipsec_input_packet_metadata)

static inline void
rte_swx_ipsec_pre_crypto(struct rte_swx_ipsec *ipsec)
{
	int n_pkts, n_groups, i;

	/* Read packets from the input ring. */
	n_pkts = rte_ring_sc_dequeue_burst(ipsec->ring_in,
					   (void **)ipsec->in.pkts,
					   ipsec->bsz.ring_rd,
					   NULL);
	if (!n_pkts)
		return;

	/* Get the SA for each packet. */
	for (i = 0; i < n_pkts; i++) {
		struct rte_mbuf *m = ipsec->in.pkts[i];
		struct rte_swx_ipsec_input_packet_metadata *meta;
		struct rte_ipv4_hdr *ipv4_hdr;
		uint32_t sa_id;

		meta = rte_pktmbuf_mtod(m, struct rte_swx_ipsec_input_packet_metadata *);
		ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *, META_LENGTH);

		/* Read the SA ID from the IPsec meta-data placed at the front of the IP packet. */
		sa_id = ntohl(meta->sa_id);

		/* Consume the IPsec meta-data. */
		m->data_off += META_LENGTH;
		m->data_len -= META_LENGTH;
		m->pkt_len -= META_LENGTH;

		/* Set the fields required by the IPsec library. */
		m->l2_len = 0;
		m->l3_len = (ipv4_hdr->version_ihl >> 4 == 4) ?
			sizeof(struct rte_ipv4_hdr) :
			sizeof(struct rte_ipv6_hdr);

		/* Get the SA. */
		ipsec->in.sa[i] = ipsec_sa_get(ipsec, sa_id);
	}

	/* Group packets that share the same SA. */
	n_groups = ipsec_sa_group(ipsec, n_pkts);

	/* Write each group of packets sharing the same SA to the crypto device. */
	for (i = 0; i < n_groups; i++) {
		struct rte_ipsec_group *g = &ipsec->in.groups[i];
		struct ipsec_sa *sa = g->id.ptr;
		struct rte_ipsec_session *s = &sa->s;
		uint32_t j;
		uint16_t n_pkts_ok;

		/* Prepare the crypto ops for the current group. */
		for (j = 0; j < g->cnt; j++) {
			struct rte_mbuf *m = g->m[j];
			struct ipsec_mbuf *priv = rte_mbuf_to_priv(m);

			priv->sa = sa;
			ipsec->in.group_cops[j] = &priv->cop;
		}

		n_pkts_ok = rte_ipsec_pkt_crypto_prepare(s, g->m, ipsec->in.group_cops, g->cnt);

		for (j = n_pkts_ok; j < g->cnt; j++) {
			struct rte_mbuf *m = g->m[j];

			rte_pktmbuf_free(m);
		}

		/* Write the crypto ops of the current group to the crypto device. */
		ipsec_crypto_enqueue(ipsec, n_pkts_ok);
	}
}

static inline void
ipsec_ring_enqueue(struct rte_swx_ipsec *ipsec, struct rte_ipsec_group *g, uint32_t n_pkts)
{
	struct rte_mbuf **dst0 = ipsec->out.pkts, **dst;
	struct rte_mbuf **src = g->m;

	uint32_t n_dst = ipsec->out.n_pkts;
	uint32_t n_dst_max = ipsec->bsz.ring_wr;
	uint32_t n_dst_avail = n_dst_max - n_dst;
	uint32_t n_src = n_pkts;
	uint32_t i;

	dst = &dst0[n_dst];

	/* Move from SRC to DST. Every time DST gets full, send burst from DST. */
	for ( ; n_src >= n_dst_avail; ) {
		uint32_t n_ok;

		/* Move from SRC to DST. */
		for (i = 0; i < n_dst_avail; i++)
			*dst++ = *src++;

		n_src -= n_dst_avail;

		/* DST full: send burst from DST. */
		n_ok = rte_ring_sp_enqueue_burst(ipsec->ring_out, (void **)dst0, n_dst_max, NULL);

		for (i = n_ok; i < n_dst_max; i++) {
			struct rte_mbuf *m = dst0[i];

			rte_pktmbuf_free(m);
		}

		/* Next iteration. */
		dst = dst0;
		n_dst = 0;
		n_dst_avail = n_dst_max;
	}

	/* Move from SRC to DST. Not enough elements in SRC to get DST full. */
	for (i = 0; i < n_src; i++)
		*dst++ = *src++;

	n_dst += n_src;

	ipsec->out.n_pkts = n_dst;
}

static inline void
rte_swx_ipsec_post_crypto(struct rte_swx_ipsec *ipsec)
{
	uint32_t n_pkts_crypto = ipsec->n_pkts_crypto, n_pkts, ng, i;

	/* Read the crypto ops from the crypto device. */
	if (!n_pkts_crypto)
		return;

	n_pkts = rte_cryptodev_dequeue_burst(ipsec->dev_id,
					     ipsec->qp_id,
					     ipsec->out.cops,
					     ipsec->bsz.crypto_rd);
	if (!n_pkts)
		return;

	ipsec->n_pkts_crypto = n_pkts_crypto - n_pkts;

	/* Group packets that share the same SA. */
	ng = rte_ipsec_pkt_crypto_group((const struct rte_crypto_op **)(uintptr_t)ipsec->out.cops,
					      ipsec->out.group_pkts,
					      ipsec->out.groups,
					      n_pkts);

	/* Perform post-crypto IPsec processing for each group of packets that share the same SA.
	 * Write each group of packets to the output ring.
	 */
	for (i = 0, n_pkts = 0; i < ng; i++) {
		struct rte_ipsec_group *g = &ipsec->out.groups[i];
		struct rte_ipsec_session *s = g->id.ptr;
		uint32_t n_pkts_ok, j;

		/* Perform post-crypto IPsec processing for the current group. */
		n_pkts_ok = rte_ipsec_pkt_process(s, g->m, g->cnt);

		for (j = n_pkts_ok; j < g->cnt; j++) {
			struct rte_mbuf *m = g->m[j];

			rte_pktmbuf_free(m);
		}

		/* Write the packets of the current group to the output ring. */
		ipsec_ring_enqueue(ipsec, g, n_pkts_ok);
	}
}

void
rte_swx_ipsec_run(struct rte_swx_ipsec *ipsec)
{
	rte_swx_ipsec_pre_crypto(ipsec);
	rte_swx_ipsec_post_crypto(ipsec);
}
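
/* Typical data plane usage: invoke rte_swx_ipsec_run() repeatedly on the core servicing this
 * instance, alongside the pipeline(s) that feed ring_in and drain ring_out. A minimal sketch (the
 * lcore wiring is application-specific and assumed here):
 *
 *	for ( ; ; )
 *		rte_swx_ipsec_run(ipsec);
 */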

/**
 * IPsec Control Plane API
 */
struct cipher_alg {
	const char *name;
	enum rte_crypto_cipher_algorithm alg;
	uint32_t iv_size;
	uint32_t block_size;
	uint32_t key_size;
};

struct auth_alg {
	const char *name;
	enum rte_crypto_auth_algorithm alg;
	uint32_t iv_size;
	uint32_t digest_size;
	uint32_t key_size;
};

struct aead_alg {
	const char *name;
	enum rte_crypto_aead_algorithm alg;
	uint32_t iv_size;
	uint32_t block_size;
	uint32_t digest_size;
	uint32_t key_size;
	uint32_t aad_size;
};

static struct cipher_alg cipher_algs[] = {
	[0] = {
		.name = "null",
		.alg = RTE_CRYPTO_CIPHER_NULL,
		.iv_size = 0,
		.block_size = 4,
		.key_size = 0,
	},

	[1] = {
		.name = "aes-cbc-128",
		.alg = RTE_CRYPTO_CIPHER_AES_CBC,
		.iv_size = 16,
		.block_size = 16,
		.key_size = 16,
	},

	[2] = {
		.name = "aes-cbc-192",
		.alg = RTE_CRYPTO_CIPHER_AES_CBC,
		.iv_size = 16,
		.block_size = 16,
		.key_size = 24,
	},

	[3] = {
		.name = "aes-cbc-256",
		.alg = RTE_CRYPTO_CIPHER_AES_CBC,
		.iv_size = 16,
		.block_size = 16,
		.key_size = 32,
	},

	[4] = {
		.name = "aes-ctr-128",
		.alg = RTE_CRYPTO_CIPHER_AES_CTR,
		.iv_size = 8,
		.block_size = 4,
		.key_size = 20,
	},

	[5] = {
		.name = "aes-ctr-192",
		.alg = RTE_CRYPTO_CIPHER_AES_CTR,
		.iv_size = 16,
		.block_size = 16,
		.key_size = 28,
	},

	[6] = {
		.name = "aes-ctr-256",
		.alg = RTE_CRYPTO_CIPHER_AES_CTR,
		.iv_size = 16,
		.block_size = 16,
		.key_size = 36,
	},

	[7] = {
		.name = "3des-cbc",
		.alg = RTE_CRYPTO_CIPHER_3DES_CBC,
		.iv_size = 8,
		.block_size = 8,
		.key_size = 24,
	},

	[8] = {
		.name = "des-cbc",
		.alg = RTE_CRYPTO_CIPHER_DES_CBC,
		.iv_size = 8,
		.block_size = 8,
		.key_size = 8,
	},
};

static struct auth_alg auth_algs[] = {
	[0] = {
		.name = "null",
		.alg = RTE_CRYPTO_AUTH_NULL,
		.iv_size = 0,
		.digest_size = 0,
		.key_size = 0,
	},

	[1] = {
		.name = "sha1-hmac",
		.alg = RTE_CRYPTO_AUTH_SHA1_HMAC,
		.iv_size = 0,
		.digest_size = 12,
		.key_size = 20,
	},

	[2] = {
		.name = "sha256-hmac",
		.alg = RTE_CRYPTO_AUTH_SHA256_HMAC,
		.iv_size = 0,
		.digest_size = 16,
		.key_size = 32,
	},

	[3] = {
		.name = "sha384-hmac",
		.alg = RTE_CRYPTO_AUTH_SHA384_HMAC,
		.iv_size = 0,
		.digest_size = 24,
		.key_size = 48,
	},

	[4] = {
		.name = "sha512-hmac",
		.alg = RTE_CRYPTO_AUTH_SHA512_HMAC,
		.iv_size = 0,
		.digest_size = 32,
		.key_size = 64,
	},

	[5] = {
		.name = "aes-gmac",
		.alg = RTE_CRYPTO_AUTH_AES_GMAC,
		.iv_size = 8,
		.digest_size = 16,
		.key_size = 20,
	},

	[6] = {
		.name = "aes-xcbc-mac-96",
		.alg = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
		.iv_size = 0,
		.digest_size = 12,
		.key_size = 16,
	},
};

static struct aead_alg aead_algs[] = {
	[0] = {
		.name = "aes-gcm-128",
		.alg = RTE_CRYPTO_AEAD_AES_GCM,
		.iv_size = 8,
		.block_size = 4,
		.key_size = 20,
		.digest_size = 16,
		.aad_size = 8,
	},

	[1] = {
		.name = "aes-gcm-192",
		.alg = RTE_CRYPTO_AEAD_AES_GCM,
		.iv_size = 8,
		.block_size = 4,
		.key_size = 28,
		.digest_size = 16,
		.aad_size = 8,
	},

	[2] = {
		.name = "aes-gcm-256",
		.alg = RTE_CRYPTO_AEAD_AES_GCM,
		.iv_size = 8,
		.block_size = 4,
		.key_size = 36,
		.digest_size = 16,
		.aad_size = 8,
	},

	[3] = {
		.name = "aes-ccm-128",
		.alg = RTE_CRYPTO_AEAD_AES_CCM,
		.iv_size = 8,
		.block_size = 4,
		.key_size = 20,
		.digest_size = 16,
		.aad_size = 8,
	},

	[4] = {
		.name = "aes-ccm-192",
		.alg = RTE_CRYPTO_AEAD_AES_CCM,
		.iv_size = 8,
		.block_size = 4,
		.key_size = 28,
		.digest_size = 16,
		.aad_size = 8,
	},

	[5] = {
		.name = "aes-ccm-256",
		.alg = RTE_CRYPTO_AEAD_AES_CCM,
		.iv_size = 8,
		.block_size = 4,
		.key_size = 36,
		.digest_size = 16,
		.aad_size = 8,
	},

	[6] = {
		.name = "chacha20-poly1305",
		.alg = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
		.iv_size = 12,
		.block_size = 64,
		.key_size = 36,
		.digest_size = 16,
		.aad_size = 8,
	},
};

static struct cipher_alg *
cipher_alg_find(const char *name)
{
	size_t i;

	for (i = 0; i < RTE_DIM(cipher_algs); i++) {
		struct cipher_alg *alg = &cipher_algs[i];

		if (!strcmp(name, alg->name))
			return alg;
	}

	return NULL;
}

static struct cipher_alg *
cipher_alg_find_by_id(enum rte_crypto_cipher_algorithm alg_id, uint32_t key_size)
{
	size_t i;

	for (i = 0; i < RTE_DIM(cipher_algs); i++) {
		struct cipher_alg *alg = &cipher_algs[i];

		if (alg->alg == alg_id && alg->key_size == key_size)
			return alg;
	}

	return NULL;
}

static struct auth_alg *
auth_alg_find(const char *name)
{
	size_t i;

	for (i = 0; i < RTE_DIM(auth_algs); i++) {
		struct auth_alg *alg = &auth_algs[i];

		if (!strcmp(name, alg->name))
			return alg;
	}

	return NULL;
}

static struct auth_alg *
auth_alg_find_by_id(enum rte_crypto_auth_algorithm alg_id, uint32_t key_size)
{
	size_t i;

	for (i = 0; i < RTE_DIM(auth_algs); i++) {
		struct auth_alg *alg = &auth_algs[i];

		if (alg->alg == alg_id && alg->key_size == key_size)
			return alg;
	}

	return NULL;
}

static struct aead_alg *
aead_alg_find(const char *name)
{
	size_t i;

	for (i = 0; i < RTE_DIM(aead_algs); i++) {
		struct aead_alg *alg = &aead_algs[i];

		if (!strcmp(name, alg->name))
			return alg;
	}

	return NULL;
}

static struct aead_alg *
aead_alg_find_by_id(enum rte_crypto_aead_algorithm alg_id, uint32_t key_size)
{
	size_t i;

	for (i = 0; i < RTE_DIM(aead_algs); i++) {
		struct aead_alg *alg = &aead_algs[i];

		if (alg->alg == alg_id && alg->key_size == key_size)
			return alg;
	}

	return NULL;
}

static int
char_to_hex(char c, uint8_t *val)
{
	if (c >= '0' && c <= '9') {
		*val = c - '0';
		return 0;
	}

	if (c >= 'A' && c <= 'F') {
		*val = c - 'A' + 10;
		return 0;
	}

	if (c >= 'a' && c <= 'f') {
		*val = c - 'a' + 10;
		return 0;
	}

	return -EINVAL;
}

static int
hex_string_parse(char *src, uint8_t *dst, uint32_t n_dst_bytes)
{
	uint32_t i;

	/* Check input arguments. */
	if (!src || !src[0] || !dst || !n_dst_bytes)
		return -EINVAL;

	/* Skip any leading "0x" or "0X" in the src string. */
	if ((src[0] == '0') && (src[1] == 'x' || src[1] == 'X'))
		src += 2;

	/* Convert each group of two hex characters in the src string to one byte in dst array. */
	for (i = 0; i < n_dst_bytes; i++) {
		uint8_t a, b;
		int status;

		status = char_to_hex(*src, &a);
		if (status)
			return status;
		src++;

		status = char_to_hex(*src, &b);
		if (status)
			return status;
		src++;

		dst[i] = a * 16 + b;
	}

	/* Check for the end of the src string. */
	if (*src)
		return -EINVAL;

	return 0;
}
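
/* Example: hex_string_parse("0x0123ab", key, 3) fills key[] with { 0x01, 0x23, 0xab } and returns
 * 0. A source string with fewer than 2 * n_dst_bytes hex digits, or with any trailing characters,
 * fails with -EINVAL.
 */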

static int
token_is_comment(const char *token)
{
	if ((token[0] == '#') ||
	    (token[0] == ';') ||
	    ((token[0] == '/') && (token[1] == '/')))
		return 1; /* TRUE. */

	return 0; /* FALSE. */
}

#define MAX_TOKENS 64

#define CHECK(condition, msg)          \
do {                                   \
	if (!(condition)) {            \
		if (errmsg)            \
			*errmsg = msg; \
		goto error;            \
	}                              \
} while (0)

struct rte_swx_ipsec_sa_params *
rte_swx_ipsec_sa_read(struct rte_swx_ipsec *ipsec __rte_unused,
		      const char *string,
		      int *is_blank_or_comment,
		      const char **errmsg)
{
	char *token_array[MAX_TOKENS], **t;
	struct rte_swx_ipsec_sa_params *p = NULL;
	char *s0 = NULL, *s;
	uint32_t n_tokens = 0;
	int blank_or_comment = 0;

	/* Check input arguments. */
	CHECK(string && string[0], "NULL input");

	/* Memory allocation. */
	s0 = strdup(string);
	p = calloc(1, sizeof(struct rte_swx_ipsec_sa_params));
	CHECK(s0 && p, "Not enough memory");

	/* Parse the string into tokens. */
	for (s = s0; ; ) {
		char *token;

		token = strtok_r(s, " \f\n\r\t\v", &s);
		if (!token || token_is_comment(token))
			break;

		CHECK(n_tokens < RTE_DIM(token_array), "Too many tokens");

		token_array[n_tokens] = token;
		n_tokens++;
	}

	t = token_array;
	if (!n_tokens) {
		blank_or_comment = 1;
		goto error;
	}

	/*
	 * Crypto operation.
	 */
	if (!strcmp(t[0], "encrypt"))
		p->encrypt = 1;
	else if (!strcmp(t[0], "decrypt"))
		p->encrypt = 0;
	else
		CHECK(0, "Missing \"encrypt\"/\"decrypt\" keyword");

	t++;
	n_tokens--;

	/*
	 * Crypto parameters.
	 */
	CHECK(n_tokens >= 2, "Not enough tokens");

	if (!strcmp(t[0], "cipher")) {
		struct cipher_alg *cipher_alg;
		struct auth_alg *auth_alg;
		uint32_t key_size;

		p->crypto.is_aead = 0;

		/* cipher. */
		cipher_alg = cipher_alg_find(t[1]);
		CHECK(cipher_alg, "Unsupported cipher algorithm");

		key_size = cipher_alg->key_size;
		p->crypto.cipher_auth.cipher.alg = cipher_alg->alg;
		p->crypto.cipher_auth.cipher.key_size = key_size;

		t += 2;
		n_tokens -= 2;

		if (key_size) {
			int status;

			CHECK(n_tokens >= 2, "Not enough tokens");
			CHECK(!strcmp(t[0], "key"), "Missing cipher \"key\" keyword");
			CHECK(key_size <= RTE_DIM(p->crypto.cipher_auth.cipher.key),
				"Cipher algorithm key too big");

			status = hex_string_parse(t[1], p->crypto.cipher_auth.cipher.key, key_size);
			CHECK(!status, "Cipher key invalid format");

			t += 2;
			n_tokens -= 2;
		}

		/* authentication. */
		CHECK(n_tokens >= 2, "Not enough tokens");
		CHECK(!strcmp(t[0], "auth"), "Missing \"auth\" keyword");

		auth_alg = auth_alg_find(t[1]);
		CHECK(auth_alg, "Unsupported authentication algorithm");

		key_size = auth_alg->key_size;
		p->crypto.cipher_auth.auth.alg = auth_alg->alg;
		p->crypto.cipher_auth.auth.key_size = key_size;

		t += 2;
		n_tokens -= 2;

		if (key_size) {
			int status;

			CHECK(n_tokens >= 2, "Not enough tokens");
			CHECK(!strcmp(t[0], "key"), "Missing authentication \"key\" keyword");
			CHECK(key_size <= RTE_DIM(p->crypto.cipher_auth.auth.key),
				"Authentication algorithm key too big");

			status = hex_string_parse(t[1], p->crypto.cipher_auth.auth.key, key_size);
			CHECK(!status, "Authentication key invalid format");

			t += 2;
			n_tokens -= 2;
		}
	} else if (!strcmp(t[0], "aead")) {
		struct aead_alg *alg;
		uint32_t key_size;
		int status;

		p->crypto.is_aead = 1;

		CHECK(n_tokens >= 4, "Not enough tokens");
		alg = aead_alg_find(t[1]);
		CHECK(alg, "Unsupported AEAD algorithm");

		key_size = alg->key_size;
		p->crypto.aead.alg = alg->alg;
		p->crypto.aead.key_size = key_size;

		CHECK(!strcmp(t[2], "key"), "Missing AEAD \"key\" keyword");
		CHECK(key_size <= RTE_DIM(p->crypto.aead.key),
			"AEAD algorithm key too big");

		status = hex_string_parse(t[3], p->crypto.aead.key, key_size);
		CHECK(!status, "AEAD key invalid format");

		t += 4;
		n_tokens -= 4;
	} else
		CHECK(0, "Missing \"cipher\"/\"aead\" keyword");

	/*
	 * Packet encapsulation parameters.
	 */
	CHECK(n_tokens >= 4, "Not enough tokens");
	CHECK(!strcmp(t[0], "esp"), "Missing \"esp\" keyword");
	CHECK(!strcmp(t[1], "spi"), "Missing \"spi\" keyword");

	p->encap.esp.spi = strtoul(t[2], &t[2], 0);
	CHECK(!t[2][0], "ESP SPI field invalid format");

	t += 3;
	n_tokens -= 3;

	if (!strcmp(t[0], "tunnel")) {
		p->encap.tunnel_mode = 1;

		CHECK(n_tokens >= 6, "Not enough tokens");

		if (!strcmp(t[1], "ipv4")) {
			uint32_t addr;

			p->encap.tunnel_ipv4 = 1;

			CHECK(!strcmp(t[2], "srcaddr"), "Missing \"srcaddr\" keyword");

			addr = strtoul(t[3], &t[3], 0);
			CHECK(!t[3][0], "Tunnel IPv4 source address invalid format");
			p->encap.tunnel.ipv4.src_addr.s_addr = htonl(addr);

			CHECK(!strcmp(t[4], "dstaddr"), "Missing \"dstaddr\" keyword");

			addr = strtoul(t[5], &t[5], 0);
			CHECK(!t[5][0], "Tunnel IPv4 destination address invalid format");
			p->encap.tunnel.ipv4.dst_addr.s_addr = htonl(addr);

			t += 6;
			n_tokens -= 6;
		} else if (!strcmp(t[1], "ipv6")) {
			int status;

			p->encap.tunnel_ipv4 = 0;

			CHECK(!strcmp(t[2], "srcaddr"), "Missing \"srcaddr\" keyword");

			status = hex_string_parse(t[3],
						  p->encap.tunnel.ipv6.src_addr.a,
						  16);
			CHECK(!status, "Tunnel IPv6 source address invalid format");

			CHECK(!strcmp(t[4], "dstaddr"), "Missing \"dstaddr\" keyword");

			status = hex_string_parse(t[5],
						  p->encap.tunnel.ipv6.dst_addr.a,
						  16);
			CHECK(!status, "Tunnel IPv6 destination address invalid format");

			t += 6;
			n_tokens -= 6;
		} else
			CHECK(0, "Missing \"ipv4\"/\"ipv6\" keyword");
	} else if (!strcmp(t[0], "transport")) {
		p->encap.tunnel_mode = 0;

		t++;
		n_tokens--;
	} else
		CHECK(0, "Missing \"tunnel\"/\"transport\" keyword");

	/*
	 * Any other parameters.
	 */
	CHECK(!n_tokens, "Unexpected trailing tokens");

	free(s0);
	return p;

error:
	free(p);
	free(s0);
	if (is_blank_or_comment)
		*is_blank_or_comment = blank_or_comment;
	return NULL;
}
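
/* Example SA text lines accepted by rte_swx_ipsec_sa_read() (the all-zero keys are placeholders;
 * a key must have exactly two hex digits per key byte):
 *
 *	encrypt aead aes-gcm-128 key 0x0000000000000000000000000000000000000000
 *		esp spi 1 tunnel ipv4 srcaddr 0xC0A80001 dstaddr 0xC0A80002
 *
 *	decrypt cipher aes-cbc-128 key 0x00000000000000000000000000000000
 *		auth sha1-hmac key 0x0000000000000000000000000000000000000000
 *		esp spi 2 transport
 *
 * Note that each SA is a single input line (wrapped above for readability only), the tunnel IPv4
 * addresses are parsed with strtoul() (hence the numeric format above), and the tunnel IPv6
 * addresses are 16-byte hex strings.
 */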

static void
tunnel_ipv4_header_set(struct rte_ipv4_hdr *h, struct rte_swx_ipsec_sa_params *p)
{
	struct rte_ipv4_hdr ipv4_hdr = {
		.version_ihl = 0x45,
		.type_of_service = 0,
		.total_length = 0, /* Cannot be pre-computed. */
		.packet_id = 0,
		.fragment_offset = 0,
		.time_to_live = 64,
		.next_proto_id = IPPROTO_ESP,
		.hdr_checksum = 0, /* Cannot be pre-computed. */
		.src_addr = p->encap.tunnel.ipv4.src_addr.s_addr,
		.dst_addr = p->encap.tunnel.ipv4.dst_addr.s_addr,
	};

	memcpy(h, &ipv4_hdr, sizeof(ipv4_hdr));
}

static void
tunnel_ipv6_header_set(struct rte_ipv6_hdr *h, struct rte_swx_ipsec_sa_params *p)
{
	struct rte_ipv6_hdr ipv6_hdr = {
		.vtc_flow = 0x60000000,
		.payload_len = 0, /* Cannot be pre-computed. */
		.proto = IPPROTO_ESP,
		.hop_limits = 64,
		.src_addr = p->encap.tunnel.ipv6.src_addr,
		.dst_addr = p->encap.tunnel.ipv6.dst_addr,
	};

	memcpy(h, &ipv6_hdr, sizeof(ipv6_hdr));
}

/* IPsec library SA parameters. */
static struct rte_crypto_sym_xform *
crypto_xform_get(struct rte_swx_ipsec_sa_params *p,
		struct rte_crypto_sym_xform *xform,
		uint32_t *salt_out)
{
	if (p->crypto.is_aead) {
		struct aead_alg *alg;
		uint32_t key_size, salt, iv_length;

		alg = aead_alg_find_by_id(p->crypto.aead.alg, p->crypto.aead.key_size);
		if (!alg)
			return NULL;

		/* salt and salt-related key size adjustment. */
		key_size = p->crypto.aead.key_size - 4;
		memcpy(&salt, &p->crypto.aead.key[key_size], 4);

		/* IV length. */
		iv_length = 12;
		if (p->crypto.aead.alg == RTE_CRYPTO_AEAD_AES_CCM)
			iv_length = 11;

		/* xform. */
		xform[0].type = RTE_CRYPTO_SYM_XFORM_AEAD;
		xform[0].aead.op = p->encrypt ?
			RTE_CRYPTO_AEAD_OP_ENCRYPT :
			RTE_CRYPTO_AEAD_OP_DECRYPT;
		xform[0].aead.algo = p->crypto.aead.alg;
		xform[0].aead.key.data = p->crypto.aead.key;
		xform[0].aead.key.length = key_size;
		xform[0].aead.iv.offset = IV_OFFSET;
		xform[0].aead.iv.length = iv_length;
		xform[0].aead.digest_length = alg->digest_size;
		xform[0].aead.aad_length = alg->aad_size;
		xform[0].next = NULL;

		*salt_out = salt;
		return &xform[0];
	} else {
		struct cipher_alg *cipher_alg;
		struct auth_alg *auth_alg;
		uint32_t cipher_key_size, auth_key_size, salt, auth_iv_length;

		cipher_alg = cipher_alg_find_by_id(p->crypto.cipher_auth.cipher.alg,
						   p->crypto.cipher_auth.cipher.key_size);
		if (!cipher_alg)
			return NULL;

		auth_alg = auth_alg_find_by_id(p->crypto.cipher_auth.auth.alg,
					       p->crypto.cipher_auth.auth.key_size);
		if (!auth_alg)
			return NULL;

		/* salt and salt-related key size adjustment. */
		cipher_key_size = p->crypto.cipher_auth.cipher.key_size;
		auth_key_size = p->crypto.cipher_auth.auth.key_size;

		switch (p->crypto.cipher_auth.cipher.alg) {
		case RTE_CRYPTO_CIPHER_AES_CBC:
		case RTE_CRYPTO_CIPHER_3DES_CBC:
			salt = rte_rand();
			break;

		case RTE_CRYPTO_CIPHER_AES_CTR:
			cipher_key_size -= 4;
			memcpy(&salt, &p->crypto.cipher_auth.cipher.key[cipher_key_size], 4);
			break;

		default:
			salt = 0;
		}

		if (p->crypto.cipher_auth.auth.alg == RTE_CRYPTO_AUTH_AES_GMAC) {
			auth_key_size -= 4;
			memcpy(&salt, &p->crypto.cipher_auth.auth.key[auth_key_size], 4);
		}

		/* IV length. */
		auth_iv_length = cipher_alg->iv_size;
		if (p->crypto.cipher_auth.auth.alg == RTE_CRYPTO_AUTH_AES_GMAC)
			auth_iv_length = 12;

		/* xform. */
		if (p->encrypt) {
			xform[0].type = RTE_CRYPTO_SYM_XFORM_CIPHER;
			xform[0].cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
			xform[0].cipher.algo = p->crypto.cipher_auth.cipher.alg;
			xform[0].cipher.key.data = p->crypto.cipher_auth.cipher.key;
			xform[0].cipher.key.length = cipher_key_size;
			xform[0].cipher.iv.offset = IV_OFFSET;
			xform[0].cipher.iv.length = cipher_alg->iv_size;
			xform[0].cipher.dataunit_len = 0;
			xform[0].next = &xform[1];

			xform[1].type = RTE_CRYPTO_SYM_XFORM_AUTH;
			xform[1].auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
			xform[1].auth.algo = p->crypto.cipher_auth.auth.alg;
			xform[1].auth.key.data = p->crypto.cipher_auth.auth.key;
			xform[1].auth.key.length = auth_key_size;
			xform[1].auth.iv.offset = IV_OFFSET;
			xform[1].auth.iv.length = auth_iv_length;
			xform[1].auth.digest_length = auth_alg->digest_size;
			xform[1].next = NULL;
		} else {
			xform[0].type = RTE_CRYPTO_SYM_XFORM_AUTH;
			xform[0].auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
			xform[0].auth.algo = p->crypto.cipher_auth.auth.alg;
			xform[0].auth.key.data = p->crypto.cipher_auth.auth.key;
			xform[0].auth.key.length = auth_key_size;
			xform[0].auth.iv.offset = IV_OFFSET;
			xform[0].auth.iv.length = auth_iv_length;
			xform[0].auth.digest_length = auth_alg->digest_size;
			xform[0].next = &xform[1];

			xform[1].type = RTE_CRYPTO_SYM_XFORM_CIPHER;
			xform[1].cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
			xform[1].cipher.algo = p->crypto.cipher_auth.cipher.alg;
			xform[1].cipher.key.data = p->crypto.cipher_auth.cipher.key;
			xform[1].cipher.key.length = cipher_key_size;
			xform[1].cipher.iv.offset = IV_OFFSET;
			xform[1].cipher.iv.length = cipher_alg->iv_size;
			xform[1].cipher.dataunit_len = 0;
			xform[1].next = NULL;
		}

		*salt_out = salt;

		if (p->crypto.cipher_auth.auth.alg == RTE_CRYPTO_AUTH_AES_GMAC) {
			if (p->encrypt)
				return &xform[1];

			xform[0].next = NULL;
			return &xform[0];
		}

		return &xform[0];
	}
}

static void
ipsec_xform_get(struct rte_swx_ipsec_sa_params *p,
		struct rte_security_ipsec_xform *ipsec_xform,
		uint32_t salt)
{
	ipsec_xform->spi = p->encap.esp.spi;

	ipsec_xform->salt = salt;

	ipsec_xform->options.esn = 0;
	ipsec_xform->options.udp_encap = 0;
	ipsec_xform->options.copy_dscp = 1;
	ipsec_xform->options.copy_flabel = 0;
	ipsec_xform->options.copy_df = 0;
	ipsec_xform->options.dec_ttl = 0;
	ipsec_xform->options.ecn = 1;
	ipsec_xform->options.stats = 0;
	ipsec_xform->options.iv_gen_disable = 0;
	ipsec_xform->options.tunnel_hdr_verify = 0;
	ipsec_xform->options.udp_ports_verify = 0;
	ipsec_xform->options.ip_csum_enable = 0;
	ipsec_xform->options.l4_csum_enable = 0;
	ipsec_xform->options.ip_reassembly_en = 0;

	ipsec_xform->direction = p->encrypt ?
		RTE_SECURITY_IPSEC_SA_DIR_EGRESS :
		RTE_SECURITY_IPSEC_SA_DIR_INGRESS;

	ipsec_xform->proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP;

	ipsec_xform->mode = p->encap.tunnel_mode ?
		RTE_SECURITY_IPSEC_SA_MODE_TUNNEL :
		RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT;

	ipsec_xform->tunnel.type = p->encap.tunnel_ipv4 ?
		RTE_SECURITY_IPSEC_TUNNEL_IPV4 :
		RTE_SECURITY_IPSEC_TUNNEL_IPV6;

	if (p->encap.tunnel_mode) {
		if (p->encap.tunnel_ipv4) {
			ipsec_xform->tunnel.ipv4.src_ip = p->encap.tunnel.ipv4.src_addr;
			ipsec_xform->tunnel.ipv4.dst_ip = p->encap.tunnel.ipv4.dst_addr;
			ipsec_xform->tunnel.ipv4.dscp = 0;
			ipsec_xform->tunnel.ipv4.df = 0;
			ipsec_xform->tunnel.ipv4.ttl = 64;
		} else {
			ipsec_xform->tunnel.ipv6.src_addr = p->encap.tunnel.ipv6.src_addr;
			ipsec_xform->tunnel.ipv6.dst_addr = p->encap.tunnel.ipv6.dst_addr;
			ipsec_xform->tunnel.ipv6.dscp = 0;
			ipsec_xform->tunnel.ipv6.flabel = 0;
			ipsec_xform->tunnel.ipv6.hlimit = 64;
		}
	}

	ipsec_xform->life.packets_soft_limit = 0;
	ipsec_xform->life.bytes_soft_limit = 0;
	ipsec_xform->life.packets_hard_limit = 0;
	ipsec_xform->life.bytes_hard_limit = 0;

	ipsec_xform->replay_win_sz = 0;

	ipsec_xform->esn.value = 0;

	ipsec_xform->udp.dport = 0;
	ipsec_xform->udp.sport = 0;
}

static int
ipsec_sa_prm_get(struct rte_swx_ipsec_sa_params *p,
		 struct rte_ipsec_sa_prm *sa_prm,
		 struct rte_ipv4_hdr *ipv4_hdr,
		 struct rte_ipv6_hdr *ipv6_hdr,
		 struct rte_crypto_sym_xform *crypto_xform)
{
	uint32_t salt;

	memset(sa_prm, 0, sizeof(*sa_prm)); /* Better to be safe than sorry. */

	sa_prm->userdata = 0; /* Not used. */

	sa_prm->flags = 0; /* Flag RTE_IPSEC_SAFLAG_SQN_ATOM not enabled. */

	/*
	 * crypto_xform.
	 */
	sa_prm->crypto_xform = crypto_xform_get(p, crypto_xform, &salt);
	if (!sa_prm->crypto_xform)
		return -EINVAL;

	/*
	 * ipsec_xform.
	 */
	ipsec_xform_get(p, &sa_prm->ipsec_xform, salt);

	/*
	 * tunnel / transport.
	 *
	 * Currently, the input IP packet type is assumed to be IPv4. To support both IPv4 and IPv6,
	 * the input packet type should be added to the SA configuration parameters.
	 */
	if (p->encap.tunnel_mode) {
		if (p->encap.tunnel_ipv4) {
			sa_prm->tun.hdr_len = sizeof(struct rte_ipv4_hdr);
			sa_prm->tun.hdr_l3_off = 0;
			sa_prm->tun.next_proto = IPPROTO_IPIP; /* IPv4. */
			sa_prm->tun.hdr = ipv4_hdr;
		} else {
			sa_prm->tun.hdr_len = sizeof(struct rte_ipv6_hdr);
			sa_prm->tun.hdr_l3_off = 0;
			sa_prm->tun.next_proto = IPPROTO_IPIP; /* IPv4. */
			sa_prm->tun.hdr = ipv6_hdr;
		}
	} else {
		sa_prm->trs.proto = IPPROTO_IPIP; /* IPv4. */
	}

	return 0;
}

static int
ipsec_session_create(struct rte_swx_ipsec *ipsec,
		     struct rte_swx_ipsec_sa_params *p,
		     struct rte_ipsec_session *s)
{
	struct rte_ipv4_hdr ipv4_hdr;
	struct rte_ipv6_hdr ipv6_hdr;
	struct rte_crypto_sym_xform crypto_xform[2];
	struct rte_ipsec_sa_prm sa_prm;
	struct rte_ipsec_sa *sa = NULL;
	struct rte_cryptodev_sym_session *crypto_session = NULL;
	int sa_size;
	int sa_valid = 0, status = 0;

	tunnel_ipv4_header_set(&ipv4_hdr, p);
	tunnel_ipv6_header_set(&ipv6_hdr, p);

	/* IPsec library SA setup. */
	status = ipsec_sa_prm_get(p, &sa_prm, &ipv4_hdr, &ipv6_hdr, crypto_xform);
	if (status)
		goto error;

	sa_size = rte_ipsec_sa_size(&sa_prm);
	if (sa_size < 0) {
		status = sa_size;
		goto error;
	}
	if (!sa_size) {
		status = -EINVAL;
		goto error;
	}

	sa = calloc(1, sa_size);
	if (!sa) {
		status = -ENOMEM;
		goto error;
	}

	sa_size = rte_ipsec_sa_init(sa, &sa_prm, sa_size);
	if (sa_size < 0) {
		status = sa_size;
		goto error;
	}
	if (!sa_size) {
		status = -EINVAL;
		goto error;
	}

	sa_valid = 1;

	/* Cryptodev library session setup. */
	crypto_session = rte_cryptodev_sym_session_create(ipsec->dev_id,
							  sa_prm.crypto_xform,
							  ipsec->mp_session);
	if (!crypto_session) {
		status = -ENOMEM;
		goto error;
	}

	/* IPsec library session setup. */
	s->sa = sa;
	s->type = RTE_SECURITY_ACTION_TYPE_NONE;
	s->crypto.ses = crypto_session;
	s->crypto.dev_id = ipsec->dev_id;
	s->pkt_func.prepare.async = NULL;
	s->pkt_func.process = NULL;

	status = rte_ipsec_session_prepare(s);
	if (status)
		goto error;

	return 0;

error:
	/* sa. */
	if (sa_valid)
		rte_ipsec_sa_fini(sa);

	free(sa);

	/* crypto_session. */
	if (crypto_session)
		rte_cryptodev_sym_session_free(ipsec->dev_id, crypto_session);

	/* s. */
	memset(s, 0, sizeof(*s));

	return status;
}

static void
ipsec_session_free(struct rte_swx_ipsec *ipsec,
		   struct rte_ipsec_session *s)
{
	if (!s)
		return;

	/* IPsec library SA. */
	if (s->sa)
		rte_ipsec_sa_fini(s->sa);
	free(s->sa);

	/* Cryptodev library session. */
	if (s->crypto.ses)
		rte_cryptodev_sym_session_free(ipsec->dev_id, s->crypto.ses);

	/* IPsec library session. */
	memset(s, 0, sizeof(*s));
}

int
rte_swx_ipsec_sa_add(struct rte_swx_ipsec *ipsec,
		     struct rte_swx_ipsec_sa_params *sa_params,
		     uint32_t *id)
{
	struct ipsec_sa *sa;
	uint32_t sa_id;
	int status;

	/* Check the input parameters. */
	if (!ipsec || !sa_params || !id)
		return -EINVAL;

	/* Allocate a free SADB entry. */
	if (!ipsec->n_sa_free_id)
		return -ENOSPC;

	sa_id = ipsec->sa_free_id[ipsec->n_sa_free_id - 1];
	ipsec->n_sa_free_id--;

	/* Acquire the SA resources. */
	sa = ipsec_sa_get(ipsec, sa_id);

	status = ipsec_session_create(ipsec, sa_params, &sa->s);
	if (status) {
		/* Free the allocated SADB entry. */
		ipsec->sa_free_id[ipsec->n_sa_free_id] = sa_id;
		ipsec->n_sa_free_id++;

		return status;
	}

	/* Validate the new SA. */
	sa->valid = 1;
	*id = sa_id;

	return 0;
}
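
/* Control plane usage sketch (sa_text_line is a placeholder for one SA description line in the
 * format accepted by rte_swx_ipsec_sa_read() above; note that the returned sa_params is heap
 * allocated and must be freed by the caller):
 *
 *	struct rte_swx_ipsec_sa_params *sa_params;
 *	const char *errmsg;
 *	uint32_t sa_id;
 *	int status;
 *
 *	sa_params = rte_swx_ipsec_sa_read(ipsec, sa_text_line, NULL, &errmsg);
 *	if (!sa_params)
 *		return -EINVAL;
 *
 *	status = rte_swx_ipsec_sa_add(ipsec, sa_params, &sa_id);
 *	free(sa_params);
 */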

void
rte_swx_ipsec_sa_delete(struct rte_swx_ipsec *ipsec,
			uint32_t sa_id)
{
	struct ipsec_sa *sa;

	/* Check the input parameters. */
	if (!ipsec || (sa_id >= ipsec->n_sa_max))
		return;

	/* Release the SA resources. */
	sa = ipsec_sa_get(ipsec, sa_id);

	ipsec_session_free(ipsec, &sa->s);

	/* Free the SADB entry. */
	ipsec->sa_free_id[ipsec->n_sa_free_id] = sa_id;
	ipsec->n_sa_free_id++;

	/* Invalidate the SA. */
	sa->valid = 0;
}
1826