/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2022 Intel Corporation
 */

#include <stdalign.h>
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#include <arpa/inet.h>

#include <rte_common.h>
#include <rte_ip.h>
#include <rte_tailq.h>
#include <rte_eal_memconfig.h>
#include <rte_ring.h>
#include <rte_mbuf.h>
#include <rte_cryptodev.h>
#include <rte_ipsec.h>

#include "rte_swx_ipsec.h"

#ifndef RTE_SWX_IPSEC_HUGE_PAGES_DISABLE

#include <rte_malloc.h>

static void *
env_calloc(size_t size, size_t alignment, int numa_node)
{
	return rte_zmalloc_socket(NULL, size, alignment, numa_node);
}

static void
env_free(void *start, size_t size __rte_unused)
{
	rte_free(start);
}

#else

#include <numa.h>

static void *
env_calloc(size_t size, size_t alignment __rte_unused, int numa_node)
{
	void *start;

	if (numa_available() == -1)
		return NULL;

	start = numa_alloc_onnode(size, numa_node);
	if (!start)
		return NULL;

	memset(start, 0, size);
	return start;
}

static void
env_free(void *start, size_t size)
{
	if ((numa_available() == -1) || !start)
		return;

	numa_free(start, size);
}

#endif
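
/* Note: with the default build (RTE_SWX_IPSEC_HUGE_PAGES_DISABLE not defined), the instance memory
 * is taken from the EAL huge-page heap on the requested NUMA node; when this macro is defined, the
 * allocation falls back to regular pages obtained through libnuma instead, still on the requested
 * NUMA node.
 */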

#ifndef RTE_SWX_IPSEC_POOL_CACHE_SIZE
#define RTE_SWX_IPSEC_POOL_CACHE_SIZE 256
#endif

/* The crypto device session mempool has its size set to the number of SAs. The mempool API
 * requires the mempool size to be at least 1.5 times the size of the mempool cache.
 */
#define N_SA_MIN (RTE_SWX_IPSEC_POOL_CACHE_SIZE * 1.5)
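
/* Example: with the default pool cache size of 256, N_SA_MIN is 384, so an instance created with
 * params->n_sa_max = 100 still gets a SADB of rte_align64pow2(384) = 512 entries (see
 * rte_swx_ipsec_create() below).
 */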

struct ipsec_sa {
	struct rte_ipsec_session s;
	int valid;
};

struct ipsec_pkts_in {
	struct rte_mbuf *pkts[RTE_SWX_IPSEC_BURST_SIZE_MAX];
	struct ipsec_sa *sa[RTE_SWX_IPSEC_BURST_SIZE_MAX];
	struct rte_ipsec_group groups[RTE_SWX_IPSEC_BURST_SIZE_MAX];
	struct rte_crypto_op *group_cops[RTE_SWX_IPSEC_BURST_SIZE_MAX];
	struct rte_crypto_op *cops[RTE_SWX_IPSEC_BURST_SIZE_MAX];
	uint32_t n_cops;
};

struct ipsec_pkts_out {
	struct rte_crypto_op *cops[RTE_SWX_IPSEC_BURST_SIZE_MAX];
	struct rte_mbuf *group_pkts[RTE_SWX_IPSEC_BURST_SIZE_MAX];
	struct rte_ipsec_group groups[RTE_SWX_IPSEC_BURST_SIZE_MAX];
	struct rte_mbuf *pkts[RTE_SWX_IPSEC_BURST_SIZE_MAX];
	uint32_t n_pkts;
};

struct rte_swx_ipsec {
	/*
	 * Parameters.
	 */

	/* IPsec instance name. */
	char name[RTE_SWX_IPSEC_NAME_SIZE];

	/* Input packet queue. */
	struct rte_ring *ring_in;

	/* Output packet queue. */
	struct rte_ring *ring_out;

	/* Crypto device ID. */
	uint8_t dev_id;

	/* Crypto device queue pair ID. */
	uint16_t qp_id;

	/* Burst sizes. */
	struct rte_swx_ipsec_burst_size bsz;

	/* SA table size. */
	size_t n_sa_max;

	/*
	 * Internals.
	 */
	/* Crypto device buffer pool for sessions. */
	struct rte_mempool *mp_session;

	/* Pre-crypto packets. */
	struct ipsec_pkts_in in;

	/* Post-crypto packets. */
	struct ipsec_pkts_out out;

	/* Crypto device enqueue threshold. */
	uint32_t crypto_wr_threshold;

	/* Packets currently under crypto device processing. */
	uint32_t n_pkts_crypto;

	/* List of free SADB positions. */
	uint32_t *sa_free_id;

	/* Number of elements in the SADB list of free positions. */
	size_t n_sa_free_id;

	/* Allocated memory total size in bytes. */
	size_t total_size;

	/* Flag for registration to the global list of instances. */
	int registered;

	/*
	 * Table memory.
	 */
	alignas(RTE_CACHE_LINE_SIZE) uint8_t memory[];
};

static inline struct ipsec_sa *
ipsec_sa_get(struct rte_swx_ipsec *ipsec, uint32_t sa_id)
{
	struct ipsec_sa *sadb = (struct ipsec_sa *)ipsec->memory;

	return &sadb[sa_id & (ipsec->n_sa_max - 1)];
}
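
/* Since n_sa_max is always a power of 2 (see rte_swx_ipsec_create() below), the mask above maps
 * any 32-bit SA ID to a valid SADB slot, so the fast path needs no bounds check.
 */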

/* Global list of instances. */
TAILQ_HEAD(rte_swx_ipsec_list, rte_tailq_entry);

static struct rte_tailq_elem rte_swx_ipsec_tailq = {
	.name = "RTE_SWX_IPSEC",
};

EAL_REGISTER_TAILQ(rte_swx_ipsec_tailq)

struct rte_swx_ipsec *
rte_swx_ipsec_find(const char *name)
{
	struct rte_swx_ipsec_list *ipsec_list;
	struct rte_tailq_entry *te = NULL;

	if (!name ||
	    !name[0] ||
	    (strnlen(name, RTE_SWX_IPSEC_NAME_SIZE) == RTE_SWX_IPSEC_NAME_SIZE))
		return NULL;

	ipsec_list = RTE_TAILQ_CAST(rte_swx_ipsec_tailq.head, rte_swx_ipsec_list);

	rte_mcfg_tailq_read_lock();

	TAILQ_FOREACH(te, ipsec_list, next) {
		struct rte_swx_ipsec *ipsec = (struct rte_swx_ipsec *)te->data;

		if (!strncmp(name, ipsec->name, sizeof(ipsec->name))) {
			rte_mcfg_tailq_read_unlock();
			return ipsec;
		}
	}

	rte_mcfg_tailq_read_unlock();
	return NULL;
}

static int
ipsec_register(struct rte_swx_ipsec *ipsec)
{
	struct rte_swx_ipsec_list *ipsec_list;
	struct rte_tailq_entry *te = NULL;

	ipsec_list = RTE_TAILQ_CAST(rte_swx_ipsec_tailq.head, rte_swx_ipsec_list);

	rte_mcfg_tailq_write_lock();

	TAILQ_FOREACH(te, ipsec_list, next) {
		struct rte_swx_ipsec *elem = (struct rte_swx_ipsec *)te->data;

		if (!strncmp(ipsec->name, elem->name, sizeof(ipsec->name))) {
			rte_mcfg_tailq_write_unlock();
			return -EEXIST;
		}
	}

	te = calloc(1, sizeof(struct rte_tailq_entry));
	if (!te) {
		rte_mcfg_tailq_write_unlock();
		return -ENOMEM;
	}

	te->data = (void *)ipsec;
	TAILQ_INSERT_TAIL(ipsec_list, te, next);
	rte_mcfg_tailq_write_unlock();
	return 0;
}

static void
ipsec_unregister(struct rte_swx_ipsec *ipsec)
{
	struct rte_swx_ipsec_list *ipsec_list;
	struct rte_tailq_entry *te = NULL;

	ipsec_list = RTE_TAILQ_CAST(rte_swx_ipsec_tailq.head, rte_swx_ipsec_list);

	rte_mcfg_tailq_write_lock();

	TAILQ_FOREACH(te, ipsec_list, next) {
		if (te->data == (void *)ipsec) {
			TAILQ_REMOVE(ipsec_list, te, next);
			rte_mcfg_tailq_write_unlock();
			free(te);
			return;
		}
	}

	rte_mcfg_tailq_write_unlock();
}

static void
ipsec_session_free(struct rte_swx_ipsec *ipsec, struct rte_ipsec_session *s);

void
rte_swx_ipsec_free(struct rte_swx_ipsec *ipsec)
{
	size_t i;

	if (!ipsec)
		return;

	/* Remove the current instance from the global list. */
	if (ipsec->registered)
		ipsec_unregister(ipsec);

	/* SADB. */
	for (i = 0; i < ipsec->n_sa_max; i++) {
		struct ipsec_sa *sa = ipsec_sa_get(ipsec, i);

		if (!sa->valid)
			continue;

		/* SA session. */
		ipsec_session_free(ipsec, &sa->s);
	}

	/* Crypto device buffer pool. */
	rte_mempool_free(ipsec->mp_session);

	/* IPsec object memory. */
	env_free(ipsec, ipsec->total_size);
}

int
rte_swx_ipsec_create(struct rte_swx_ipsec **ipsec_out,
		     const char *name,
		     struct rte_swx_ipsec_params *params,
		     int numa_node)
{
	char resource_name[RTE_SWX_IPSEC_NAME_SIZE];
	struct rte_swx_ipsec *ipsec = NULL;
	struct rte_ring *ring_in, *ring_out;
	struct rte_cryptodev_info dev_info;
	size_t n_sa_max, sadb_offset, sadb_size, sa_free_id_offset, sa_free_id_size, total_size, i;
	uint32_t dev_session_size;
	int dev_id, status = 0;

	/* Check input parameters. */
	if (!ipsec_out ||
	    !name ||
	    !name[0] ||
	    (strnlen(name, RTE_SWX_IPSEC_NAME_SIZE) == RTE_SWX_IPSEC_NAME_SIZE) ||
	    !params ||
	    (params->bsz.ring_rd > RTE_SWX_IPSEC_BURST_SIZE_MAX) ||
	    (params->bsz.ring_wr > RTE_SWX_IPSEC_BURST_SIZE_MAX) ||
	    (params->bsz.crypto_wr > RTE_SWX_IPSEC_BURST_SIZE_MAX) ||
	    (params->bsz.crypto_rd > RTE_SWX_IPSEC_BURST_SIZE_MAX) ||
	    !params->n_sa_max) {
		status = -EINVAL;
		goto error;
	}

	ring_in = rte_ring_lookup(params->ring_in_name);
	if (!ring_in) {
		status = -EINVAL;
		goto error;
	}

	ring_out = rte_ring_lookup(params->ring_out_name);
	if (!ring_out) {
		status = -EINVAL;
		goto error;
	}

	dev_id = rte_cryptodev_get_dev_id(params->crypto_dev_name);
	if (dev_id == -1) {
		status = -EINVAL;
		goto error;
	}

	rte_cryptodev_info_get(dev_id, &dev_info);
	if (params->crypto_dev_queue_pair_id >= dev_info.max_nb_queue_pairs) {
		status = -EINVAL;
		goto error;
	}

	/* Memory allocation. */
	n_sa_max = rte_align64pow2(RTE_MAX(params->n_sa_max, N_SA_MIN));

	sadb_offset = sizeof(struct rte_swx_ipsec);
	sadb_size = RTE_CACHE_LINE_ROUNDUP(n_sa_max * sizeof(struct ipsec_sa));

	sa_free_id_offset = sadb_offset + sadb_size;
	sa_free_id_size = RTE_CACHE_LINE_ROUNDUP(n_sa_max * sizeof(uint32_t));

	total_size = sa_free_id_offset + sa_free_id_size;
	ipsec = env_calloc(total_size, RTE_CACHE_LINE_SIZE, numa_node);
	if (!ipsec) {
		status = -ENOMEM;
		goto error;
	}

	/* Initialization. */
	strcpy(ipsec->name, name);
	ipsec->ring_in = ring_in;
	ipsec->ring_out = ring_out;
	ipsec->dev_id = (uint8_t)dev_id;
	ipsec->qp_id = params->crypto_dev_queue_pair_id;
	memcpy(&ipsec->bsz, &params->bsz, sizeof(struct rte_swx_ipsec_burst_size));
	ipsec->n_sa_max = n_sa_max;

	ipsec->crypto_wr_threshold = params->bsz.crypto_wr * 3 / 4;

	/* The free ID list is placed right after the SADB inside the memory[] array, which itself
	 * starts at offset sadb_offset within the IPsec object.
	 */
	ipsec->sa_free_id = (uint32_t *)&ipsec->memory[sadb_size];
	for (i = 0; i < n_sa_max; i++)
		ipsec->sa_free_id[i] = n_sa_max - 1 - i;
	ipsec->n_sa_free_id = n_sa_max;

	ipsec->total_size = total_size;

	/* Crypto device memory pool. */
	dev_session_size = rte_cryptodev_sym_get_private_session_size((uint8_t)dev_id);

	snprintf(resource_name, sizeof(resource_name), "%s_mp", name);
	ipsec->mp_session = rte_cryptodev_sym_session_pool_create(resource_name,
		n_sa_max, /* number of pool elements */
		dev_session_size, /* pool element size */
		RTE_SWX_IPSEC_POOL_CACHE_SIZE, /* pool cache size */
		0, /* pool element private data size */
		numa_node);
	if (!ipsec->mp_session) {
		status = -ENOMEM;
		goto error;
	}

	/* Add the current instance to the global list. */
	status = ipsec_register(ipsec);
	if (status)
		goto error;

	ipsec->registered = 1;

	*ipsec_out = ipsec;
	return 0;

error:
	rte_swx_ipsec_free(ipsec);
	return status;
}
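
/* Illustrative usage sketch, not part of the library; the ring, crypto device and instance names
 * below are hypothetical placeholders:
 *
 *	struct rte_swx_ipsec_params params = {
 *		.ring_in_name = "RING_IN",
 *		.ring_out_name = "RING_OUT",
 *		.crypto_dev_name = "crypto_aesni_mb0",
 *		.crypto_dev_queue_pair_id = 0,
 *		.bsz = {.ring_rd = 32, .ring_wr = 32, .crypto_wr = 32, .crypto_rd = 32},
 *		.n_sa_max = 4096,
 *	};
 *	struct rte_swx_ipsec *ipsec;
 *	int status = rte_swx_ipsec_create(&ipsec, "ipsec0", &params, 0);
 *
 * The two rings must already exist (they are looked up by name), and the crypto device is expected
 * to be configured and started by the application before the data plane is run.
 */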

static inline int
ipsec_sa_group(struct rte_swx_ipsec *ipsec, int n_pkts)
{
	struct ipsec_sa *sa;
	struct rte_ipsec_group *g;
	int n_groups, n_pkts_in_group, i;

	sa = ipsec->in.sa[0];

	g = &ipsec->in.groups[0];
	g->id.ptr = sa;
	g->m = &ipsec->in.pkts[0];
	n_pkts_in_group = 1;
	n_groups = 1;

	for (i = 1; i < n_pkts; i++) {
		struct ipsec_sa *sa_new = ipsec->in.sa[i];

		/* Same SA => Add the current pkt to the same group. */
		if (sa_new == sa) {
			n_pkts_in_group++;
			continue;
		}

		/* Different SA => Close the current group & add the current pkt to a new group. */
		g->cnt = n_pkts_in_group;
		sa = sa_new;

		g++;
		g->id.ptr = sa;
		g->m = &ipsec->in.pkts[i];
		n_pkts_in_group = 1;
		n_groups++;
	}

	/* Close the last group. */
	g->cnt = n_pkts_in_group;

	return n_groups;
}
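
/* Example: for a burst whose per-packet SAs are [A, A, B, B, A], the function above produces
 * three groups, (A, cnt = 2), (B, cnt = 2) and (A, cnt = 1): only consecutive packets sharing the
 * same SA are grouped together, hence packet order is always preserved.
 */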

static inline void
ipsec_crypto_enqueue(struct rte_swx_ipsec *ipsec, uint16_t n_cops)
{
	struct rte_crypto_op **dst0 = ipsec->in.cops, **dst;
	struct rte_crypto_op **src = ipsec->in.group_cops;

	uint32_t n_pkts_crypto = ipsec->n_pkts_crypto;
	uint32_t n_dst = ipsec->in.n_cops;
	uint32_t n_dst_max = ipsec->bsz.crypto_wr;
	uint32_t n_dst_avail = n_dst_max - n_dst;
	uint32_t n_src = n_cops;
	uint32_t i;

	dst = &dst0[n_dst];

	/* Shortcut: If no elements in DST and enough elements in SRC, then simply use SRC directly
	 * instead of moving the SRC to DST first and then using DST.
	 */
	if (!n_dst && n_src >= ipsec->crypto_wr_threshold) {
		uint16_t n_ok;

		n_ok = rte_cryptodev_enqueue_burst(ipsec->dev_id, ipsec->qp_id, src, n_src);
		ipsec->n_pkts_crypto = n_pkts_crypto + n_ok;

		for (i = n_ok; i < n_src; i++) {
			struct rte_crypto_op *cop = src[i];
			struct rte_mbuf *m = cop->sym->m_src;

			rte_pktmbuf_free(m);
		}

		return;
	}

	/* Move from SRC to DST. Every time DST gets full, send burst from DST. */
	for ( ; n_src >= n_dst_avail; ) {
		uint32_t n_ok;

		/* Move from SRC to DST. */
		for (i = 0; i < n_dst_avail; i++)
			*dst++ = *src++;

		n_src -= n_dst_avail;

		/* DST full: send burst from DST. */
		n_ok = rte_cryptodev_enqueue_burst(ipsec->dev_id, ipsec->qp_id, dst0, n_dst_max);
		n_pkts_crypto += n_ok;

		for (i = n_ok; i < n_dst_max; i++) {
			struct rte_crypto_op *cop = dst0[i];
			struct rte_mbuf *m = cop->sym->m_src;

			rte_pktmbuf_free(m);
		}

		/* Next iteration. */
		dst = dst0;
		n_dst = 0;
		n_dst_avail = n_dst_max;
	}

	ipsec->n_pkts_crypto = n_pkts_crypto;

	/* Move from SRC to DST. Not enough elements in SRC to get DST full. */
	for (i = 0; i < n_src; i++)
		*dst++ = *src++;

	n_dst += n_src;

	ipsec->in.n_cops = n_dst;
}

/**
 * Packet buffer anatomy:
 *
 * +----------+---------+--------------------------------------------------------------------------+
 * | Offset   | Size    | Description                                                              |
 * | (Byte #) | (Bytes) |                                                                          |
 * +==========+=========+==========================================================================+
 * | 0        | 128     | Meta-data: struct rte_mbuf.                                              |
 * |          |         | The buf_addr field points to the start of the packet section.           |
 * +----------+---------+--------------------------------------------------------------------------+
 * | 128      | 128     | Meta-data: struct ipsec_mbuf (see below).                                |
 * +----------+---------+--------------------------------------------------------------------------+
 * | 256      |         | Packet section.                                                          |
 * |          |         | The first packet byte is placed at the offset indicated by the struct   |
 * |          |         | rte_mbuf::data_off field relative to the start of the packet section.   |
 * +----------+---------+--------------------------------------------------------------------------+
 */
struct ipsec_mbuf {
	struct ipsec_sa *sa;
	struct rte_crypto_op cop;
	struct rte_crypto_sym_op sym_cop;
	uint8_t buffer[32]; /* The crypto IV is placed here. */
};
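
/* Note: the layout above implies that the mbufs written to the input ring must carry at least
 * sizeof(struct ipsec_mbuf) bytes of private area, since rte_mbuf_to_priv() is used below to
 * reach the per-packet crypto operation. A pool providing this (illustrative parameters):
 *
 *	struct rte_mempool *mp = rte_pktmbuf_pool_create("pkt_pool", 4096, 256,
 *		sizeof(struct ipsec_mbuf), RTE_MBUF_DEFAULT_BUF_SIZE, 0);
 */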

/* Offset from the start of the struct ipsec_mbuf::cop where the crypto IV will be placed. */
#define IV_OFFSET (sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op))

#define META_LENGTH sizeof(struct rte_swx_ipsec_input_packet_metadata)

static inline void
rte_swx_ipsec_pre_crypto(struct rte_swx_ipsec *ipsec)
{
	int n_pkts, n_groups, i;

	/* Read packets from the input ring. */
	n_pkts = rte_ring_sc_dequeue_burst(ipsec->ring_in,
					   (void **)ipsec->in.pkts,
					   ipsec->bsz.ring_rd,
					   NULL);
	if (!n_pkts)
		return;

	/* Get the SA for each packet. */
	for (i = 0; i < n_pkts; i++) {
		struct rte_mbuf *m = ipsec->in.pkts[i];
		struct rte_swx_ipsec_input_packet_metadata *meta;
		struct rte_ipv4_hdr *ipv4_hdr;
		uint32_t sa_id;

		meta = rte_pktmbuf_mtod(m, struct rte_swx_ipsec_input_packet_metadata *);
		ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *, META_LENGTH);

		/* Read the SA ID from the IPsec meta-data placed at the front of the IP packet. */
		sa_id = ntohl(meta->sa_id);

		/* Consume the IPsec meta-data. */
		m->data_off += META_LENGTH;
		m->data_len -= META_LENGTH;
		m->pkt_len -= META_LENGTH;

		/* Set the fields required by the IPsec library. */
		m->l2_len = 0;
		m->l3_len = (ipv4_hdr->version_ihl >> 4 == 4) ?
			sizeof(struct rte_ipv4_hdr) :
			sizeof(struct rte_ipv6_hdr);

		/* Get the SA. */
		ipsec->in.sa[i] = ipsec_sa_get(ipsec, sa_id);
	}

	/* Group packets that share the same SA. */
	n_groups = ipsec_sa_group(ipsec, n_pkts);

	/* Write each group of packets sharing the same SA to the crypto device. */
	for (i = 0; i < n_groups; i++) {
		struct rte_ipsec_group *g = &ipsec->in.groups[i];
		struct ipsec_sa *sa = g->id.ptr;
		struct rte_ipsec_session *s = &sa->s;
		uint32_t j;
		uint16_t n_pkts_ok;

		/* Prepare the crypto ops for the current group. */
		for (j = 0; j < g->cnt; j++) {
			struct rte_mbuf *m = g->m[j];
			struct ipsec_mbuf *priv = rte_mbuf_to_priv(m);

			priv->sa = sa;
			ipsec->in.group_cops[j] = &priv->cop;
		}

		n_pkts_ok = rte_ipsec_pkt_crypto_prepare(s, g->m, ipsec->in.group_cops, g->cnt);

		for (j = n_pkts_ok; j < g->cnt; j++) {
			struct rte_mbuf *m = g->m[j];

			rte_pktmbuf_free(m);
		}

		/* Write the crypto ops of the current group to the crypto device. */
		ipsec_crypto_enqueue(ipsec, n_pkts_ok);
	}
}

static inline void
ipsec_ring_enqueue(struct rte_swx_ipsec *ipsec, struct rte_ipsec_group *g, uint32_t n_pkts)
{
	struct rte_mbuf **dst0 = ipsec->out.pkts, **dst;
	struct rte_mbuf **src = g->m;

	uint32_t n_dst = ipsec->out.n_pkts;
	uint32_t n_dst_max = ipsec->bsz.ring_wr;
	uint32_t n_dst_avail = n_dst_max - n_dst;
	uint32_t n_src = n_pkts;
	uint32_t i;

	dst = &dst0[n_dst];

	/* Move from SRC to DST. Every time DST gets full, send burst from DST. */
	for ( ; n_src >= n_dst_avail; ) {
		uint32_t n_ok;

		/* Move from SRC to DST. */
		for (i = 0; i < n_dst_avail; i++)
			*dst++ = *src++;

		n_src -= n_dst_avail;

		/* DST full: send burst from DST. */
		n_ok = rte_ring_sp_enqueue_burst(ipsec->ring_out, (void **)dst0, n_dst_max, NULL);

		for (i = n_ok; i < n_dst_max; i++) {
			struct rte_mbuf *m = dst0[i];

			rte_pktmbuf_free(m);
		}

		/* Next iteration. */
		dst = dst0;
		n_dst = 0;
		n_dst_avail = n_dst_max;
	}

	/* Move from SRC to DST. Not enough elements in SRC to get DST full. */
	for (i = 0; i < n_src; i++)
		*dst++ = *src++;

	n_dst += n_src;

	ipsec->out.n_pkts = n_dst;
}

static inline void
rte_swx_ipsec_post_crypto(struct rte_swx_ipsec *ipsec)
{
	uint32_t n_pkts_crypto = ipsec->n_pkts_crypto, n_pkts, ng, i;

	/* Read the crypto ops from the crypto device. */
	if (!n_pkts_crypto)
		return;

	n_pkts = rte_cryptodev_dequeue_burst(ipsec->dev_id,
					     ipsec->qp_id,
					     ipsec->out.cops,
					     ipsec->bsz.crypto_rd);
	if (!n_pkts)
		return;

	ipsec->n_pkts_crypto = n_pkts_crypto - n_pkts;

	/* Group packets that share the same SA. */
	ng = rte_ipsec_pkt_crypto_group((const struct rte_crypto_op **)(uintptr_t)ipsec->out.cops,
					      ipsec->out.group_pkts,
					      ipsec->out.groups,
					      n_pkts);

	/* Perform post-crypto IPsec processing for each group of packets that share the same SA.
	 * Write each group of packets to the output ring.
	 */
	for (i = 0, n_pkts = 0; i < ng; i++) {
		struct rte_ipsec_group *g = &ipsec->out.groups[i];
		struct rte_ipsec_session *s = g->id.ptr;
		uint32_t n_pkts_ok, j;

		/* Perform post-crypto IPsec processing for the current group. */
		n_pkts_ok = rte_ipsec_pkt_process(s, g->m, g->cnt);

		for (j = n_pkts_ok; j < g->cnt; j++) {
			struct rte_mbuf *m = g->m[j];

			rte_pktmbuf_free(m);
		}

		/* Write the packets of the current group to the output ring. */
		ipsec_ring_enqueue(ipsec, g, n_pkts_ok);
	}
}

void
rte_swx_ipsec_run(struct rte_swx_ipsec *ipsec)
{
	rte_swx_ipsec_pre_crypto(ipsec);
	rte_swx_ipsec_post_crypto(ipsec);
}
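
/* Illustrative data plane sketch: rte_swx_ipsec_run() is typically called in a loop by the thread
 * servicing this IPsec instance, e.g.:
 *
 *	for ( ; ; )
 *		rte_swx_ipsec_run(ipsec);
 *
 * Each call dequeues at most one burst from the input ring and at most one burst of completed
 * crypto operations from the crypto device.
 */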

/**
 * IPsec Control Plane API
 */
struct cipher_alg {
	const char *name;
	enum rte_crypto_cipher_algorithm alg;
	uint32_t iv_size;
	uint32_t block_size;
	uint32_t key_size;
};

struct auth_alg {
	const char *name;
	enum rte_crypto_auth_algorithm alg;
	uint32_t iv_size;
	uint32_t digest_size;
	uint32_t key_size;
};

struct aead_alg {
	const char *name;
	enum rte_crypto_aead_algorithm alg;
	uint32_t iv_size;
	uint32_t block_size;
	uint32_t digest_size;
	uint32_t key_size;
	uint32_t aad_size;
};

static struct cipher_alg cipher_algs[] = {
	[0] = {
		.name = "null",
		.alg = RTE_CRYPTO_CIPHER_NULL,
		.iv_size = 0,
		.block_size = 4,
		.key_size = 0,
	},

	[1] = {
		.name = "aes-cbc-128",
		.alg = RTE_CRYPTO_CIPHER_AES_CBC,
		.iv_size = 16,
		.block_size = 16,
		.key_size = 16,
	},

	[2] = {
		.name = "aes-cbc-192",
		.alg = RTE_CRYPTO_CIPHER_AES_CBC,
		.iv_size = 16,
		.block_size = 16,
		.key_size = 24,
	},

	[3] = {
		.name = "aes-cbc-256",
		.alg = RTE_CRYPTO_CIPHER_AES_CBC,
		.iv_size = 16,
		.block_size = 16,
		.key_size = 32,
	},

	[4] = {
		.name = "aes-ctr-128",
		.alg = RTE_CRYPTO_CIPHER_AES_CTR,
		.iv_size = 8,
		.block_size = 4,
		.key_size = 20,
	},

	[5] = {
		.name = "aes-ctr-192",
		.alg = RTE_CRYPTO_CIPHER_AES_CTR,
		.iv_size = 16,
		.block_size = 16,
		.key_size = 28,
	},

	[6] = {
		.name = "aes-ctr-256",
		.alg = RTE_CRYPTO_CIPHER_AES_CTR,
		.iv_size = 16,
		.block_size = 16,
		.key_size = 36,
	},

	[7] = {
		.name = "3des-cbc",
		.alg = RTE_CRYPTO_CIPHER_3DES_CBC,
		.iv_size = 8,
		.block_size = 8,
		.key_size = 24,
	},

	[8] = {
		.name = "des-cbc",
		.alg = RTE_CRYPTO_CIPHER_DES_CBC,
		.iv_size = 8,
		.block_size = 8,
		.key_size = 8,
	},
};

static struct auth_alg auth_algs[] = {
	[0] = {
		.name = "null",
		.alg = RTE_CRYPTO_AUTH_NULL,
		.iv_size = 0,
		.digest_size = 0,
		.key_size = 0,
	},

	[1] = {
		.name = "sha1-hmac",
		.alg = RTE_CRYPTO_AUTH_SHA1_HMAC,
		.iv_size = 0,
		.digest_size = 12,
		.key_size = 20,
	},

	[2] = {
		.name = "sha256-hmac",
		.alg = RTE_CRYPTO_AUTH_SHA256_HMAC,
		.iv_size = 0,
		.digest_size = 16,
		.key_size = 32,
	},

	[3] = {
		.name = "sha384-hmac",
		.alg = RTE_CRYPTO_AUTH_SHA384_HMAC,
		.iv_size = 0,
		.digest_size = 24,
		.key_size = 48,
	},

	[4] = {
		.name = "sha512-hmac",
		.alg = RTE_CRYPTO_AUTH_SHA512_HMAC,
		.iv_size = 0,
		.digest_size = 32,
		.key_size = 64,
	},

	[5] = {
		.name = "aes-gmac",
		.alg = RTE_CRYPTO_AUTH_AES_GMAC,
		.iv_size = 8,
		.digest_size = 16,
		.key_size = 20,
	},

	[6] = {
		.name = "aes-xcbc-mac-96",
		.alg = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
		.iv_size = 0,
		.digest_size = 12,
		.key_size = 16,
	},
};

static struct aead_alg aead_algs[] = {
	[0] = {
		.name = "aes-gcm-128",
		.alg = RTE_CRYPTO_AEAD_AES_GCM,
		.iv_size = 8,
		.block_size = 4,
		.key_size = 20,
		.digest_size = 16,
		.aad_size = 8,
	},

	[1] = {
		.name = "aes-gcm-192",
		.alg = RTE_CRYPTO_AEAD_AES_GCM,
		.iv_size = 8,
		.block_size = 4,
		.key_size = 28,
		.digest_size = 16,
		.aad_size = 8,
	},

	[2] = {
		.name = "aes-gcm-256",
		.alg = RTE_CRYPTO_AEAD_AES_GCM,
		.iv_size = 8,
		.block_size = 4,
		.key_size = 36,
		.digest_size = 16,
		.aad_size = 8,
	},

	[3] = {
		.name = "aes-ccm-128",
		.alg = RTE_CRYPTO_AEAD_AES_CCM,
		.iv_size = 8,
		.block_size = 4,
		.key_size = 20,
		.digest_size = 16,
		.aad_size = 8,
	},

	[4] = {
		.name = "aes-ccm-192",
		.alg = RTE_CRYPTO_AEAD_AES_CCM,
		.iv_size = 8,
		.block_size = 4,
		.key_size = 28,
		.digest_size = 16,
		.aad_size = 8,
	},

	[5] = {
		.name = "aes-ccm-256",
		.alg = RTE_CRYPTO_AEAD_AES_CCM,
		.iv_size = 8,
		.block_size = 4,
		.key_size = 36,
		.digest_size = 16,
		.aad_size = 8,
	},

	[6] = {
		.name = "chacha20-poly1305",
		.alg = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
		.iv_size = 12,
		.block_size = 64,
		.key_size = 36,
		.digest_size = 16,
		.aad_size = 8,
	},
};
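
/* Note: for the AES-CTR and AEAD entries above, as well as for AES-GMAC, the listed key size
 * includes a 4-byte salt appended to the key proper, which is split off in crypto_xform_get()
 * below; hence e.g. aes-gcm-128 listing a 20-byte key.
 */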

static struct cipher_alg *
cipher_alg_find(const char *name)
{
	size_t i;

	for (i = 0; i < RTE_DIM(cipher_algs); i++) {
		struct cipher_alg *alg = &cipher_algs[i];

		if (!strcmp(name, alg->name))
			return alg;
	}

	return NULL;
}

static struct cipher_alg *
cipher_alg_find_by_id(enum rte_crypto_cipher_algorithm alg_id, uint32_t key_size)
{
	size_t i;

	for (i = 0; i < RTE_DIM(cipher_algs); i++) {
		struct cipher_alg *alg = &cipher_algs[i];

		if (alg->alg == alg_id && alg->key_size == key_size)
			return alg;
	}

	return NULL;
}

static struct auth_alg *
auth_alg_find(const char *name)
{
	size_t i;

	for (i = 0; i < RTE_DIM(auth_algs); i++) {
		struct auth_alg *alg = &auth_algs[i];

		if (!strcmp(name, alg->name))
			return alg;
	}

	return NULL;
}

static struct auth_alg *
auth_alg_find_by_id(enum rte_crypto_auth_algorithm alg_id, uint32_t key_size)
{
	size_t i;

	for (i = 0; i < RTE_DIM(auth_algs); i++) {
		struct auth_alg *alg = &auth_algs[i];

		if (alg->alg == alg_id && alg->key_size == key_size)
			return alg;
	}

	return NULL;
}

static struct aead_alg *
aead_alg_find(const char *name)
{
	size_t i;

	for (i = 0; i < RTE_DIM(aead_algs); i++) {
		struct aead_alg *alg = &aead_algs[i];

		if (!strcmp(name, alg->name))
			return alg;
	}

	return NULL;
}

static struct aead_alg *
aead_alg_find_by_id(enum rte_crypto_aead_algorithm alg_id, uint32_t key_size)
{
	size_t i;

	for (i = 0; i < RTE_DIM(aead_algs); i++) {
		struct aead_alg *alg = &aead_algs[i];

		if (alg->alg == alg_id && alg->key_size == key_size)
			return alg;
	}

	return NULL;
}

static int
char_to_hex(char c, uint8_t *val)
{
	if (c >= '0' && c <= '9') {
		*val = c - '0';
		return 0;
	}

	if (c >= 'A' && c <= 'F') {
		*val = c - 'A' + 10;
		return 0;
	}

	if (c >= 'a' && c <= 'f') {
		*val = c - 'a' + 10;
		return 0;
	}

	return -EINVAL;
}

static int
hex_string_parse(char *src, uint8_t *dst, uint32_t n_dst_bytes)
{
	uint32_t i;

	/* Check input arguments. */
	if (!src || !src[0] || !dst || !n_dst_bytes)
		return -EINVAL;

	/* Skip any leading "0x" or "0X" in the src string. */
	if ((src[0] == '0') && (src[1] == 'x' || src[1] == 'X'))
		src += 2;

	/* Convert each pair of hex characters in the src string to one byte in the dst array. */
	for (i = 0; i < n_dst_bytes; i++) {
		uint8_t a, b;
		int status;

		status = char_to_hex(*src, &a);
		if (status)
			return status;
		src++;

		status = char_to_hex(*src, &b);
		if (status)
			return status;
		src++;

		dst[i] = a * 16 + b;
	}

	/* Check for the end of the src string. */
	if (*src)
		return -EINVAL;

	return 0;
}
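
/* Example: hex_string_parse("0xC0A80101", dst, 4) fills dst with {0xC0, 0xA8, 0x01, 0x01} and
 * returns 0; a string with fewer or more than 2 * n_dst_bytes hex digits fails with -EINVAL.
 */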

static int
token_is_comment(const char *token)
{
	if ((token[0] == '#') ||
	    (token[0] == ';') ||
	    ((token[0] == '/') && (token[1] == '/')))
		return 1; /* TRUE. */

	return 0; /* FALSE. */
}

#define MAX_TOKENS 64

#define CHECK(condition, msg)          \
do {                                   \
	if (!(condition)) {            \
		if (errmsg)            \
			*errmsg = msg; \
		goto error;            \
	}                              \
} while (0)

struct rte_swx_ipsec_sa_params *
rte_swx_ipsec_sa_read(struct rte_swx_ipsec *ipsec __rte_unused,
		      const char *string,
		      int *is_blank_or_comment,
		      const char **errmsg)
{
	char *token_array[MAX_TOKENS], **t;
	struct rte_swx_ipsec_sa_params *p = NULL;
	char *s0 = NULL, *s;
	uint32_t n_tokens = 0;
	int blank_or_comment = 0;

	/* Check input arguments. */
	CHECK(string && string[0], "NULL input");

	/* Memory allocation. */
	s0 = strdup(string);
	p = calloc(1, sizeof(struct rte_swx_ipsec_sa_params));
	CHECK(s0 && p, "Not enough memory");

	/* Parse the string into tokens. */
	for (s = s0; ; ) {
		char *token;

		token = strtok_r(s, " \f\n\r\t\v", &s);
		if (!token || token_is_comment(token))
			break;

		CHECK(n_tokens < RTE_DIM(token_array), "Too many tokens");

		token_array[n_tokens] = token;
		n_tokens++;
	}

	t = token_array;
	if (!n_tokens) {
		blank_or_comment = 1;
		goto error;
	}

	/*
	 * Crypto operation.
	 */
	if (!strcmp(t[0], "encrypt"))
		p->encrypt = 1;
	else if (!strcmp(t[0], "decrypt"))
		p->encrypt = 0;
	else
		CHECK(0, "Missing \"encrypt\"/\"decrypt\" keyword");

	t++;
	n_tokens--;

	/*
	 * Crypto parameters.
	 */
	CHECK(n_tokens >= 2, "Not enough tokens");

	if (!strcmp(t[0], "cipher")) {
		struct cipher_alg *cipher_alg;
		struct auth_alg *auth_alg;
		uint32_t key_size;

		p->crypto.is_aead = 0;

		/* cipher. */
		cipher_alg = cipher_alg_find(t[1]);
		CHECK(cipher_alg, "Unsupported cipher algorithm");

		key_size = cipher_alg->key_size;
		p->crypto.cipher_auth.cipher.alg = cipher_alg->alg;
		p->crypto.cipher_auth.cipher.key_size = key_size;

		t += 2;
		n_tokens -= 2;

		if (key_size) {
			int status;

			CHECK(n_tokens >= 2, "Not enough tokens");
			CHECK(!strcmp(t[0], "key"), "Missing cipher \"key\" keyword");
			CHECK(key_size <= RTE_DIM(p->crypto.cipher_auth.cipher.key),
				"Cipher algorithm key too big");

			status = hex_string_parse(t[1], p->crypto.cipher_auth.cipher.key, key_size);
			CHECK(!status, "Cipher key invalid format");

			t += 2;
			n_tokens -= 2;
		}

		/* authentication. */
		CHECK(n_tokens >= 2, "Not enough tokens");
		CHECK(!strcmp(t[0], "auth"), "Missing \"auth\" keyword");

		auth_alg = auth_alg_find(t[1]);
		CHECK(auth_alg, "Unsupported authentication algorithm");

		key_size = auth_alg->key_size;
		p->crypto.cipher_auth.auth.alg = auth_alg->alg;
		p->crypto.cipher_auth.auth.key_size = key_size;

		t += 2;
		n_tokens -= 2;

		if (key_size) {
			int status;

			CHECK(n_tokens >= 2, "Not enough tokens");
			CHECK(!strcmp(t[0], "key"), "Missing authentication \"key\" keyword");
			CHECK(key_size <= RTE_DIM(p->crypto.cipher_auth.auth.key),
				"Authentication algorithm key too big");

			status = hex_string_parse(t[1], p->crypto.cipher_auth.auth.key, key_size);
			CHECK(!status, "Authentication key invalid format");

			t += 2;
			n_tokens -= 2;
		}
	} else if (!strcmp(t[0], "aead")) {
		struct aead_alg *alg;
		uint32_t key_size;
		int status;

		p->crypto.is_aead = 1;

		CHECK(n_tokens >= 4, "Not enough tokens");
		alg = aead_alg_find(t[1]);
		CHECK(alg, "Unsupported AEAD algorithm");

		key_size = alg->key_size;
		p->crypto.aead.alg = alg->alg;
		p->crypto.aead.key_size = key_size;

		CHECK(!strcmp(t[2], "key"), "Missing AEAD \"key\" keyword");
		CHECK(key_size <= RTE_DIM(p->crypto.aead.key),
			"AEAD algorithm key too big");

		status = hex_string_parse(t[3], p->crypto.aead.key, key_size);
		CHECK(!status, "AEAD key invalid format");

		t += 4;
		n_tokens -= 4;
	} else
		CHECK(0, "Missing \"cipher\"/\"aead\" keyword");

	/*
	 * Packet encapsulation parameters.
	 */
	CHECK(n_tokens >= 4, "Not enough tokens");
	CHECK(!strcmp(t[0], "esp"), "Missing \"esp\" keyword");
	CHECK(!strcmp(t[1], "spi"), "Missing \"spi\" keyword");

	p->encap.esp.spi = strtoul(t[2], &t[2], 0);
	CHECK(!t[2][0], "ESP SPI field invalid format");

	t += 3;
	n_tokens -= 3;

	if (!strcmp(t[0], "tunnel")) {
		p->encap.tunnel_mode = 1;

		CHECK(n_tokens >= 6, "Not enough tokens");

		if (!strcmp(t[1], "ipv4")) {
			uint32_t addr;

			p->encap.tunnel_ipv4 = 1;

			CHECK(!strcmp(t[2], "srcaddr"), "Missing \"srcaddr\" keyword");

			addr = strtoul(t[3], &t[3], 0);
			CHECK(!t[3][0], "Tunnel IPv4 source address invalid format");
			p->encap.tunnel.ipv4.src_addr.s_addr = htonl(addr);

			CHECK(!strcmp(t[4], "dstaddr"), "Missing \"dstaddr\" keyword");

			addr = strtoul(t[5], &t[5], 0);
			CHECK(!t[5][0], "Tunnel IPv4 destination address invalid format");
			p->encap.tunnel.ipv4.dst_addr.s_addr = htonl(addr);

			t += 6;
			n_tokens -= 6;
		} else if (!strcmp(t[1], "ipv6")) {
			int status;

			p->encap.tunnel_ipv4 = 0;

			CHECK(!strcmp(t[2], "srcaddr"), "Missing \"srcaddr\" keyword");

			status = hex_string_parse(t[3],
						  p->encap.tunnel.ipv6.src_addr.s6_addr,
						  16);
			CHECK(!status, "Tunnel IPv6 source address invalid format");

			CHECK(!strcmp(t[4], "dstaddr"), "Missing \"dstaddr\" keyword");

			status = hex_string_parse(t[5],
						  p->encap.tunnel.ipv6.dst_addr.s6_addr,
						  16);
			CHECK(!status, "Tunnel IPv6 destination address invalid format");

			t += 6;
			n_tokens -= 6;
		} else
			CHECK(0, "Missing \"ipv4\"/\"ipv6\" keyword");
	} else if (!strcmp(t[0], "transport")) {
		p->encap.tunnel_mode = 0;

		t++;
		n_tokens--;
	} else
		CHECK(0, "Missing \"tunnel\"/\"transport\" keyword");

	/*
	 * Any other parameters.
	 */
	CHECK(!n_tokens, "Unexpected trailing tokens");

	free(s0);
	return p;

error:
	free(p);
	free(s0);
	if (is_blank_or_comment)
		*is_blank_or_comment = blank_or_comment;
	return NULL;
}
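
/* Example SA description lines accepted by the parser above (keys and addresses are illustrative
 * only; each SA is normally a single line of text, wrapped here for readability):
 *
 *	encrypt aead aes-gcm-128 key 0x000102030405060708090a0b0c0d0e0f10111213
 *		esp spi 100 tunnel ipv4 srcaddr 0x0A000001 dstaddr 0x0A000002
 *
 *	decrypt cipher aes-cbc-128 key 0x000102030405060708090a0b0c0d0e0f
 *		auth sha1-hmac key 0x000102030405060708090a0b0c0d0e0f10111213
 *		esp spi 200 transport
 */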

static void
tunnel_ipv4_header_set(struct rte_ipv4_hdr *h, struct rte_swx_ipsec_sa_params *p)
{
	struct rte_ipv4_hdr ipv4_hdr = {
		.version_ihl = 0x45,
		.type_of_service = 0,
		.total_length = 0, /* Cannot be pre-computed. */
		.packet_id = 0,
		.fragment_offset = 0,
		.time_to_live = 64,
		.next_proto_id = IPPROTO_ESP,
		.hdr_checksum = 0, /* Cannot be pre-computed. */
		.src_addr = p->encap.tunnel.ipv4.src_addr.s_addr,
		.dst_addr = p->encap.tunnel.ipv4.dst_addr.s_addr,
	};

	memcpy(h, &ipv4_hdr, sizeof(ipv4_hdr));
}

static void
tunnel_ipv6_header_set(struct rte_ipv6_hdr *h, struct rte_swx_ipsec_sa_params *p)
{
	struct rte_ipv6_hdr ipv6_hdr = {
		.vtc_flow = 0x60000000,
		.payload_len = 0, /* Cannot be pre-computed. */
		.proto = IPPROTO_ESP,
		.hop_limits = 64,
		.src_addr = {0},
		.dst_addr = {0},
	};

	memcpy(h, &ipv6_hdr, sizeof(ipv6_hdr));
	memcpy(h->src_addr, p->encap.tunnel.ipv6.src_addr.s6_addr, 16);
	memcpy(h->dst_addr, p->encap.tunnel.ipv6.dst_addr.s6_addr, 16);
}

/* IPsec library SA parameters. */
static struct rte_crypto_sym_xform *
crypto_xform_get(struct rte_swx_ipsec_sa_params *p,
		struct rte_crypto_sym_xform *xform,
		uint32_t *salt_out)
{
	if (p->crypto.is_aead) {
		struct aead_alg *alg;
		uint32_t key_size, salt, iv_length;

		alg = aead_alg_find_by_id(p->crypto.aead.alg, p->crypto.aead.key_size);
		if (!alg)
			return NULL;

		/* salt and salt-related key size adjustment. */
		key_size = p->crypto.aead.key_size - 4;
		memcpy(&salt, &p->crypto.aead.key[key_size], 4);

		/* IV length. */
		iv_length = 12;
		if (p->crypto.aead.alg == RTE_CRYPTO_AEAD_AES_CCM)
			iv_length = 11;

		/* xform. */
		xform[0].type = RTE_CRYPTO_SYM_XFORM_AEAD;
		xform[0].aead.op = p->encrypt ?
			RTE_CRYPTO_AEAD_OP_ENCRYPT :
			RTE_CRYPTO_AEAD_OP_DECRYPT;
		xform[0].aead.algo = p->crypto.aead.alg;
		xform[0].aead.key.data = p->crypto.aead.key;
		xform[0].aead.key.length = key_size;
		xform[0].aead.iv.offset = IV_OFFSET;
		xform[0].aead.iv.length = iv_length;
		xform[0].aead.digest_length = alg->digest_size;
		xform[0].aead.aad_length = alg->aad_size;
		xform[0].next = NULL;

		*salt_out = salt;
		return &xform[0];
	} else {
		struct cipher_alg *cipher_alg;
		struct auth_alg *auth_alg;
		uint32_t cipher_key_size, auth_key_size, salt, auth_iv_length;

		cipher_alg = cipher_alg_find_by_id(p->crypto.cipher_auth.cipher.alg,
						   p->crypto.cipher_auth.cipher.key_size);
		if (!cipher_alg)
			return NULL;

		auth_alg = auth_alg_find_by_id(p->crypto.cipher_auth.auth.alg,
					       p->crypto.cipher_auth.auth.key_size);
		if (!auth_alg)
			return NULL;

		/* salt and salt-related key size adjustment. */
		cipher_key_size = p->crypto.cipher_auth.cipher.key_size;
		auth_key_size = p->crypto.cipher_auth.auth.key_size;

		switch (p->crypto.cipher_auth.cipher.alg) {
		case RTE_CRYPTO_CIPHER_AES_CBC:
		case RTE_CRYPTO_CIPHER_3DES_CBC:
			salt = (uint32_t)rand();
			break;

		case RTE_CRYPTO_CIPHER_AES_CTR:
			cipher_key_size -= 4;
			memcpy(&salt, &p->crypto.cipher_auth.cipher.key[cipher_key_size], 4);
			break;

		default:
			salt = 0;
		}

		if (p->crypto.cipher_auth.auth.alg == RTE_CRYPTO_AUTH_AES_GMAC) {
			auth_key_size -= 4;
			memcpy(&salt, &p->crypto.cipher_auth.auth.key[auth_key_size], 4);
		}

		/* IV length. */
		auth_iv_length = cipher_alg->iv_size;
		if (p->crypto.cipher_auth.auth.alg == RTE_CRYPTO_AUTH_AES_GMAC)
			auth_iv_length = 12;

		/* xform. */
		if (p->encrypt) {
			xform[0].type = RTE_CRYPTO_SYM_XFORM_CIPHER;
			xform[0].cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
			xform[0].cipher.algo = p->crypto.cipher_auth.cipher.alg;
			xform[0].cipher.key.data = p->crypto.cipher_auth.cipher.key;
			xform[0].cipher.key.length = cipher_key_size;
			xform[0].cipher.iv.offset = IV_OFFSET;
			xform[0].cipher.iv.length = cipher_alg->iv_size;
			xform[0].cipher.dataunit_len = 0;
			xform[0].next = &xform[1];

			xform[1].type = RTE_CRYPTO_SYM_XFORM_AUTH;
			xform[1].auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
			xform[1].auth.algo = p->crypto.cipher_auth.auth.alg;
			xform[1].auth.key.data = p->crypto.cipher_auth.auth.key;
			xform[1].auth.key.length = auth_key_size;
			xform[1].auth.iv.offset = IV_OFFSET;
			xform[1].auth.iv.length = auth_iv_length;
			xform[1].auth.digest_length = auth_alg->digest_size;
			xform[1].next = NULL;
		} else {
			xform[0].type = RTE_CRYPTO_SYM_XFORM_AUTH;
			xform[0].auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
			xform[0].auth.algo = p->crypto.cipher_auth.auth.alg;
			xform[0].auth.key.data = p->crypto.cipher_auth.auth.key;
			xform[0].auth.key.length = auth_key_size;
			xform[0].auth.iv.offset = IV_OFFSET;
			xform[0].auth.iv.length = auth_iv_length;
			xform[0].auth.digest_length = auth_alg->digest_size;
			xform[0].next = &xform[1];

			xform[1].type = RTE_CRYPTO_SYM_XFORM_CIPHER;
			xform[1].cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
			xform[1].cipher.algo = p->crypto.cipher_auth.cipher.alg;
			xform[1].cipher.key.data = p->crypto.cipher_auth.cipher.key;
			xform[1].cipher.key.length = cipher_key_size;
			xform[1].cipher.iv.offset = IV_OFFSET;
			xform[1].cipher.iv.length = cipher_alg->iv_size;
			xform[1].cipher.dataunit_len = 0;
			xform[1].next = NULL;
		}

		*salt_out = salt;

		if (p->crypto.cipher_auth.auth.alg == RTE_CRYPTO_AUTH_AES_GMAC) {
			if (p->encrypt)
				return &xform[1];

			xform[0].next = NULL;
			return &xform[0];
		}

		return &xform[0];
	}
}
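
/* Note: for AES-GMAC, crypto_xform_get() returns a single authentication transform (GMAC
 * authenticates but does not encrypt); for every other cipher/auth combination it returns a
 * two-element chain: cipher then auth for encryption, auth then cipher for decryption.
 */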

static void
ipsec_xform_get(struct rte_swx_ipsec_sa_params *p,
		struct rte_security_ipsec_xform *ipsec_xform,
		uint32_t salt)
{
	ipsec_xform->spi = p->encap.esp.spi;

	ipsec_xform->salt = salt;

	ipsec_xform->options.esn = 0;
	ipsec_xform->options.udp_encap = 0;
	ipsec_xform->options.copy_dscp = 1;
	ipsec_xform->options.copy_flabel = 0;
	ipsec_xform->options.copy_df = 0;
	ipsec_xform->options.dec_ttl = 0;
	ipsec_xform->options.ecn = 1;
	ipsec_xform->options.stats = 0;
	ipsec_xform->options.iv_gen_disable = 0;
	ipsec_xform->options.tunnel_hdr_verify = 0;
	ipsec_xform->options.udp_ports_verify = 0;
	ipsec_xform->options.ip_csum_enable = 0;
	ipsec_xform->options.l4_csum_enable = 0;
	ipsec_xform->options.ip_reassembly_en = 0;

	ipsec_xform->direction = p->encrypt ?
		RTE_SECURITY_IPSEC_SA_DIR_EGRESS :
		RTE_SECURITY_IPSEC_SA_DIR_INGRESS;

	ipsec_xform->proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP;

	ipsec_xform->mode = p->encap.tunnel_mode ?
		RTE_SECURITY_IPSEC_SA_MODE_TUNNEL :
		RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT;

	ipsec_xform->tunnel.type = p->encap.tunnel_ipv4 ?
		RTE_SECURITY_IPSEC_TUNNEL_IPV4 :
		RTE_SECURITY_IPSEC_TUNNEL_IPV6;

	if (p->encap.tunnel_mode) {
		if (p->encap.tunnel_ipv4) {
			ipsec_xform->tunnel.ipv4.src_ip = p->encap.tunnel.ipv4.src_addr;
			ipsec_xform->tunnel.ipv4.dst_ip = p->encap.tunnel.ipv4.dst_addr;
			ipsec_xform->tunnel.ipv4.dscp = 0;
			ipsec_xform->tunnel.ipv4.df = 0;
			ipsec_xform->tunnel.ipv4.ttl = 64;
		} else {
			ipsec_xform->tunnel.ipv6.src_addr = p->encap.tunnel.ipv6.src_addr;
			ipsec_xform->tunnel.ipv6.dst_addr = p->encap.tunnel.ipv6.dst_addr;
			ipsec_xform->tunnel.ipv6.dscp = 0;
			ipsec_xform->tunnel.ipv6.flabel = 0;
			ipsec_xform->tunnel.ipv6.hlimit = 64;
		}
	}

	ipsec_xform->life.packets_soft_limit = 0;
	ipsec_xform->life.bytes_soft_limit = 0;
	ipsec_xform->life.packets_hard_limit = 0;
	ipsec_xform->life.bytes_hard_limit = 0;

	ipsec_xform->replay_win_sz = 0;

	ipsec_xform->esn.value = 0;

	ipsec_xform->udp.dport = 0;
	ipsec_xform->udp.sport = 0;
}

static int
ipsec_sa_prm_get(struct rte_swx_ipsec_sa_params *p,
		 struct rte_ipsec_sa_prm *sa_prm,
		 struct rte_ipv4_hdr *ipv4_hdr,
		 struct rte_ipv6_hdr *ipv6_hdr,
		 struct rte_crypto_sym_xform *crypto_xform)
{
	uint32_t salt;

	memset(sa_prm, 0, sizeof(*sa_prm)); /* Better to be safe than sorry. */

	sa_prm->userdata = 0; /* Not used. */

	sa_prm->flags = 0; /* Flag RTE_IPSEC_SAFLAG_SQN_ATOM not enabled. */

	/*
	 * crypto_xform.
	 */
	sa_prm->crypto_xform = crypto_xform_get(p, crypto_xform, &salt);
	if (!sa_prm->crypto_xform)
		return -EINVAL;

	/*
	 * ipsec_xform.
	 */
	ipsec_xform_get(p, &sa_prm->ipsec_xform, salt);

	/*
	 * tunnel / transport.
	 *
	 * Currently, the input IP packet type is assumed to be IPv4. To support both IPv4 and IPv6,
	 * the input packet type should be added to the SA configuration parameters.
	 */
	if (p->encap.tunnel_mode) {
		if (p->encap.tunnel_ipv4) {
			sa_prm->tun.hdr_len = sizeof(struct rte_ipv4_hdr);
			sa_prm->tun.hdr_l3_off = 0;
			sa_prm->tun.next_proto = IPPROTO_IPIP; /* IPv4. */
			sa_prm->tun.hdr = ipv4_hdr;
		} else {
			sa_prm->tun.hdr_len = sizeof(struct rte_ipv6_hdr);
			sa_prm->tun.hdr_l3_off = 0;
			sa_prm->tun.next_proto = IPPROTO_IPIP; /* IPv4. */
			sa_prm->tun.hdr = ipv6_hdr;
		}
	} else {
		sa_prm->trs.proto = IPPROTO_IPIP; /* IPv4. */
	}

	return 0;
}

static int
ipsec_session_create(struct rte_swx_ipsec *ipsec,
		     struct rte_swx_ipsec_sa_params *p,
		     struct rte_ipsec_session *s)
{
	struct rte_ipv4_hdr ipv4_hdr;
	struct rte_ipv6_hdr ipv6_hdr;
	struct rte_crypto_sym_xform crypto_xform[2];
	struct rte_ipsec_sa_prm sa_prm;
	struct rte_ipsec_sa *sa = NULL;
	struct rte_cryptodev_sym_session *crypto_session = NULL;
	int sa_size;
	int sa_valid = 0, status = 0;

	tunnel_ipv4_header_set(&ipv4_hdr, p);
	tunnel_ipv6_header_set(&ipv6_hdr, p);

	/* IPsec library SA setup. */
	status = ipsec_sa_prm_get(p, &sa_prm, &ipv4_hdr, &ipv6_hdr, crypto_xform);
	if (status)
		goto error;

	sa_size = rte_ipsec_sa_size(&sa_prm);
	if (sa_size < 0) {
		status = sa_size;
		goto error;
	}
	if (!sa_size) {
		status = -EINVAL;
		goto error;
	}

	sa = calloc(1, sa_size);
	if (!sa) {
		status = -ENOMEM;
		goto error;
	}

	sa_size = rte_ipsec_sa_init(sa, &sa_prm, sa_size);
	if (sa_size < 0) {
		status = sa_size;
		goto error;
	}
	if (!sa_size) {
		status = -EINVAL;
		goto error;
	}

	sa_valid = 1;

	/* Cryptodev library session setup. */
	crypto_session = rte_cryptodev_sym_session_create(ipsec->dev_id,
							  sa_prm.crypto_xform,
							  ipsec->mp_session);
	if (!crypto_session) {
		status = -ENOMEM;
		goto error;
	}

	/* IPsec library session setup. */
	s->sa = sa;
	s->type = RTE_SECURITY_ACTION_TYPE_NONE;
	s->crypto.ses = crypto_session;
	s->crypto.dev_id = ipsec->dev_id;
	s->pkt_func.prepare.async = NULL;
	s->pkt_func.process = NULL;

	status = rte_ipsec_session_prepare(s);
	if (status)
		goto error;

	return 0;

error:
	/* sa. */
	if (sa_valid)
		rte_ipsec_sa_fini(sa);

	free(sa);

	/* crypto_session. */
	if (crypto_session)
		rte_cryptodev_sym_session_free(ipsec->dev_id, crypto_session);

	/* s. */
	memset(s, 0, sizeof(*s));

	return status;
}

static void
ipsec_session_free(struct rte_swx_ipsec *ipsec,
		   struct rte_ipsec_session *s)
{
	if (!s)
		return;

	/* IPsec library SA. */
	if (s->sa)
		rte_ipsec_sa_fini(s->sa);
	free(s->sa);

	/* Cryptodev library session. */
	if (s->crypto.ses)
		rte_cryptodev_sym_session_free(ipsec->dev_id, s->crypto.ses);

	/* IPsec library session. */
	memset(s, 0, sizeof(*s));
}

int
rte_swx_ipsec_sa_add(struct rte_swx_ipsec *ipsec,
		     struct rte_swx_ipsec_sa_params *sa_params,
		     uint32_t *id)
{
	struct ipsec_sa *sa;
	uint32_t sa_id;
	int status;

	/* Check the input parameters. */
	if (!ipsec || !sa_params || !id)
		return -EINVAL;

	/* Allocate a free SADB entry. */
	if (!ipsec->n_sa_free_id)
		return -ENOSPC;

	sa_id = ipsec->sa_free_id[ipsec->n_sa_free_id - 1];
	ipsec->n_sa_free_id--;

	/* Acquire the SA resources. */
	sa = ipsec_sa_get(ipsec, sa_id);

	status = ipsec_session_create(ipsec, sa_params, &sa->s);
	if (status) {
		/* Free the allocated SADB entry. */
		ipsec->sa_free_id[ipsec->n_sa_free_id] = sa_id;
		ipsec->n_sa_free_id++;

		return status;
	}

	/* Validate the new SA. */
	sa->valid = 1;
	*id = sa_id;

	return 0;
}
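
/* Illustrative control plane sketch (the input line is hypothetical): parse one SA description
 * and install it, yielding the SA ID to be used by the data plane:
 *
 *	struct rte_swx_ipsec_sa_params *sa_params;
 *	uint32_t sa_id;
 *	int blank_or_comment;
 *	const char *err;
 *
 *	sa_params = rte_swx_ipsec_sa_read(ipsec, line, &blank_or_comment, &err);
 *	if (sa_params && !rte_swx_ipsec_sa_add(ipsec, sa_params, &sa_id))
 *		printf("SA added with ID %u.\n", sa_id);
 *	free(sa_params);
 */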

void
rte_swx_ipsec_sa_delete(struct rte_swx_ipsec *ipsec,
			uint32_t sa_id)
{
	struct ipsec_sa *sa;

	/* Check the input parameters. */
	if (!ipsec || (sa_id >= ipsec->n_sa_max))
		return;

	/* Release the SA resources. */
	sa = ipsec_sa_get(ipsec, sa_id);

	ipsec_session_free(ipsec, &sa->s);

	/* Free the SADB entry. */
	ipsec->sa_free_id[ipsec->n_sa_free_id] = sa_id;
	ipsec->n_sa_free_id++;

	/* Invalidate the SA. */
	sa->valid = 0;
}