/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 * Copyright (C) 2020 Marvell International Ltd.
 */
#include <rte_acl.h>
#include <rte_event_eth_tx_adapter.h>
#include <rte_lpm.h>
#include <rte_lpm6.h>

#include "event_helper.h"
#include "ipsec.h"
#include "ipsec-secgw.h"
#include "ipsec_worker.h"

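/*
 * Peek at the Ethernet header and classify the packet as plain or ESP
 * IPv4/IPv6. On return, *nlp points at the next-layer protocol byte
 * inside the IP header (ip_p/ip6_nxt). Note that IPv6 extension headers
 * are not walked; only the first next-header byte is checked.
 */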
static inline enum pkt_type
process_ipsec_get_pkt_type(struct rte_mbuf *pkt, uint8_t **nlp)
{
	struct rte_ether_hdr *eth;

	eth = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
	if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
		*nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +
				offsetof(struct ip, ip_p));
		if (**nlp == IPPROTO_ESP)
			return PKT_TYPE_IPSEC_IPV4;
		else
			return PKT_TYPE_PLAIN_IPV4;
	} else if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
		*nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +
				offsetof(struct ip6_hdr, ip6_nxt));
		if (**nlp == IPPROTO_ESP)
			return PKT_TYPE_IPSEC_IPV6;
		else
			return PKT_TYPE_PLAIN_IPV6;
	}

	/* Unknown/Unsupported type */
	return PKT_TYPE_INVALID;
}

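/* Overwrite the L2 header with the MAC addresses configured for the port */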
static inline void
update_mac_addrs(struct rte_mbuf *pkt, uint16_t portid)
{
	struct rte_ether_hdr *ethhdr;

	ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
	memcpy(&ethhdr->s_addr, &ethaddr_tbl[portid].src, RTE_ETHER_ADDR_LEN);
	memcpy(&ethhdr->d_addr, &ethaddr_tbl[portid].dst, RTE_ETHER_ADDR_LEN);
}

static inline void
ipsec_event_pre_forward(struct rte_mbuf *m, unsigned int port_id)
{
	/* Save the destination port in the mbuf */
	m->port = port_id;

	/* Save eth queue for Tx (queue 0 is used for all traffic) */
	rte_event_eth_tx_adapter_txq_set(m, 0);
}

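/*
 * Build a per-port table of outbound security sessions for driver mode.
 * Only RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL sessions qualify, and
 * only the first such session found for a given port is used.
 */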
static inline void
prepare_out_sessions_tbl(struct sa_ctx *sa_out,
		struct rte_security_session **sess_tbl, uint16_t size)
{
	struct rte_ipsec_session *pri_sess;
	struct ipsec_sa *sa;
	uint32_t i;

	if (!sa_out)
		return;

	for (i = 0; i < sa_out->nb_sa; i++) {

		sa = &sa_out->sa[i];

		pri_sess = ipsec_get_primary_session(sa);
		if (!pri_sess)
			continue;

		if (pri_sess->type !=
			RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {

			RTE_LOG(ERR, IPSEC, "Invalid session type %d\n",
				pri_sess->type);
			continue;
		}

		if (sa->portid >= size) {
			RTE_LOG(ERR, IPSEC,
				"Port id %d >= session table size %d\n",
				sa->portid, size);
			continue;
		}

		/* Use only the first inline session found for a given port */
		if (sess_tbl[sa->portid])
			continue;
		sess_tbl[sa->portid] = pri_sess->security.ses;
	}
}

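/*
 * Look the IP header up in the SP (ACL) context. Returns 0 on DISCARD
 * or when there is no match; on BYPASS, returns 1 with *sa_idx set to
 * -1; on PROTECT, returns 1 with *sa_idx set to the SA index (the ACL
 * result minus one).
 */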
static inline int
check_sp(struct sp_ctx *sp, const uint8_t *nlp, uint32_t *sa_idx)
{
	uint32_t res;

	if (unlikely(sp == NULL))
		return 0;

	rte_acl_classify((struct rte_acl_ctx *)sp, &nlp, &res, 1,
			DEFAULT_MAX_CATEGORIES);

	if (unlikely(res == 0)) {
		/* No match */
		return 0;
	}

	if (res == DISCARD)
		return 0;
	else if (res == BYPASS) {
		*sa_idx = -1;
		return 1;
	}

	*sa_idx = res - 1;
	return 1;
}

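/* LPM lookup on the IPv4 destination; returns the egress port on a hit
 * or RTE_MAX_ETHPORTS when no route exists.
 */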
static inline uint16_t
route4_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
{
	uint32_t dst_ip;
	uint16_t offset;
	uint32_t hop;
	int ret;

	offset = RTE_ETHER_HDR_LEN + offsetof(struct ip, ip_dst);
	dst_ip = *rte_pktmbuf_mtod_offset(pkt, uint32_t *, offset);
	dst_ip = rte_be_to_cpu_32(dst_ip);

	ret = rte_lpm_lookup((struct rte_lpm *)rt_ctx, dst_ip, &hop);

	if (ret == 0) {
		/* We have a hit */
		return hop;
	}

	/* No route found */
	return RTE_MAX_ETHPORTS;
}

/* TODO: To be tested */
static inline uint16_t
route6_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
{
	uint8_t dst_ip[16];
	uint8_t *ip6_dst;
	uint16_t offset;
	uint32_t hop;
	int ret;

	offset = RTE_ETHER_HDR_LEN + offsetof(struct ip6_hdr, ip6_dst);
	ip6_dst = rte_pktmbuf_mtod_offset(pkt, uint8_t *, offset);
	memcpy(&dst_ip[0], ip6_dst, 16);

	ret = rte_lpm6_lookup((struct rte_lpm6 *)rt_ctx, dst_ip, &hop);

	if (ret == 0) {
		/* We have a hit */
		return hop;
	}

	/* No route found */
	return RTE_MAX_ETHPORTS;
}

static inline uint16_t
get_route(struct rte_mbuf *pkt, struct route_table *rt, enum pkt_type type)
{
	if (type == PKT_TYPE_PLAIN_IPV4 || type == PKT_TYPE_IPSEC_IPV4)
		return route4_pkt(pkt, rt->rt4_ctx);
	else if (type == PKT_TYPE_PLAIN_IPV6 || type == PKT_TYPE_IPSEC_IPV6)
		return route6_pkt(pkt, rt->rt6_ctx);

	return RTE_MAX_ETHPORTS;
}

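/*
 * Inbound event worker path: classify the packet, match it against the
 * inbound SP and, for packets that went through inline IPsec processing,
 * verify that the SA carried by the packet is the one the policy points
 * at (SPI check). Returns PKT_FORWARDED or PKT_DROPPED.
 */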
static inline int
process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt,
		struct rte_event *ev)
{
	struct ipsec_sa *sa = NULL;
	struct rte_mbuf *pkt;
	uint16_t port_id = 0;
	enum pkt_type type;
	uint32_t sa_idx;
	uint8_t *nlp;

	/* Get pkt from event */
	pkt = ev->mbuf;

	/* Check the packet type */
	type = process_ipsec_get_pkt_type(pkt, &nlp);

	switch (type) {
	case PKT_TYPE_PLAIN_IPV4:
		if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) {
			if (unlikely(pkt->ol_flags &
				     PKT_RX_SEC_OFFLOAD_FAILED)) {
				RTE_LOG(ERR, IPSEC,
					"Inbound security offload failed\n");
				goto drop_pkt_and_exit;
			}
			sa = pkt->userdata;
		}

		/* Check if we have a match */
		if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) {
			/* No valid match */
			goto drop_pkt_and_exit;
		}
		break;

	case PKT_TYPE_PLAIN_IPV6:
		if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) {
			if (unlikely(pkt->ol_flags &
				     PKT_RX_SEC_OFFLOAD_FAILED)) {
				RTE_LOG(ERR, IPSEC,
					"Inbound security offload failed\n");
				goto drop_pkt_and_exit;
			}
			sa = pkt->userdata;
		}

		/* Check if we have a match */
		if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) {
			/* No valid match */
			goto drop_pkt_and_exit;
		}
		break;

	default:
		RTE_LOG(ERR, IPSEC, "Unsupported packet type = %d\n", type);
		goto drop_pkt_and_exit;
	}

	/* Check if the packet has to be bypassed */
	if (sa_idx == BYPASS)
		goto route_and_send_pkt;

	/* Validate sa_idx */
	if (sa_idx >= ctx->sa_ctx->nb_sa)
		goto drop_pkt_and_exit;

	/* Else the packet has to be protected by an SA */

	/* If the packet was IPsec processed, the SA pointer must be set */
	if (sa == NULL)
		goto drop_pkt_and_exit;

	/* The SPI on the packet must match the one in the SA */
	if (unlikely(sa->spi != ctx->sa_ctx->sa[sa_idx].spi))
		goto drop_pkt_and_exit;

route_and_send_pkt:
	port_id = get_route(pkt, rt, type);
	if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
		/* no match */
		goto drop_pkt_and_exit;
	}
	/* else, we have a matching route */

	/* Update mac addresses */
	update_mac_addrs(pkt, port_id);

	/* Update the event with the dest port */
	ipsec_event_pre_forward(pkt, port_id);
	return PKT_FORWARDED;

drop_pkt_and_exit:
	RTE_LOG(ERR, IPSEC, "Inbound packet dropped\n");
	rte_pktmbuf_free(pkt);
	ev->mbuf = NULL;
	return PKT_DROPPED;
}

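/*
 * Outbound event worker path: match the packet against the outbound SP
 * and either route it out as-is (BYPASS) or attach the matching SA's
 * inline-protocol session and mark the packet for Tx security offload.
 * Returns PKT_FORWARDED or PKT_DROPPED.
 */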
static inline int
process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt,
		struct rte_event *ev)
{
	struct rte_ipsec_session *sess;
	struct sa_ctx *sa_ctx;
	struct rte_mbuf *pkt;
	uint16_t port_id = 0;
	struct ipsec_sa *sa;
	enum pkt_type type;
	uint32_t sa_idx;
	uint8_t *nlp;

	/* Get pkt from event */
	pkt = ev->mbuf;

	/* Check the packet type */
	type = process_ipsec_get_pkt_type(pkt, &nlp);

	switch (type) {
	case PKT_TYPE_PLAIN_IPV4:
		/* Check if we have a match */
		if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) {
			/* No valid match */
			goto drop_pkt_and_exit;
		}
		break;
	case PKT_TYPE_PLAIN_IPV6:
		/* Check if we have a match */
		if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) {
			/* No valid match */
			goto drop_pkt_and_exit;
		}
		break;
	default:
		/*
		 * Only plain IPv4 & IPv6 packets are allowed
		 * on a protected port. Drop the rest.
		 */
		RTE_LOG(ERR, IPSEC, "Unsupported packet type = %d\n", type);
		goto drop_pkt_and_exit;
	}

	/* Check if the packet has to be bypassed */
	if (sa_idx == BYPASS) {
		port_id = get_route(pkt, rt, type);
		if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
			/* no match */
			goto drop_pkt_and_exit;
		}
		/* else, we have a matching route */
		goto send_pkt;
	}

	/* Validate sa_idx */
	if (sa_idx >= ctx->sa_ctx->nb_sa)
		goto drop_pkt_and_exit;

	/* Else the packet has to be protected */

	/* Get SA ctx */
	sa_ctx = ctx->sa_ctx;

	/* Get SA */
	sa = &(sa_ctx->sa[sa_idx]);

	/* Get IPsec session */
	sess = ipsec_get_primary_session(sa);

	/* Allow only inline protocol for now */
	if (sess->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
		RTE_LOG(ERR, IPSEC, "SA type not supported\n");
		goto drop_pkt_and_exit;
	}

	if (sess->security.ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA)
		pkt->userdata = sess->security.ses;

	/* Mark the packet for Tx security offload */
	pkt->ol_flags |= PKT_TX_SEC_OFFLOAD;

	/* Get the port to which this pkt needs to be submitted */
	port_id = sa->portid;

send_pkt:
	/* Update mac addresses */
	update_mac_addrs(pkt, port_id);

	/* Update the event with the dest port */
	ipsec_event_pre_forward(pkt, port_id);
	return PKT_FORWARDED;

drop_pkt_and_exit:
	RTE_LOG(ERR, IPSEC, "Outbound packet dropped\n");
	rte_pktmbuf_free(pkt);
	ev->mbuf = NULL;
	return PKT_DROPPED;
}

/*
 * Event mode exposes various operating modes depending on the
 * capabilities of the event device and the operating mode
 * selected.
 */

/* Workers registered */
#define IPSEC_EVENTMODE_WORKERS		2

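/*
 * Driver mode keeps the per-packet work minimal: SP/SA lookups are
 * skipped and a pre-built per-port session table drives the Tx security
 * offload. App mode runs the full SP/SA processing in software for
 * every event.
 */
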
/*
 * Event mode worker
 * Operating parameters: non-burst - Tx internal port - driver mode
 */
static void
ipsec_wrkr_non_burst_int_port_drv_mode(struct eh_event_link_info *links,
		uint8_t nb_links)
{
	struct rte_security_session *sess_tbl[RTE_MAX_ETHPORTS] = { NULL };
	unsigned int nb_rx = 0;
	struct rte_mbuf *pkt;
	struct rte_event ev;
	uint32_t lcore_id;
	int32_t socket_id;
	uint16_t port_id;

	/* Check if we have links registered for this lcore */
	if (nb_links == 0) {
		/* No links registered - exit */
		return;
	}

	/* Get core ID */
	lcore_id = rte_lcore_id();

	/* Get socket ID */
	socket_id = rte_lcore_to_socket_id(lcore_id);

	/*
	 * Prepare the security sessions table. In outbound driver mode
	 * we always use the first session configured for a given port.
	 */
	prepare_out_sessions_tbl(socket_ctx[socket_id].sa_out, sess_tbl,
			RTE_MAX_ETHPORTS);

	RTE_LOG(INFO, IPSEC,
		"Launching event mode worker (non-burst - Tx internal port - "
		"driver mode) on lcore %d\n", lcore_id);

	/* We have valid links */

	/* Check if it's a single link */
	if (nb_links != 1) {
		RTE_LOG(INFO, IPSEC,
			"Multiple links not supported. Using first link\n");
	}

	RTE_LOG(INFO, IPSEC, " -- lcoreid=%u event_port_id=%u\n", lcore_id,
			links[0].event_port_id);
	while (!force_quit) {
		/* Read packet from event queues */
		nb_rx = rte_event_dequeue_burst(links[0].eventdev_id,
				links[0].event_port_id,
				&ev,	/* events */
				1,	/* nb_events */
				0	/* timeout_ticks */);

		if (nb_rx == 0)
			continue;

		pkt = ev.mbuf;
		port_id = pkt->port;

		rte_prefetch0(rte_pktmbuf_mtod(pkt, void *));

		/* Process packet */
		ipsec_event_pre_forward(pkt, port_id);

		if (!is_unprotected_port(port_id)) {

			if (unlikely(!sess_tbl[port_id])) {
				rte_pktmbuf_free(pkt);
				continue;
			}

			/* Save security session */
			pkt->userdata = sess_tbl[port_id];

			/* Mark the packet for Tx security offload */
			pkt->ol_flags |= PKT_TX_SEC_OFFLOAD;
		}

		/*
		 * Since the Tx internal port is available, events can be
		 * directly enqueued to the adapter; they will be
		 * internally submitted to the eth device.
		 */
		rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
				links[0].event_port_id,
				&ev,	/* events */
				1,	/* nb_events */
				0	/* flags */);
	}
}

/*
 * Event mode worker
 * Operating parameters: non-burst - Tx internal port - app mode
 */
static void
ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
		uint8_t nb_links)
{
	struct lcore_conf_ev_tx_int_port_wrkr lconf;
	unsigned int nb_rx = 0;
	struct rte_event ev;
	uint32_t lcore_id;
	int32_t socket_id;
	int ret;

	/* Check if we have links registered for this lcore */
	if (nb_links == 0) {
		/* No links registered - exit */
		return;
	}

	/* We have valid links */

	/* Get core ID */
	lcore_id = rte_lcore_id();

	/* Get socket ID */
	socket_id = rte_lcore_to_socket_id(lcore_id);

	/* Save the routing table and the per-direction SP/SA contexts */
	lconf.rt.rt4_ctx = socket_ctx[socket_id].rt_ip4;
	lconf.rt.rt6_ctx = socket_ctx[socket_id].rt_ip6;
	lconf.inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in;
	lconf.inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in;
	lconf.inbound.sa_ctx = socket_ctx[socket_id].sa_in;
	lconf.inbound.session_pool = socket_ctx[socket_id].session_pool;
	lconf.inbound.session_priv_pool =
			socket_ctx[socket_id].session_priv_pool;
	lconf.outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out;
	lconf.outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out;
	lconf.outbound.sa_ctx = socket_ctx[socket_id].sa_out;
	lconf.outbound.session_pool = socket_ctx[socket_id].session_pool;
	lconf.outbound.session_priv_pool =
			socket_ctx[socket_id].session_priv_pool;

	RTE_LOG(INFO, IPSEC,
		"Launching event mode worker (non-burst - Tx internal port - "
		"app mode) on lcore %d\n", lcore_id);

	/* Check if it's a single link */
	if (nb_links != 1) {
		RTE_LOG(INFO, IPSEC,
			"Multiple links not supported. Using first link\n");
	}

	RTE_LOG(INFO, IPSEC, " -- lcoreid=%u event_port_id=%u\n", lcore_id,
		links[0].event_port_id);

	while (!force_quit) {
		/* Read packet from event queues */
		nb_rx = rte_event_dequeue_burst(links[0].eventdev_id,
				links[0].event_port_id,
				&ev,     /* events */
				1,       /* nb_events */
				0        /* timeout_ticks */);

		if (nb_rx == 0)
			continue;

		if (unlikely(ev.event_type != RTE_EVENT_TYPE_ETHDEV)) {
			RTE_LOG(ERR, IPSEC, "Invalid event type %u\n",
				ev.event_type);

			continue;
		}

		if (is_unprotected_port(ev.mbuf->port))
			ret = process_ipsec_ev_inbound(&lconf.inbound,
							&lconf.rt, &ev);
		else
			ret = process_ipsec_ev_outbound(&lconf.outbound,
							&lconf.rt, &ev);
		if (ret != PKT_FORWARDED)
			/* The pkt has been dropped */
			continue;

		/*
		 * Since the Tx internal port is available, events can be
		 * directly enqueued to the adapter; they will be
		 * internally submitted to the eth device.
		 */
		rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
				links[0].event_port_id,
				&ev,	/* events */
				1,	/* nb_events */
				0	/* flags */);
	}
}

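/*
 * Register the supported worker threads along with their capability
 * tuple (burst type, Tx port type, IPsec mode); the event helper
 * matches these against the event device's capabilities at launch.
 */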
static uint8_t
ipsec_eventmode_populate_wrkr_params(struct eh_app_worker_params *wrkrs)
{
	struct eh_app_worker_params *wrkr;
	uint8_t nb_wrkr_param = 0;

	/* Save workers */
	wrkr = wrkrs;

	/* Non-burst - Tx internal port - driver mode */
	wrkr->cap.burst = EH_RX_TYPE_NON_BURST;
	wrkr->cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
	wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_DRIVER;
	wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_drv_mode;
	wrkr++;
	nb_wrkr_param++;

	/* Non-burst - Tx internal port - app mode */
	wrkr->cap.burst = EH_RX_TYPE_NON_BURST;
	wrkr->cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
	wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_APP;
	wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_app_mode;
	nb_wrkr_param++;

	return nb_wrkr_param;
}

static void
ipsec_eventmode_worker(struct eh_conf *conf)
{
	struct eh_app_worker_params ipsec_wrkr[IPSEC_EVENTMODE_WORKERS] = {
					{{{0} }, NULL } };
	uint8_t nb_wrkr_param;

	/* Populate ipsec_wrkr params */
	nb_wrkr_param = ipsec_eventmode_populate_wrkr_params(ipsec_wrkr);

	/*
	 * Launch the correct worker after checking
	 * the event device's capabilities.
	 */
	eh_launch_worker(conf, ipsec_wrkr, nb_wrkr_param);
}

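/*
 * Entry point for worker lcores: run the poll mode or event mode
 * worker depending on the configured packet transfer mode.
 */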
int ipsec_launch_one_lcore(void *args)
{
	struct eh_conf *conf;

	conf = (struct eh_conf *)args;

	if (conf->mode == EH_PKT_TRANSFER_MODE_POLL) {
		/* Run in poll mode */
		ipsec_poll_mode_worker();
	} else if (conf->mode == EH_PKT_TRANSFER_MODE_EVENT) {
		/* Run in event mode */
		ipsec_eventmode_worker(conf);
	}
	return 0;
}