xref: /dpdk/examples/ipsec-secgw/ipsec_worker.c (revision 03ab51eafda992874a48c392ca66ffb577fe2b71)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 * Copyright (C) 2020 Marvell International Ltd.
 */
#include <rte_acl.h>
#include <rte_event_eth_tx_adapter.h>
#include <rte_lpm.h>
#include <rte_lpm6.h>

#include "event_helper.h"
#include "ipsec.h"
#include "ipsec-secgw.h"
#include "ipsec_worker.h"

struct port_drv_mode_data {
	struct rte_security_session *sess;
	struct rte_security_ctx *ctx;
};

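/*
 * Classify a packet using the ptype flags set by the Rx port and return
 * one of the pkt_type values. On a match, *nlp is set to point at the
 * next-layer protocol field of the IP header (ip_p for IPv4, ip6_nxt
 * for IPv6), which is later fed to the SP ACL lookup in check_sp().
 */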
static inline enum pkt_type
process_ipsec_get_pkt_type(struct rte_mbuf *pkt, uint8_t **nlp)
{
	struct rte_ether_hdr *eth;
	uint32_t ptype = pkt->packet_type;

	eth = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
	rte_prefetch0(eth);

	if (RTE_ETH_IS_IPV4_HDR(ptype)) {
		*nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +
				offsetof(struct ip, ip_p));
		if ((ptype & RTE_PTYPE_TUNNEL_MASK) == RTE_PTYPE_TUNNEL_ESP)
			return PKT_TYPE_IPSEC_IPV4;
		else
			return PKT_TYPE_PLAIN_IPV4;
	} else if (RTE_ETH_IS_IPV6_HDR(ptype)) {
		*nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +
				offsetof(struct ip6_hdr, ip6_nxt));
		if ((ptype & RTE_PTYPE_TUNNEL_MASK) == RTE_PTYPE_TUNNEL_ESP)
			return PKT_TYPE_IPSEC_IPV6;
		else
			return PKT_TYPE_PLAIN_IPV6;
	}

	/* Unknown/Unsupported type */
	return PKT_TYPE_INVALID;
}

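/*
 * Rewrite the Ethernet header with the source/destination MAC
 * addresses configured for the Tx port.
 */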
static inline void
update_mac_addrs(struct rte_mbuf *pkt, uint16_t portid)
{
	struct rte_ether_hdr *ethhdr;

	ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
	memcpy(&ethhdr->src_addr, &ethaddr_tbl[portid].src, RTE_ETHER_ADDR_LEN);
	memcpy(&ethhdr->dst_addr, &ethaddr_tbl[portid].dst, RTE_ETHER_ADDR_LEN);
}

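/* Prepare an mbuf for transmission via the event eth Tx adapter */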
static inline void
ipsec_event_pre_forward(struct rte_mbuf *m, unsigned int port_id)
{
	/* Save the destination port in the mbuf */
	m->port = port_id;

	/* Save eth queue for Tx */
	rte_event_eth_tx_adapter_txq_set(m, 0);
}

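/*
 * Build a per-port table of outbound inline protocol sessions for driver
 * mode. Only the first inline session found for each port is recorded;
 * SAs with any other session type are skipped with an error log.
 */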
static inline void
prepare_out_sessions_tbl(struct sa_ctx *sa_out,
			 struct port_drv_mode_data *data,
			 uint16_t size)
{
	struct rte_ipsec_session *pri_sess;
	struct ipsec_sa *sa;
	uint32_t i;

	if (!sa_out)
		return;

	for (i = 0; i < sa_out->nb_sa; i++) {
		sa = &sa_out->sa[i];

		pri_sess = ipsec_get_primary_session(sa);
		if (!pri_sess)
			continue;

		if (pri_sess->type !=
			RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
			RTE_LOG(ERR, IPSEC, "Invalid session type %d\n",
				pri_sess->type);
			continue;
		}

		if (sa->portid >= size) {
			RTE_LOG(ERR, IPSEC,
				"Port id %d is out of range (table size %d)\n",
				sa->portid, size);
			continue;
		}

		/* Use only first inline session found for a given port */
		if (data[sa->portid].sess)
			continue;
		data[sa->portid].sess = pri_sess->security.ses;
		data[sa->portid].ctx = pri_sess->security.ctx;
	}
}

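/*
 * Run the SP ACL lookup on the next-layer protocol field. Returns 0 when
 * the lookup says DISCARD (the caller must drop the packet) and 1
 * otherwise, with *sa_idx holding the SA index derived from the ACL
 * result, or -1 for a BYPASS match.
 */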
static inline int
check_sp(struct sp_ctx *sp, const uint8_t *nlp, uint32_t *sa_idx)
{
	uint32_t res;

	if (unlikely(sp == NULL))
		return 0;

	rte_acl_classify((struct rte_acl_ctx *)sp, &nlp, &res, 1,
			DEFAULT_MAX_CATEGORIES);

	if (unlikely(res == DISCARD))
		return 0;
	else if (res == BYPASS) {
		*sa_idx = -1;
		return 1;
	}

	*sa_idx = res - 1;
	return 1;
}

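/*
 * Route an IPv4 packet with an LPM lookup on the destination address.
 * Returns the resolved port, or RTE_MAX_ETHPORTS when no route matches.
 */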
static inline uint16_t
route4_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
{
	uint32_t dst_ip;
	uint16_t offset;
	uint32_t hop;
	int ret;

	offset = RTE_ETHER_HDR_LEN + offsetof(struct ip, ip_dst);
	dst_ip = *rte_pktmbuf_mtod_offset(pkt, uint32_t *, offset);
	dst_ip = rte_be_to_cpu_32(dst_ip);

	ret = rte_lpm_lookup((struct rte_lpm *)rt_ctx, dst_ip, &hop);

	if (ret == 0) {
		/* We have a hit */
		return hop;
	}

	/* else */
	return RTE_MAX_ETHPORTS;
}

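/*
 * Route an IPv6 packet with an LPM6 lookup on the destination address.
 * Returns the resolved port, or RTE_MAX_ETHPORTS when no route matches.
 */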
/* TODO: To be tested */
static inline uint16_t
route6_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
{
	uint8_t dst_ip[16];
	uint8_t *ip6_dst;
	uint16_t offset;
	uint32_t hop;
	int ret;

	offset = RTE_ETHER_HDR_LEN + offsetof(struct ip6_hdr, ip6_dst);
	ip6_dst = rte_pktmbuf_mtod_offset(pkt, uint8_t *, offset);
	memcpy(&dst_ip[0], ip6_dst, 16);

	ret = rte_lpm6_lookup((struct rte_lpm6 *)rt_ctx, dst_ip, &hop);

	if (ret == 0) {
		/* We have a hit */
		return hop;
	}

	/* else */
	return RTE_MAX_ETHPORTS;
}

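/* Dispatch to the IPv4 or IPv6 routing table based on the packet type */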
static inline uint16_t
get_route(struct rte_mbuf *pkt, struct route_table *rt, enum pkt_type type)
{
	if (type == PKT_TYPE_PLAIN_IPV4 || type == PKT_TYPE_IPSEC_IPV4)
		return route4_pkt(pkt, rt->rt4_ctx);
	else if (type == PKT_TYPE_PLAIN_IPV6 || type == PKT_TYPE_IPSEC_IPV6)
		return route6_pkt(pkt, rt->rt6_ctx);

	return RTE_MAX_ETHPORTS;
}

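/*
 * Process an event received on an unprotected port (inbound traffic).
 * Classify the packet, match it against the inbound SP, verify that
 * protected traffic was IPsec-processed by the hardware and that its SPI
 * matches the SA selected by the SP, then route the packet.
 * Returns PKT_FORWARDED on success, or PKT_DROPPED after freeing the mbuf.
 */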
static inline int
process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt,
		struct rte_event *ev)
{
	struct ipsec_sa *sa = NULL;
	struct rte_mbuf *pkt;
	uint16_t port_id = 0;
	enum pkt_type type;
	uint32_t sa_idx;
	uint8_t *nlp;

	/* Get pkt from event */
	pkt = ev->mbuf;

	/* Check the packet type */
	type = process_ipsec_get_pkt_type(pkt, &nlp);

	switch (type) {
	case PKT_TYPE_PLAIN_IPV4:
		if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) {
			if (unlikely(pkt->ol_flags &
				     PKT_RX_SEC_OFFLOAD_FAILED)) {
				RTE_LOG(ERR, IPSEC,
					"Inbound security offload failed\n");
				goto drop_pkt_and_exit;
			}
			sa = *(struct ipsec_sa **)rte_security_dynfield(pkt);
		}

		/* Check if we have a match */
		if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) {
			/* No valid match */
			goto drop_pkt_and_exit;
		}
		break;

	case PKT_TYPE_PLAIN_IPV6:
		if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) {
			if (unlikely(pkt->ol_flags &
				     PKT_RX_SEC_OFFLOAD_FAILED)) {
				RTE_LOG(ERR, IPSEC,
					"Inbound security offload failed\n");
				goto drop_pkt_and_exit;
			}
			sa = *(struct ipsec_sa **)rte_security_dynfield(pkt);
		}

		/* Check if we have a match */
		if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) {
			/* No valid match */
			goto drop_pkt_and_exit;
		}
		break;

	default:
		RTE_LOG(ERR, IPSEC, "Unsupported packet type = %d\n", type);
		goto drop_pkt_and_exit;
	}

	/* Check if the packet has to be bypassed */
	if (sa_idx == BYPASS)
		goto route_and_send_pkt;

	/* Validate sa_idx */
	if (sa_idx >= ctx->sa_ctx->nb_sa)
		goto drop_pkt_and_exit;

	/* Else the packet has to be protected with SA */

	/* If the packet was IPsec processed, then SA pointer should be set */
	if (sa == NULL)
		goto drop_pkt_and_exit;

	/* SPI on the packet should match with the one in SA */
	if (unlikely(sa->spi != ctx->sa_ctx->sa[sa_idx].spi))
		goto drop_pkt_and_exit;

route_and_send_pkt:
	port_id = get_route(pkt, rt, type);
	if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
		/* no match */
		goto drop_pkt_and_exit;
	}
	/* else, we have a matching route */

	/* Update mac addresses */
	update_mac_addrs(pkt, port_id);

	/* Update the event with the dest port */
	ipsec_event_pre_forward(pkt, port_id);
	return PKT_FORWARDED;

drop_pkt_and_exit:
	RTE_LOG(ERR, IPSEC, "Inbound packet dropped\n");
	rte_pktmbuf_free(pkt);
	ev->mbuf = NULL;
	return PKT_DROPPED;
}

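/*
 * Process an event received on a protected port (outbound traffic).
 * Match the plain packet against the outbound SP and either route it
 * unmodified (BYPASS) or attach the SA's inline protocol session so the
 * Tx port performs the IPsec transformation.
 * Returns PKT_FORWARDED on success, or PKT_DROPPED after freeing the mbuf.
 */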
static inline int
process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt,
		struct rte_event *ev)
{
	struct rte_ipsec_session *sess;
	struct sa_ctx *sa_ctx;
	struct rte_mbuf *pkt;
	uint16_t port_id = 0;
	struct ipsec_sa *sa;
	enum pkt_type type;
	uint32_t sa_idx;
	uint8_t *nlp;

	/* Get pkt from event */
	pkt = ev->mbuf;

	/* Check the packet type */
	type = process_ipsec_get_pkt_type(pkt, &nlp);

	switch (type) {
	case PKT_TYPE_PLAIN_IPV4:
		/* Check if we have a match */
		if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) {
			/* No valid match */
			goto drop_pkt_and_exit;
		}
		break;
	case PKT_TYPE_PLAIN_IPV6:
		/* Check if we have a match */
		if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) {
			/* No valid match */
			goto drop_pkt_and_exit;
		}
		break;
	default:
		/*
		 * Only plain IPv4 & IPv6 packets are allowed
		 * on protected port. Drop the rest.
		 */
		RTE_LOG(ERR, IPSEC, "Unsupported packet type = %d\n", type);
		goto drop_pkt_and_exit;
	}

	/* Check if the packet has to be bypassed */
	if (sa_idx == BYPASS) {
		port_id = get_route(pkt, rt, type);
		if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
			/* no match */
			goto drop_pkt_and_exit;
		}
		/* else, we have a matching route */
		goto send_pkt;
	}

	/* Validate sa_idx */
	if (unlikely(sa_idx >= ctx->sa_ctx->nb_sa))
		goto drop_pkt_and_exit;

	/* Else the packet has to be protected */

	/* Get SA ctx */
	sa_ctx = ctx->sa_ctx;

	/* Get SA */
	sa = &(sa_ctx->sa[sa_idx]);

	/* Get IPsec session */
	sess = ipsec_get_primary_session(sa);

	/* Allow only inline protocol for now */
	if (unlikely(sess->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)) {
		RTE_LOG(ERR, IPSEC, "SA type not supported\n");
		goto drop_pkt_and_exit;
	}

	rte_security_set_pkt_metadata(sess->security.ctx,
				      sess->security.ses, pkt, NULL);

	/* Mark the packet for Tx security offload */
	pkt->ol_flags |= PKT_TX_SEC_OFFLOAD;

	/* Get the port to which this pkt needs to be submitted */
	port_id = sa->portid;

send_pkt:
	/* Provide L2 len for Outbound processing */
	pkt->l2_len = RTE_ETHER_HDR_LEN;

	/* Update mac addresses */
	update_mac_addrs(pkt, port_id);

	/* Update the event with the dest port */
	ipsec_event_pre_forward(pkt, port_id);
	return PKT_FORWARDED;

drop_pkt_and_exit:
	RTE_LOG(ERR, IPSEC, "Outbound packet dropped\n");
	rte_pktmbuf_free(pkt);
	ev->mbuf = NULL;
	return PKT_DROPPED;
}

/*
 * Event mode exposes various operating modes depending on the
 * capabilities of the event device and the operating mode
 * selected.
 */

/* Workers registered */
#define IPSEC_EVENTMODE_WORKERS		2

/*
 * Event mode worker
 * Operating parameters : non-burst - Tx internal port - driver mode
 */
static void
ipsec_wrkr_non_burst_int_port_drv_mode(struct eh_event_link_info *links,
		uint8_t nb_links)
{
	struct port_drv_mode_data data[RTE_MAX_ETHPORTS];
	unsigned int nb_rx = 0;
	struct rte_mbuf *pkt;
	struct rte_event ev;
	uint32_t lcore_id;
	int32_t socket_id;
	int16_t port_id;

	/* Check if we have links registered for this lcore */
	if (nb_links == 0) {
		/* No links registered - exit */
		return;
	}

	memset(data, 0, sizeof(data));

	/* Get core ID */
	lcore_id = rte_lcore_id();

	/* Get socket ID */
	socket_id = rte_lcore_to_socket_id(lcore_id);

	/*
	 * Prepare security sessions table. In outbound driver mode
	 * we always use first session configured for a given port
	 */
	prepare_out_sessions_tbl(socket_ctx[socket_id].sa_out, data,
				 RTE_MAX_ETHPORTS);

	RTE_LOG(INFO, IPSEC,
		"Launching event mode worker (non-burst - Tx internal port - "
		"driver mode) on lcore %d\n", lcore_id);

	/* We have valid links */

	/* Check if it's single link */
	if (nb_links != 1) {
		RTE_LOG(INFO, IPSEC,
			"Multiple links not supported. Using first link\n");
	}

	RTE_LOG(INFO, IPSEC, " -- lcoreid=%u event_port_id=%u\n", lcore_id,
			links[0].event_port_id);
	while (!force_quit) {
		/* Read packet from event queues */
		nb_rx = rte_event_dequeue_burst(links[0].eventdev_id,
				links[0].event_port_id,
				&ev,	/* events */
				1,	/* nb_events */
				0	/* timeout_ticks */);

		if (nb_rx == 0)
			continue;

		pkt = ev.mbuf;
		port_id = pkt->port;

		rte_prefetch0(rte_pktmbuf_mtod(pkt, void *));

		/* Process packet */
		ipsec_event_pre_forward(pkt, port_id);

		if (!is_unprotected_port(port_id)) {
			if (unlikely(!data[port_id].sess)) {
				rte_pktmbuf_free(pkt);
				continue;
			}

			/* Save security session */
			rte_security_set_pkt_metadata(data[port_id].ctx,
						      data[port_id].sess, pkt,
						      NULL);

			/* Mark the packet for Tx security offload */
			pkt->ol_flags |= PKT_TX_SEC_OFFLOAD;

			/* Provide L2 len for Outbound processing */
			pkt->l2_len = RTE_ETHER_HDR_LEN;
		}

		/*
		 * Since tx internal port is available, events can be
		 * directly enqueued to the adapter and it would be
		 * internally submitted to the eth device.
		 */
		rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
				links[0].event_port_id,
				&ev,	/* events */
				1,	/* nb_events */
				0	/* flags */);
	}
}

/*
 * Event mode worker
 * Operating parameters : non-burst - Tx internal port - app mode
 */
static void
ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
		uint8_t nb_links)
{
	struct lcore_conf_ev_tx_int_port_wrkr lconf;
	unsigned int nb_rx = 0;
	struct rte_event ev;
	uint32_t lcore_id;
	int32_t socket_id;
	int ret;

	/* Check if we have links registered for this lcore */
	if (nb_links == 0) {
		/* No links registered - exit */
		return;
	}

	/* We have valid links */

	/* Get core ID */
	lcore_id = rte_lcore_id();

	/* Get socket ID */
	socket_id = rte_lcore_to_socket_id(lcore_id);

	/* Save routing table */
	lconf.rt.rt4_ctx = socket_ctx[socket_id].rt_ip4;
	lconf.rt.rt6_ctx = socket_ctx[socket_id].rt_ip6;
	lconf.inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in;
	lconf.inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in;
	lconf.inbound.sa_ctx = socket_ctx[socket_id].sa_in;
	lconf.inbound.session_pool = socket_ctx[socket_id].session_pool;
	lconf.inbound.session_priv_pool =
			socket_ctx[socket_id].session_priv_pool;
	lconf.outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out;
	lconf.outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out;
	lconf.outbound.sa_ctx = socket_ctx[socket_id].sa_out;
	lconf.outbound.session_pool = socket_ctx[socket_id].session_pool;
	lconf.outbound.session_priv_pool =
			socket_ctx[socket_id].session_priv_pool;

	RTE_LOG(INFO, IPSEC,
		"Launching event mode worker (non-burst - Tx internal port - "
		"app mode) on lcore %d\n", lcore_id);

	/* Check if it's single link */
	if (nb_links != 1) {
		RTE_LOG(INFO, IPSEC,
			"Multiple links not supported. Using first link\n");
	}

	RTE_LOG(INFO, IPSEC, " -- lcoreid=%u event_port_id=%u\n", lcore_id,
		links[0].event_port_id);

	while (!force_quit) {
		/* Read packet from event queues */
		nb_rx = rte_event_dequeue_burst(links[0].eventdev_id,
				links[0].event_port_id,
				&ev,     /* events */
				1,       /* nb_events */
				0        /* timeout_ticks */);

		if (nb_rx == 0)
			continue;

		if (unlikely(ev.event_type != RTE_EVENT_TYPE_ETHDEV)) {
			RTE_LOG(ERR, IPSEC, "Invalid event type %u\n",
				ev.event_type);
			continue;
		}

		if (is_unprotected_port(ev.mbuf->port))
			ret = process_ipsec_ev_inbound(&lconf.inbound,
							&lconf.rt, &ev);
		else
			ret = process_ipsec_ev_outbound(&lconf.outbound,
							&lconf.rt, &ev);
		if (ret != PKT_FORWARDED)
			/* The pkt has been dropped */
			continue;

		/*
		 * Since tx internal port is available, events can be
		 * directly enqueued to the adapter and it would be
		 * internally submitted to the eth device.
		 */
		rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
				links[0].event_port_id,
				&ev,	/* events */
				1,	/* nb_events */
				0	/* flags */);
	}
}

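/* Register the supported event mode workers along with their capabilities */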
static uint8_t
ipsec_eventmode_populate_wrkr_params(struct eh_app_worker_params *wrkrs)
{
	struct eh_app_worker_params *wrkr;
	uint8_t nb_wrkr_param = 0;

	/* Save workers */
	wrkr = wrkrs;

	/* Non-burst - Tx internal port - driver mode */
	wrkr->cap.burst = EH_RX_TYPE_NON_BURST;
	wrkr->cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
	wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_DRIVER;
	wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_drv_mode;
	wrkr++;
	nb_wrkr_param++;

	/* Non-burst - Tx internal port - app mode */
	wrkr->cap.burst = EH_RX_TYPE_NON_BURST;
	wrkr->cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
	wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_APP;
	wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_app_mode;
	nb_wrkr_param++;

	return nb_wrkr_param;
}

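/* Choose and launch the worker that matches the event device capabilities */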
static void
ipsec_eventmode_worker(struct eh_conf *conf)
{
	struct eh_app_worker_params ipsec_wrkr[IPSEC_EVENTMODE_WORKERS] = {
					{{{0} }, NULL } };
	uint8_t nb_wrkr_param;

	/* Populate ipsec_wrkr params */
	nb_wrkr_param = ipsec_eventmode_populate_wrkr_params(ipsec_wrkr);

	/*
	 * Launch correct worker after checking
	 * the event device's capabilities.
	 */
	eh_launch_worker(conf, ipsec_wrkr, nb_wrkr_param);
}

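/* Per-lcore entry point: run the poll mode or event mode worker as configured */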
int
ipsec_launch_one_lcore(void *args)
{
	struct eh_conf *conf;

	conf = (struct eh_conf *)args;

	if (conf->mode == EH_PKT_TRANSFER_MODE_POLL) {
		/* Run in poll mode */
		ipsec_poll_mode_worker();
	} else if (conf->mode == EH_PKT_TRANSFER_MODE_EVENT) {
		/* Run in event mode */
		ipsec_eventmode_worker(conf);
	}
	return 0;
}