/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 * Copyright (C) 2020 Marvell International Ltd.
 */
#include <rte_acl.h>
#include <rte_event_eth_tx_adapter.h>
#include <rte_lpm.h>
#include <rte_lpm6.h>

#include "event_helper.h"
#include "ipsec.h"
#include "ipsec-secgw.h"
#include "ipsec_worker.h"

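/*
 * Per-port data used by the driver mode worker: the first inline
 * protocol session configured for the port, and the security context
 * needed to attach it to outgoing packets.
 */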
struct port_drv_mode_data {
	struct rte_security_session *sess;
	struct rte_security_ctx *ctx;
};

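/*
 * Classify a packet as plain/IPsec IPv4/IPv6 from its ethertype and
 * next-layer protocol. On return, *nlp points at the IPv4 protocol byte
 * or the IPv6 next-header byte inside the packet, ready to be fed to
 * the SP ACL lookup.
 */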
static inline enum pkt_type
process_ipsec_get_pkt_type(struct rte_mbuf *pkt, uint8_t **nlp)
{
	struct rte_ether_hdr *eth;

	eth = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
	if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
		*nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +
				offsetof(struct ip, ip_p));
		if (**nlp == IPPROTO_ESP)
			return PKT_TYPE_IPSEC_IPV4;
		else
			return PKT_TYPE_PLAIN_IPV4;
	} else if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
		*nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +
				offsetof(struct ip6_hdr, ip6_nxt));
		if (**nlp == IPPROTO_ESP)
			return PKT_TYPE_IPSEC_IPV6;
		else
			return PKT_TYPE_PLAIN_IPV6;
	}

	/* Unknown/Unsupported type */
	return PKT_TYPE_INVALID;
}

static inline void
update_mac_addrs(struct rte_mbuf *pkt, uint16_t portid)
{
	struct rte_ether_hdr *ethhdr;

	ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
	memcpy(&ethhdr->s_addr, &ethaddr_tbl[portid].src, RTE_ETHER_ADDR_LEN);
	memcpy(&ethhdr->d_addr, &ethaddr_tbl[portid].dst, RTE_ETHER_ADDR_LEN);
}

static inline void
ipsec_event_pre_forward(struct rte_mbuf *m, unsigned int port_id)
{
	/* Save the destination port in the mbuf */
	m->port = port_id;

	/* Save eth queue for Tx */
	rte_event_eth_tx_adapter_txq_set(m, 0);
}

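/*
 * Build the per-port session table used by the driver mode worker: for
 * every outbound SA with an inline protocol primary session, record the
 * session against the SA's port. Only the first such session per port
 * is kept.
 */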
static inline void
prepare_out_sessions_tbl(struct sa_ctx *sa_out,
			 struct port_drv_mode_data *data,
			 uint16_t size)
{
	struct rte_ipsec_session *pri_sess;
	struct ipsec_sa *sa;
	uint32_t i;

	if (!sa_out)
		return;

	for (i = 0; i < sa_out->nb_sa; i++) {

		sa = &sa_out->sa[i];
		if (!sa)
			continue;

		pri_sess = ipsec_get_primary_session(sa);
		if (!pri_sess)
			continue;

		if (pri_sess->type !=
			RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {

			RTE_LOG(ERR, IPSEC, "Invalid session type %d\n",
				pri_sess->type);
			continue;
		}

		if (sa->portid >= size) {
			RTE_LOG(ERR, IPSEC,
				"Port id %d exceeds table size %d\n",
				sa->portid, size);
			continue;
		}

		/* Use only the first inline session found for a given port */
		if (data[sa->portid].sess)
			continue;
		data[sa->portid].sess = pri_sess->security.ses;
		data[sa->portid].ctx = pri_sess->security.ctx;
	}
}

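/*
 * Match the packet's next-layer protocol field against the SP (ACL)
 * table. The ACL userdata carries the verdict: DISCARD, BYPASS, or an
 * SA index stored offset by one (a zero ACL result means no match), so
 * "res - 1" below recovers the real SA index.
 */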
static inline int
check_sp(struct sp_ctx *sp, const uint8_t *nlp, uint32_t *sa_idx)
{
	uint32_t res;

	if (unlikely(sp == NULL))
		return 0;

	rte_acl_classify((struct rte_acl_ctx *)sp, &nlp, &res, 1,
			DEFAULT_MAX_CATEGORIES);

	if (unlikely(res == DISCARD))
		return 0;
	else if (res == BYPASS) {
		*sa_idx = -1;
		return 1;
	}

	*sa_idx = res - 1;
	return 1;
}

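/*
 * LPM route lookup on the destination address. RTE_MAX_ETHPORTS is used
 * as the "no route" sentinel, since it can never be a valid port id.
 */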
static inline uint16_t
route4_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
{
	uint32_t dst_ip;
	uint16_t offset;
	uint32_t hop;
	int ret;

	offset = RTE_ETHER_HDR_LEN + offsetof(struct ip, ip_dst);
	dst_ip = *rte_pktmbuf_mtod_offset(pkt, uint32_t *, offset);
	dst_ip = rte_be_to_cpu_32(dst_ip);

	ret = rte_lpm_lookup((struct rte_lpm *)rt_ctx, dst_ip, &hop);

	if (ret == 0) {
		/* We have a hit */
		return hop;
	}

	/* else */
	return RTE_MAX_ETHPORTS;
}

/* TODO: To be tested */
static inline uint16_t
route6_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
{
	uint8_t dst_ip[16];
	uint8_t *ip6_dst;
	uint16_t offset;
	uint32_t hop;
	int ret;

	offset = RTE_ETHER_HDR_LEN + offsetof(struct ip6_hdr, ip6_dst);
	ip6_dst = rte_pktmbuf_mtod_offset(pkt, uint8_t *, offset);
	memcpy(&dst_ip[0], ip6_dst, 16);

	ret = rte_lpm6_lookup((struct rte_lpm6 *)rt_ctx, dst_ip, &hop);

	if (ret == 0) {
		/* We have a hit */
		return hop;
	}

	/* else */
	return RTE_MAX_ETHPORTS;
}

static inline uint16_t
get_route(struct rte_mbuf *pkt, struct route_table *rt, enum pkt_type type)
{
	if (type == PKT_TYPE_PLAIN_IPV4 || type == PKT_TYPE_IPSEC_IPV4)
		return route4_pkt(pkt, rt->rt4_ctx);
	else if (type == PKT_TYPE_PLAIN_IPV6 || type == PKT_TYPE_IPSEC_IPV6)
		return route6_pkt(pkt, rt->rt6_ctx);

	return RTE_MAX_ETHPORTS;
}

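/*
 * Inbound event processing: classify the packet, look it up in the
 * inbound SP, and for protected traffic verify that the inline offload
 * succeeded and that the SA the hardware used (stashed in the security
 * dynfield) carries the SPI the matched SP rule expects. Bypassed and
 * verified packets are then routed and the event updated with the
 * destination port.
 */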
static inline int
process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt,
		struct rte_event *ev)
{
	struct ipsec_sa *sa = NULL;
	struct rte_mbuf *pkt;
	uint16_t port_id = 0;
	enum pkt_type type;
	uint32_t sa_idx;
	uint8_t *nlp;

	/* Get pkt from event */
	pkt = ev->mbuf;

	/* Check the packet type */
	type = process_ipsec_get_pkt_type(pkt, &nlp);

	switch (type) {
	case PKT_TYPE_PLAIN_IPV4:
		if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) {
			if (unlikely(pkt->ol_flags &
				     PKT_RX_SEC_OFFLOAD_FAILED)) {
				RTE_LOG(ERR, IPSEC,
					"Inbound security offload failed\n");
				goto drop_pkt_and_exit;
			}
			sa = *(struct ipsec_sa **)rte_security_dynfield(pkt);
		}

		/* Check if we have a match */
		if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) {
			/* No valid match */
			goto drop_pkt_and_exit;
		}
		break;

	case PKT_TYPE_PLAIN_IPV6:
		if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) {
			if (unlikely(pkt->ol_flags &
				     PKT_RX_SEC_OFFLOAD_FAILED)) {
				RTE_LOG(ERR, IPSEC,
					"Inbound security offload failed\n");
				goto drop_pkt_and_exit;
			}
			sa = *(struct ipsec_sa **)rte_security_dynfield(pkt);
		}

		/* Check if we have a match */
		if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) {
			/* No valid match */
			goto drop_pkt_and_exit;
		}
		break;

	default:
		RTE_LOG(ERR, IPSEC, "Unsupported packet type = %d\n", type);
		goto drop_pkt_and_exit;
	}

	/* Check if the packet has to be bypassed */
	if (sa_idx == BYPASS)
		goto route_and_send_pkt;

	/* Validate sa_idx */
	if (sa_idx >= ctx->sa_ctx->nb_sa)
		goto drop_pkt_and_exit;

	/* Else the packet has to be protected with SA */

	/* If the packet was IPsec processed, then SA pointer should be set */
	if (sa == NULL)
		goto drop_pkt_and_exit;

	/* SPI on the packet should match the one in the SA */
	if (unlikely(sa->spi != ctx->sa_ctx->sa[sa_idx].spi))
		goto drop_pkt_and_exit;

route_and_send_pkt:
	port_id = get_route(pkt, rt, type);
	if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
		/* no match */
		goto drop_pkt_and_exit;
	}
	/* else, we have a matching route */

	/* Update mac addresses */
	update_mac_addrs(pkt, port_id);

	/* Update the event with the dest port */
	ipsec_event_pre_forward(pkt, port_id);
	return PKT_FORWARDED;

drop_pkt_and_exit:
	RTE_LOG(ERR, IPSEC, "Inbound packet dropped\n");
	rte_pktmbuf_free(pkt);
	ev->mbuf = NULL;
	return PKT_DROPPED;
}

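/*
 * Outbound event processing: only plain IPv4/IPv6 from protected ports
 * is accepted. The outbound SP decides between BYPASS (route by LPM)
 * and protection by a specific SA; protected packets get the SA's
 * inline protocol session attached as packet metadata and leave on the
 * SA's port.
 */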
static inline int
process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt,
		struct rte_event *ev)
{
	struct rte_ipsec_session *sess;
	struct sa_ctx *sa_ctx;
	struct rte_mbuf *pkt;
	uint16_t port_id = 0;
	struct ipsec_sa *sa;
	enum pkt_type type;
	uint32_t sa_idx;
	uint8_t *nlp;

	/* Get pkt from event */
	pkt = ev->mbuf;

	/* Check the packet type */
	type = process_ipsec_get_pkt_type(pkt, &nlp);

	switch (type) {
	case PKT_TYPE_PLAIN_IPV4:
		/* Check if we have a match */
		if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) {
			/* No valid match */
			goto drop_pkt_and_exit;
		}
		break;
	case PKT_TYPE_PLAIN_IPV6:
		/* Check if we have a match */
		if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) {
			/* No valid match */
			goto drop_pkt_and_exit;
		}
		break;
	default:
		/*
		 * Only plain IPv4 & IPv6 packets are allowed
		 * on protected port. Drop the rest.
		 */
		RTE_LOG(ERR, IPSEC, "Unsupported packet type = %d\n", type);
		goto drop_pkt_and_exit;
	}

	/* Check if the packet has to be bypassed */
	if (sa_idx == BYPASS) {
		port_id = get_route(pkt, rt, type);
		if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
			/* no match */
			goto drop_pkt_and_exit;
		}
		/* else, we have a matching route */
		goto send_pkt;
	}

	/* Validate sa_idx */
	if (sa_idx >= ctx->sa_ctx->nb_sa)
		goto drop_pkt_and_exit;

	/* Else the packet has to be protected */

	/* Get SA ctx */
	sa_ctx = ctx->sa_ctx;

	/* Get SA */
	sa = &(sa_ctx->sa[sa_idx]);

	/* Get IPsec session */
	sess = ipsec_get_primary_session(sa);

	/* Allow only inline protocol for now */
	if (sess->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
		RTE_LOG(ERR, IPSEC, "SA type not supported\n");
		goto drop_pkt_and_exit;
	}

	rte_security_set_pkt_metadata(sess->security.ctx,
				      sess->security.ses, pkt, NULL);

	/* Mark the packet for Tx security offload */
	pkt->ol_flags |= PKT_TX_SEC_OFFLOAD;

	/* Get the port to which this pkt needs to be submitted */
	port_id = sa->portid;

send_pkt:
	/* Provide L2 len for Outbound processing */
	pkt->l2_len = RTE_ETHER_HDR_LEN;

	/* Update mac addresses */
	update_mac_addrs(pkt, port_id);

	/* Update the event with the dest port */
	ipsec_event_pre_forward(pkt, port_id);
	return PKT_FORWARDED;

drop_pkt_and_exit:
	RTE_LOG(ERR, IPSEC, "Outbound packet dropped\n");
	rte_pktmbuf_free(pkt);
	ev->mbuf = NULL;
	return PKT_DROPPED;
}

/*
 * Event mode exposes various operating modes depending on the
 * capabilities of the event device and the operating mode
 * selected.
 */

/* Workers registered */
#define IPSEC_EVENTMODE_WORKERS		2

/*
 * Event mode worker
 * Operating parameters : non-burst - Tx internal port - driver mode
 */
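/*
 * In driver mode the worker performs no SP/SA lookups of its own; the
 * event is forwarded largely as-is. If the packet is leaving through a
 * protected port, the pre-selected inline protocol session for that
 * port is attached as packet metadata before the event is handed to
 * the Tx adapter.
 */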
static void
ipsec_wrkr_non_burst_int_port_drv_mode(struct eh_event_link_info *links,
		uint8_t nb_links)
{
	struct port_drv_mode_data data[RTE_MAX_ETHPORTS];
	unsigned int nb_rx = 0;
	struct rte_mbuf *pkt;
	struct rte_event ev;
	uint32_t lcore_id;
	int32_t socket_id;
	uint16_t port_id;

	/* Check if we have links registered for this lcore */
	if (nb_links == 0) {
		/* No links registered - exit */
		return;
	}

	/* Zero the whole per-port table, not just its first entry */
	memset(data, 0, sizeof(data));

	/* Get core ID */
	lcore_id = rte_lcore_id();

	/* Get socket ID */
	socket_id = rte_lcore_to_socket_id(lcore_id);

	/*
	 * Prepare security sessions table. In outbound driver mode
	 * we always use the first session configured for a given port
	 */
	prepare_out_sessions_tbl(socket_ctx[socket_id].sa_out, data,
				 RTE_MAX_ETHPORTS);

	RTE_LOG(INFO, IPSEC,
		"Launching event mode worker (non-burst - Tx internal port - "
		"driver mode) on lcore %d\n", lcore_id);

	/* We have valid links */

	/* Check if it's single link */
	if (nb_links != 1) {
		RTE_LOG(INFO, IPSEC,
			"Multiple links not supported. Using first link\n");
	}

	RTE_LOG(INFO, IPSEC, " -- lcoreid=%u event_port_id=%u\n", lcore_id,
			links[0].event_port_id);
	while (!force_quit) {
		/* Read packet from event queues */
		nb_rx = rte_event_dequeue_burst(links[0].eventdev_id,
				links[0].event_port_id,
				&ev,	/* events */
				1,	/* nb_events */
				0	/* timeout_ticks */);

		if (nb_rx == 0)
			continue;

		pkt = ev.mbuf;
		port_id = pkt->port;

		rte_prefetch0(rte_pktmbuf_mtod(pkt, void *));

		/* Process packet */
		ipsec_event_pre_forward(pkt, port_id);

		if (!is_unprotected_port(port_id)) {

			if (unlikely(!data[port_id].sess)) {
				rte_pktmbuf_free(pkt);
				continue;
			}

			/* Save security session */
			rte_security_set_pkt_metadata(data[port_id].ctx,
						      data[port_id].sess, pkt,
						      NULL);

			/* Mark the packet for Tx security offload */
			pkt->ol_flags |= PKT_TX_SEC_OFFLOAD;

			/* Provide L2 len for Outbound processing */
			pkt->l2_len = RTE_ETHER_HDR_LEN;
		}

		/*
		 * Since tx internal port is available, events can be
		 * directly enqueued to the adapter and it would be
		 * internally submitted to the eth device.
		 */
		rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
				links[0].event_port_id,
				&ev,	/* events */
				1,	/* nb_events */
				0	/* flags */);
	}
}

/*
 * Event mode worker
 * Operating parameters : non-burst - Tx internal port - app mode
 */
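/*
 * In app mode the worker runs the full IPsec path in software per
 * event: SP lookup, SA validation, routing and MAC rewrite, while the
 * crypto itself is done by the inline protocol offload in hardware.
 */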
static void
ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
		uint8_t nb_links)
{
	struct lcore_conf_ev_tx_int_port_wrkr lconf;
	unsigned int nb_rx = 0;
	struct rte_event ev;
	uint32_t lcore_id;
	int32_t socket_id;
	int ret;

	/* Check if we have links registered for this lcore */
	if (nb_links == 0) {
		/* No links registered - exit */
		return;
	}

	/* We have valid links */

	/* Get core ID */
	lcore_id = rte_lcore_id();

	/* Get socket ID */
	socket_id = rte_lcore_to_socket_id(lcore_id);

	/* Save routing table */
	lconf.rt.rt4_ctx = socket_ctx[socket_id].rt_ip4;
	lconf.rt.rt6_ctx = socket_ctx[socket_id].rt_ip6;
	lconf.inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in;
	lconf.inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in;
	lconf.inbound.sa_ctx = socket_ctx[socket_id].sa_in;
	lconf.inbound.session_pool = socket_ctx[socket_id].session_pool;
	lconf.inbound.session_priv_pool =
			socket_ctx[socket_id].session_priv_pool;
	lconf.outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out;
	lconf.outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out;
	lconf.outbound.sa_ctx = socket_ctx[socket_id].sa_out;
	lconf.outbound.session_pool = socket_ctx[socket_id].session_pool;
	lconf.outbound.session_priv_pool =
			socket_ctx[socket_id].session_priv_pool;

	RTE_LOG(INFO, IPSEC,
		"Launching event mode worker (non-burst - Tx internal port - "
		"app mode) on lcore %d\n", lcore_id);

	/* Check if it's single link */
	if (nb_links != 1) {
		RTE_LOG(INFO, IPSEC,
			"Multiple links not supported. Using first link\n");
	}

	RTE_LOG(INFO, IPSEC, " -- lcoreid=%u event_port_id=%u\n", lcore_id,
		links[0].event_port_id);

	while (!force_quit) {
		/* Read packet from event queues */
		nb_rx = rte_event_dequeue_burst(links[0].eventdev_id,
				links[0].event_port_id,
				&ev,     /* events */
				1,       /* nb_events */
				0        /* timeout_ticks */);

		if (nb_rx == 0)
			continue;

		if (unlikely(ev.event_type != RTE_EVENT_TYPE_ETHDEV)) {
			RTE_LOG(ERR, IPSEC, "Invalid event type %u\n",
				ev.event_type);

			continue;
		}

		if (is_unprotected_port(ev.mbuf->port))
			ret = process_ipsec_ev_inbound(&lconf.inbound,
							&lconf.rt, &ev);
		else
			ret = process_ipsec_ev_outbound(&lconf.outbound,
							&lconf.rt, &ev);
		if (ret != PKT_FORWARDED)
			/* The pkt has been dropped */
			continue;

		/*
		 * Since tx internal port is available, events can be
		 * directly enqueued to the adapter and it would be
		 * internally submitted to the eth device.
		 */
		rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
				links[0].event_port_id,
				&ev,	/* events */
				1,	/* nb_events */
				0	/* flags */);
	}
}

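/*
 * Register the supported worker threads along with their capability
 * profile (burst mode, Tx internal port, IPsec mode). eh_launch_worker()
 * uses these to pick the worker matching the event device's
 * capabilities and the configured mode.
 */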
static uint8_t
ipsec_eventmode_populate_wrkr_params(struct eh_app_worker_params *wrkrs)
{
	struct eh_app_worker_params *wrkr;
	uint8_t nb_wrkr_param = 0;

	/* Save workers */
	wrkr = wrkrs;

	/* Non-burst - Tx internal port - driver mode */
	wrkr->cap.burst = EH_RX_TYPE_NON_BURST;
	wrkr->cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
	wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_DRIVER;
	wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_drv_mode;
	wrkr++;
	nb_wrkr_param++;

	/* Non-burst - Tx internal port - app mode */
	wrkr->cap.burst = EH_RX_TYPE_NON_BURST;
	wrkr->cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
	wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_APP;
	wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_app_mode;
	nb_wrkr_param++;

	return nb_wrkr_param;
}

static void
ipsec_eventmode_worker(struct eh_conf *conf)
{
	struct eh_app_worker_params ipsec_wrkr[IPSEC_EVENTMODE_WORKERS] = {
					{{{0} }, NULL } };
	uint8_t nb_wrkr_param;

	/* Populate ipsec_wrkr params */
	nb_wrkr_param = ipsec_eventmode_populate_wrkr_params(ipsec_wrkr);

	/*
	 * Launch correct worker after checking
	 * the event device's capabilities.
	 */
	eh_launch_worker(conf, ipsec_wrkr, nb_wrkr_param);
}

int ipsec_launch_one_lcore(void *args)
{
	struct eh_conf *conf;

	conf = (struct eh_conf *)args;

	if (conf->mode == EH_PKT_TRANSFER_MODE_POLL) {
		/* Run in poll mode */
		ipsec_poll_mode_worker();
	} else if (conf->mode == EH_PKT_TRANSFER_MODE_EVENT) {
		/* Run in event mode */
		ipsec_eventmode_worker(conf);
	}
	return 0;
}