xref: /dpdk/examples/ipsec-secgw/ipsec_worker.c (revision 4965dda045d372e5001a3a9dd8714032dab8f782)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2016 Intel Corporation
3  * Copyright (C) 2020 Marvell International Ltd.
4  */
5 #include <rte_event_eth_tx_adapter.h>
6 
7 #include "event_helper.h"
8 #include "ipsec.h"
9 #include "ipsec-secgw.h"
10 
11 static inline void
12 ipsec_event_pre_forward(struct rte_mbuf *m, unsigned int port_id)
13 {
14 	/* Save the destination port in the mbuf */
15 	m->port = port_id;
16 
17 	/* Save eth queue for Tx */
18 	rte_event_eth_tx_adapter_txq_set(m, 0);
19 }
20 
21 static inline void
22 prepare_out_sessions_tbl(struct sa_ctx *sa_out,
23 		struct rte_security_session **sess_tbl, uint16_t size)
24 {
25 	struct rte_ipsec_session *pri_sess;
26 	struct ipsec_sa *sa;
27 	uint32_t i;
28 
29 	if (!sa_out)
30 		return;
31 
32 	for (i = 0; i < sa_out->nb_sa; i++) {
33 
34 		sa = &sa_out->sa[i];
35 		if (!sa)
36 			continue;
37 
38 		pri_sess = ipsec_get_primary_session(sa);
39 		if (!pri_sess)
40 			continue;
41 
42 		if (pri_sess->type !=
43 			RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
44 
45 			RTE_LOG(ERR, IPSEC, "Invalid session type %d\n",
46 				pri_sess->type);
47 			continue;
48 		}
49 
50 		if (sa->portid >= size) {
51 			RTE_LOG(ERR, IPSEC,
52 				"Port id >= than table size %d, %d\n",
53 				sa->portid, size);
54 			continue;
55 		}
56 
57 		/* Use only first inline session found for a given port */
58 		if (sess_tbl[sa->portid])
59 			continue;
60 		sess_tbl[sa->portid] = pri_sess->security.ses;
61 	}
62 }
63 
64 /*
65  * Event mode exposes various operating modes depending on the
66  * capabilities of the event device and the operating mode
67  * selected.
68  */
69 
70 /* Workers registered */
71 #define IPSEC_EVENTMODE_WORKERS		1
72 
/*
 * Event mode worker
 * Operating parameters : non-burst - Tx internal port - driver mode
 *
 * Dequeues events one at a time from the first linked event port and
 * forwards the mbufs through the Tx adapter.  Packets arriving on
 * protected ports get the port's inline security session attached and
 * are flagged for Tx security offload; the IPsec transformation itself
 * is performed by the ethdev/driver (hence "driver mode").  Runs until
 * force_quit is set.
 */
static void
ipsec_wrkr_non_burst_int_port_drv_mode(struct eh_event_link_info *links,
		uint8_t nb_links)
{
	/* Per-port inline session table, built from the outbound SA ctx */
	struct rte_security_session *sess_tbl[RTE_MAX_ETHPORTS] = { NULL };
	unsigned int nb_rx = 0;
	struct rte_mbuf *pkt;
	struct rte_event ev;
	uint32_t lcore_id;
	int32_t socket_id;
	int16_t port_id;

	/* Check if we have links registered for this lcore */
	if (nb_links == 0) {
		/* No links registered - exit */
		return;
	}

	/* Get core ID */
	lcore_id = rte_lcore_id();

	/* Get socket ID (selects the NUMA-local SA context below) */
	socket_id = rte_lcore_to_socket_id(lcore_id);

	/*
	 * Prepare security sessions table. In outbound driver mode
	 * we always use first session configured for a given port
	 */
	prepare_out_sessions_tbl(socket_ctx[socket_id].sa_out, sess_tbl,
			RTE_MAX_ETHPORTS);

	RTE_LOG(INFO, IPSEC,
		"Launching event mode worker (non-burst - Tx internal port - "
		"driver mode) on lcore %d\n", lcore_id);

	/* We have valid links */

	/* Check if it's single link */
	if (nb_links != 1) {
		RTE_LOG(INFO, IPSEC,
			"Multiple links not supported. Using first link\n");
	}

	RTE_LOG(INFO, IPSEC, " -- lcoreid=%u event_port_id=%u\n", lcore_id,
			links[0].event_port_id);
	while (!force_quit) {
		/* Read packet from event queues (non-blocking, one event) */
		nb_rx = rte_event_dequeue_burst(links[0].eventdev_id,
				links[0].event_port_id,
				&ev,	/* events */
				1,	/* nb_events */
				0	/* timeout_ticks */);

		if (nb_rx == 0)
			continue;

		pkt = ev.mbuf;
		port_id = pkt->port;

		rte_prefetch0(rte_pktmbuf_mtod(pkt, void *));

		/*
		 * Process packet: set destination port and Tx queue.
		 * NOTE(review): the destination equals the Rx port here;
		 * presumably the HW/session determines the actual egress
		 * in driver mode - confirm against event helper docs.
		 */
		ipsec_event_pre_forward(pkt, port_id);

		/* Protected port: packet must leave with inline IPsec */
		if (!is_unprotected_port(port_id)) {

			/* No inline session for this port - drop */
			if (unlikely(!sess_tbl[port_id])) {
				rte_pktmbuf_free(pkt);
				continue;
			}

			/* Save security session */
			pkt->udata64 = (uint64_t) sess_tbl[port_id];

			/* Mark the packet for Tx security offload */
			pkt->ol_flags |= PKT_TX_SEC_OFFLOAD;
		}

		/*
		 * Since tx internal port is available, events can be
		 * directly enqueued to the adapter and it would be
		 * internally submitted to the eth device.
		 */
		rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
				links[0].event_port_id,
				&ev,	/* events */
				1,	/* nb_events */
				0	/* flags */);
	}
}
167 
168 static uint8_t
169 ipsec_eventmode_populate_wrkr_params(struct eh_app_worker_params *wrkrs)
170 {
171 	struct eh_app_worker_params *wrkr;
172 	uint8_t nb_wrkr_param = 0;
173 
174 	/* Save workers */
175 	wrkr = wrkrs;
176 
177 	/* Non-burst - Tx internal port - driver mode */
178 	wrkr->cap.burst = EH_RX_TYPE_NON_BURST;
179 	wrkr->cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
180 	wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_DRIVER;
181 	wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_drv_mode;
182 	wrkr++;
183 
184 	return nb_wrkr_param;
185 }
186 
187 static void
188 ipsec_eventmode_worker(struct eh_conf *conf)
189 {
190 	struct eh_app_worker_params ipsec_wrkr[IPSEC_EVENTMODE_WORKERS] = {
191 					{{{0} }, NULL } };
192 	uint8_t nb_wrkr_param;
193 
194 	/* Populate l2fwd_wrkr params */
195 	nb_wrkr_param = ipsec_eventmode_populate_wrkr_params(ipsec_wrkr);
196 
197 	/*
198 	 * Launch correct worker after checking
199 	 * the event device's capabilities.
200 	 */
201 	eh_launch_worker(conf, ipsec_wrkr, nb_wrkr_param);
202 }
203 
204 int ipsec_launch_one_lcore(void *args)
205 {
206 	struct eh_conf *conf;
207 
208 	conf = (struct eh_conf *)args;
209 
210 	if (conf->mode == EH_PKT_TRANSFER_MODE_POLL) {
211 		/* Run in poll mode */
212 		ipsec_poll_mode_worker();
213 	} else if (conf->mode == EH_PKT_TRANSFER_MODE_EVENT) {
214 		/* Run in event mode */
215 		ipsec_eventmode_worker(conf);
216 	}
217 	return 0;
218 }
219