xref: /dpdk/drivers/net/softnic/rte_eth_softnic_thread.c (revision 202905f3ee4db9df45f00da90da99e0655e60ef6)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2018 Intel Corporation
 */

#include <stdlib.h>

#include <rte_cycles.h>
#include <rte_lcore.h>
#include <rte_ring.h>

#include "rte_eth_softnic_internals.h"

/**
 * Master thread: data plane thread init and free
 */
void
softnic_thread_free(struct pmd_internals *softnic)
{
	uint32_t i;

	RTE_LCORE_FOREACH_SLAVE(i) {
		struct softnic_thread *t = &softnic->thread[i];

		/* MSGQs */
		if (t->msgq_req)
			rte_ring_free(t->msgq_req);

		if (t->msgq_rsp)
			rte_ring_free(t->msgq_rsp);
	}
}

int
softnic_thread_init(struct pmd_internals *softnic)
{
	uint32_t i;

	RTE_LCORE_FOREACH_SLAVE(i) {
		char ring_name[NAME_MAX];
		struct rte_ring *msgq_req, *msgq_rsp;
		struct softnic_thread *t = &softnic->thread[i];
		struct softnic_thread_data *t_data = &softnic->thread_data[i];
		uint32_t cpu_id = rte_lcore_to_socket_id(i);
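
		/*
		 * The request/response rings below are created on the NUMA
		 * socket of the worker lcore and are single producer / single
		 * consumer: the master thread is the only enqueuer of requests
		 * and this data plane thread the only dequeuer (and vice versa
		 * for responses).
		 */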

		/* MSGQs */
		snprintf(ring_name, sizeof(ring_name), "%s-TH%u-REQ",
			softnic->params.name,
			i);

		msgq_req = rte_ring_create(ring_name,
			THREAD_MSGQ_SIZE,
			cpu_id,
			RING_F_SP_ENQ | RING_F_SC_DEQ);

		if (msgq_req == NULL) {
			softnic_thread_free(softnic);
			return -1;
		}

		snprintf(ring_name, sizeof(ring_name), "%s-TH%u-RSP",
			softnic->params.name,
			i);

		msgq_rsp = rte_ring_create(ring_name,
			THREAD_MSGQ_SIZE,
			cpu_id,
			RING_F_SP_ENQ | RING_F_SC_DEQ);

		if (msgq_rsp == NULL) {
			softnic_thread_free(softnic);
			return -1;
		}

		/* Master thread records */
		t->msgq_req = msgq_req;
		t->msgq_rsp = msgq_rsp;
		t->enabled = 1;

		/* Data plane thread records */
		t_data->n_pipelines = 0;
		t_data->msgq_req = msgq_req;
		t_data->msgq_rsp = msgq_rsp;
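		/* Convert the timer period from milliseconds to TSC cycles */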
		t_data->timer_period =
			(rte_get_tsc_hz() * THREAD_TIMER_PERIOD_MS) / 1000;
		t_data->time_next = rte_get_tsc_cycles() + t_data->timer_period;
		t_data->time_next_min = t_data->time_next;
	}

	return 0;
}

static inline int
thread_is_running(uint32_t thread_id)
{
	enum rte_lcore_state_t thread_state;

	thread_state = rte_eal_get_lcore_state(thread_id);
	return (thread_state == RUNNING) ? 1 : 0;
}

/**
 * Pipeline is running when:
 *    (A) Pipeline is mapped to a data plane thread AND
 *    (B) Its data plane thread is in RUNNING state.
 */
static inline int
pipeline_is_running(struct pipeline *p)
{
	if (p->enabled == 0)
		return 0;

	return thread_is_running(p->thread_id);
}

/**
 * Master thread & data plane threads: message passing
 */
enum thread_req_type {
	THREAD_REQ_MAX
};
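
/*
 * No thread-level requests are defined yet (THREAD_REQ_MAX only), so
 * thread_msg_handle() below simply rejects any message it receives.
 */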

struct thread_msg_req {
	enum thread_req_type type;
};

struct thread_msg_rsp {
	int status;
};

/**
 * Data plane threads: message handling
 */
static inline struct thread_msg_req *
thread_msg_recv(struct rte_ring *msgq_req)
{
	struct thread_msg_req *req;

	int status = rte_ring_sc_dequeue(msgq_req, (void **)&req);

	if (status != 0)
		return NULL;

	return req;
}

static inline void
thread_msg_send(struct rte_ring *msgq_rsp,
	struct thread_msg_rsp *rsp)
{
	int status;

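	/* Retry until there is room in the response ring, i.e. until the
	 * master thread has dequeued earlier responses.
	 */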
	do {
		status = rte_ring_sp_enqueue(msgq_rsp, rsp);
	} while (status == -ENOBUFS);
}

static void
thread_msg_handle(struct softnic_thread_data *t)
{
	for ( ; ; ) {
		struct thread_msg_req *req;
		struct thread_msg_rsp *rsp;

		req = thread_msg_recv(t->msgq_req);
		if (req == NULL)
			break;

		switch (req->type) {
		default:
			rsp = (struct thread_msg_rsp *)req;
			rsp->status = -1;
		}

		thread_msg_send(t->msgq_rsp, rsp);
	}
}

/**
 * Master thread & data plane threads: message passing
 */
enum pipeline_req_type {
	/* Port IN */
	PIPELINE_REQ_PORT_IN_ENABLE,
	PIPELINE_REQ_PORT_IN_DISABLE,

	PIPELINE_REQ_MAX
};

struct pipeline_msg_req {
	enum pipeline_req_type type;
	uint32_t id; /* Port IN, port OUT or table ID */
};

struct pipeline_msg_rsp {
	int status;
};

/**
 * Master thread
 */
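/*
 * The same heap buffer carries both the request and the response: the data
 * plane handler overwrites the request in place, so the allocation is sized
 * to the larger of the two message structures.
 */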
static struct pipeline_msg_req *
pipeline_msg_alloc(void)
{
	size_t size = RTE_MAX(sizeof(struct pipeline_msg_req),
		sizeof(struct pipeline_msg_rsp));

	return calloc(1, size);
}

static void
pipeline_msg_free(struct pipeline_msg_rsp *rsp)
{
	free(rsp);
}

static struct pipeline_msg_rsp *
pipeline_msg_send_recv(struct pipeline *p,
	struct pipeline_msg_req *req)
{
	struct rte_ring *msgq_req = p->msgq_req;
	struct rte_ring *msgq_rsp = p->msgq_rsp;
	struct pipeline_msg_rsp *rsp;
	int status;

	/* send */
	do {
		status = rte_ring_sp_enqueue(msgq_req, req);
	} while (status == -ENOBUFS);

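	/* Block until the data plane thread services its pipeline message
	 * queue (which it does on its next timer expiry) and posts the
	 * response.
	 */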
	/* recv */
	do {
		status = rte_ring_sc_dequeue(msgq_rsp, (void **)&rsp);
	} while (status != 0);

	return rsp;
}

int
softnic_pipeline_port_in_enable(struct pmd_internals *softnic,
	const char *pipeline_name,
	uint32_t port_id)
{
	struct pipeline *p;
	struct pipeline_msg_req *req;
	struct pipeline_msg_rsp *rsp;
	int status;

	/* Check input params */
	if (pipeline_name == NULL)
		return -1;

	p = softnic_pipeline_find(softnic, pipeline_name);
	if (p == NULL ||
		port_id >= p->n_ports_in)
		return -1;

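	/* If the pipeline is not being polled by a data plane thread, apply
	 * the change directly; otherwise request it over the message queue so
	 * that only the owning thread touches the running pipeline.
	 */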
	if (!pipeline_is_running(p)) {
		status = rte_pipeline_port_in_enable(p->p, port_id);
		return status;
	}

	/* Allocate request */
	req = pipeline_msg_alloc();
	if (req == NULL)
		return -1;

	/* Write request */
	req->type = PIPELINE_REQ_PORT_IN_ENABLE;
	req->id = port_id;

	/* Send request and wait for response */
	rsp = pipeline_msg_send_recv(p, req);
	if (rsp == NULL)
		return -1;

	/* Read response */
	status = rsp->status;

	/* Free response */
	pipeline_msg_free(rsp);

	return status;
}

int
softnic_pipeline_port_in_disable(struct pmd_internals *softnic,
	const char *pipeline_name,
	uint32_t port_id)
{
	struct pipeline *p;
	struct pipeline_msg_req *req;
	struct pipeline_msg_rsp *rsp;
	int status;

	/* Check input params */
	if (pipeline_name == NULL)
		return -1;

	p = softnic_pipeline_find(softnic, pipeline_name);
	if (p == NULL ||
		port_id >= p->n_ports_in)
		return -1;

	if (!pipeline_is_running(p)) {
		status = rte_pipeline_port_in_disable(p->p, port_id);
		return status;
	}

	/* Allocate request */
	req = pipeline_msg_alloc();
	if (req == NULL)
		return -1;

	/* Write request */
	req->type = PIPELINE_REQ_PORT_IN_DISABLE;
	req->id = port_id;

	/* Send request and wait for response */
	rsp = pipeline_msg_send_recv(p, req);
	if (rsp == NULL)
		return -1;

	/* Read response */
	status = rsp->status;

	/* Free response */
	pipeline_msg_free(rsp);

	return status;
}

/**
 * Data plane threads: message handling
 */
static inline struct pipeline_msg_req *
pipeline_msg_recv(struct rte_ring *msgq_req)
{
	struct pipeline_msg_req *req;

	int status = rte_ring_sc_dequeue(msgq_req, (void **)&req);

	if (status != 0)
		return NULL;

	return req;
}

static inline void
pipeline_msg_send(struct rte_ring *msgq_rsp,
	struct pipeline_msg_rsp *rsp)
{
	int status;

	do {
		status = rte_ring_sp_enqueue(msgq_rsp, rsp);
	} while (status == -ENOBUFS);
}

static struct pipeline_msg_rsp *
pipeline_msg_handle_port_in_enable(struct pipeline_data *p,
	struct pipeline_msg_req *req)
{
	struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req;
	uint32_t port_id = req->id;

	rsp->status = rte_pipeline_port_in_enable(p->p,
		port_id);

	return rsp;
}

static struct pipeline_msg_rsp *
pipeline_msg_handle_port_in_disable(struct pipeline_data *p,
	struct pipeline_msg_req *req)
{
	struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req;
	uint32_t port_id = req->id;

	rsp->status = rte_pipeline_port_in_disable(p->p,
		port_id);

	return rsp;
}

static void
pipeline_msg_handle(struct pipeline_data *p)
{
	for ( ; ; ) {
		struct pipeline_msg_req *req;
		struct pipeline_msg_rsp *rsp;

		req = pipeline_msg_recv(p->msgq_req);
		if (req == NULL)
			break;

		switch (req->type) {
		case PIPELINE_REQ_PORT_IN_ENABLE:
			rsp = pipeline_msg_handle_port_in_enable(p, req);
			break;

		case PIPELINE_REQ_PORT_IN_DISABLE:
			rsp = pipeline_msg_handle_port_in_disable(p, req);
			break;

		default:
			rsp = (struct pipeline_msg_rsp *)req;
			rsp->status = -1;
		}

		pipeline_msg_send(p->msgq_rsp, rsp);
	}
}

/**
 * Data plane threads: main
 */
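/*
 * A minimal usage sketch (illustrative only; the loop condition and the port
 * id variable below are the application's own):
 *
 *	while (!force_quit)
 *		rte_pmd_softnic_run(softnic_port_id);
 *
 * Each call runs every pipeline mapped to the current lcore once; every 16th
 * call the pending control plane messages are also serviced, subject to the
 * per-pipeline and per-thread timers.
 */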
int
rte_pmd_softnic_run(uint16_t port_id)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct pmd_internals *softnic;
	struct softnic_thread_data *t;
	uint32_t thread_id, j;

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
#endif

	softnic = dev->data->dev_private;
	thread_id = rte_lcore_id();
	t = &softnic->thread_data[thread_id];
	t->iter++;

	/* Data Plane */
	for (j = 0; j < t->n_pipelines; j++)
		rte_pipeline_run(t->p[j]);

	/* Control Plane */
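	/*
	 * Message queues are only checked once every 16 iterations and only
	 * when the earliest of the pipeline/thread timers has expired, so the
	 * cost of control plane handling on the data plane is kept low.
	 */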
	if ((t->iter & 0xFLLU) == 0) {
		uint64_t time = rte_get_tsc_cycles();
		uint64_t time_next_min = UINT64_MAX;

		if (time < t->time_next_min)
			return 0;

		/* Pipeline message queues */
		for (j = 0; j < t->n_pipelines; j++) {
			struct pipeline_data *p =
				&t->pipeline_data[j];
			uint64_t time_next = p->time_next;

			if (time_next <= time) {
				pipeline_msg_handle(p);
				rte_pipeline_flush(p->p);
				time_next = time + p->timer_period;
				p->time_next = time_next;
			}

			if (time_next < time_next_min)
				time_next_min = time_next;
		}

		/* Thread message queues */
		{
			uint64_t time_next = t->time_next;

			if (time_next <= time) {
				thread_msg_handle(t);
				time_next = time + t->timer_period;
				t->time_next = time_next;
			}

			if (time_next < time_next_min)
				time_next_min = time_next;
		}

		t->time_next_min = time_next_min;
	}

	return 0;
}