/* xref: /dpdk/drivers/net/softnic/rte_eth_softnic_thread.c (revision fa0a52a70866bb479835fba68252bc2633dc5898) */
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2018 Intel Corporation
3  */
4 
5 #include <stdlib.h>
6 
7 #include <rte_common.h>
8 #include <rte_cycles.h>
9 #include <rte_lcore.h>
10 #include <rte_service_component.h>
11 #include <rte_ring.h>
12 
13 #include "rte_eth_softnic_internals.h"
14 
15 /**
16  * Main thread: data plane thread init
17  */
18 void
softnic_thread_free(struct pmd_internals * softnic)19 softnic_thread_free(struct pmd_internals *softnic)
20 {
21 	uint32_t i;
22 
23 	RTE_LCORE_FOREACH_WORKER(i) {
24 		struct softnic_thread *t = &softnic->thread[i];
25 
26 		/* MSGQs */
27 		rte_ring_free(t->msgq_req);
28 
29 		rte_ring_free(t->msgq_rsp);
30 	}
31 }
32 
33 int
softnic_thread_init(struct pmd_internals * softnic)34 softnic_thread_init(struct pmd_internals *softnic)
35 {
36 	uint32_t i;
37 
38 	for (i = 0; i < RTE_MAX_LCORE; i++) {
39 		char ring_name[NAME_MAX];
40 		struct rte_ring *msgq_req, *msgq_rsp;
41 		struct softnic_thread *t = &softnic->thread[i];
42 		struct softnic_thread_data *t_data = &softnic->thread_data[i];
43 		uint32_t cpu_id = rte_lcore_to_socket_id(i);
44 
45 		/* MSGQs */
46 		snprintf(ring_name, sizeof(ring_name), "%s-TH%u-REQ",
47 			softnic->params.name,
48 			i);
49 
50 		msgq_req = rte_ring_create(ring_name,
51 			THREAD_MSGQ_SIZE,
52 			cpu_id,
53 			RING_F_SP_ENQ | RING_F_SC_DEQ);
54 
55 		if (msgq_req == NULL) {
56 			softnic_thread_free(softnic);
57 			return -1;
58 		}
59 
60 		snprintf(ring_name, sizeof(ring_name), "%s-TH%u-RSP",
61 			softnic->params.name,
62 			i);
63 
64 		msgq_rsp = rte_ring_create(ring_name,
65 			THREAD_MSGQ_SIZE,
66 			cpu_id,
67 			RING_F_SP_ENQ | RING_F_SC_DEQ);
68 
69 		if (msgq_rsp == NULL) {
70 			softnic_thread_free(softnic);
71 			return -1;
72 		}
73 
74 		/* Main thread records */
75 		t->msgq_req = msgq_req;
76 		t->msgq_rsp = msgq_rsp;
77 		t->service_id = UINT32_MAX;
78 
79 		/* Data plane thread records */
80 		t_data->n_pipelines = 0;
81 		t_data->msgq_req = msgq_req;
82 		t_data->msgq_rsp = msgq_rsp;
83 		t_data->timer_period =
84 			(rte_get_tsc_hz() * THREAD_TIMER_PERIOD_MS) / 1000;
85 		t_data->time_next = rte_get_tsc_cycles() + t_data->timer_period;
86 	}
87 
88 	return 0;
89 }
90 
91 static inline int
thread_is_valid(struct pmd_internals * softnic,uint32_t thread_id)92 thread_is_valid(struct pmd_internals *softnic, uint32_t thread_id)
93 {
94 	if (thread_id >= RTE_MAX_LCORE)
95 		return 0; /* FALSE */
96 
97 	if (thread_id == rte_get_main_lcore())
98 		return 0; /* FALSE */
99 
100 	if (softnic->params.sc && rte_lcore_has_role(thread_id, ROLE_SERVICE))
101 		return 1; /* TRUE */
102 	if (!softnic->params.sc && rte_lcore_has_role(thread_id, ROLE_RTE))
103 		return 1; /* TRUE */
104 
105 	return 0; /* FALSE */
106 }
107 
108 static inline int
thread_is_running(uint32_t thread_id)109 thread_is_running(uint32_t thread_id)
110 {
111 	enum rte_lcore_state_t thread_state;
112 
113 	thread_state = rte_eal_get_lcore_state(thread_id);
114 	return (thread_state == RUNNING)? 1 : 0;
115 }
116 
/* Forward declaration: the service/run callback executed by data plane
 * lcores (defined at the bottom of this file). */
static int32_t
rte_pmd_softnic_run_internal(void *arg);
119 
120 static inline int
thread_sc_service_up(struct pmd_internals * softnic,uint32_t thread_id)121 thread_sc_service_up(struct pmd_internals *softnic, uint32_t thread_id)
122 {
123 	struct rte_service_spec service_params;
124 	struct softnic_thread *t = &softnic->thread[thread_id];
125 	struct rte_eth_dev *dev;
126 	int status;
127 
128 	/* service params */
129 	dev = rte_eth_dev_get_by_name(softnic->params.name);
130 	if (!dev)
131 		return -EINVAL;
132 
133 	snprintf(service_params.name, sizeof(service_params.name), "%s_%u",
134 		softnic->params.name,
135 		thread_id);
136 	service_params.callback = rte_pmd_softnic_run_internal;
137 	service_params.callback_userdata = dev;
138 	service_params.capabilities = 0;
139 	service_params.socket_id = (int)softnic->params.cpu_id;
140 
141 	/* service register */
142 	status = rte_service_component_register(&service_params, &t->service_id);
143 	if (status)
144 		return status;
145 
146 	status = rte_service_component_runstate_set(t->service_id, 1);
147 	if (status) {
148 		rte_service_component_unregister(t->service_id);
149 		t->service_id = UINT32_MAX;
150 		return status;
151 	}
152 
153 	status = rte_service_runstate_set(t->service_id, 1);
154 	if (status) {
155 		rte_service_component_runstate_set(t->service_id, 0);
156 		rte_service_component_unregister(t->service_id);
157 		t->service_id = UINT32_MAX;
158 		return status;
159 	}
160 
161 	/* service map to thread */
162 	status = rte_service_map_lcore_set(t->service_id, thread_id, 1);
163 	if (status) {
164 		rte_service_runstate_set(t->service_id, 0);
165 		rte_service_component_runstate_set(t->service_id, 0);
166 		rte_service_component_unregister(t->service_id);
167 		t->service_id = UINT32_MAX;
168 		return status;
169 	}
170 
171 	return 0;
172 }
173 
174 static inline void
thread_sc_service_down(struct pmd_internals * softnic,uint32_t thread_id)175 thread_sc_service_down(struct pmd_internals *softnic, uint32_t thread_id)
176 {
177 	struct softnic_thread *t = &softnic->thread[thread_id];
178 
179 	/* service unmap from thread */
180 	rte_service_map_lcore_set(t->service_id, thread_id, 0);
181 
182 	/* service unregister */
183 	rte_service_runstate_set(t->service_id, 0);
184 	rte_service_component_runstate_set(t->service_id, 0);
185 	rte_service_component_unregister(t->service_id);
186 
187 	t->service_id = UINT32_MAX;
188 }
189 
190 void
softnic_thread_pipeline_disable_all(struct pmd_internals * softnic)191 softnic_thread_pipeline_disable_all(struct pmd_internals *softnic)
192 {
193 	uint32_t thread_id;
194 
195 	for (thread_id = 0; thread_id < RTE_MAX_LCORE; thread_id++) {
196 		struct softnic_thread_data *td = &softnic->thread_data[thread_id];
197 
198 		if (!thread_is_valid(softnic, thread_id))
199 			continue;
200 
201 		if (softnic->params.sc && td->n_pipelines)
202 			thread_sc_service_down(softnic, thread_id);
203 
204 		td->n_pipelines = 0;
205 	}
206 }
207 
208 /**
209  * Main thread & data plane threads: message passing
210  */
/* Request types understood by the data plane message handler. */
enum thread_req_type {
	THREAD_REQ_PIPELINE_ENABLE = 0,
	THREAD_REQ_PIPELINE_DISABLE,
	THREAD_REQ_MAX
};

/* Request sent from the main thread to a data plane thread. The same
 * buffer is recycled in place as the response (see thread_msg_alloc(),
 * which sizes it for the larger of the two). */
struct thread_msg_req {
	enum thread_req_type type;

	union {
		struct {
			struct rte_swx_pipeline *p; /* pipeline to attach */
		} pipeline_enable;

		struct {
			struct rte_swx_pipeline *p; /* pipeline to detach */
		} pipeline_disable;
	};
};

/* Response returned by the data plane thread: 0 on success, -1 on error. */
struct thread_msg_rsp {
	int status;
};
234 
235 /**
236  * Main thread
237  */
238 static struct thread_msg_req *
thread_msg_alloc(void)239 thread_msg_alloc(void)
240 {
241 	size_t size = RTE_MAX(sizeof(struct thread_msg_req),
242 		sizeof(struct thread_msg_rsp));
243 
244 	return calloc(1, size);
245 }
246 
/* Release a message buffer (a request recycled as a response).
 * free(NULL) is a harmless no-op, so no guard is needed. */
static void
thread_msg_free(struct thread_msg_rsp *rsp)
{
	free(rsp);
}
252 
253 static struct thread_msg_rsp *
thread_msg_send_recv(struct pmd_internals * softnic,uint32_t thread_id,struct thread_msg_req * req)254 thread_msg_send_recv(struct pmd_internals *softnic,
255 	uint32_t thread_id,
256 	struct thread_msg_req *req)
257 {
258 	struct softnic_thread *t = &softnic->thread[thread_id];
259 	struct rte_ring *msgq_req = t->msgq_req;
260 	struct rte_ring *msgq_rsp = t->msgq_rsp;
261 	struct thread_msg_rsp *rsp;
262 	int status;
263 
264 	/* send */
265 	do {
266 		status = rte_ring_sp_enqueue(msgq_req, req);
267 	} while (status == -ENOBUFS);
268 
269 	/* recv */
270 	do {
271 		status = rte_ring_sc_dequeue(msgq_rsp, (void **)&rsp);
272 	} while (status != 0);
273 
274 	return rsp;
275 }
276 
/**
 * Main thread: attach pipeline *p* to data plane thread *thread_id*.
 *
 * If the target thread is not currently running, its records are updated
 * directly from this thread; otherwise an enable request is posted on the
 * thread's message queue and this call blocks until the response arrives.
 *
 * Returns 0 on success, -1 / error status on failure.
 */
int
softnic_thread_pipeline_enable(struct pmd_internals *softnic,
	uint32_t thread_id,
	struct pipeline *p)
{
	struct thread_msg_req *req;
	struct thread_msg_rsp *rsp;
	uint32_t n_pipelines;
	int status;

	/* Check input params: valid thread, non-NULL pipeline that is not
	 * already enabled on some thread. */
	if (!thread_is_valid(softnic, thread_id) ||
		(p == NULL) ||
		p->enabled)
		return -1;

	/* Per-thread pipeline capacity check. */
	n_pipelines = softnic_pipeline_thread_count(softnic, thread_id);
	if (n_pipelines >= THREAD_PIPELINES_MAX)
		return -1;

	/* Service-core mode: bring the per-thread service up when this is
	 * the first pipeline attached to the thread. */
	if (softnic->params.sc && (n_pipelines == 0)) {
		status = thread_sc_service_up(softnic, thread_id);
		if (status)
			return status;
	}

	/* Target thread not running: update its records directly, no
	 * message passing needed. */
	if (!thread_is_running(thread_id)) {
		struct softnic_thread_data *td = &softnic->thread_data[thread_id];

		/* Data plane thread */
		td->p[td->n_pipelines] = p->p;
		td->n_pipelines++;

		/* Pipeline */
		p->thread_id = thread_id;
		p->enabled = 1;

		return 0;
	}

	/* Allocate request */
	req = thread_msg_alloc();
	if (req == NULL)
		return -1;

	/* Write request */
	req->type = THREAD_REQ_PIPELINE_ENABLE;
	req->pipeline_enable.p = p->p;

	/* Send request and wait for response (blocking) */
	rsp = thread_msg_send_recv(softnic, thread_id, req);

	/* Read response */
	status = rsp->status;

	/* Free response (same buffer as the request) */
	thread_msg_free(rsp);

	/* Request completion.
	 * NOTE(review): if the request fails (or allocation fails above), a
	 * service brought up for the first pipeline is not torn down here —
	 * confirm whether that is intentional. */
	if (status)
		return status;

	p->thread_id = thread_id;
	p->enabled = 1;

	return 0;
}
344 
/**
 * Main thread: detach pipeline *p* from data plane thread *thread_id*.
 *
 * If the target thread is not running, its records are updated directly;
 * otherwise a disable request is sent over the thread's message queue and
 * this call blocks for the response. In service-core mode the per-thread
 * service is torn down when the last pipeline is removed.
 *
 * Returns 0 on success (including when *p* is already disabled),
 * -1 / error status on failure.
 */
int
softnic_thread_pipeline_disable(struct pmd_internals *softnic,
	uint32_t thread_id,
	struct pipeline *p)
{
	struct thread_msg_req *req;
	struct thread_msg_rsp *rsp;
	uint32_t n_pipelines;
	int status;

	/* Check input params: valid thread, non-NULL pipeline, and if the
	 * pipeline is enabled it must be enabled on this very thread. */
	if (!thread_is_valid(softnic, thread_id) ||
		(p == NULL) ||
		(p->enabled && (p->thread_id != thread_id)))
		return -1;

	/* Already disabled: nothing to do. */
	if (p->enabled == 0)
		return 0;

	/* Target thread not running: update its records directly. */
	if (!thread_is_running(thread_id)) {
		struct softnic_thread_data *td = &softnic->thread_data[thread_id];
		uint32_t i;

		for (i = 0; i < td->n_pipelines; i++) {
			if (td->p[i] != p->p)
				continue;

			/* Data plane thread: remove by moving the last entry
			 * into the vacated slot (order not preserved). */
			if (i < td->n_pipelines - 1)
				td->p[i] = td->p[td->n_pipelines - 1];

			td->n_pipelines--;

			/* Pipeline */
			p->enabled = 0;

			break;
		}

		/* Last pipeline removed: stop the per-thread service. */
		if (softnic->params.sc && (td->n_pipelines == 0))
			thread_sc_service_down(softnic, thread_id);

		return 0;
	}

	/* Allocate request */
	req = thread_msg_alloc();
	if (req == NULL)
		return -1;

	/* Write request */
	req->type = THREAD_REQ_PIPELINE_DISABLE;
	req->pipeline_disable.p = p->p;

	/* Send request and wait for response (blocking) */
	rsp = thread_msg_send_recv(softnic, thread_id, req);

	/* Read response */
	status = rsp->status;

	/* Free response (same buffer as the request) */
	thread_msg_free(rsp);

	/* Request completion */
	if (status)
		return status;

	p->enabled = 0;

	/* Last pipeline removed: stop the per-thread service. */
	n_pipelines = softnic_pipeline_thread_count(softnic, thread_id);
	if (softnic->params.sc && (n_pipelines == 0))
		thread_sc_service_down(softnic, thread_id);

	return 0;
}
420 
421 /**
422  * Data plane threads: message handling
423  */
424 static inline struct thread_msg_req *
thread_msg_recv(struct rte_ring * msgq_req)425 thread_msg_recv(struct rte_ring *msgq_req)
426 {
427 	struct thread_msg_req *req;
428 
429 	int status = rte_ring_sc_dequeue(msgq_req, (void **)&req);
430 
431 	if (status != 0)
432 		return NULL;
433 
434 	return req;
435 }
436 
/* Data plane thread: post *rsp* on the response queue, spinning while
 * the ring is full (-ENOBUFS). */
static inline void
thread_msg_send(struct rte_ring *msgq_rsp,
	struct thread_msg_rsp *rsp)
{
	while (rte_ring_sp_enqueue(msgq_rsp, rsp) == -ENOBUFS)
		;
}
447 
/* Data plane thread: handle a PIPELINE_ENABLE request by appending the
 * pipeline to this thread's run list. The request buffer is recycled in
 * place as the response. Always succeeds (status 0). */
static struct thread_msg_rsp *
thread_msg_handle_pipeline_enable(struct softnic_thread_data *t,
	struct thread_msg_req *req)
{
	struct thread_msg_rsp *rsp = (struct thread_msg_rsp *)req;

	/* Request.
	 * NOTE(review): no bounds check against THREAD_PIPELINES_MAX here;
	 * the main thread validates the count before sending — confirm no
	 * other sender exists. */
	t->p[t->n_pipelines] = req->pipeline_enable.p;
	t->n_pipelines++;

	/* Response */
	rsp->status = 0;
	return rsp;
}
462 
463 static struct thread_msg_rsp *
thread_msg_handle_pipeline_disable(struct softnic_thread_data * t,struct thread_msg_req * req)464 thread_msg_handle_pipeline_disable(struct softnic_thread_data *t,
465 	struct thread_msg_req *req)
466 {
467 	struct thread_msg_rsp *rsp = (struct thread_msg_rsp *)req;
468 	uint32_t n_pipelines = t->n_pipelines;
469 	struct rte_swx_pipeline *pipeline = req->pipeline_disable.p;
470 	uint32_t i;
471 
472 	/* find pipeline */
473 	for (i = 0; i < n_pipelines; i++) {
474 		if (t->p[i] != pipeline)
475 			continue;
476 
477 		if (i < n_pipelines - 1)
478 			t->p[i] = t->p[n_pipelines - 1];
479 
480 		t->n_pipelines--;
481 
482 		rsp->status = 0;
483 		return rsp;
484 	}
485 
486 	/* should not get here */
487 	rsp->status = 0;
488 	return rsp;
489 }
490 
491 static void
thread_msg_handle(struct softnic_thread_data * t)492 thread_msg_handle(struct softnic_thread_data *t)
493 {
494 	for ( ; ; ) {
495 		struct thread_msg_req *req;
496 		struct thread_msg_rsp *rsp;
497 
498 		req = thread_msg_recv(t->msgq_req);
499 		if (req == NULL)
500 			break;
501 
502 		switch (req->type) {
503 		case THREAD_REQ_PIPELINE_ENABLE:
504 			rsp = thread_msg_handle_pipeline_enable(t, req);
505 			break;
506 
507 		case THREAD_REQ_PIPELINE_DISABLE:
508 			rsp = thread_msg_handle_pipeline_disable(t, req);
509 			break;
510 
511 		default:
512 			rsp = (struct thread_msg_rsp *)req;
513 			rsp->status = -1;
514 		}
515 
516 		thread_msg_send(t->msgq_rsp, rsp);
517 	}
518 }
519 
520 /**
521  * Data plane threads: main
522  */
/* Data plane thread main loop body: run every attached pipeline for one
 * quantum, then (periodically) service the control-plane message queue.
 * Invoked either as a service-core callback or via rte_pmd_softnic_run().
 * *arg* is the softnic ethdev (set as callback_userdata at service
 * registration). Always returns 0. */
static int32_t
rte_pmd_softnic_run_internal(void *arg)
{
	struct rte_eth_dev *dev = arg;
	struct pmd_internals *softnic;
	struct softnic_thread_data *t;
	uint32_t thread_id, j;

	softnic = dev->data->dev_private;
	thread_id = rte_lcore_id();
	t = &softnic->thread_data[thread_id];
	t->iter++;

	/* Data Plane: one quantum of instructions per pipeline. */
	for (j = 0; j < t->n_pipelines; j++)
		rte_swx_pipeline_run(t->p[j], PIPELINE_INSTR_QUANTA);

	/* Control Plane: check the TSC timer only every 16th iteration to
	 * keep rte_get_tsc_cycles() off the hot path. */
	if ((t->iter & 0xFLLU) == 0) {
		uint64_t time = rte_get_tsc_cycles();
		uint64_t time_next = t->time_next;

		if (time < time_next)
			return 0;

		/* Thread message queues */
		thread_msg_handle(t);

		/* Advance from the scheduled deadline (not from "now") so
		 * the period does not drift. */
		t->time_next = time_next + t->timer_period;
	}

	return 0;
}
556 
557 int
rte_pmd_softnic_run(uint16_t port_id)558 rte_pmd_softnic_run(uint16_t port_id)
559 {
560 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
561 
562 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
563 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
564 #endif
565 
566 	return (int)rte_pmd_softnic_run_internal(dev);
567 }
568