xref: /dpdk/lib/eal/freebsd/eal_thread.c (revision 2490bb897182f57de80fd924dd3ae48dda819b8c)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2014 Intel Corporation
3  */
4 
5 #include <errno.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <stdint.h>
9 #include <unistd.h>
10 #include <sched.h>
11 #include <pthread_np.h>
12 #include <sys/queue.h>
13 #include <sys/thr.h>
14 
15 #include <rte_debug.h>
16 #include <rte_atomic.h>
17 #include <rte_launch.h>
18 #include <rte_log.h>
19 #include <rte_memory.h>
20 #include <rte_per_lcore.h>
21 #include <rte_eal.h>
22 #include <rte_lcore.h>
23 #include <rte_eal_trace.h>
24 
25 #include "eal_private.h"
26 #include "eal_thread.h"
27 
/*
 * Send a message to a worker lcore identified by worker_id to call a
 * function f with argument arg. Once the execution is done, the
 * remote lcore switches to WAIT state.
 *
 * Returns 0 on success, -EBUSY if the worker is not in WAIT state.
 * Panics on pipe I/O failure (fatal configuration error).
 */
int
rte_eal_remote_launch(int (*f)(void *), void *arg, unsigned worker_id)
{
	int n;
	char c = 0;
	/* per-worker command/ack pipe pair: main writes the command byte on
	 * m2w, worker acknowledges on w2m (see eal_thread_loop).
	 */
	int m2w = lcore_config[worker_id].pipe_main2worker[1];
	int w2m = lcore_config[worker_id].pipe_worker2main[0];
	int rc = -EBUSY;

	/* Check if the worker is in 'WAIT' state. Use acquire order
	 * since 'state' variable is used as the guard variable.
	 */
	if (__atomic_load_n(&lcore_config[worker_id].state,
					__ATOMIC_ACQUIRE) != WAIT)
		goto finish;

	/* 'arg' is published by the release store on 'f' below; the worker
	 * reads 'arg' only after observing a non-NULL 'f' with acquire order.
	 */
	lcore_config[worker_id].arg = arg;
	/* Ensure that all the memory operations are completed
	 * before the worker thread starts running the function.
	 * Use worker thread function as the guard variable.
	 */
	__atomic_store_n(&lcore_config[worker_id].f, f, __ATOMIC_RELEASE);

	/* send message; retry on zero-byte writes and EINTR */
	n = 0;
	while (n == 0 || (n < 0 && errno == EINTR))
		n = write(m2w, &c, 1);
	if (n < 0)
		rte_panic("cannot write on configuration pipe\n");

	/* wait ack: blocks until the worker has moved to RUNNING state */
	do {
		n = read(w2m, &c, 1);
	} while (n < 0 && errno == EINTR);

	if (n <= 0)
		rte_panic("cannot read on configuration pipe\n");

	rc = 0;
finish:
	rte_eal_trace_thread_remote_launch(f, arg, worker_id, rc);
	return rc;
}
76 
77 /* main loop of threads */
78 __rte_noreturn void *
79 eal_thread_loop(__rte_unused void *arg)
80 {
81 	char c;
82 	int n, ret;
83 	unsigned lcore_id;
84 	pthread_t thread_id;
85 	int m2w, w2m;
86 	char cpuset[RTE_CPU_AFFINITY_STR_LEN];
87 
88 	thread_id = pthread_self();
89 
90 	/* retrieve our lcore_id from the configuration structure */
91 	RTE_LCORE_FOREACH_WORKER(lcore_id) {
92 		if (thread_id == lcore_config[lcore_id].thread_id)
93 			break;
94 	}
95 	if (lcore_id == RTE_MAX_LCORE)
96 		rte_panic("cannot retrieve lcore id\n");
97 
98 	m2w = lcore_config[lcore_id].pipe_main2worker[0];
99 	w2m = lcore_config[lcore_id].pipe_worker2main[1];
100 
101 	__rte_thread_init(lcore_id, &lcore_config[lcore_id].cpuset);
102 
103 	ret = eal_thread_dump_current_affinity(cpuset, sizeof(cpuset));
104 	RTE_LOG(DEBUG, EAL, "lcore %u is ready (tid=%p;cpuset=[%s%s])\n",
105 		lcore_id, thread_id, cpuset, ret == 0 ? "" : "...");
106 
107 	rte_eal_trace_thread_lcore_ready(lcore_id, cpuset);
108 
109 	/* read on our pipe to get commands */
110 	while (1) {
111 		lcore_function_t *f;
112 		void *fct_arg;
113 
114 		/* wait command */
115 		do {
116 			n = read(m2w, &c, 1);
117 		} while (n < 0 && errno == EINTR);
118 
119 		if (n <= 0)
120 			rte_panic("cannot read on configuration pipe\n");
121 
122 		/* Set the state to 'RUNNING'. Use release order
123 		 * since 'state' variable is used as the guard variable.
124 		 */
125 		__atomic_store_n(&lcore_config[lcore_id].state, RUNNING,
126 					__ATOMIC_RELEASE);
127 
128 		/* send ack */
129 		n = 0;
130 		while (n == 0 || (n < 0 && errno == EINTR))
131 			n = write(w2m, &c, 1);
132 		if (n < 0)
133 			rte_panic("cannot write on configuration pipe\n");
134 
135 		/* Load 'f' with acquire order to ensure that
136 		 * the memory operations from the main thread
137 		 * are accessed only after update to 'f' is visible.
138 		 * Wait till the update to 'f' is visible to the worker.
139 		 */
140 		while ((f = __atomic_load_n(&lcore_config[lcore_id].f,
141 			__ATOMIC_ACQUIRE)) == NULL)
142 			rte_pause();
143 
144 		/* call the function and store the return value */
145 		fct_arg = lcore_config[lcore_id].arg;
146 		ret = f(fct_arg);
147 		lcore_config[lcore_id].ret = ret;
148 		lcore_config[lcore_id].f = NULL;
149 		lcore_config[lcore_id].arg = NULL;
150 
151 		/* Store the state with release order to ensure that
152 		 * the memory operations from the worker thread
153 		 * are completed before the state is updated.
154 		 * Use 'state' as the guard variable.
155 		 */
156 		__atomic_store_n(&lcore_config[lcore_id].state, WAIT,
157 					__ATOMIC_RELEASE);
158 	}
159 
160 	/* never reached */
161 	/* pthread_exit(NULL); */
162 	/* return NULL; */
163 }
164 
/* return the calling thread's LWP id via the FreeBSD thr_self() syscall */
int rte_sys_gettid(void)
{
	long tid = 0;

	thr_self(&tid);
	return (int)tid;
}
172 
int rte_thread_setname(pthread_t id, const char *name)
{
	/* pthread_set_name_np() has a void return on FreeBSD,
	 * so there is no error to propagate.
	 */
	pthread_set_name_np(id, name);

	return 0;
}
179 
int rte_thread_getname(pthread_t id, char *name, size_t len)
{
	/* not implemented on FreeBSD; silence unused-parameter warnings */
	(void)id;
	(void)name;
	(void)len;

	return -ENOTSUP;
}
188