xref: /dpdk/lib/eal/common/eal_common_thread.c (revision 592ab76f9f0f41993bebb44da85c37750a93ece9)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2014 Intel Corporation
3  */
4 
5 #include <stdio.h>
6 #include <stdlib.h>
7 #include <pthread.h>
8 #include <sched.h>
9 #include <assert.h>
10 #include <string.h>
11 
12 #include <rte_eal_trace.h>
13 #include <rte_errno.h>
14 #include <rte_lcore.h>
15 #include <rte_log.h>
16 #include <rte_memory.h>
17 #include <rte_trace_point.h>
18 
19 #include "eal_internal_cfg.h"
20 #include "eal_private.h"
21 #include "eal_thread.h"
22 #include "eal_trace.h"
23 
/* Per-thread lcore id; LCORE_ID_ANY for threads not bound to an lcore. */
RTE_DEFINE_PER_LCORE(unsigned int, _lcore_id) = LCORE_ID_ANY;
/* Per-thread system tid cache; -1 until first use — presumably filled by
 * rte_gettid(), which is called from __rte_thread_init() below; the
 * variable itself is only defined here. TODO confirm against rte_gettid().
 */
RTE_DEFINE_PER_LCORE(int, _thread_id) = -1;
/* Cached NUMA socket id derived from this thread's CPU affinity;
 * SOCKET_ID_ANY when unknown or when the affinity spans sockets
 * (see eal_cpuset_socket_id()).
 */
static RTE_DEFINE_PER_LCORE(unsigned int, _socket_id) =
	(unsigned int)SOCKET_ID_ANY;
/* Cached CPU affinity mask of this thread (set by thread_update_affinity()). */
static RTE_DEFINE_PER_LCORE(rte_cpuset_t, _cpuset);
29 
/* Return the NUMA socket id cached in TLS for the calling thread
 * (maintained by thread_update_affinity()); SOCKET_ID_ANY if never set.
 */
unsigned rte_socket_id(void)
{
	return RTE_PER_LCORE(_socket_id);
}
34 
35 static int
36 eal_cpuset_socket_id(rte_cpuset_t *cpusetp)
37 {
38 	unsigned cpu = 0;
39 	int socket_id = SOCKET_ID_ANY;
40 	int sid;
41 
42 	if (cpusetp == NULL)
43 		return SOCKET_ID_ANY;
44 
45 	do {
46 		if (!CPU_ISSET(cpu, cpusetp))
47 			continue;
48 
49 		if (socket_id == SOCKET_ID_ANY)
50 			socket_id = eal_cpu_socket_id(cpu);
51 
52 		sid = eal_cpu_socket_id(cpu);
53 		if (socket_id != sid) {
54 			socket_id = SOCKET_ID_ANY;
55 			break;
56 		}
57 
58 	} while (++cpu < CPU_SETSIZE);
59 
60 	return socket_id;
61 }
62 
63 static void
64 thread_update_affinity(rte_cpuset_t *cpusetp)
65 {
66 	unsigned int lcore_id = rte_lcore_id();
67 
68 	/* store socket_id in TLS for quick access */
69 	RTE_PER_LCORE(_socket_id) =
70 		eal_cpuset_socket_id(cpusetp);
71 
72 	/* store cpuset in TLS for quick access */
73 	memmove(&RTE_PER_LCORE(_cpuset), cpusetp,
74 		sizeof(rte_cpuset_t));
75 
76 	if (lcore_id != (unsigned)LCORE_ID_ANY) {
77 		/* EAL thread will update lcore_config */
78 		lcore_config[lcore_id].socket_id = RTE_PER_LCORE(_socket_id);
79 		memmove(&lcore_config[lcore_id].cpuset, cpusetp,
80 			sizeof(rte_cpuset_t));
81 	}
82 }
83 
84 int
85 rte_thread_set_affinity(rte_cpuset_t *cpusetp)
86 {
87 	if (pthread_setaffinity_np(pthread_self(), sizeof(rte_cpuset_t),
88 			cpusetp) != 0) {
89 		RTE_LOG(ERR, EAL, "pthread_setaffinity_np failed\n");
90 		return -1;
91 	}
92 
93 	thread_update_affinity(cpusetp);
94 	return 0;
95 }
96 
97 void
98 rte_thread_get_affinity(rte_cpuset_t *cpusetp)
99 {
100 	assert(cpusetp);
101 	memmove(cpusetp, &RTE_PER_LCORE(_cpuset),
102 		sizeof(rte_cpuset_t));
103 }
104 
105 int
106 eal_thread_dump_affinity(rte_cpuset_t *cpuset, char *str, unsigned int size)
107 {
108 	unsigned cpu;
109 	int ret;
110 	unsigned int out = 0;
111 
112 	for (cpu = 0; cpu < CPU_SETSIZE; cpu++) {
113 		if (!CPU_ISSET(cpu, cpuset))
114 			continue;
115 
116 		ret = snprintf(str + out,
117 			       size - out, "%u,", cpu);
118 		if (ret < 0 || (unsigned)ret >= size - out) {
119 			/* string will be truncated */
120 			ret = -1;
121 			goto exit;
122 		}
123 
124 		out += ret;
125 	}
126 
127 	ret = 0;
128 exit:
129 	/* remove the last separator */
130 	if (out > 0)
131 		str[out - 1] = '\0';
132 
133 	return ret;
134 }
135 
136 int
137 eal_thread_dump_current_affinity(char *str, unsigned int size)
138 {
139 	rte_cpuset_t cpuset;
140 
141 	rte_thread_get_affinity(&cpuset);
142 	return eal_thread_dump_affinity(&cpuset, str, size);
143 }
144 
/*
 * Initialize per-thread EAL state: lcore id, system tid, affinity
 * caches and per-thread trace memory.  Called for EAL workers,
 * control threads and registered non-EAL threads.
 */
void
__rte_thread_init(unsigned int lcore_id, rte_cpuset_t *cpuset)
{
	/* set the lcore ID in per-lcore memory area */
	RTE_PER_LCORE(_lcore_id) = lcore_id;

	/* acquire system unique id */
	rte_gettid();

	/* cache socket id and cpuset in TLS (and lcore_config if bound) */
	thread_update_affinity(cpuset);

	__rte_trace_mem_per_thread_alloc();
}
158 
/*
 * Undo __rte_thread_init() for the calling thread: release the trace
 * memory first (it may need the thread context), then drop the TLS
 * lcore id back to LCORE_ID_ANY.
 */
void
__rte_thread_uninit(void)
{
	trace_mem_per_thread_free();

	RTE_PER_LCORE(_lcore_id) = LCORE_ID_ANY;
}
166 
/* Main loop of EAL worker threads: wait for a command from the main
 * thread, run the requested function, publish the result, repeat.
 * 'arg' carries the worker's lcore id as a uintptr_t.  Never returns.
 */
__rte_noreturn void *
eal_thread_loop(void *arg)
{
	unsigned int lcore_id = (uintptr_t)arg;
	char cpuset[RTE_CPU_AFFINITY_STR_LEN];
	int ret;

	/* set up TLS (lcore id, tid, affinity caches, trace memory) */
	__rte_thread_init(lcore_id, &lcore_config[lcore_id].cpuset);

	ret = eal_thread_dump_current_affinity(cpuset, sizeof(cpuset));
	/* NOTE(review): "%zx" expects size_t but the argument is uintptr_t;
	 * these match on common platforms — confirm for exotic targets.
	 */
	RTE_LOG(DEBUG, EAL, "lcore %u is ready (tid=%zx;cpuset=[%s%s])\n",
		lcore_id, (uintptr_t)pthread_self(), cpuset,
		ret == 0 ? "" : "...");

	rte_eal_trace_thread_lcore_ready(lcore_id, cpuset);

	/* read on our pipe to get commands */
	while (1) {
		lcore_function_t *f;
		void *fct_arg;

		/* block until the main thread issues a command */
		eal_thread_wait_command();

		/* Set the state to 'RUNNING'. Use release order
		 * since 'state' variable is used as the guard variable.
		 */
		__atomic_store_n(&lcore_config[lcore_id].state, RUNNING,
			__ATOMIC_RELEASE);

		/* let the main thread know the command was received */
		eal_thread_ack_command();

		/* Load 'f' with acquire order to ensure that
		 * the memory operations from the main thread
		 * are accessed only after update to 'f' is visible.
		 * Wait till the update to 'f' is visible to the worker.
		 */
		while ((f = __atomic_load_n(&lcore_config[lcore_id].f,
				__ATOMIC_ACQUIRE)) == NULL)
			rte_pause();

		/* call the function and store the return value */
		fct_arg = lcore_config[lcore_id].arg;
		ret = f(fct_arg);
		lcore_config[lcore_id].ret = ret;
		lcore_config[lcore_id].f = NULL;
		lcore_config[lcore_id].arg = NULL;

		/* Store the state with release order to ensure that
		 * the memory operations from the worker thread
		 * are completed before the state is updated.
		 * Use 'state' as the guard variable.
		 */
		__atomic_store_n(&lcore_config[lcore_id].state, WAIT,
			__ATOMIC_RELEASE);
	}

	/* never reached */
	/* pthread_exit(NULL); */
	/* return NULL; */
}
228 
/* Handshake states between rte_ctrl_thread_create() and the control
 * thread it spawns (stored in struct rte_thread_ctrl_params).
 */
enum __rte_ctrl_thread_status {
	CTRL_THREAD_LAUNCHING, /* Yet to call pthread_create function */
	CTRL_THREAD_RUNNING, /* Control thread is running successfully */
	CTRL_THREAD_ERROR /* Control thread encountered an error */
};
234 
/* Launch parameters shared between rte_ctrl_thread_create() and
 * ctrl_thread_init().  Allocated and freed by the creator; the child
 * must not touch it after publishing a terminal status.
 */
struct rte_thread_ctrl_params {
	/* user entry point, run once initialization succeeds */
	void *(*start_routine)(void *);
	/* argument forwarded to start_routine */
	void *arg;
	int ret;
	/* Control thread status.
	 * If the status is CTRL_THREAD_ERROR, 'ret' has the error code.
	 */
	enum __rte_ctrl_thread_status ctrl_thread_status;
};
244 
/*
 * Entry point wrapper for control threads: initialize per-thread EAL
 * state, pin the thread to the internal control cpuset, report status
 * back to the launching thread, then run the user routine.
 */
static void *ctrl_thread_init(void *arg)
{
	struct internal_config *internal_conf =
		eal_get_internal_configuration();
	rte_cpuset_t *cpuset = &internal_conf->ctrl_cpuset;
	struct rte_thread_ctrl_params *params = arg;
	/* copy out what we need before publishing status: the launcher
	 * frees 'params' once it observes we are past LAUNCHING
	 */
	void *(*start_routine)(void *) = params->start_routine;
	void *routine_arg = params->arg;

	/* seed TLS with the intended cpuset, then actually apply it */
	__rte_thread_init(rte_lcore_id(), cpuset);
	params->ret = pthread_setaffinity_np(pthread_self(), sizeof(*cpuset),
		cpuset);
	if (params->ret != 0) {
		/* release pairs with the launcher's acquire load of status */
		__atomic_store_n(&params->ctrl_thread_status,
			CTRL_THREAD_ERROR, __ATOMIC_RELEASE);
		return NULL;
	}

	__atomic_store_n(&params->ctrl_thread_status,
		CTRL_THREAD_RUNNING, __ATOMIC_RELEASE);

	return start_routine(routine_arg);
}
268 
/*
 * Spawn a control thread pinned to the internal control cpuset.
 *
 * @param thread
 *   Filled with the pthread id of the new thread.
 * @param name
 *   Optional thread name (best effort: failure is only logged).
 * @param attr
 *   Optional pthread attributes, passed through to pthread_create().
 * @param start_routine
 *   User entry point, run after the thread finished initializing.
 * @param arg
 *   Argument forwarded to start_routine.
 * @return
 *   0 on success, a negative errno-style value on failure.
 */
int
rte_ctrl_thread_create(pthread_t *thread, const char *name,
		const pthread_attr_t *attr,
		void *(*start_routine)(void *), void *arg)
{
	struct rte_thread_ctrl_params *params;
	enum __rte_ctrl_thread_status ctrl_thread_status;
	int ret;

	/* heap-allocated so it stays valid while the child initializes */
	params = malloc(sizeof(*params));
	if (!params)
		return -ENOMEM;

	params->start_routine = start_routine;
	params->arg = arg;
	params->ret = 0;
	params->ctrl_thread_status = CTRL_THREAD_LAUNCHING;

	ret = pthread_create(thread, attr, ctrl_thread_init, (void *)params);
	if (ret != 0) {
		free(params);
		return -ret;
	}

	if (name != NULL) {
		ret = rte_thread_setname(*thread, name);
		if (ret < 0)
			RTE_LOG(DEBUG, EAL,
				"Cannot set name for ctrl thread\n");
	}

	/* Wait for the control thread to initialize successfully */
	while ((ctrl_thread_status =
			__atomic_load_n(&params->ctrl_thread_status,
			__ATOMIC_ACQUIRE)) == CTRL_THREAD_LAUNCHING) {
		/* Yield the CPU. Using sched_yield call requires maintaining
		 * another implementation for Windows as sched_yield is not
		 * supported on Windows.
		 */
		rte_delay_us_sleep(1);
	}

	/* Check if the control thread encountered an error */
	if (ctrl_thread_status == CTRL_THREAD_ERROR) {
		/* ctrl thread is exiting */
		pthread_join(*thread, NULL);
	}

	/* params->ret is 0 on success, the setaffinity error otherwise;
	 * safe to read and free here: the child is done with 'params'
	 * once it published a non-LAUNCHING status.
	 */
	ret = params->ret;
	free(params);

	return -ret;
}
322 
/*
 * Register the calling non-EAL thread with EAL: allocate an lcore id,
 * initialize per-thread state and cache the current OS affinity.
 *
 * @return
 *   0 on success; -1 with rte_errno set to EINVAL (EAL not initialized,
 *   or multi-process in use) or ENOMEM (no free lcore slot).
 */
int
rte_thread_register(void)
{
	unsigned int lcore_id;
	rte_cpuset_t cpuset;

	/* EAL init flushes all lcores, we can't register before. */
	if (eal_get_internal_configuration()->init_complete != 1) {
		RTE_LOG(DEBUG, EAL, "Called %s before EAL init.\n", __func__);
		rte_errno = EINVAL;
		return -1;
	}
	/* registering non-EAL threads is incompatible with multi-process */
	if (!rte_mp_disable()) {
		RTE_LOG(ERR, EAL, "Multiprocess in use, registering non-EAL threads is not supported.\n");
		rte_errno = EINVAL;
		return -1;
	}
	/* best effort: fall back to an empty cpuset if the query fails */
	if (pthread_getaffinity_np(pthread_self(), sizeof(cpuset),
			&cpuset) != 0)
		CPU_ZERO(&cpuset);
	lcore_id = eal_lcore_non_eal_allocate();
	if (lcore_id >= RTE_MAX_LCORE)
		lcore_id = LCORE_ID_ANY;
	/* TLS is initialized even on allocation failure (lcore_id == ANY)
	 * so the thread still ends up in a consistent per-thread state
	 */
	__rte_thread_init(lcore_id, &cpuset);
	if (lcore_id == LCORE_ID_ANY) {
		rte_errno = ENOMEM;
		return -1;
	}
	RTE_LOG(DEBUG, EAL, "Registered non-EAL thread as lcore %u.\n",
		lcore_id);
	return 0;
}
355 
356 void
357 rte_thread_unregister(void)
358 {
359 	unsigned int lcore_id = rte_lcore_id();
360 
361 	if (lcore_id != LCORE_ID_ANY)
362 		eal_lcore_non_eal_release(lcore_id);
363 	__rte_thread_uninit();
364 	if (lcore_id != LCORE_ID_ANY)
365 		RTE_LOG(DEBUG, EAL, "Unregistered non-EAL thread (was lcore %u).\n",
366 			lcore_id);
367 }
368