xref: /dpdk/lib/eal/common/eal_common_thread.c (revision f9dfb59edbccae50e7c5508348aa2b4b84413048)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2014 Intel Corporation
3  */
4 
5 #include <errno.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <pthread.h>
9 #include <sched.h>
10 #include <assert.h>
11 #include <string.h>
12 
13 #include <rte_eal_trace.h>
14 #include <rte_errno.h>
15 #include <rte_lcore.h>
16 #include <rte_log.h>
17 #include <rte_memory.h>
18 #include <rte_trace_point.h>
19 
20 #include "eal_internal_cfg.h"
21 #include "eal_private.h"
22 #include "eal_thread.h"
23 #include "eal_trace.h"
24 
/* lcore id of the calling thread; LCORE_ID_ANY for unregistered threads */
RTE_DEFINE_PER_LCORE(unsigned int, _lcore_id) = LCORE_ID_ANY;
/* cached system thread id; -1 until filled by rte_gettid() */
RTE_DEFINE_PER_LCORE(int, _thread_id) = -1;
/* NUMA socket of the calling thread, kept in sync by
 * thread_update_affinity() */
static RTE_DEFINE_PER_LCORE(unsigned int, _socket_id) =
	(unsigned int)SOCKET_ID_ANY;
/* cached CPU affinity of the calling thread, same sync point as above */
static RTE_DEFINE_PER_LCORE(rte_cpuset_t, _cpuset);
30 
/* Return the NUMA socket id cached in TLS for the calling thread
 * (written by thread_update_affinity(); SOCKET_ID_ANY if unknown). */
unsigned rte_socket_id(void)
{
	return RTE_PER_LCORE(_socket_id);
}
35 
36 static int
37 eal_cpuset_socket_id(rte_cpuset_t *cpusetp)
38 {
39 	unsigned cpu = 0;
40 	int socket_id = SOCKET_ID_ANY;
41 	int sid;
42 
43 	if (cpusetp == NULL)
44 		return SOCKET_ID_ANY;
45 
46 	do {
47 		if (!CPU_ISSET(cpu, cpusetp))
48 			continue;
49 
50 		if (socket_id == SOCKET_ID_ANY)
51 			socket_id = eal_cpu_socket_id(cpu);
52 
53 		sid = eal_cpu_socket_id(cpu);
54 		if (socket_id != sid) {
55 			socket_id = SOCKET_ID_ANY;
56 			break;
57 		}
58 
59 	} while (++cpu < CPU_SETSIZE);
60 
61 	return socket_id;
62 }
63 
64 static void
65 thread_update_affinity(rte_cpuset_t *cpusetp)
66 {
67 	unsigned int lcore_id = rte_lcore_id();
68 
69 	/* store socket_id in TLS for quick access */
70 	RTE_PER_LCORE(_socket_id) =
71 		eal_cpuset_socket_id(cpusetp);
72 
73 	/* store cpuset in TLS for quick access */
74 	memmove(&RTE_PER_LCORE(_cpuset), cpusetp,
75 		sizeof(rte_cpuset_t));
76 
77 	if (lcore_id != (unsigned)LCORE_ID_ANY) {
78 		/* EAL thread will update lcore_config */
79 		lcore_config[lcore_id].socket_id = RTE_PER_LCORE(_socket_id);
80 		memmove(&lcore_config[lcore_id].cpuset, cpusetp,
81 			sizeof(rte_cpuset_t));
82 	}
83 }
84 
85 int
86 rte_thread_set_affinity(rte_cpuset_t *cpusetp)
87 {
88 	if (pthread_setaffinity_np(pthread_self(), sizeof(rte_cpuset_t),
89 			cpusetp) != 0) {
90 		RTE_LOG(ERR, EAL, "pthread_setaffinity_np failed\n");
91 		return -1;
92 	}
93 
94 	thread_update_affinity(cpusetp);
95 	return 0;
96 }
97 
/*
 * Copy the cached affinity of the calling thread into *cpusetp.
 * This reads the TLS cache written by thread_update_affinity(), not
 * the live kernel affinity; cpusetp must not be NULL.
 */
void
rte_thread_get_affinity(rte_cpuset_t *cpusetp)
{
	assert(cpusetp);
	memmove(cpusetp, &RTE_PER_LCORE(_cpuset),
		sizeof(rte_cpuset_t));
}
105 
106 int
107 eal_thread_dump_affinity(rte_cpuset_t *cpuset, char *str, unsigned int size)
108 {
109 	unsigned cpu;
110 	int ret;
111 	unsigned int out = 0;
112 
113 	for (cpu = 0; cpu < CPU_SETSIZE; cpu++) {
114 		if (!CPU_ISSET(cpu, cpuset))
115 			continue;
116 
117 		ret = snprintf(str + out,
118 			       size - out, "%u,", cpu);
119 		if (ret < 0 || (unsigned)ret >= size - out) {
120 			/* string will be truncated */
121 			ret = -1;
122 			goto exit;
123 		}
124 
125 		out += ret;
126 	}
127 
128 	ret = 0;
129 exit:
130 	/* remove the last separator */
131 	if (out > 0)
132 		str[out - 1] = '\0';
133 
134 	return ret;
135 }
136 
137 int
138 eal_thread_dump_current_affinity(char *str, unsigned int size)
139 {
140 	rte_cpuset_t cpuset;
141 
142 	rte_thread_get_affinity(&cpuset);
143 	return eal_thread_dump_affinity(&cpuset, str, size);
144 }
145 
/*
 * Per-thread EAL initialization, shared by EAL workers, control threads
 * and registered non-EAL threads: record the lcore id in TLS, cache the
 * system TID, refresh the affinity caches, and allocate the per-thread
 * trace memory.
 */
void
__rte_thread_init(unsigned int lcore_id, rte_cpuset_t *cpuset)
{
	/* set the lcore ID in per-lcore memory area */
	RTE_PER_LCORE(_lcore_id) = lcore_id;

	/* acquire system unique id */
	rte_gettid();

	thread_update_affinity(cpuset);

	__rte_trace_mem_per_thread_alloc();
}
159 
/*
 * Undo __rte_thread_init(): release the per-thread trace memory and
 * detach the calling thread from its lcore id.
 */
void
__rte_thread_uninit(void)
{
	trace_mem_per_thread_free();

	RTE_PER_LCORE(_lcore_id) = LCORE_ID_ANY;
}
167 
/*
 * Main loop of EAL worker threads.
 *
 * arg carries the worker's lcore id packed as a uintptr_t. The thread
 * initializes its TLS state, then loops forever: wait for a command
 * from the main lcore, run the requested function, and publish the
 * return value through lcore_config[]. Never returns.
 */
__rte_noreturn void *
eal_thread_loop(void *arg)
{
	unsigned int lcore_id = (uintptr_t)arg;
	char cpuset[RTE_CPU_AFFINITY_STR_LEN];
	int ret;

	__rte_thread_init(lcore_id, &lcore_config[lcore_id].cpuset);

	ret = eal_thread_dump_current_affinity(cpuset, sizeof(cpuset));
	RTE_LOG(DEBUG, EAL, "lcore %u is ready (tid=%zx;cpuset=[%s%s])\n",
		lcore_id, (uintptr_t)pthread_self(), cpuset,
		ret == 0 ? "" : "...");

	rte_eal_trace_thread_lcore_ready(lcore_id, cpuset);

	/* read on our pipe to get commands */
	while (1) {
		lcore_function_t *f;
		void *fct_arg;

		/* block until the main lcore issues a command */
		eal_thread_wait_command();

		/* Set the state to 'RUNNING'. Use release order
		 * since 'state' variable is used as the guard variable.
		 */
		__atomic_store_n(&lcore_config[lcore_id].state, RUNNING,
			__ATOMIC_RELEASE);

		eal_thread_ack_command();

		/* Load 'f' with acquire order to ensure that
		 * the memory operations from the main thread
		 * are accessed only after update to 'f' is visible.
		 * Wait till the update to 'f' is visible to the worker.
		 */
		while ((f = __atomic_load_n(&lcore_config[lcore_id].f,
				__ATOMIC_ACQUIRE)) == NULL)
			rte_pause();

		/* call the function and store the return value */
		fct_arg = lcore_config[lcore_id].arg;
		ret = f(fct_arg);
		lcore_config[lcore_id].ret = ret;
		lcore_config[lcore_id].f = NULL;
		lcore_config[lcore_id].arg = NULL;

		/* Store the state with release order to ensure that
		 * the memory operations from the worker thread
		 * are completed before the state is updated.
		 * Use 'state' as the guard variable.
		 */
		__atomic_store_n(&lcore_config[lcore_id].state, WAIT,
			__ATOMIC_RELEASE);
	}

	/* never reached */
	/* pthread_exit(NULL); */
	/* return NULL; */
}
229 
/* Launch handshake states shared between rte_ctrl_thread_create() and
 * the new control thread. */
enum __rte_ctrl_thread_status {
	CTRL_THREAD_LAUNCHING, /* Yet to call pthread_create function */
	CTRL_THREAD_RUNNING, /* Control thread is running successfully */
	CTRL_THREAD_ERROR /* Control thread encountered an error */
};

/* Allocated and freed by the creator (rte_ctrl_thread_create()); the
 * new thread must not touch it after publishing a status other than
 * CTRL_THREAD_LAUNCHING, since the creator then reads 'ret' and frees it.
 */
struct rte_thread_ctrl_params {
	void *(*start_routine)(void *);
	void *arg;
	int ret;
	/* Control thread status.
	 * If the status is CTRL_THREAD_ERROR, 'ret' has the error code.
	 */
	enum __rte_ctrl_thread_status ctrl_thread_status;
};
245 
/*
 * Entry point of control threads created by rte_ctrl_thread_create().
 *
 * Copies start_routine/arg out of *params first, applies the control
 * cpuset, then publishes the launch status with release order. The
 * creator frees *params as soon as the status leaves
 * CTRL_THREAD_LAUNCHING, so params must not be accessed after the
 * status stores below.
 */
static void *ctrl_thread_init(void *arg)
{
	struct internal_config *internal_conf =
		eal_get_internal_configuration();
	rte_cpuset_t *cpuset = &internal_conf->ctrl_cpuset;
	struct rte_thread_ctrl_params *params = arg;
	void *(*start_routine)(void *) = params->start_routine;
	void *routine_arg = params->arg;

	__rte_thread_init(rte_lcore_id(), cpuset);
	/* 'ret' must be written before the release store that publishes it */
	params->ret = pthread_setaffinity_np(pthread_self(), sizeof(*cpuset),
		cpuset);
	if (params->ret != 0) {
		__atomic_store_n(&params->ctrl_thread_status,
			CTRL_THREAD_ERROR, __ATOMIC_RELEASE);
		return NULL;
	}

	__atomic_store_n(&params->ctrl_thread_status,
		CTRL_THREAD_RUNNING, __ATOMIC_RELEASE);

	return start_routine(routine_arg);
}
269 
/*
 * Create a control thread bound to the control cpuset (not to a
 * data-plane lcore). The launch is synchronous: this call waits until
 * the new thread reports success or failure of its own setup.
 *
 * Returns 0 on success, a negative errno value on failure (from
 * pthread_create or from the new thread's affinity setup).
 */
int
rte_ctrl_thread_create(pthread_t *thread, const char *name,
		const pthread_attr_t *attr,
		void *(*start_routine)(void *), void *arg)
{
	struct rte_thread_ctrl_params *params;
	enum __rte_ctrl_thread_status ctrl_thread_status;
	int ret;

	params = malloc(sizeof(*params));
	if (!params)
		return -ENOMEM;

	params->start_routine = start_routine;
	params->arg = arg;
	params->ret = 0;
	params->ctrl_thread_status = CTRL_THREAD_LAUNCHING;

	ret = pthread_create(thread, attr, ctrl_thread_init, (void *)params);
	if (ret != 0) {
		free(params);
		return -ret;
	}

	/* name setting is best-effort: failure only logs */
	if (name != NULL) {
		ret = rte_thread_setname(*thread, name);
		if (ret < 0)
			RTE_LOG(DEBUG, EAL,
				"Cannot set name for ctrl thread\n");
	}

	/* Wait for the control thread to initialize successfully */
	while ((ctrl_thread_status =
			__atomic_load_n(&params->ctrl_thread_status,
			__ATOMIC_ACQUIRE)) == CTRL_THREAD_LAUNCHING) {
		/* Yield the CPU. Using sched_yield call requires maintaining
		 * another implementation for Windows as sched_yield is not
		 * supported on Windows.
		 */
		rte_delay_us_sleep(1);
	}

	/* Check if the control thread encountered an error */
	if (ctrl_thread_status == CTRL_THREAD_ERROR) {
		/* ctrl thread is exiting */
		pthread_join(*thread, NULL);
	}

	/* once the status left LAUNCHING, the new thread no longer touches
	 * params, so it is safe to read 'ret' and free it here */
	ret = params->ret;
	free(params);

	return -ret;
}
323 
/*
 * Register the calling non-EAL thread with EAL: allocate a free lcore
 * id and set up the thread's TLS (lcore id, TID, affinity caches).
 *
 * Returns 0 on success, -1 with rte_errno set to EINVAL (EAL not
 * initialized, or multi-process in use) or ENOMEM (no free lcore slot).
 */
int
rte_thread_register(void)
{
	unsigned int lcore_id;
	rte_cpuset_t cpuset;

	/* EAL init flushes all lcores, we can't register before. */
	if (eal_get_internal_configuration()->init_complete != 1) {
		RTE_LOG(DEBUG, EAL, "Called %s before EAL init.\n", __func__);
		rte_errno = EINVAL;
		return -1;
	}
	if (!rte_mp_disable()) {
		RTE_LOG(ERR, EAL, "Multiprocess in use, registering non-EAL threads is not supported.\n");
		rte_errno = EINVAL;
		return -1;
	}
	/* fall back to an empty cpuset if the affinity cannot be read */
	if (pthread_getaffinity_np(pthread_self(), sizeof(cpuset),
			&cpuset) != 0)
		CPU_ZERO(&cpuset);
	lcore_id = eal_lcore_non_eal_allocate();
	if (lcore_id >= RTE_MAX_LCORE)
		lcore_id = LCORE_ID_ANY;
	/* TLS is initialized even on allocation failure, deliberately:
	 * lcore_id is then LCORE_ID_ANY */
	__rte_thread_init(lcore_id, &cpuset);
	if (lcore_id == LCORE_ID_ANY) {
		rte_errno = ENOMEM;
		return -1;
	}
	RTE_LOG(DEBUG, EAL, "Registered non-EAL thread as lcore %u.\n",
		lcore_id);
	return 0;
}
356 
357 void
358 rte_thread_unregister(void)
359 {
360 	unsigned int lcore_id = rte_lcore_id();
361 
362 	if (lcore_id != LCORE_ID_ANY)
363 		eal_lcore_non_eal_release(lcore_id);
364 	__rte_thread_uninit();
365 	if (lcore_id != LCORE_ID_ANY)
366 		RTE_LOG(DEBUG, EAL, "Unregistered non-EAL thread (was lcore %u).\n",
367 			lcore_id);
368 }
369 
370 int
371 rte_thread_attr_init(rte_thread_attr_t *attr)
372 {
373 	if (attr == NULL)
374 		return EINVAL;
375 
376 	CPU_ZERO(&attr->cpuset);
377 	attr->priority = RTE_THREAD_PRIORITY_NORMAL;
378 
379 	return 0;
380 }
381 
382 int
383 rte_thread_attr_set_priority(rte_thread_attr_t *thread_attr,
384 		enum rte_thread_priority priority)
385 {
386 	if (thread_attr == NULL)
387 		return EINVAL;
388 
389 	thread_attr->priority = priority;
390 
391 	return 0;
392 }
393 
394 int
395 rte_thread_attr_set_affinity(rte_thread_attr_t *thread_attr,
396 		rte_cpuset_t *cpuset)
397 {
398 	if (thread_attr == NULL)
399 		return EINVAL;
400 
401 	if (cpuset == NULL)
402 		return EINVAL;
403 
404 	thread_attr->cpuset = *cpuset;
405 
406 	return 0;
407 }
408 
409 int
410 rte_thread_attr_get_affinity(rte_thread_attr_t *thread_attr,
411 		rte_cpuset_t *cpuset)
412 {
413 	if (thread_attr == NULL)
414 		return EINVAL;
415 
416 	if (cpuset == NULL)
417 		return EINVAL;
418 
419 	*cpuset = thread_attr->cpuset;
420 
421 	return 0;
422 }
423