/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <sched.h>
#include <assert.h>
#include <string.h>

#include <rte_eal_trace.h>
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_trace_point.h>

#include "eal_internal_cfg.h"
#include "eal_private.h"
#include "eal_thread.h"
#include "eal_trace.h"

RTE_DEFINE_PER_LCORE(unsigned int, _lcore_id) = LCORE_ID_ANY;
RTE_DEFINE_PER_LCORE(int, _thread_id) = -1;
static RTE_DEFINE_PER_LCORE(unsigned int, _socket_id) =
	(unsigned int)SOCKET_ID_ANY;
static RTE_DEFINE_PER_LCORE(rte_cpuset_t, _cpuset);

unsigned rte_socket_id(void)
{
	return RTE_PER_LCORE(_socket_id);
}
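
/*
 * Illustrative sketch (not part of the original source): rte_socket_id()
 * returns the NUMA socket cached in TLS for the calling thread. A typical
 * use is keeping allocations local to that socket; rte_malloc_socket() is
 * only one example of a consumer, and the size/alignment values below are
 * assumptions.
 *
 *	void *buf = rte_malloc_socket(NULL, 1024, 0, rte_socket_id());
 */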

static int
eal_cpuset_socket_id(rte_cpuset_t *cpusetp)
{
	unsigned cpu = 0;
	int socket_id = SOCKET_ID_ANY;
	int sid;

	if (cpusetp == NULL)
		return SOCKET_ID_ANY;

	do {
		if (!CPU_ISSET(cpu, cpusetp))
			continue;

		if (socket_id == SOCKET_ID_ANY)
			socket_id = eal_cpu_socket_id(cpu);

		sid = eal_cpu_socket_id(cpu);
		if (socket_id != sid) {
			socket_id = SOCKET_ID_ANY;
			break;
		}

	} while (++cpu < CPU_SETSIZE);

	return socket_id;
}
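
/*
 * Illustrative sketch (not part of the original source): eal_cpuset_socket_id()
 * resolves a cpuset to a single socket only when every CPU in the set lives on
 * the same socket; otherwise it returns SOCKET_ID_ANY. Assuming CPUs 0 and 1
 * are both on socket 0:
 *
 *	rte_cpuset_t set;
 *
 *	CPU_ZERO(&set);
 *	CPU_SET(0, &set);
 *	CPU_SET(1, &set);
 *	// eal_cpuset_socket_id(&set) == 0
 *	// a set spanning two sockets would yield SOCKET_ID_ANY
 */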

static void
thread_update_affinity(rte_cpuset_t *cpusetp)
{
	unsigned int lcore_id = rte_lcore_id();

	/* store socket_id in TLS for quick access */
	RTE_PER_LCORE(_socket_id) =
		eal_cpuset_socket_id(cpusetp);

	/* store cpuset in TLS for quick access */
	memmove(&RTE_PER_LCORE(_cpuset), cpusetp,
		sizeof(rte_cpuset_t));

	if (lcore_id != (unsigned)LCORE_ID_ANY) {
		/* EAL thread will update lcore_config */
		lcore_config[lcore_id].socket_id = RTE_PER_LCORE(_socket_id);
		memmove(&lcore_config[lcore_id].cpuset, cpusetp,
			sizeof(rte_cpuset_t));
	}
}

int
rte_thread_set_affinity(rte_cpuset_t *cpusetp)
{
	if (rte_thread_set_affinity_by_id(rte_thread_self(), cpusetp) != 0) {
		RTE_LOG(ERR, EAL, "rte_thread_set_affinity_by_id failed\n");
		return -1;
	}

	thread_update_affinity(cpusetp);
	return 0;
}

void
rte_thread_get_affinity(rte_cpuset_t *cpusetp)
{
	assert(cpusetp);
	memmove(cpusetp, &RTE_PER_LCORE(_cpuset),
		sizeof(rte_cpuset_t));
}
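
/*
 * Illustrative sketch (not part of the original source): pinning the calling
 * thread with rte_thread_set_affinity() and reading the cached affinity back
 * with rte_thread_get_affinity(). The CPU number is an assumption for the
 * example only; on success the cpuset and socket id are also stored in TLS
 * by thread_update_affinity().
 *
 *	rte_cpuset_t set;
 *
 *	CPU_ZERO(&set);
 *	CPU_SET(2, &set);
 *	if (rte_thread_set_affinity(&set) != 0)
 *		return -1;
 *	rte_thread_get_affinity(&set);	// copies the cpuset cached in TLS
 */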

int
eal_thread_dump_affinity(rte_cpuset_t *cpuset, char *str, unsigned int size)
{
	unsigned cpu;
	int ret;
	unsigned int out = 0;

	for (cpu = 0; cpu < CPU_SETSIZE; cpu++) {
		if (!CPU_ISSET(cpu, cpuset))
			continue;

		ret = snprintf(str + out,
			       size - out, "%u,", cpu);
		if (ret < 0 || (unsigned)ret >= size - out) {
			/* string will be truncated */
			ret = -1;
			goto exit;
		}

		out += ret;
	}

	ret = 0;
exit:
	/* remove the last separator */
	if (out > 0)
		str[out - 1] = '\0';

	return ret;
}

int
eal_thread_dump_current_affinity(char *str, unsigned int size)
{
	rte_cpuset_t cpuset;

	rte_thread_get_affinity(&cpuset);
	return eal_thread_dump_affinity(&cpuset, str, size);
}
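
/*
 * Illustrative sketch (not part of the original source): dumping the current
 * affinity as a comma-separated string, as eal_thread_loop() does below. With
 * CPUs 2 and 4 set, the buffer would contain "2,4".
 *
 *	char cpus[RTE_CPU_AFFINITY_STR_LEN];
 *
 *	if (eal_thread_dump_current_affinity(cpus, sizeof(cpus)) != 0)
 *		RTE_LOG(DEBUG, EAL, "affinity string was truncated\n");
 */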

void
__rte_thread_init(unsigned int lcore_id, rte_cpuset_t *cpuset)
{
	/* set the lcore ID in per-lcore memory area */
	RTE_PER_LCORE(_lcore_id) = lcore_id;

	/* acquire system unique id */
	rte_gettid();

	thread_update_affinity(cpuset);

	__rte_trace_mem_per_thread_alloc();
}

void
__rte_thread_uninit(void)
{
	trace_mem_per_thread_free();

	RTE_PER_LCORE(_lcore_id) = LCORE_ID_ANY;
}

/* main loop of EAL worker threads */
__rte_noreturn uint32_t
eal_thread_loop(void *arg)
{
	unsigned int lcore_id = (uintptr_t)arg;
	char cpuset[RTE_CPU_AFFINITY_STR_LEN];
	int ret;

	__rte_thread_init(lcore_id, &lcore_config[lcore_id].cpuset);

	ret = eal_thread_dump_current_affinity(cpuset, sizeof(cpuset));
	RTE_LOG(DEBUG, EAL, "lcore %u is ready (tid=%zx;cpuset=[%s%s])\n",
		lcore_id, (uintptr_t)pthread_self(), cpuset,
		ret == 0 ? "" : "...");

	rte_eal_trace_thread_lcore_ready(lcore_id, cpuset);

	/* wait for commands from the main thread */
	while (1) {
		lcore_function_t *f;
		void *fct_arg;

		eal_thread_wait_command();

		/* Set the state to 'RUNNING'. Use release order
		 * since 'state' variable is used as the guard variable.
		 */
		__atomic_store_n(&lcore_config[lcore_id].state, RUNNING,
			__ATOMIC_RELEASE);

		eal_thread_ack_command();

		/* Load 'f' with acquire order to ensure that
		 * the memory operations from the main thread
		 * are accessed only after update to 'f' is visible.
		 * Wait till the update to 'f' is visible to the worker.
		 */
		while ((f = __atomic_load_n(&lcore_config[lcore_id].f,
				__ATOMIC_ACQUIRE)) == NULL)
			rte_pause();

		/* call the function and store the return value */
		fct_arg = lcore_config[lcore_id].arg;
		ret = f(fct_arg);
		lcore_config[lcore_id].ret = ret;
		lcore_config[lcore_id].f = NULL;
		lcore_config[lcore_id].arg = NULL;

		/* Store the state with release order to ensure that
		 * the memory operations from the worker thread
		 * are completed before the state is updated.
		 * Use 'state' as the guard variable.
		 */
		__atomic_store_n(&lcore_config[lcore_id].state, WAIT,
			__ATOMIC_RELEASE);
	}

	/* never reached */
	/* return 0; */
}
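
/*
 * Illustrative sketch (not part of the original source): the launching side of
 * the handshake above. The launcher (rte_eal_remote_launch(), implemented
 * elsewhere in EAL) is expected to publish the argument before the function
 * pointer and to use 'f' as the guard variable, so that the acquire load in
 * eal_thread_loop() observes a fully initialized request. A minimal sketch,
 * assuming the lcore_config layout used in this file:
 *
 *	lcore_config[worker_id].arg = arg;
 *	// pairs with the acquire load of 'f' in eal_thread_loop()
 *	__atomic_store_n(&lcore_config[worker_id].f, f, __ATOMIC_RELEASE);
 *	// then wake the worker so it returns from eal_thread_wait_command()
 */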

enum __rte_ctrl_thread_status {
	CTRL_THREAD_LAUNCHING, /* Control thread not yet fully started */
	CTRL_THREAD_RUNNING, /* Control thread is running successfully */
	CTRL_THREAD_ERROR /* Control thread encountered an error */
};

struct rte_thread_ctrl_params {
	void *(*start_routine)(void *);
	void *arg;
	int ret;
	/* Control thread status.
	 * If the status is CTRL_THREAD_ERROR, 'ret' has the error code.
	 */
	enum __rte_ctrl_thread_status ctrl_thread_status;
};

static void *ctrl_thread_init(void *arg)
{
	struct internal_config *internal_conf =
		eal_get_internal_configuration();
	rte_cpuset_t *cpuset = &internal_conf->ctrl_cpuset;
	struct rte_thread_ctrl_params *params = arg;
	void *(*start_routine)(void *) = params->start_routine;
	void *routine_arg = params->arg;

	__rte_thread_init(rte_lcore_id(), cpuset);
	params->ret = rte_thread_set_affinity_by_id(rte_thread_self(), cpuset);
	if (params->ret != 0) {
		__atomic_store_n(&params->ctrl_thread_status,
			CTRL_THREAD_ERROR, __ATOMIC_RELEASE);
		return NULL;
	}

	__atomic_store_n(&params->ctrl_thread_status,
		CTRL_THREAD_RUNNING, __ATOMIC_RELEASE);

	return start_routine(routine_arg);
}

int
rte_ctrl_thread_create(pthread_t *thread, const char *name,
		const pthread_attr_t *attr,
		void *(*start_routine)(void *), void *arg)
{
	struct rte_thread_ctrl_params *params;
	enum __rte_ctrl_thread_status ctrl_thread_status;
	int ret;

	params = malloc(sizeof(*params));
	if (!params)
		return -ENOMEM;

	params->start_routine = start_routine;
	params->arg = arg;
	params->ret = 0;
	params->ctrl_thread_status = CTRL_THREAD_LAUNCHING;

	ret = pthread_create(thread, attr, ctrl_thread_init, (void *)params);
	if (ret != 0) {
		free(params);
		return -ret;
	}

	if (name != NULL) {
		ret = rte_thread_setname(*thread, name);
		if (ret < 0)
			RTE_LOG(DEBUG, EAL,
				"Cannot set name for ctrl thread\n");
	}

	/* Wait for the control thread to initialize successfully */
	while ((ctrl_thread_status =
			__atomic_load_n(&params->ctrl_thread_status,
			__ATOMIC_ACQUIRE)) == CTRL_THREAD_LAUNCHING) {
		/* Yield the CPU. sched_yield() is not used here because it is
		 * not supported on Windows and would require a separate
		 * implementation.
		 */
		rte_delay_us_sleep(1);
	}

	/* Check if the control thread encountered an error */
	if (ctrl_thread_status == CTRL_THREAD_ERROR) {
		/* ctrl thread is exiting */
		pthread_join(*thread, NULL);
	}

	ret = params->ret;
	free(params);

	return -ret;
}
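
/*
 * Illustrative sketch (not part of the original source): spawning a control
 * thread with rte_ctrl_thread_create(). The routine and thread names are
 * assumptions for the example; a negative errno value is returned on failure.
 *
 *	static void *
 *	example_ctrl_main(void *arg)
 *	{
 *		// runs with the control-thread cpuset already applied
 *		return NULL;
 *	}
 *
 *	pthread_t tid;
 *	int rc = rte_ctrl_thread_create(&tid, "example-ctrl", NULL,
 *			example_ctrl_main, NULL);
 *	if (rc != 0)
 *		RTE_LOG(ERR, EAL, "control thread creation failed: %d\n", rc);
 */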

int
rte_thread_register(void)
{
	unsigned int lcore_id;
	rte_cpuset_t cpuset;

	/* EAL init flushes all lcores, so a thread cannot register
	 * before init completes.
	 */
	if (eal_get_internal_configuration()->init_complete != 1) {
		RTE_LOG(DEBUG, EAL, "Called %s before EAL init.\n", __func__);
		rte_errno = EINVAL;
		return -1;
	}
	if (!rte_mp_disable()) {
		RTE_LOG(ERR, EAL, "Multiprocess in use, registering non-EAL threads is not supported.\n");
		rte_errno = EINVAL;
		return -1;
	}
	if (rte_thread_get_affinity_by_id(rte_thread_self(), &cpuset) != 0)
		CPU_ZERO(&cpuset);
	lcore_id = eal_lcore_non_eal_allocate();
	if (lcore_id >= RTE_MAX_LCORE)
		lcore_id = LCORE_ID_ANY;
	__rte_thread_init(lcore_id, &cpuset);
	if (lcore_id == LCORE_ID_ANY) {
		rte_errno = ENOMEM;
		return -1;
	}
	RTE_LOG(DEBUG, EAL, "Registered non-EAL thread as lcore %u.\n",
		lcore_id);
	return 0;
}

void
rte_thread_unregister(void)
{
	unsigned int lcore_id = rte_lcore_id();

	if (lcore_id != LCORE_ID_ANY)
		eal_lcore_non_eal_release(lcore_id);
	__rte_thread_uninit();
	if (lcore_id != LCORE_ID_ANY)
		RTE_LOG(DEBUG, EAL, "Unregistered non-EAL thread (was lcore %u).\n",
			lcore_id);
}
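
/*
 * Illustrative sketch (not part of the original source): a thread created
 * outside of EAL registering itself to obtain an lcore id, then unregistering
 * before it exits. The function name is an assumption for the example only.
 *
 *	static void *
 *	example_app_thread(void *arg)
 *	{
 *		if (rte_thread_register() != 0) {
 *			// rte_errno is set, e.g. ENOMEM when no lcore id is free
 *			return NULL;
 *		}
 *		// rte_lcore_id() is now valid in this thread
 *		rte_thread_unregister();
 *		return NULL;
 *	}
 */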

int
rte_thread_attr_init(rte_thread_attr_t *attr)
{
	if (attr == NULL)
		return EINVAL;

	CPU_ZERO(&attr->cpuset);
	attr->priority = RTE_THREAD_PRIORITY_NORMAL;

	return 0;
}

int
rte_thread_attr_set_priority(rte_thread_attr_t *thread_attr,
		enum rte_thread_priority priority)
{
	if (thread_attr == NULL)
		return EINVAL;

	thread_attr->priority = priority;

	return 0;
}

int
rte_thread_attr_set_affinity(rte_thread_attr_t *thread_attr,
		rte_cpuset_t *cpuset)
{
	if (thread_attr == NULL)
		return EINVAL;

	if (cpuset == NULL)
		return EINVAL;

	thread_attr->cpuset = *cpuset;

	return 0;
}

int
rte_thread_attr_get_affinity(rte_thread_attr_t *thread_attr,
		rte_cpuset_t *cpuset)
{
	if (thread_attr == NULL)
		return EINVAL;

	if (cpuset == NULL)
		return EINVAL;

	*cpuset = thread_attr->cpuset;

	return 0;
}
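
/*
 * Illustrative sketch (not part of the original source): preparing thread
 * attributes with the helpers above before creating a thread through the
 * rte_thread API. The CPU number is an assumption for the example only.
 *
 *	rte_thread_attr_t attr;
 *	rte_cpuset_t set;
 *
 *	rte_thread_attr_init(&attr);	// normal priority, empty cpuset
 *	CPU_ZERO(&set);
 *	CPU_SET(1, &set);
 *	rte_thread_attr_set_affinity(&attr, &set);
 *	rte_thread_attr_set_priority(&attr, RTE_THREAD_PRIORITY_NORMAL);
 *	// the attributes can then be passed to a creation call such as
 *	// rte_thread_create()
 */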
419