xref: /dpdk/lib/eal/linux/eal_thread.c (revision 9ad3a41ab2a10db0059e1decdbf3ec038f348e08)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2014 Intel Corporation
3  */
4 
5 #include <errno.h>
6 #include <stdio.h>
7 #include <stdint.h>
8 #include <unistd.h>
9 #include <pthread.h>
10 #include <sys/syscall.h>
11 
12 #include <rte_debug.h>
13 #include <rte_launch.h>
14 #include <rte_log.h>
15 #include <rte_eal.h>
16 #include <rte_lcore.h>
17 #include <rte_eal_trace.h>
18 
19 #include "eal_private.h"
20 #include "eal_thread.h"
21 
/*
 * Send a message to a worker lcore identified by worker_id to call a
 * function f with argument arg. Once the execution is done, the
 * remote lcore switches to WAIT state.
 *
 * Return 0 on success, -EBUSY if the worker is not in the WAIT state.
 * Panics (does not return) if a configuration pipe read/write fails.
 */
int
rte_eal_remote_launch(int (*f)(void *), void *arg, unsigned int worker_id)
{
	int n;
	char c = 0;
	/* write end of main->worker pipe, read end of worker->main pipe */
	int m2w = lcore_config[worker_id].pipe_main2worker[1];
	int w2m = lcore_config[worker_id].pipe_worker2main[0];
	int rc = -EBUSY;

	/* Check if the worker is in 'WAIT' state. Use acquire order
	 * since 'state' variable is used as the guard variable.
	 */
	if (__atomic_load_n(&lcore_config[worker_id].state,
					__ATOMIC_ACQUIRE) != WAIT)
		goto finish;

	/* 'arg' is stored before 'f': the worker spins until 'f' is
	 * non-NULL, and the release store below makes 'arg' visible
	 * together with it.
	 */
	lcore_config[worker_id].arg = arg;
	/* Ensure that all the memory operations are completed
	 * before the worker thread starts running the function.
	 * Use worker thread function pointer as the guard variable.
	 */
	__atomic_store_n(&lcore_config[worker_id].f, f, __ATOMIC_RELEASE);

	/* send message; retry if interrupted by a signal (EINTR)
	 * or if no byte was written
	 */
	n = 0;
	while (n == 0 || (n < 0 && errno == EINTR))
		n = write(m2w, &c, 1);
	if (n < 0)
		rte_panic("cannot write on configuration pipe\n");

	/* wait ack: the worker echoes one byte once it has switched
	 * its state to RUNNING
	 */
	do {
		n = read(w2m, &c, 1);
	} while (n < 0 && errno == EINTR);

	if (n <= 0)
		rte_panic("cannot read on configuration pipe\n");

	rc = 0;
finish:
	rte_eal_trace_thread_remote_launch(f, arg, worker_id, rc);
	return rc;
}
70 
/*
 * Main loop of worker lcore threads: block on the main->worker pipe
 * for a command byte, run the launched function, store its return
 * value, then go back to the WAIT state. Never returns.
 */
__rte_noreturn void *
eal_thread_loop(__rte_unused void *arg)
{
	char c;
	int n, ret;
	unsigned lcore_id;
	pthread_t thread_id;
	int m2w, w2m;
	char cpuset[RTE_CPU_AFFINITY_STR_LEN];

	thread_id = pthread_self();

	/* retrieve our lcore_id from the configuration structure */
	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		if (thread_id == lcore_config[lcore_id].thread_id)
			break;
	}
	if (lcore_id == RTE_MAX_LCORE)
		rte_panic("cannot retrieve lcore id\n");

	/* read end of main->worker pipe, write end of worker->main pipe */
	m2w = lcore_config[lcore_id].pipe_main2worker[0];
	w2m = lcore_config[lcore_id].pipe_worker2main[1];

	__rte_thread_init(lcore_id, &lcore_config[lcore_id].cpuset);

	/* "..." in the log marks a truncated cpuset string (ret != 0) */
	ret = eal_thread_dump_current_affinity(cpuset, sizeof(cpuset));
	RTE_LOG(DEBUG, EAL, "lcore %u is ready (tid=%zx;cpuset=[%s%s])\n",
		lcore_id, (uintptr_t)thread_id, cpuset, ret == 0 ? "" : "...");

	rte_eal_trace_thread_lcore_ready(lcore_id, cpuset);

	/* read on our pipe to get commands */
	while (1) {
		lcore_function_t *f;
		void *fct_arg;

		/* wait command; retry reads interrupted by a signal */
		do {
			n = read(m2w, &c, 1);
		} while (n < 0 && errno == EINTR);

		if (n <= 0)
			rte_panic("cannot read on configuration pipe\n");

		/* Set the state to 'RUNNING'. Use release order
		 * since 'state' variable is used as the guard variable.
		 */
		__atomic_store_n(&lcore_config[lcore_id].state, RUNNING,
					__ATOMIC_RELEASE);

		/* send ack; retry if interrupted (EINTR) or no byte written */
		n = 0;
		while (n == 0 || (n < 0 && errno == EINTR))
			n = write(w2m, &c, 1);
		if (n < 0)
			rte_panic("cannot write on configuration pipe\n");

		/* Load 'f' with acquire order to ensure that
		 * the memory operations from the main thread
		 * are accessed only after update to 'f' is visible.
		 * Wait till the update to 'f' is visible to the worker.
		 */
		while ((f = __atomic_load_n(&lcore_config[lcore_id].f,
			__ATOMIC_ACQUIRE)) == NULL)
			rte_pause();

		/* call the function and store the return value */
		fct_arg = lcore_config[lcore_id].arg;
		ret = f(fct_arg);
		lcore_config[lcore_id].ret = ret;
		/* Plain stores are sufficient here: the main thread only
		 * rewrites 'f'/'arg' after observing state == WAIT, which
		 * the release store below publishes.
		 */
		lcore_config[lcore_id].f = NULL;
		lcore_config[lcore_id].arg = NULL;

		/* Store the state with release order to ensure that
		 * the memory operations from the worker thread
		 * are completed before the state is updated.
		 * Use 'state' as the guard variable.
		 */
		__atomic_store_n(&lcore_config[lcore_id].state, WAIT,
					__ATOMIC_RELEASE);
	}

	/* never reached */
	/* pthread_exit(NULL); */
	/* return NULL; */
}
158 
/* Return the kernel thread id (tid) of the calling thread. */
int rte_sys_gettid(void)
{
	long tid;

	/* older glibc has no gettid() wrapper, go through syscall(2) */
	tid = syscall(SYS_gettid);
	return (int)tid;
}
164 
/*
 * Set the name of a thread. Only implemented on glibc >= 2.12, where
 * pthread_setname_np() is available; elsewhere returns -ENOSYS.
 * Returns 0 on success or a negative errno-style value on failure.
 */
int rte_thread_setname(pthread_t id, const char *name)
{
	int ret = ENOSYS;
#if defined(__GLIBC__) && defined(__GLIBC_PREREQ)
#if __GLIBC_PREREQ(2, 12)
	char truncated[16];

	/* Linux limits thread names to 15 chars plus NUL: make a
	 * bounded, always-terminated copy before handing it to glibc.
	 */
	snprintf(truncated, sizeof(truncated), "%s", name);
	ret = pthread_setname_np(id, truncated);
#endif
#endif
	RTE_SET_USED(id);
	RTE_SET_USED(name);
	return -ret;
}
180 
/*
 * Retrieve the name of a thread into 'name' (buffer of 'len' bytes).
 * Only implemented on glibc >= 2.12, where pthread_getname_np() is
 * available; elsewhere returns -ENOSYS.
 * Returns 0 on success or a negative errno-style value on failure.
 */
int rte_thread_getname(pthread_t id, char *name, size_t len)
{
	/* default when glibc support is absent */
	int ret = ENOSYS;
#if defined(__GLIBC__) && defined(__GLIBC_PREREQ)
#if __GLIBC_PREREQ(2, 12)
	/* pthread_getname_np() first appeared in glibc 2.12 */
	ret = pthread_getname_np(id, name, len);
#endif
#endif
	/* silence unused-parameter warnings on non-glibc builds */
	RTE_SET_USED(id);
	RTE_SET_USED(name);
	RTE_SET_USED(len);
	return -ret;

}
195