/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <unistd.h>
#include <limits.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>

#include <rte_common.h>
#include <rte_debug.h>
#include <rte_eal.h>
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_rwlock.h>

#include "eal_memcfg.h"
#include "eal_private.h"
#include "eal_thread.h"

unsigned int rte_get_main_lcore(void)
{
	return rte_eal_get_configuration()->main_lcore;
}

unsigned int rte_lcore_count(void)
{
	return rte_eal_get_configuration()->lcore_count;
}

int rte_lcore_index(int lcore_id)
{
	if (unlikely(lcore_id >= RTE_MAX_LCORE))
		return -1;

	if (lcore_id < 0) {
		if (rte_lcore_id() == LCORE_ID_ANY)
			return -1;

		lcore_id = (int)rte_lcore_id();
	}

	return lcore_config[lcore_id].core_index;
}
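
/*
 * Usage sketch (illustrative, not part of this file): a negative lcore_id
 * resolves to the calling lcore, so a worker can look up its own contiguous
 * index, e.g. to address a slot in a hypothetical per-worker array:
 *
 *	int idx = rte_lcore_index(-1);
 *
 *	if (idx >= 0)
 *		per_worker_stats[idx].polls++;
 */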

int rte_lcore_to_cpu_id(int lcore_id)
{
	if (unlikely(lcore_id >= RTE_MAX_LCORE))
		return -1;

	if (lcore_id < 0) {
		if (rte_lcore_id() == LCORE_ID_ANY)
			return -1;

		lcore_id = (int)rte_lcore_id();
	}

	return lcore_config[lcore_id].core_id;
}

rte_cpuset_t rte_lcore_cpuset(unsigned int lcore_id)
{
	return lcore_config[lcore_id].cpuset;
}

enum rte_lcore_role_t
rte_eal_lcore_role(unsigned int lcore_id)
{
	struct rte_config *cfg = rte_eal_get_configuration();

	if (lcore_id >= RTE_MAX_LCORE)
		return ROLE_OFF;
	return cfg->lcore_role[lcore_id];
}

int
rte_lcore_has_role(unsigned int lcore_id, enum rte_lcore_role_t role)
{
	struct rte_config *cfg = rte_eal_get_configuration();

	if (lcore_id >= RTE_MAX_LCORE)
		return -EINVAL;

	return cfg->lcore_role[lcore_id] == role;
}
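
/*
 * Usage sketch (illustrative): rte_lcore_has_role() returns 1 on a match,
 * 0 on a mismatch and -EINVAL for an out-of-range id. Since -EINVAL is
 * truthy, compare against 1 rather than testing the raw return value:
 *
 *	if (rte_lcore_has_role(lcore_id, ROLE_SERVICE) == 1)
 *		printf("lcore %u runs services\n", lcore_id);
 */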

int rte_lcore_is_enabled(unsigned int lcore_id)
{
	struct rte_config *cfg = rte_eal_get_configuration();

	if (lcore_id >= RTE_MAX_LCORE)
		return 0;
	return cfg->lcore_role[lcore_id] == ROLE_RTE;
}

unsigned int rte_get_next_lcore(unsigned int i, int skip_main, int wrap)
{
	i++;
	if (wrap)
		i %= RTE_MAX_LCORE;

	while (i < RTE_MAX_LCORE) {
		if (!rte_lcore_is_enabled(i) ||
		    (skip_main && (i == rte_get_main_lcore()))) {
			i++;
			if (wrap)
				i %= RTE_MAX_LCORE;
			continue;
		}
		break;
	}
	return i;
}
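
/*
 * Usage sketch (illustrative): this function backs the RTE_LCORE_FOREACH()
 * and RTE_LCORE_FOREACH_WORKER() macros of rte_lcore.h. Hand-rolled, a
 * non-wrapping walk over all worker lcores looks like:
 *
 *	unsigned int i;
 *
 *	for (i = rte_get_next_lcore(-1, 1, 0); i < RTE_MAX_LCORE;
 *			i = rte_get_next_lcore(i, 1, 0))
 *		printf("worker lcore %u\n", i);
 *
 * Starting from -1 lets the initial i++ overflow to 0, so lcore 0 is
 * considered too.
 */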

unsigned int
rte_lcore_to_socket_id(unsigned int lcore_id)
{
	return lcore_config[lcore_id].socket_id;
}

static int
socket_id_cmp(const void *a, const void *b)
{
	const int *lcore_id_a = a;
	const int *lcore_id_b = b;

	if (*lcore_id_a < *lcore_id_b)
		return -1;
	if (*lcore_id_a > *lcore_id_b)
		return 1;
	return 0;
}
/*
 * Parse /sys/devices/system/cpu to get the number of physical and logical
 * processors on the machine. The function fills the per-lcore entries of
 * lcore_config as well as the global rte_config.
 */
int
rte_eal_cpu_init(void)
{
	/* pointer to global configuration */
	struct rte_config *config = rte_eal_get_configuration();
	unsigned lcore_id;
	unsigned count = 0;
	unsigned int socket_id, prev_socket_id;
	int lcore_to_socket_id[RTE_MAX_LCORE];

	/*
	 * Parse the maximum set of logical cores, detect the subset of running
	 * ones and enable them by default.
	 */
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		lcore_config[lcore_id].core_index = count;

		/* init cpuset for per lcore config */
		CPU_ZERO(&lcore_config[lcore_id].cpuset);

		/* find socket first */
		socket_id = eal_cpu_socket_id(lcore_id);
		lcore_to_socket_id[lcore_id] = socket_id;

		if (eal_cpu_detected(lcore_id) == 0) {
			config->lcore_role[lcore_id] = ROLE_OFF;
			lcore_config[lcore_id].core_index = -1;
			continue;
		}

		/* By default, each lcore maps 1:1 to a CPU id */
		CPU_SET(lcore_id, &lcore_config[lcore_id].cpuset);

		/* By default, each detected core is enabled */
		config->lcore_role[lcore_id] = ROLE_RTE;
		lcore_config[lcore_id].core_role = ROLE_RTE;
		lcore_config[lcore_id].core_id = eal_cpu_core_id(lcore_id);
		lcore_config[lcore_id].socket_id = socket_id;
		RTE_LOG(DEBUG, EAL, "Detected lcore %u as "
				"core %u on socket %u\n",
				lcore_id, lcore_config[lcore_id].core_id,
				lcore_config[lcore_id].socket_id);
		count++;
	}
	for (; lcore_id < CPU_SETSIZE; lcore_id++) {
		if (eal_cpu_detected(lcore_id) == 0)
			continue;
		RTE_LOG(DEBUG, EAL, "Skipped lcore %u as core %u on socket %u\n",
			lcore_id, eal_cpu_core_id(lcore_id),
			eal_cpu_socket_id(lcore_id));
	}

	/* Set the count of enabled logical cores in the EAL configuration */
	config->lcore_count = count;
	RTE_LOG(DEBUG, EAL,
			"Maximum logical cores by configuration: %u\n",
			RTE_MAX_LCORE);
	RTE_LOG(INFO, EAL, "Detected CPU lcores: %u\n", config->lcore_count);

	/* sort all socket IDs in ascending order */
	qsort(lcore_to_socket_id, RTE_DIM(lcore_to_socket_id),
			sizeof(lcore_to_socket_id[0]), socket_id_cmp);

	prev_socket_id = -1;
	config->numa_node_count = 0;
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		socket_id = lcore_to_socket_id[lcore_id];
		if (socket_id != prev_socket_id)
			config->numa_nodes[config->numa_node_count++] =
					socket_id;
		prev_socket_id = socket_id;
	}
	RTE_LOG(INFO, EAL, "Detected NUMA nodes: %u\n", config->numa_node_count);

	return 0;
}

unsigned int
rte_socket_count(void)
{
	const struct rte_config *config = rte_eal_get_configuration();
	return config->numa_node_count;
}

int
rte_socket_id_by_idx(unsigned int idx)
{
	const struct rte_config *config = rte_eal_get_configuration();
	if (idx >= config->numa_node_count) {
		rte_errno = EINVAL;
		return -1;
	}
	return config->numa_nodes[idx];
}
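
/*
 * Usage sketch (illustrative): together, the two functions above enumerate
 * the NUMA nodes discovered by rte_eal_cpu_init(), e.g. to size per-socket
 * resources:
 *
 *	unsigned int i;
 *
 *	for (i = 0; i < rte_socket_count(); i++)
 *		printf("NUMA node %d present\n", rte_socket_id_by_idx(i));
 */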

static rte_rwlock_t lcore_lock = RTE_RWLOCK_INITIALIZER;
struct lcore_callback {
	TAILQ_ENTRY(lcore_callback) next;
	char *name;
	rte_lcore_init_cb init;
	rte_lcore_uninit_cb uninit;
	void *arg;
};
static TAILQ_HEAD(lcore_callbacks_head, lcore_callback) lcore_callbacks =
	TAILQ_HEAD_INITIALIZER(lcore_callbacks);

static int
callback_init(struct lcore_callback *callback, unsigned int lcore_id)
{
	if (callback->init == NULL)
		return 0;
	RTE_LOG(DEBUG, EAL, "Call init for lcore callback %s, lcore_id %u\n",
		callback->name, lcore_id);
	return callback->init(lcore_id, callback->arg);
}

static void
callback_uninit(struct lcore_callback *callback, unsigned int lcore_id)
{
	if (callback->uninit == NULL)
		return;
	RTE_LOG(DEBUG, EAL, "Call uninit for lcore callback %s, lcore_id %u\n",
		callback->name, lcore_id);
	callback->uninit(lcore_id, callback->arg);
}

static void
free_callback(struct lcore_callback *callback)
{
	free(callback->name);
	free(callback);
}

void *
rte_lcore_callback_register(const char *name, rte_lcore_init_cb init,
	rte_lcore_uninit_cb uninit, void *arg)
{
	struct rte_config *cfg = rte_eal_get_configuration();
	struct lcore_callback *callback;
	unsigned int lcore_id;

	if (name == NULL)
		return NULL;
	callback = calloc(1, sizeof(*callback));
	if (callback == NULL)
		return NULL;
	if (asprintf(&callback->name, "%s-%p", name, arg) == -1) {
		free(callback);
		return NULL;
	}
	callback->init = init;
	callback->uninit = uninit;
	callback->arg = arg;
	rte_rwlock_write_lock(&lcore_lock);
	if (callback->init == NULL)
		goto no_init;
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (cfg->lcore_role[lcore_id] == ROLE_OFF)
			continue;
		if (callback_init(callback, lcore_id) == 0)
			continue;
		/* Callback refused init for this lcore, uninitialize all
		 * previous lcores.
		 */
		while (lcore_id-- != 0) {
			if (cfg->lcore_role[lcore_id] == ROLE_OFF)
				continue;
			callback_uninit(callback, lcore_id);
		}
		free_callback(callback);
		callback = NULL;
		goto out;
	}
no_init:
	TAILQ_INSERT_TAIL(&lcore_callbacks, callback, next);
	RTE_LOG(DEBUG, EAL, "Registered new lcore callback %s (%sinit, %suninit).\n",
		callback->name, callback->init == NULL ? "NO " : "",
		callback->uninit == NULL ? "NO " : "");
out:
	rte_rwlock_write_unlock(&lcore_lock);
	return callback;
}
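
/*
 * Usage sketch (illustrative, with a hypothetical callback): init runs for
 * every existing lcore at registration time and again for each lcore
 * registered later; a negative return from init aborts the registration.
 *
 *	static int
 *	track_lcore(unsigned int lcore_id, void *arg)
 *	{
 *		(void)arg;
 *		printf("tracking lcore %u\n", lcore_id);
 *		return 0;
 *	}
 *
 *	void *handle = rte_lcore_callback_register("my-app", track_lcore,
 *		NULL, NULL);
 *	...
 *	rte_lcore_callback_unregister(handle);
 */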

void
rte_lcore_callback_unregister(void *handle)
{
	struct rte_config *cfg = rte_eal_get_configuration();
	struct lcore_callback *callback = handle;
	unsigned int lcore_id;

	if (callback == NULL)
		return;
	rte_rwlock_write_lock(&lcore_lock);
	if (callback->uninit == NULL)
		goto no_uninit;
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (cfg->lcore_role[lcore_id] == ROLE_OFF)
			continue;
		callback_uninit(callback, lcore_id);
	}
no_uninit:
	TAILQ_REMOVE(&lcore_callbacks, callback, next);
	rte_rwlock_write_unlock(&lcore_lock);
	RTE_LOG(DEBUG, EAL, "Unregistered lcore callback %s-%p.\n",
		callback->name, callback->arg);
	free_callback(callback);
}

unsigned int
eal_lcore_non_eal_allocate(void)
{
	struct rte_config *cfg = rte_eal_get_configuration();
	struct lcore_callback *callback;
	struct lcore_callback *prev;
	unsigned int lcore_id;

	rte_rwlock_write_lock(&lcore_lock);
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (cfg->lcore_role[lcore_id] != ROLE_OFF)
			continue;
		cfg->lcore_role[lcore_id] = ROLE_NON_EAL;
		cfg->lcore_count++;
		break;
	}
	if (lcore_id == RTE_MAX_LCORE) {
		RTE_LOG(DEBUG, EAL, "No lcore available.\n");
		goto out;
	}
	TAILQ_FOREACH(callback, &lcore_callbacks, next) {
		if (callback_init(callback, lcore_id) == 0)
			continue;
		/* Callback refused init for this lcore, call uninit for all
		 * previous callbacks.
		 */
		prev = TAILQ_PREV(callback, lcore_callbacks_head, next);
		while (prev != NULL) {
			callback_uninit(prev, lcore_id);
			prev = TAILQ_PREV(prev, lcore_callbacks_head, next);
		}
		RTE_LOG(DEBUG, EAL, "Initialization refused for lcore %u.\n",
			lcore_id);
		cfg->lcore_role[lcore_id] = ROLE_OFF;
		cfg->lcore_count--;
		lcore_id = RTE_MAX_LCORE;
		goto out;
	}
out:
	rte_rwlock_write_unlock(&lcore_lock);
	return lcore_id;
}
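
/*
 * Usage sketch (illustrative): applications do not call this internal
 * helper directly. A thread created outside of EAL obtains an lcore id
 * through the public rte_thread_register(), which relies on this helper.
 * From a thread started with pthread_create(), not by EAL:
 *
 *	if (rte_thread_register() == 0)
 *		printf("running as lcore %u\n", rte_lcore_id());
 *	...
 *	rte_thread_unregister();
 */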

void
eal_lcore_non_eal_release(unsigned int lcore_id)
{
	struct rte_config *cfg = rte_eal_get_configuration();
	struct lcore_callback *callback;

	rte_rwlock_write_lock(&lcore_lock);
	if (cfg->lcore_role[lcore_id] != ROLE_NON_EAL)
		goto out;
	TAILQ_FOREACH(callback, &lcore_callbacks, next)
		callback_uninit(callback, lcore_id);
	cfg->lcore_role[lcore_id] = ROLE_OFF;
	cfg->lcore_count--;
out:
	rte_rwlock_write_unlock(&lcore_lock);
}

int
rte_lcore_iterate(rte_lcore_iterate_cb cb, void *arg)
{
	struct rte_config *cfg = rte_eal_get_configuration();
	unsigned int lcore_id;
	int ret = 0;

	rte_rwlock_read_lock(&lcore_lock);
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (cfg->lcore_role[lcore_id] == ROLE_OFF)
			continue;
		ret = cb(lcore_id, arg);
		if (ret != 0)
			break;
	}
	rte_rwlock_read_unlock(&lcore_lock);
	return ret;
}
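
/*
 * Usage sketch (illustrative, with a hypothetical callback): a non-zero
 * return from the callback stops the walk early and is propagated back to
 * the rte_lcore_iterate() caller.
 *
 *	static int
 *	count_cb(unsigned int lcore_id, void *arg)
 *	{
 *		(void)lcore_id;
 *		(*(unsigned int *)arg)++;
 *		return 0;
 *	}
 *
 *	unsigned int n = 0;
 *
 *	rte_lcore_iterate(count_cb, &n);
 */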

static int
lcore_dump_cb(unsigned int lcore_id, void *arg)
{
	struct rte_config *cfg = rte_eal_get_configuration();
	char cpuset[RTE_CPU_AFFINITY_STR_LEN];
	const char *role;
	FILE *f = arg;
	int ret;

	switch (cfg->lcore_role[lcore_id]) {
	case ROLE_RTE:
		role = "RTE";
		break;
	case ROLE_SERVICE:
		role = "SERVICE";
		break;
	case ROLE_NON_EAL:
		role = "NON_EAL";
		break;
	default:
		role = "UNKNOWN";
		break;
	}

	ret = eal_thread_dump_affinity(&lcore_config[lcore_id].cpuset, cpuset,
		sizeof(cpuset));
	fprintf(f, "lcore %u, socket %u, role %s, cpuset %s%s\n", lcore_id,
		rte_lcore_to_socket_id(lcore_id), role, cpuset,
		ret == 0 ? "" : "...");
	return 0;
}

void
rte_lcore_dump(FILE *f)
{
	rte_lcore_iterate(lcore_dump_cb, f);
}
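
/*
 * Usage sketch (illustrative): print the lcore layout to the console,
 * e.g. right after rte_eal_init() succeeds:
 *
 *	rte_lcore_dump(stdout);
 */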