/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2021 Mellanox Technologies, Ltd
 * Copyright (C) 2022 Microsoft Corporation
 */

#include <errno.h>
#include <wchar.h>

#include <rte_eal.h>
#include <rte_common.h>
#include <rte_errno.h>
#include <rte_stdatomic.h>
#include <rte_thread.h>

#include "eal_windows.h"

struct eal_tls_key {
	DWORD thread_index;
};

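/*
 * Context handed to the thread entry-point wrapper. The creating thread
 * fills in the user routine and its argument; thread_init_failed is set
 * by the creator if applying the requested attributes fails, telling the
 * new thread to exit without running the user routine.
 */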
struct thread_routine_ctx {
	rte_thread_func thread_func;
	RTE_ATOMIC(bool) thread_init_failed;
	void *routine_args;
};

/* Translates the most common error codes related to threads */
static int
thread_translate_win32_error(DWORD error)
{
	switch (error) {
	case ERROR_SUCCESS:
		return 0;

	case ERROR_INVALID_PARAMETER:
		return EINVAL;

	case ERROR_INVALID_HANDLE:
		return EFAULT;

	case ERROR_NOT_ENOUGH_MEMORY:
		/* FALLTHROUGH */
	case ERROR_NO_SYSTEM_RESOURCES:
		return ENOMEM;

	case ERROR_PRIVILEGE_NOT_HELD:
		/* FALLTHROUGH */
	case ERROR_ACCESS_DENIED:
		return EACCES;

	case ERROR_ALREADY_EXISTS:
		return EEXIST;

	case ERROR_POSSIBLE_DEADLOCK:
		return EDEADLK;

	case ERROR_INVALID_FUNCTION:
		/* FALLTHROUGH */
	case ERROR_CALL_NOT_IMPLEMENTED:
		return ENOSYS;
	}

	return EINVAL;
}

static int
thread_log_last_error(const char *message)
{
	DWORD error = GetLastError();
	RTE_LOG(DEBUG, EAL, "GetLastError()=%lu: %s\n", error, message);

	return thread_translate_win32_error(error);
}

static int
thread_map_priority_to_os_value(enum rte_thread_priority eal_pri, int *os_pri,
	DWORD *pri_class)
{
	/* Clear the output parameters. */
	*os_pri = -1;
	*pri_class = -1;

	switch (eal_pri) {
	case RTE_THREAD_PRIORITY_NORMAL:
		*pri_class = NORMAL_PRIORITY_CLASS;
		*os_pri = THREAD_PRIORITY_NORMAL;
		break;
	case RTE_THREAD_PRIORITY_REALTIME_CRITICAL:
		*pri_class = REALTIME_PRIORITY_CLASS;
		*os_pri = THREAD_PRIORITY_TIME_CRITICAL;
		break;
	default:
		RTE_LOG(DEBUG, EAL, "The requested priority value is invalid.\n");
		return EINVAL;
	}

	return 0;
}

static int
thread_map_os_priority_to_eal_value(int os_pri, DWORD pri_class,
	enum rte_thread_priority *eal_pri)
{
	switch (pri_class) {
	case NORMAL_PRIORITY_CLASS:
		if (os_pri == THREAD_PRIORITY_NORMAL) {
			*eal_pri = RTE_THREAD_PRIORITY_NORMAL;
			return 0;
		}
		break;
	case HIGH_PRIORITY_CLASS:
		RTE_LOG(WARNING, EAL, "The OS priority class is high not real-time.\n");
		/* FALLTHROUGH */
	case REALTIME_PRIORITY_CLASS:
		if (os_pri == THREAD_PRIORITY_TIME_CRITICAL) {
			*eal_pri = RTE_THREAD_PRIORITY_REALTIME_CRITICAL;
			return 0;
		}
		break;
	default:
		RTE_LOG(DEBUG, EAL, "The OS priority value does not map to an EAL-defined priority.\n");
		return EINVAL;
	}

	return 0;
}

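/*
 * A GROUP_AFFINITY mask can describe CPUs from only one Windows processor
 * group, so a cpuset spanning multiple groups cannot be represented and is
 * rejected with ENOTSUP.
 */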
static int
convert_cpuset_to_affinity(const rte_cpuset_t *cpuset,
		PGROUP_AFFINITY affinity)
{
	int ret = 0;
	PGROUP_AFFINITY cpu_affinity = NULL;
	unsigned int cpu_idx;

	memset(affinity, 0, sizeof(GROUP_AFFINITY));
	affinity->Group = (USHORT)-1;

	/* Check that all cpus of the set belong to the same processor group and
	 * accumulate thread affinity to be applied.
	 */
	for (cpu_idx = 0; cpu_idx < CPU_SETSIZE; cpu_idx++) {
		if (!CPU_ISSET(cpu_idx, cpuset))
			continue;

		cpu_affinity = eal_get_cpu_affinity(cpu_idx);

		if (affinity->Group == (USHORT)-1) {
			affinity->Group = cpu_affinity->Group;
		} else if (affinity->Group != cpu_affinity->Group) {
			RTE_LOG(DEBUG, EAL, "All processors must belong to the same processor group\n");
			ret = ENOTSUP;
			goto cleanup;
		}

		affinity->Mask |= cpu_affinity->Mask;
	}

	if (affinity->Mask == 0) {
		ret = EINVAL;
		goto cleanup;
	}

cleanup:
	return ret;
}

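/*
 * Entry point passed to CreateThread(). It copies the heap-allocated
 * context, frees it, and either runs the user routine or returns
 * immediately if the creating thread flagged initialization as failed.
 */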
static DWORD
thread_func_wrapper(void *arg)
{
	struct thread_routine_ctx ctx = *(struct thread_routine_ctx *)arg;
	const bool thread_exit = rte_atomic_load_explicit(
		&ctx.thread_init_failed, rte_memory_order_acquire);

	free(arg);

	if (thread_exit)
		return 0;

	return (DWORD)ctx.thread_func(ctx.routine_args);
}

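/*
 * The thread is created suspended so that the affinity and priority from
 * thread_attr can be applied before it starts executing. If applying them
 * fails, thread_init_failed is set and the thread is resumed only so that
 * it can exit cleanly.
 */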
int
rte_thread_create(rte_thread_t *thread_id,
		  const rte_thread_attr_t *thread_attr,
		  rte_thread_func thread_func, void *args)
{
	int ret = 0;
	DWORD tid;
	HANDLE thread_handle = NULL;
	GROUP_AFFINITY thread_affinity;
	struct thread_routine_ctx *ctx;
	bool thread_exit = false;

	ctx = calloc(1, sizeof(*ctx));
	if (ctx == NULL) {
		RTE_LOG(DEBUG, EAL, "Insufficient memory for thread context allocations\n");
		ret = ENOMEM;
		goto cleanup;
	}
	ctx->routine_args = args;
	ctx->thread_func = thread_func;
	ctx->thread_init_failed = false;

	thread_handle = CreateThread(NULL, 0, thread_func_wrapper, ctx,
		CREATE_SUSPENDED, &tid);
	if (thread_handle == NULL) {
		ret = thread_log_last_error("CreateThread()");
		goto cleanup;
	}
	thread_id->opaque_id = tid;

	if (thread_attr != NULL) {
		if (CPU_COUNT(&thread_attr->cpuset) > 0) {
			ret = convert_cpuset_to_affinity(
							&thread_attr->cpuset,
							&thread_affinity
							);
			if (ret != 0) {
				RTE_LOG(DEBUG, EAL, "Unable to convert cpuset to thread affinity\n");
				thread_exit = true;
				goto resume_thread;
			}

			if (!SetThreadGroupAffinity(thread_handle,
						    &thread_affinity, NULL)) {
				ret = thread_log_last_error("SetThreadGroupAffinity()");
				thread_exit = true;
				goto resume_thread;
			}
		}
		ret = rte_thread_set_priority(*thread_id,
				thread_attr->priority);
		if (ret != 0) {
			RTE_LOG(DEBUG, EAL, "Unable to set thread priority\n");
			thread_exit = true;
			goto resume_thread;
		}
	}

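	/*
	 * Publish the outcome of the setup above with release ordering; the
	 * wrapper reads the flag with acquire ordering. The suspended thread
	 * is always resumed, either to run the user routine or to exit at
	 * once.
	 */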
resume_thread:
	rte_atomic_store_explicit(&ctx->thread_init_failed, thread_exit, rte_memory_order_release);

	if (ResumeThread(thread_handle) == (DWORD)-1) {
		ret = thread_log_last_error("ResumeThread()");
		goto cleanup;
	}

	ctx = NULL;
cleanup:
	free(ctx);
	if (thread_handle != NULL) {
		CloseHandle(thread_handle);
		thread_handle = NULL;
	}

	return ret;
}

int
rte_thread_join(rte_thread_t thread_id, uint32_t *value_ptr)
{
	HANDLE thread_handle;
	DWORD result;
	DWORD exit_code = 0;
	BOOL err;
	int ret = 0;

	thread_handle = OpenThread(SYNCHRONIZE | THREAD_QUERY_INFORMATION,
				   FALSE, thread_id.opaque_id);
	if (thread_handle == NULL) {
		ret = thread_log_last_error("OpenThread()");
		goto cleanup;
	}

	result = WaitForSingleObject(thread_handle, INFINITE);
	if (result != WAIT_OBJECT_0) {
		ret = thread_log_last_error("WaitForSingleObject()");
		goto cleanup;
	}

	if (value_ptr != NULL) {
		err = GetExitCodeThread(thread_handle, &exit_code);
		if (err == 0) {
			ret = thread_log_last_error("GetExitCodeThread()");
			goto cleanup;
		}
		*value_ptr = exit_code;
	}

cleanup:
	if (thread_handle != NULL) {
		CloseHandle(thread_handle);
		thread_handle = NULL;
	}

	return ret;
}

int
rte_thread_detach(rte_thread_t thread_id)
{
	/* No resources that need to be released. */
	RTE_SET_USED(thread_id);

	return 0;
}

int
rte_thread_equal(rte_thread_t t1, rte_thread_t t2)
{
	return t1.opaque_id == t2.opaque_id;
}

rte_thread_t
rte_thread_self(void)
{
	rte_thread_t thread_id;

	thread_id.opaque_id = GetCurrentThreadId();

	return thread_id;
}

void
rte_thread_set_name(rte_thread_t thread_id, const char *thread_name)
{
	int ret = 0;
	wchar_t wname[RTE_THREAD_NAME_SIZE];
	mbstate_t state = {0};
	size_t rv;
	HANDLE thread_handle;

	thread_handle = OpenThread(THREAD_ALL_ACCESS, FALSE,
		thread_id.opaque_id);
	if (thread_handle == NULL) {
		ret = thread_log_last_error("OpenThread()");
		goto cleanup;
	}

	memset(wname, 0, sizeof(wname));
	rv = mbsrtowcs(wname, &thread_name, RTE_DIM(wname) - 1, &state);
	if (rv == (size_t)-1) {
		ret = EILSEQ;
		goto cleanup;
	}

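	/*
	 * SetThreadDescription() may not be available when building with the
	 * GCC (MinGW) toolchain, in which case setting the name is reported
	 * as unsupported.
	 */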
#ifndef RTE_TOOLCHAIN_GCC
	if (FAILED(SetThreadDescription(thread_handle, wname))) {
		ret = EINVAL;
		goto cleanup;
	}
#else
	ret = ENOTSUP;
	goto cleanup;
#endif

cleanup:
	if (thread_handle != NULL)
		CloseHandle(thread_handle);

	if (ret != 0)
		RTE_LOG(DEBUG, EAL, "Failed to set thread name\n");
}

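/*
 * On Windows a thread's effective priority combines the process-wide
 * priority class with the per-thread priority level; both are read here
 * and mapped back to an EAL priority.
 */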
int
rte_thread_get_priority(rte_thread_t thread_id,
	enum rte_thread_priority *priority)
{
	HANDLE thread_handle = NULL;
	DWORD pri_class;
	int os_pri;
	int ret;

	pri_class = GetPriorityClass(GetCurrentProcess());
	if (pri_class == 0) {
		ret = thread_log_last_error("GetPriorityClass()");
		goto cleanup;
	}

	thread_handle = OpenThread(THREAD_SET_INFORMATION |
		THREAD_QUERY_INFORMATION, FALSE, thread_id.opaque_id);
	if (thread_handle == NULL) {
		ret = thread_log_last_error("OpenThread()");
		goto cleanup;
	}

	os_pri = GetThreadPriority(thread_handle);
	if (os_pri == THREAD_PRIORITY_ERROR_RETURN) {
		ret = thread_log_last_error("GetThreadPriority()");
		goto cleanup;
	}

	ret = thread_map_os_priority_to_eal_value(os_pri, pri_class, priority);
	if (ret != 0)
		goto cleanup;

cleanup:
	if (thread_handle != NULL)
		CloseHandle(thread_handle);

	return ret;
}

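/*
 * Note that the priority class is a process-wide setting: changing a
 * thread's priority also updates the priority class of the calling
 * process via SetPriorityClass().
 */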
int
rte_thread_set_priority(rte_thread_t thread_id,
			enum rte_thread_priority priority)
{
	HANDLE thread_handle;
	DWORD priority_class;
	int os_priority;
	int ret = 0;

	thread_handle = OpenThread(THREAD_SET_INFORMATION |
		THREAD_QUERY_INFORMATION, FALSE, thread_id.opaque_id);
	if (thread_handle == NULL) {
		ret = thread_log_last_error("OpenThread()");
		goto cleanup;
	}

	ret = thread_map_priority_to_os_value(priority, &os_priority,
		&priority_class);
	if (ret != 0)
		goto cleanup;

	if (!SetPriorityClass(GetCurrentProcess(), priority_class)) {
		ret = thread_log_last_error("SetPriorityClass()");
		goto cleanup;
	}

	if (!SetThreadPriority(thread_handle, os_priority)) {
		ret = thread_log_last_error("SetThreadPriority()");
		goto cleanup;
	}

cleanup:
	if (thread_handle != NULL)
		CloseHandle(thread_handle);

	return ret;
}

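/*
 * Thread-local storage keys are thin wrappers around the Win32 TLS slots
 * (TlsAlloc/TlsFree/TlsSetValue/TlsGetValue). The destructor argument is
 * accepted for API compatibility but is not used on Windows.
 */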
int
rte_thread_key_create(rte_thread_key *key,
		__rte_unused void (*destructor)(void *))
{
	*key = malloc(sizeof(**key));
	if ((*key) == NULL) {
		RTE_LOG(DEBUG, EAL, "Cannot allocate TLS key.\n");
		rte_errno = ENOMEM;
		return -1;
	}
	(*key)->thread_index = TlsAlloc();
	if ((*key)->thread_index == TLS_OUT_OF_INDEXES) {
		RTE_LOG_WIN32_ERR("TlsAlloc()");
		free(*key);
		rte_errno = ENOEXEC;
		return -1;
	}
	return 0;
}

int
rte_thread_key_delete(rte_thread_key key)
{
	if (!key) {
		RTE_LOG(DEBUG, EAL, "Invalid TLS key.\n");
		rte_errno = EINVAL;
		return -1;
	}
	if (!TlsFree(key->thread_index)) {
		RTE_LOG_WIN32_ERR("TlsFree()");
		free(key);
		rte_errno = ENOEXEC;
		return -1;
	}
	free(key);
	return 0;
}

int
rte_thread_value_set(rte_thread_key key, const void *value)
{
	char *p;

	if (!key) {
		RTE_LOG(DEBUG, EAL, "Invalid TLS key.\n");
		rte_errno = EINVAL;
		return -1;
	}
	/* discard const qualifier */
	p = (char *) (uintptr_t) value;
	if (!TlsSetValue(key->thread_index, p)) {
		RTE_LOG_WIN32_ERR("TlsSetValue()");
		rte_errno = ENOEXEC;
		return -1;
	}
	return 0;
}

void *
rte_thread_value_get(rte_thread_key key)
{
	void *output;

	if (!key) {
		RTE_LOG(DEBUG, EAL, "Invalid TLS key.\n");
		rte_errno = EINVAL;
		return NULL;
	}
	output = TlsGetValue(key->thread_index);
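	/*
	 * TlsGetValue() returns 0 both on failure and when the stored value
	 * is NULL, so the last error is checked to tell the two apart.
	 */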
	if (GetLastError() != ERROR_SUCCESS) {
		RTE_LOG_WIN32_ERR("TlsGetValue()");
		rte_errno = ENOEXEC;
		return NULL;
	}
	return output;
}

int
rte_thread_set_affinity_by_id(rte_thread_t thread_id,
		const rte_cpuset_t *cpuset)
{
	int ret = 0;
	GROUP_AFFINITY thread_affinity;
	HANDLE thread_handle = NULL;

	if (cpuset == NULL) {
		ret = EINVAL;
		goto cleanup;
	}

	ret = convert_cpuset_to_affinity(cpuset, &thread_affinity);
	if (ret != 0) {
		RTE_LOG(DEBUG, EAL, "Unable to convert cpuset to thread affinity\n");
		goto cleanup;
	}

	thread_handle = OpenThread(THREAD_ALL_ACCESS, FALSE,
		thread_id.opaque_id);
	if (thread_handle == NULL) {
		ret = thread_log_last_error("OpenThread()");
		goto cleanup;
	}

	if (!SetThreadGroupAffinity(thread_handle, &thread_affinity, NULL)) {
		ret = thread_log_last_error("SetThreadGroupAffinity()");
		goto cleanup;
	}

cleanup:
	if (thread_handle != NULL) {
		CloseHandle(thread_handle);
		thread_handle = NULL;
	}

	return ret;
}

int
rte_thread_get_affinity_by_id(rte_thread_t thread_id,
		rte_cpuset_t *cpuset)
{
	HANDLE thread_handle = NULL;
	PGROUP_AFFINITY cpu_affinity;
	GROUP_AFFINITY thread_affinity;
	unsigned int cpu_idx;
	int ret = 0;

	if (cpuset == NULL) {
		ret = EINVAL;
		goto cleanup;
	}

	thread_handle = OpenThread(THREAD_ALL_ACCESS, FALSE,
		thread_id.opaque_id);
	if (thread_handle == NULL) {
		ret = thread_log_last_error("OpenThread()");
		goto cleanup;
	}

	/* obtain previous thread affinity */
	if (!GetThreadGroupAffinity(thread_handle, &thread_affinity)) {
		ret = thread_log_last_error("GetThreadGroupAffinity()");
		goto cleanup;
	}

	CPU_ZERO(cpuset);

	/* Convert affinity to DPDK cpu set */
	for (cpu_idx = 0; cpu_idx < CPU_SETSIZE; cpu_idx++) {

		cpu_affinity = eal_get_cpu_affinity(cpu_idx);

		if ((cpu_affinity->Group == thread_affinity.Group) &&
		   ((cpu_affinity->Mask & thread_affinity.Mask) != 0)) {
			CPU_SET(cpu_idx, cpuset);
		}
	}

cleanup:
	if (thread_handle != NULL) {
		CloseHandle(thread_handle);
		thread_handle = NULL;
	}
	return ret;
}