/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2021 Mellanox Technologies, Ltd
 * Copyright (C) 2022 Microsoft Corporation
 */

#include <errno.h>
#include <wchar.h>

#include <rte_eal.h>
#include <rte_common.h>
#include <rte_errno.h>
#include <rte_thread.h>

#include "eal_windows.h"

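/* A TLS key is a thin wrapper around the Win32 TLS slot index returned by TlsAlloc(). */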
struct eal_tls_key {
	DWORD thread_index;
};

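/*
 * Context passed to thread_func_wrapper(). It carries the user routine and its
 * argument, plus a flag telling the new thread whether initialization in
 * rte_thread_create() failed and it should exit without running the routine.
 */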
struct thread_routine_ctx {
	rte_thread_func thread_func;
	bool thread_init_failed;
	void *routine_args;
};

/* Translates the most common error codes related to threads */
static int
thread_translate_win32_error(DWORD error)
{
	switch (error) {
	case ERROR_SUCCESS:
		return 0;

	case ERROR_INVALID_PARAMETER:
		return EINVAL;

	case ERROR_INVALID_HANDLE:
		return EFAULT;

	case ERROR_NOT_ENOUGH_MEMORY:
		/* FALLTHROUGH */
	case ERROR_NO_SYSTEM_RESOURCES:
		return ENOMEM;

	case ERROR_PRIVILEGE_NOT_HELD:
		/* FALLTHROUGH */
	case ERROR_ACCESS_DENIED:
		return EACCES;

	case ERROR_ALREADY_EXISTS:
		return EEXIST;

	case ERROR_POSSIBLE_DEADLOCK:
		return EDEADLK;

	case ERROR_INVALID_FUNCTION:
		/* FALLTHROUGH */
	case ERROR_CALL_NOT_IMPLEMENTED:
		return ENOSYS;
	}

	return EINVAL;
}

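/* Log the failing call together with GetLastError() and return the matching errno value. */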
static int
thread_log_last_error(const char *message)
{
	DWORD error = GetLastError();
	RTE_LOG(DEBUG, EAL, "GetLastError()=%lu: %s\n", error, message);

	return thread_translate_win32_error(error);
}

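/* Map an EAL thread priority to a Win32 priority class and thread priority pair. */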
static int
thread_map_priority_to_os_value(enum rte_thread_priority eal_pri, int *os_pri,
	DWORD *pri_class)
{
	/* Clear the output parameters. */
	*os_pri = -1;
	*pri_class = -1;

	switch (eal_pri) {
	case RTE_THREAD_PRIORITY_NORMAL:
		*pri_class = NORMAL_PRIORITY_CLASS;
		*os_pri = THREAD_PRIORITY_NORMAL;
		break;
	case RTE_THREAD_PRIORITY_REALTIME_CRITICAL:
		*pri_class = REALTIME_PRIORITY_CLASS;
		*os_pri = THREAD_PRIORITY_TIME_CRITICAL;
		break;
	default:
		RTE_LOG(DEBUG, EAL, "The requested priority value is invalid.\n");
		return EINVAL;
	}

	return 0;
}

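/* Map a Win32 priority class and thread priority pair back to an EAL priority. */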
static int
thread_map_os_priority_to_eal_value(int os_pri, DWORD pri_class,
	enum rte_thread_priority *eal_pri)
{
	switch (pri_class) {
	case NORMAL_PRIORITY_CLASS:
		if (os_pri == THREAD_PRIORITY_NORMAL) {
			*eal_pri = RTE_THREAD_PRIORITY_NORMAL;
			return 0;
		}
		break;
	case HIGH_PRIORITY_CLASS:
		RTE_LOG(WARNING, EAL, "The OS priority class is high, not real-time.\n");
		/* FALLTHROUGH */
	case REALTIME_PRIORITY_CLASS:
		if (os_pri == THREAD_PRIORITY_TIME_CRITICAL) {
			*eal_pri = RTE_THREAD_PRIORITY_REALTIME_CRITICAL;
			return 0;
		}
		break;
	default:
		RTE_LOG(DEBUG, EAL, "The OS priority value does not map to an EAL-defined priority.\n");
		return EINVAL;
	}

	return 0;
}

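/*
 * Convert a DPDK cpuset into a single Windows GROUP_AFFINITY. Returns ENOTSUP
 * if the CPUs span more than one processor group and EINVAL if the resulting
 * affinity mask is empty.
 */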
static int
convert_cpuset_to_affinity(const rte_cpuset_t *cpuset,
		PGROUP_AFFINITY affinity)
{
	int ret = 0;
	PGROUP_AFFINITY cpu_affinity = NULL;
	unsigned int cpu_idx;

	memset(affinity, 0, sizeof(GROUP_AFFINITY));
	affinity->Group = (USHORT)-1;

	/* Check that all CPUs of the set belong to the same processor group and
	 * accumulate the thread affinity to be applied.
	 */
	for (cpu_idx = 0; cpu_idx < CPU_SETSIZE; cpu_idx++) {
		if (!CPU_ISSET(cpu_idx, cpuset))
			continue;

		cpu_affinity = eal_get_cpu_affinity(cpu_idx);

		if (affinity->Group == (USHORT)-1) {
			affinity->Group = cpu_affinity->Group;
		} else if (affinity->Group != cpu_affinity->Group) {
			RTE_LOG(DEBUG, EAL, "All processors must belong to the same processor group\n");
			ret = ENOTSUP;
			goto cleanup;
		}

		affinity->Mask |= cpu_affinity->Mask;
	}

	if (affinity->Mask == 0) {
		ret = EINVAL;
		goto cleanup;
	}

cleanup:
	return ret;
}

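/*
 * Entry point handed to CreateThread(). It copies and frees the heap-allocated
 * context, then either returns immediately (when rte_thread_create() flagged an
 * initialization failure) or invokes the user-supplied routine.
 */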
static DWORD
thread_func_wrapper(void *arg)
{
	struct thread_routine_ctx ctx = *(struct thread_routine_ctx *)arg;
	const bool thread_exit = __atomic_load_n(&ctx.thread_init_failed, __ATOMIC_ACQUIRE);

	free(arg);

	if (thread_exit)
		return 0;

	return (DWORD)ctx.thread_func(ctx.routine_args);
}

int
rte_thread_create(rte_thread_t *thread_id,
		  const rte_thread_attr_t *thread_attr,
		  rte_thread_func thread_func, void *args)
{
	int ret = 0;
	DWORD tid;
	HANDLE thread_handle = NULL;
	GROUP_AFFINITY thread_affinity;
	struct thread_routine_ctx *ctx;
	bool thread_exit = false;

	ctx = calloc(1, sizeof(*ctx));
	if (ctx == NULL) {
		RTE_LOG(DEBUG, EAL, "Insufficient memory for thread context allocations\n");
		ret = ENOMEM;
		goto cleanup;
	}
	ctx->routine_args = args;
	ctx->thread_func = thread_func;
	ctx->thread_init_failed = false;

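	/*
	 * Create the thread suspended so that affinity and priority can be
	 * applied before the user routine starts; if that setup fails, the
	 * wrapper observes thread_init_failed and exits without running it.
	 */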
	thread_handle = CreateThread(NULL, 0, thread_func_wrapper, ctx,
		CREATE_SUSPENDED, &tid);
	if (thread_handle == NULL) {
		ret = thread_log_last_error("CreateThread()");
		goto cleanup;
	}
	thread_id->opaque_id = tid;

	if (thread_attr != NULL) {
		if (CPU_COUNT(&thread_attr->cpuset) > 0) {
			ret = convert_cpuset_to_affinity(
							&thread_attr->cpuset,
							&thread_affinity
							);
			if (ret != 0) {
				RTE_LOG(DEBUG, EAL, "Unable to convert cpuset to thread affinity\n");
				thread_exit = true;
				goto resume_thread;
			}

			if (!SetThreadGroupAffinity(thread_handle,
						    &thread_affinity, NULL)) {
				ret = thread_log_last_error("SetThreadGroupAffinity()");
				thread_exit = true;
				goto resume_thread;
			}
		}
		ret = rte_thread_set_priority(*thread_id,
				thread_attr->priority);
		if (ret != 0) {
			RTE_LOG(DEBUG, EAL, "Unable to set thread priority\n");
			thread_exit = true;
			goto resume_thread;
		}
	}

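	/*
	 * Publish the initialization status (the release store pairs with the
	 * acquire load in thread_func_wrapper()) and let the thread run; on
	 * failure it returns immediately without calling the user routine.
	 */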
resume_thread:
	__atomic_store_n(&ctx->thread_init_failed, thread_exit, __ATOMIC_RELEASE);

	if (ResumeThread(thread_handle) == (DWORD)-1) {
		ret = thread_log_last_error("ResumeThread()");
		goto cleanup;
	}

	ctx = NULL;
cleanup:
	free(ctx);
	if (thread_handle != NULL) {
		CloseHandle(thread_handle);
		thread_handle = NULL;
	}

	return ret;
}

int
rte_thread_join(rte_thread_t thread_id, uint32_t *value_ptr)
{
	HANDLE thread_handle;
	DWORD result;
	DWORD exit_code = 0;
	BOOL err;
	int ret = 0;

	thread_handle = OpenThread(SYNCHRONIZE | THREAD_QUERY_INFORMATION,
				   FALSE, thread_id.opaque_id);
	if (thread_handle == NULL) {
		ret = thread_log_last_error("OpenThread()");
		goto cleanup;
	}

	result = WaitForSingleObject(thread_handle, INFINITE);
	if (result != WAIT_OBJECT_0) {
		ret = thread_log_last_error("WaitForSingleObject()");
		goto cleanup;
	}

	if (value_ptr != NULL) {
		err = GetExitCodeThread(thread_handle, &exit_code);
		if (err == 0) {
			ret = thread_log_last_error("GetExitCodeThread()");
			goto cleanup;
		}
		*value_ptr = exit_code;
	}

cleanup:
	if (thread_handle != NULL) {
		CloseHandle(thread_handle);
		thread_handle = NULL;
	}

	return ret;
}

int
rte_thread_detach(rte_thread_t thread_id)
{
	/* No resources that need to be released. */
	RTE_SET_USED(thread_id);

	return 0;
}

int
rte_thread_equal(rte_thread_t t1, rte_thread_t t2)
{
	return t1.opaque_id == t2.opaque_id;
}

rte_thread_t
rte_thread_self(void)
{
	rte_thread_t thread_id;

	thread_id.opaque_id = GetCurrentThreadId();

	return thread_id;
}

void
rte_thread_set_name(rte_thread_t thread_id, const char *thread_name)
{
	int ret = 0;
	wchar_t wname[RTE_THREAD_NAME_SIZE];
	mbstate_t state = {0};
	size_t rv;
	HANDLE thread_handle;

	thread_handle = OpenThread(THREAD_ALL_ACCESS, FALSE,
		thread_id.opaque_id);
	if (thread_handle == NULL) {
		ret = thread_log_last_error("OpenThread()");
		goto cleanup;
	}

	memset(wname, 0, sizeof(wname));
	rv = mbsrtowcs(wname, &thread_name, RTE_DIM(wname) - 1, &state);
	if (rv == (size_t)-1) {
		ret = EILSEQ;
		goto cleanup;
	}

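	/* SetThreadDescription() is not available when building with the MinGW GCC toolchain. */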
#ifndef RTE_TOOLCHAIN_GCC
	if (FAILED(SetThreadDescription(thread_handle, wname))) {
		ret = EINVAL;
		goto cleanup;
	}
#else
	ret = ENOTSUP;
	goto cleanup;
#endif

cleanup:
	if (thread_handle != NULL)
		CloseHandle(thread_handle);

	if (ret != 0)
		RTE_LOG(DEBUG, EAL, "Failed to set thread name\n");
}

int
rte_thread_get_priority(rte_thread_t thread_id,
	enum rte_thread_priority *priority)
{
	HANDLE thread_handle = NULL;
	DWORD pri_class;
	int os_pri;
	int ret;

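	/* The priority class is a property of the process, not of the individual thread. */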
	pri_class = GetPriorityClass(GetCurrentProcess());
	if (pri_class == 0) {
		ret = thread_log_last_error("GetPriorityClass()");
		goto cleanup;
	}

	thread_handle = OpenThread(THREAD_SET_INFORMATION |
		THREAD_QUERY_INFORMATION, FALSE, thread_id.opaque_id);
	if (thread_handle == NULL) {
		ret = thread_log_last_error("OpenThread()");
		goto cleanup;
	}

	os_pri = GetThreadPriority(thread_handle);
	if (os_pri == THREAD_PRIORITY_ERROR_RETURN) {
		ret = thread_log_last_error("GetThreadPriority()");
		goto cleanup;
	}

	ret = thread_map_os_priority_to_eal_value(os_pri, pri_class, priority);
	if (ret != 0)
		goto cleanup;

cleanup:
	if (thread_handle != NULL)
		CloseHandle(thread_handle);

	return ret;
}

int
rte_thread_set_priority(rte_thread_t thread_id,
			enum rte_thread_priority priority)
{
	HANDLE thread_handle;
	DWORD priority_class;
	int os_priority;
	int ret = 0;

	thread_handle = OpenThread(THREAD_SET_INFORMATION |
		THREAD_QUERY_INFORMATION, FALSE, thread_id.opaque_id);
	if (thread_handle == NULL) {
		ret = thread_log_last_error("OpenThread()");
		goto cleanup;
	}

	ret = thread_map_priority_to_os_value(priority, &os_priority,
		&priority_class);
	if (ret != 0)
		goto cleanup;

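	/*
	 * The priority class is process-wide, so requesting a real-time
	 * priority for one thread also moves the whole process into the
	 * corresponding priority class.
	 */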
	if (!SetPriorityClass(GetCurrentProcess(), priority_class)) {
		ret = thread_log_last_error("SetPriorityClass()");
		goto cleanup;
	}

	if (!SetThreadPriority(thread_handle, os_priority)) {
		ret = thread_log_last_error("SetThreadPriority()");
		goto cleanup;
	}

cleanup:
	if (thread_handle != NULL)
		CloseHandle(thread_handle);

	return ret;
}

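/*
 * The destructor callback is accepted for API compatibility but ignored:
 * Win32 TLS slots obtained with TlsAlloc() have no per-thread destructors.
 */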
int
rte_thread_key_create(rte_thread_key *key,
		__rte_unused void (*destructor)(void *))
{
	*key = malloc(sizeof(**key));
	if ((*key) == NULL) {
		RTE_LOG(DEBUG, EAL, "Cannot allocate TLS key.\n");
		rte_errno = ENOMEM;
		return -1;
	}
	(*key)->thread_index = TlsAlloc();
	if ((*key)->thread_index == TLS_OUT_OF_INDEXES) {
		RTE_LOG_WIN32_ERR("TlsAlloc()");
		free(*key);
		rte_errno = ENOEXEC;
		return -1;
	}
	return 0;
}

int
rte_thread_key_delete(rte_thread_key key)
{
	if (!key) {
		RTE_LOG(DEBUG, EAL, "Invalid TLS key.\n");
		rte_errno = EINVAL;
		return -1;
	}
	if (!TlsFree(key->thread_index)) {
		RTE_LOG_WIN32_ERR("TlsFree()");
		free(key);
		rte_errno = ENOEXEC;
		return -1;
	}
	free(key);
	return 0;
}

int
rte_thread_value_set(rte_thread_key key, const void *value)
{
	char *p;

	if (!key) {
		RTE_LOG(DEBUG, EAL, "Invalid TLS key.\n");
		rte_errno = EINVAL;
		return -1;
	}
	/* discard const qualifier */
	p = (char *) (uintptr_t) value;
	if (!TlsSetValue(key->thread_index, p)) {
		RTE_LOG_WIN32_ERR("TlsSetValue()");
		rte_errno = ENOEXEC;
		return -1;
	}
	return 0;
}

void *
rte_thread_value_get(rte_thread_key key)
{
	void *output;

	if (!key) {
		RTE_LOG(DEBUG, EAL, "Invalid TLS key.\n");
		rte_errno = EINVAL;
		return NULL;
	}
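	/* TlsGetValue() may legitimately return NULL, so failure is detected via GetLastError(). */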
	output = TlsGetValue(key->thread_index);
	if (GetLastError() != ERROR_SUCCESS) {
		RTE_LOG_WIN32_ERR("TlsGetValue()");
		rte_errno = ENOEXEC;
		return NULL;
	}
	return output;
}

int
rte_thread_set_affinity_by_id(rte_thread_t thread_id,
		const rte_cpuset_t *cpuset)
{
	int ret = 0;
	GROUP_AFFINITY thread_affinity;
	HANDLE thread_handle = NULL;

	if (cpuset == NULL) {
		ret = EINVAL;
		goto cleanup;
	}

	ret = convert_cpuset_to_affinity(cpuset, &thread_affinity);
	if (ret != 0) {
		RTE_LOG(DEBUG, EAL, "Unable to convert cpuset to thread affinity\n");
		goto cleanup;
	}

	thread_handle = OpenThread(THREAD_ALL_ACCESS, FALSE,
		thread_id.opaque_id);
	if (thread_handle == NULL) {
		ret = thread_log_last_error("OpenThread()");
		goto cleanup;
	}

	if (!SetThreadGroupAffinity(thread_handle, &thread_affinity, NULL)) {
		ret = thread_log_last_error("SetThreadGroupAffinity()");
		goto cleanup;
	}

cleanup:
	if (thread_handle != NULL) {
		CloseHandle(thread_handle);
		thread_handle = NULL;
	}

	return ret;
}

int
rte_thread_get_affinity_by_id(rte_thread_t thread_id,
		rte_cpuset_t *cpuset)
{
	HANDLE thread_handle = NULL;
	PGROUP_AFFINITY cpu_affinity;
	GROUP_AFFINITY thread_affinity;
	unsigned int cpu_idx;
	int ret = 0;

	if (cpuset == NULL) {
		ret = EINVAL;
		goto cleanup;
	}

	thread_handle = OpenThread(THREAD_ALL_ACCESS, FALSE,
		thread_id.opaque_id);
	if (thread_handle == NULL) {
		ret = thread_log_last_error("OpenThread()");
		goto cleanup;
	}

	/* Obtain the current thread affinity. */
	if (!GetThreadGroupAffinity(thread_handle, &thread_affinity)) {
		ret = thread_log_last_error("GetThreadGroupAffinity()");
		goto cleanup;
	}

	CPU_ZERO(cpuset);

	/* Convert the affinity to a DPDK CPU set. */
	for (cpu_idx = 0; cpu_idx < CPU_SETSIZE; cpu_idx++) {

		cpu_affinity = eal_get_cpu_affinity(cpu_idx);

		if ((cpu_affinity->Group == thread_affinity.Group) &&
		   ((cpu_affinity->Mask & thread_affinity.Mask) != 0)) {
			CPU_SET(cpu_idx, cpuset);
		}
	}

cleanup:
	if (thread_handle != NULL) {
		CloseHandle(thread_handle);
		thread_handle = NULL;
	}
	return ret;
}