/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2021 Mellanox Technologies, Ltd
 * Copyright (C) 2022 Microsoft Corporation
 */

#include <errno.h>
#include <wchar.h>

#include <rte_eal.h>
#include <rte_common.h>
#include <rte_errno.h>
#include <rte_stdatomic.h>
#include <rte_thread.h>

#include "eal_private.h"
#include "eal_windows.h"

struct eal_tls_key {
	DWORD thread_index;
};

struct thread_routine_ctx {
	rte_thread_func thread_func;
	RTE_ATOMIC(bool) thread_init_failed;
	void *routine_args;
};

/* Translates the most common error codes related to threads */
static int
thread_translate_win32_error(DWORD error)
{
	switch (error) {
	case ERROR_SUCCESS:
		return 0;

	case ERROR_INVALID_PARAMETER:
		return EINVAL;

	case ERROR_INVALID_HANDLE:
		return EFAULT;

	case ERROR_NOT_ENOUGH_MEMORY:
		/* FALLTHROUGH */
	case ERROR_NO_SYSTEM_RESOURCES:
		return ENOMEM;

	case ERROR_PRIVILEGE_NOT_HELD:
		/* FALLTHROUGH */
	case ERROR_ACCESS_DENIED:
		return EACCES;

	case ERROR_ALREADY_EXISTS:
		return EEXIST;

	case ERROR_POSSIBLE_DEADLOCK:
		return EDEADLK;

	case ERROR_INVALID_FUNCTION:
		/* FALLTHROUGH */
	case ERROR_CALL_NOT_IMPLEMENTED:
		return ENOSYS;
	}

	return EINVAL;
}

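/* Log the last Win32 error at DEBUG level together with a caller-supplied
 * message and return the corresponding errno value.
 */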
static int
thread_log_last_error(const char *message)
{
	DWORD error = GetLastError();
	EAL_LOG(DEBUG, "GetLastError()=%lu: %s", error, message);

	return thread_translate_win32_error(error);
}

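/* Map an EAL thread priority to the Win32 priority class and thread
 * priority value that implement it.
 */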
static int
thread_map_priority_to_os_value(enum rte_thread_priority eal_pri, int *os_pri,
	DWORD *pri_class)
{
	/* Clear the output parameters. */
	*os_pri = -1;
	*pri_class = -1;

	switch (eal_pri) {
	case RTE_THREAD_PRIORITY_NORMAL:
		*pri_class = NORMAL_PRIORITY_CLASS;
		*os_pri = THREAD_PRIORITY_NORMAL;
		break;
	case RTE_THREAD_PRIORITY_REALTIME_CRITICAL:
		*pri_class = REALTIME_PRIORITY_CLASS;
		*os_pri = THREAD_PRIORITY_TIME_CRITICAL;
		break;
	default:
		EAL_LOG(DEBUG, "The requested priority value is invalid.");
		return EINVAL;
	}

	return 0;
}

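/* Map a Win32 priority class and thread priority value back to the
 * closest EAL thread priority.
 */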
static int
thread_map_os_priority_to_eal_value(int os_pri, DWORD pri_class,
	enum rte_thread_priority *eal_pri)
{
	switch (pri_class) {
	case NORMAL_PRIORITY_CLASS:
		if (os_pri == THREAD_PRIORITY_NORMAL) {
			*eal_pri = RTE_THREAD_PRIORITY_NORMAL;
			return 0;
		}
		break;
	case HIGH_PRIORITY_CLASS:
		EAL_LOG(WARNING, "The OS priority class is high, not real-time.");
		/* FALLTHROUGH */
	case REALTIME_PRIORITY_CLASS:
		if (os_pri == THREAD_PRIORITY_TIME_CRITICAL) {
			*eal_pri = RTE_THREAD_PRIORITY_REALTIME_CRITICAL;
			return 0;
		}
		break;
	default:
		EAL_LOG(DEBUG, "The OS priority value does not map to an EAL-defined priority.");
		return EINVAL;
	}

	return 0;
}

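/* Build a single-group Win32 affinity from a DPDK cpuset.
 * Fails with ENOTSUP if the set spans multiple processor groups
 * and with EINVAL if the resulting mask is empty.
 */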
static int
convert_cpuset_to_affinity(const rte_cpuset_t *cpuset,
		PGROUP_AFFINITY affinity)
{
	int ret = 0;
	PGROUP_AFFINITY cpu_affinity = NULL;
	unsigned int cpu_idx;

	memset(affinity, 0, sizeof(GROUP_AFFINITY));
	affinity->Group = (USHORT)-1;

	/* Check that all cpus of the set belong to the same processor group and
	 * accumulate thread affinity to be applied.
	 */
	for (cpu_idx = 0; cpu_idx < CPU_SETSIZE; cpu_idx++) {
		if (!CPU_ISSET(cpu_idx, cpuset))
			continue;

		cpu_affinity = eal_get_cpu_affinity(cpu_idx);

		if (affinity->Group == (USHORT)-1) {
			affinity->Group = cpu_affinity->Group;
		} else if (affinity->Group != cpu_affinity->Group) {
			EAL_LOG(DEBUG, "All processors must belong to the same processor group");
			ret = ENOTSUP;
			goto cleanup;
		}

		affinity->Mask |= cpu_affinity->Mask;
	}

	if (affinity->Mask == 0) {
		ret = EINVAL;
		goto cleanup;
	}

cleanup:
	return ret;
}

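/* Entry point passed to CreateThread(). Takes ownership of the heap-allocated
 * context: copies it, frees it, then either runs the user routine or returns
 * immediately if the creating thread flagged an initialization failure.
 */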
static DWORD
thread_func_wrapper(void *arg)
{
	struct thread_routine_ctx ctx = *(struct thread_routine_ctx *)arg;
	const bool thread_exit = rte_atomic_load_explicit(
		&ctx.thread_init_failed, rte_memory_order_acquire);

	free(arg);

	if (thread_exit)
		return 0;

	return (DWORD)ctx.thread_func(ctx.routine_args);
}

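/* The thread is created suspended so that affinity and priority can be
 * applied before the user routine runs; any setup failure is published to
 * the wrapper through ctx->thread_init_failed, and the thread is resumed
 * only so that it can exit cleanly.
 *
 * Illustrative caller-side sketch (worker() and arg are placeholders, not
 * part of this file):
 *
 *	rte_thread_t t;
 *	if (rte_thread_create(&t, NULL, worker, arg) == 0)
 *		rte_thread_join(t, NULL);
 */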
int
rte_thread_create(rte_thread_t *thread_id,
		const rte_thread_attr_t *thread_attr,
		rte_thread_func thread_func, void *args)
{
	int ret = 0;
	DWORD tid;
	HANDLE thread_handle = NULL;
	GROUP_AFFINITY thread_affinity;
	struct thread_routine_ctx *ctx;
	bool thread_exit = false;

	ctx = calloc(1, sizeof(*ctx));
	if (ctx == NULL) {
		EAL_LOG(DEBUG, "Insufficient memory for thread context allocations");
		ret = ENOMEM;
		goto cleanup;
	}
	ctx->routine_args = args;
	ctx->thread_func = thread_func;
	ctx->thread_init_failed = false;

	thread_handle = CreateThread(NULL, 0, thread_func_wrapper, ctx,
		CREATE_SUSPENDED, &tid);
	if (thread_handle == NULL) {
		ret = thread_log_last_error("CreateThread()");
		goto cleanup;
	}
	thread_id->opaque_id = tid;

	if (thread_attr != NULL) {
		if (CPU_COUNT(&thread_attr->cpuset) > 0) {
			ret = convert_cpuset_to_affinity(
					&thread_attr->cpuset,
					&thread_affinity
					);
			if (ret != 0) {
				EAL_LOG(DEBUG, "Unable to convert cpuset to thread affinity");
				thread_exit = true;
				goto resume_thread;
			}

			if (!SetThreadGroupAffinity(thread_handle,
					&thread_affinity, NULL)) {
				ret = thread_log_last_error("SetThreadGroupAffinity()");
				thread_exit = true;
				goto resume_thread;
			}
		}
		ret = rte_thread_set_priority(*thread_id,
				thread_attr->priority);
		if (ret != 0) {
			EAL_LOG(DEBUG, "Unable to set thread priority");
			thread_exit = true;
			goto resume_thread;
		}
	}

resume_thread:
	rte_atomic_store_explicit(&ctx->thread_init_failed, thread_exit,
		rte_memory_order_release);

	if (ResumeThread(thread_handle) == (DWORD)-1) {
		ret = thread_log_last_error("ResumeThread()");
		goto cleanup;
	}

	ctx = NULL;
cleanup:
	free(ctx);
	if (thread_handle != NULL) {
		CloseHandle(thread_handle);
		thread_handle = NULL;
	}

	return ret;
}

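/* Wait for the thread to terminate and optionally report its exit code. */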
int
rte_thread_join(rte_thread_t thread_id, uint32_t *value_ptr)
{
	HANDLE thread_handle;
	DWORD result;
	DWORD exit_code = 0;
	BOOL err;
	int ret = 0;

	thread_handle = OpenThread(SYNCHRONIZE | THREAD_QUERY_INFORMATION,
		FALSE, thread_id.opaque_id);
	if (thread_handle == NULL) {
		ret = thread_log_last_error("OpenThread()");
		goto cleanup;
	}

	result = WaitForSingleObject(thread_handle, INFINITE);
	if (result != WAIT_OBJECT_0) {
		ret = thread_log_last_error("WaitForSingleObject()");
		goto cleanup;
	}

	if (value_ptr != NULL) {
		err = GetExitCodeThread(thread_handle, &exit_code);
		if (err == 0) {
			ret = thread_log_last_error("GetExitCodeThread()");
			goto cleanup;
		}
		*value_ptr = exit_code;
	}

cleanup:
	if (thread_handle != NULL) {
		CloseHandle(thread_handle);
		thread_handle = NULL;
	}

	return ret;
}

int
rte_thread_detach(rte_thread_t thread_id)
{
	/* No resources that need to be released. */
	RTE_SET_USED(thread_id);

	return 0;
}

int
rte_thread_equal(rte_thread_t t1, rte_thread_t t2)
{
	return t1.opaque_id == t2.opaque_id;
}

rte_thread_t
rte_thread_self(void)
{
	rte_thread_t thread_id;

	thread_id.opaque_id = GetCurrentThreadId();

	return thread_id;
}

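/* Set the OS thread description from a multibyte name. Conversion failures
 * and toolchains without SetThreadDescription() (RTE_TOOLCHAIN_GCC builds)
 * only log a DEBUG message, since the call returns no status to the caller.
 */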
void
rte_thread_set_name(rte_thread_t thread_id, const char *thread_name)
{
	int ret = 0;
	wchar_t wname[RTE_THREAD_NAME_SIZE];
	mbstate_t state = {0};
	size_t rv;
	HANDLE thread_handle;

	thread_handle = OpenThread(THREAD_ALL_ACCESS, FALSE,
		thread_id.opaque_id);
	if (thread_handle == NULL) {
		ret = thread_log_last_error("OpenThread()");
		goto cleanup;
	}

	memset(wname, 0, sizeof(wname));
	rv = mbsrtowcs(wname, &thread_name, RTE_DIM(wname) - 1, &state);
	if (rv == (size_t)-1) {
		ret = EILSEQ;
		goto cleanup;
	}

#ifndef RTE_TOOLCHAIN_GCC
	if (FAILED(SetThreadDescription(thread_handle, wname))) {
		ret = EINVAL;
		goto cleanup;
	}
#else
	ret = ENOTSUP;
	goto cleanup;
#endif

cleanup:
	if (thread_handle != NULL)
		CloseHandle(thread_handle);

	if (ret != 0)
		EAL_LOG(DEBUG, "Failed to set thread name");
}

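/* Derive the EAL priority from the current process priority class and the
 * target thread's priority value.
 */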
int
rte_thread_get_priority(rte_thread_t thread_id,
	enum rte_thread_priority *priority)
{
	HANDLE thread_handle = NULL;
	DWORD pri_class;
	int os_pri;
	int ret;

	pri_class = GetPriorityClass(GetCurrentProcess());
	if (pri_class == 0) {
		ret = thread_log_last_error("GetPriorityClass()");
		goto cleanup;
	}

	thread_handle = OpenThread(THREAD_SET_INFORMATION |
		THREAD_QUERY_INFORMATION, FALSE, thread_id.opaque_id);
	if (thread_handle == NULL) {
		ret = thread_log_last_error("OpenThread()");
		goto cleanup;
	}

	os_pri = GetThreadPriority(thread_handle);
	if (os_pri == THREAD_PRIORITY_ERROR_RETURN) {
		ret = thread_log_last_error("GetThreadPriority()");
		goto cleanup;
	}

	ret = thread_map_os_priority_to_eal_value(os_pri, pri_class, priority);
	if (ret != 0)
		goto cleanup;

cleanup:
	if (thread_handle != NULL)
		CloseHandle(thread_handle);

	return ret;
}

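/* Apply an EAL priority: the priority class is set on the whole process,
 * the priority value on the target thread.
 */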
int
rte_thread_set_priority(rte_thread_t thread_id,
	enum rte_thread_priority priority)
{
	HANDLE thread_handle;
	DWORD priority_class;
	int os_priority;
	int ret = 0;

	thread_handle = OpenThread(THREAD_SET_INFORMATION |
		THREAD_QUERY_INFORMATION, FALSE, thread_id.opaque_id);
	if (thread_handle == NULL) {
		ret = thread_log_last_error("OpenThread()");
		goto cleanup;
	}

	ret = thread_map_priority_to_os_value(priority, &os_priority,
		&priority_class);
	if (ret != 0)
		goto cleanup;

	if (!SetPriorityClass(GetCurrentProcess(), priority_class)) {
		ret = thread_log_last_error("SetPriorityClass()");
		goto cleanup;
	}

	if (!SetThreadPriority(thread_handle, os_priority)) {
		ret = thread_log_last_error("SetThreadPriority()");
		goto cleanup;
	}

cleanup:
	if (thread_handle != NULL)
		CloseHandle(thread_handle);

	return ret;
}

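/* TLS key management built on TlsAlloc()/TlsFree(); the destructor argument
 * is accepted for API compatibility but is not invoked on Windows.
 *
 * Illustrative usage sketch (some_ptr is a placeholder, not part of this
 * file):
 *
 *	rte_thread_key key;
 *	if (rte_thread_key_create(&key, NULL) == 0) {
 *		rte_thread_value_set(key, some_ptr);
 *		void *v = rte_thread_value_get(key);
 *		rte_thread_key_delete(key);
 *	}
 */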
int
rte_thread_key_create(rte_thread_key *key,
		__rte_unused void (*destructor)(void *))
{
	*key = malloc(sizeof(**key));
	if ((*key) == NULL) {
		EAL_LOG(DEBUG, "Cannot allocate TLS key.");
		rte_errno = ENOMEM;
		return -1;
	}
	(*key)->thread_index = TlsAlloc();
	if ((*key)->thread_index == TLS_OUT_OF_INDEXES) {
		RTE_LOG_WIN32_ERR("TlsAlloc()");
		free(*key);
		rte_errno = ENOEXEC;
		return -1;
	}
	return 0;
}

int
rte_thread_key_delete(rte_thread_key key)
{
	if (!key) {
		EAL_LOG(DEBUG, "Invalid TLS key.");
		rte_errno = EINVAL;
		return -1;
	}
	if (!TlsFree(key->thread_index)) {
		RTE_LOG_WIN32_ERR("TlsFree()");
		free(key);
		rte_errno = ENOEXEC;
		return -1;
	}
	free(key);
	return 0;
}

int
rte_thread_value_set(rte_thread_key key, const void *value)
{
	char *p;

	if (!key) {
		EAL_LOG(DEBUG, "Invalid TLS key.");
		rte_errno = EINVAL;
		return -1;
	}
	/* discard const qualifier */
	p = (char *) (uintptr_t) value;
	if (!TlsSetValue(key->thread_index, p)) {
		RTE_LOG_WIN32_ERR("TlsSetValue()");
		rte_errno = ENOEXEC;
		return -1;
	}
	return 0;
}

void *
rte_thread_value_get(rte_thread_key key)
{
	void *output;

	if (!key) {
		EAL_LOG(DEBUG, "Invalid TLS key.");
		rte_errno = EINVAL;
		return NULL;
	}
	output = TlsGetValue(key->thread_index);
	if (GetLastError() != ERROR_SUCCESS) {
		RTE_LOG_WIN32_ERR("TlsGetValue()");
		rte_errno = ENOEXEC;
		return NULL;
	}
	return output;
}

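/* Pin a thread to the CPUs in the given cpuset; the set must resolve to a
 * single Windows processor group (see convert_cpuset_to_affinity()).
 */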
int
rte_thread_set_affinity_by_id(rte_thread_t thread_id,
		const rte_cpuset_t *cpuset)
{
	int ret = 0;
	GROUP_AFFINITY thread_affinity;
	HANDLE thread_handle = NULL;

	if (cpuset == NULL) {
		ret = EINVAL;
		goto cleanup;
	}

	ret = convert_cpuset_to_affinity(cpuset, &thread_affinity);
	if (ret != 0) {
		EAL_LOG(DEBUG, "Unable to convert cpuset to thread affinity");
		goto cleanup;
	}

	thread_handle = OpenThread(THREAD_ALL_ACCESS, FALSE,
		thread_id.opaque_id);
	if (thread_handle == NULL) {
		ret = thread_log_last_error("OpenThread()");
		goto cleanup;
	}

	if (!SetThreadGroupAffinity(thread_handle, &thread_affinity, NULL)) {
		ret = thread_log_last_error("SetThreadGroupAffinity()");
		goto cleanup;
	}

cleanup:
	if (thread_handle != NULL) {
		CloseHandle(thread_handle);
		thread_handle = NULL;
	}

	return ret;
}

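/* Read back the thread's group affinity and expand it into a DPDK cpuset. */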
int
rte_thread_get_affinity_by_id(rte_thread_t thread_id,
		rte_cpuset_t *cpuset)
{
	HANDLE thread_handle = NULL;
	PGROUP_AFFINITY cpu_affinity;
	GROUP_AFFINITY thread_affinity;
	unsigned int cpu_idx;
	int ret = 0;

	if (cpuset == NULL) {
		ret = EINVAL;
		goto cleanup;
	}

	thread_handle = OpenThread(THREAD_ALL_ACCESS, FALSE,
		thread_id.opaque_id);
	if (thread_handle == NULL) {
		ret = thread_log_last_error("OpenThread()");
		goto cleanup;
	}

	/* obtain previous thread affinity */
	if (!GetThreadGroupAffinity(thread_handle, &thread_affinity)) {
		ret = thread_log_last_error("GetThreadGroupAffinity()");
		goto cleanup;
	}

	CPU_ZERO(cpuset);

	/* Convert affinity to DPDK cpu set */
	for (cpu_idx = 0; cpu_idx < CPU_SETSIZE; cpu_idx++) {

		cpu_affinity = eal_get_cpu_affinity(cpu_idx);

		if ((cpu_affinity->Group == thread_affinity.Group) &&
				((cpu_affinity->Mask & thread_affinity.Mask) != 0)) {
			CPU_SET(cpu_idx, cpuset);
		}
	}

cleanup:
	if (thread_handle != NULL) {
		CloseHandle(thread_handle);
		thread_handle = NULL;
	}
	return ret;
}