xref: /llvm-project/openmp/runtime/src/z_Linux_util.cpp (revision 9e7c0b1385baa1acffb62e0589ff100dd972cc0d)
1 /*
2  * z_Linux_util.cpp -- platform specific routines.
3  */
4 
5 //===----------------------------------------------------------------------===//
6 //
7 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
8 // See https://llvm.org/LICENSE.txt for license information.
9 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "kmp.h"
14 #include "kmp_affinity.h"
15 #include "kmp_i18n.h"
16 #include "kmp_io.h"
17 #include "kmp_itt.h"
18 #include "kmp_lock.h"
19 #include "kmp_stats.h"
20 #include "kmp_str.h"
21 #include "kmp_wait_release.h"
22 #include "kmp_wrapper_getpid.h"
23 
24 #if !KMP_OS_DRAGONFLY && !KMP_OS_FREEBSD && !KMP_OS_NETBSD && !KMP_OS_OPENBSD
25 #include <alloca.h>
26 #endif
27 #include <math.h> // HUGE_VAL.
28 #if KMP_OS_LINUX
29 #include <semaphore.h>
30 #endif // KMP_OS_LINUX
31 #include <sys/resource.h>
32 #if !KMP_OS_AIX
33 #include <sys/syscall.h>
34 #endif
35 #include <sys/time.h>
36 #include <sys/times.h>
37 #include <unistd.h>
38 
39 #if KMP_OS_LINUX
40 #include <sys/sysinfo.h>
41 #if KMP_USE_FUTEX
42 // We should really include <futex.h>, but that causes compatibility problems on
43 // different Linux* OS distributions that either require that you include (or
44 // break when you try to include) <pci/types.h>. Since all we need is the two
45 // macros below (which are part of the kernel ABI, so can't change) we just
46 // define the constants here and don't include <futex.h>
47 #ifndef FUTEX_WAIT
48 #define FUTEX_WAIT 0
49 #endif
50 #ifndef FUTEX_WAKE
51 #define FUTEX_WAKE 1
52 #endif
53 #endif
54 #elif KMP_OS_DARWIN
55 #include <mach/mach.h>
56 #include <sys/sysctl.h>
57 #elif KMP_OS_DRAGONFLY || KMP_OS_FREEBSD
58 #include <sys/types.h>
59 #include <sys/sysctl.h>
60 #include <sys/user.h>
61 #include <pthread_np.h>
62 #if KMP_OS_DRAGONFLY
63 #include <kvm.h>
64 #endif
65 #elif KMP_OS_NETBSD || KMP_OS_OPENBSD
66 #include <sys/types.h>
67 #include <sys/sysctl.h>
68 #elif KMP_OS_SOLARIS
69 #include <sys/loadavg.h>
70 #endif
71 
72 #include <ctype.h>
73 #include <dirent.h>
74 #include <fcntl.h>
75 
76 struct kmp_sys_timer {
77   struct timespec start;
78 };
79 
80 #ifndef TIMEVAL_TO_TIMESPEC
81 // Convert timeval to timespec.
82 #define TIMEVAL_TO_TIMESPEC(tv, ts)                                            \
83   do {                                                                         \
84     (ts)->tv_sec = (tv)->tv_sec;                                               \
85     (ts)->tv_nsec = (tv)->tv_usec * 1000;                                      \
86   } while (0)
87 #endif
88 
89 // Convert timespec to nanoseconds.
90 #define TS2NS(timespec)                                                        \
91   (((timespec).tv_sec * (long int)1e9) + (timespec).tv_nsec)
92 
93 static struct kmp_sys_timer __kmp_sys_timer_data;
94 
95 #if KMP_HANDLE_SIGNALS
96 typedef void (*sig_func_t)(int);
97 STATIC_EFI2_WORKAROUND struct sigaction __kmp_sighldrs[NSIG];
98 static sigset_t __kmp_sigset;
99 #endif
100 
101 static int __kmp_init_runtime = FALSE;
102 
103 static int __kmp_fork_count = 0;
104 
105 static pthread_condattr_t __kmp_suspend_cond_attr;
106 static pthread_mutexattr_t __kmp_suspend_mutex_attr;
107 
108 static kmp_cond_align_t __kmp_wait_cv;
109 static kmp_mutex_align_t __kmp_wait_mx;
110 
111 kmp_uint64 __kmp_ticks_per_msec = 1000000;
112 kmp_uint64 __kmp_ticks_per_usec = 1000;
113 
114 #ifdef DEBUG_SUSPEND
115 static void __kmp_print_cond(char *buffer, kmp_cond_align_t *cond) {
116   KMP_SNPRINTF(buffer, 128, "(cond (lock (%ld, %d)), (descr (%p)))",
117                cond->c_cond.__c_lock.__status, cond->c_cond.__c_lock.__spinlock,
118                cond->c_cond.__c_waiting);
119 }
120 #endif
121 
122 #if ((KMP_OS_LINUX || KMP_OS_FREEBSD) && KMP_AFFINITY_SUPPORTED)
123 
124 /* Affinity support */
125 
126 void __kmp_affinity_bind_thread(int which) {
127   KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
128               "Illegal set affinity operation when not capable");
129 
130   kmp_affin_mask_t *mask;
131   KMP_CPU_ALLOC_ON_STACK(mask);
132   KMP_CPU_ZERO(mask);
133   KMP_CPU_SET(which, mask);
134   __kmp_set_system_affinity(mask, TRUE);
135   KMP_CPU_FREE_FROM_STACK(mask);
136 }
137 
138 /* Determine if we can access affinity functionality on this version of
139  * Linux* OS by checking __NR_sched_{get,set}affinity system calls, and set
140  * __kmp_affin_mask_size to the appropriate value (0 means not capable). */
141 void __kmp_affinity_determine_capable(const char *env_var) {
142   // Check and see if the OS supports thread affinity.
143 
144 #if KMP_OS_LINUX
145 #define KMP_CPU_SET_SIZE_LIMIT (1024 * 1024)
146 #define KMP_CPU_SET_TRY_SIZE CACHE_LINE
147 #elif KMP_OS_FREEBSD
148 #define KMP_CPU_SET_SIZE_LIMIT (sizeof(cpuset_t))
149 #endif
150 
151   int verbose = __kmp_affinity.flags.verbose;
152   int warnings = __kmp_affinity.flags.warnings;
153   enum affinity_type type = __kmp_affinity.type;
154 
155 #if KMP_OS_LINUX
156   long gCode;
157   unsigned char *buf;
158   buf = (unsigned char *)KMP_INTERNAL_MALLOC(KMP_CPU_SET_SIZE_LIMIT);
159 
160   // If the syscall returns a suggestion for the size,
161   // then we don't have to search for an appropriate size.
162   gCode = syscall(__NR_sched_getaffinity, 0, KMP_CPU_SET_TRY_SIZE, buf);
163   KA_TRACE(30, ("__kmp_affinity_determine_capable: "
164                 "initial getaffinity call returned %ld errno = %d\n",
165                 gCode, errno));
166 
167   if (gCode < 0 && errno != EINVAL) {
168     // System call not supported
169     if (verbose ||
170         (warnings && (type != affinity_none) && (type != affinity_default) &&
171          (type != affinity_disabled))) {
172       int error = errno;
173       kmp_msg_t err_code = KMP_ERR(error);
174       __kmp_msg(kmp_ms_warning, KMP_MSG(GetAffSysCallNotSupported, env_var),
175                 err_code, __kmp_msg_null);
176       if (__kmp_generate_warnings == kmp_warnings_off) {
177         __kmp_str_free(&err_code.str);
178       }
179     }
180     KMP_AFFINITY_DISABLE();
181     KMP_INTERNAL_FREE(buf);
182     return;
183   } else if (gCode > 0) {
184     // The optimal situation: the OS returns the size of the buffer it expects.
185     KMP_AFFINITY_ENABLE(gCode);
186     KA_TRACE(10, ("__kmp_affinity_determine_capable: "
187                   "affinity supported (mask size %d)\n",
188                   (int)__kmp_affin_mask_size));
189     KMP_INTERNAL_FREE(buf);
190     return;
191   }
192 
193   // Call the getaffinity system call repeatedly with increasing set sizes
194   // until we succeed, or reach an upper bound on the search.
195   KA_TRACE(30, ("__kmp_affinity_determine_capable: "
196                 "searching for proper set size\n"));
197   int size;
198   for (size = 1; size <= KMP_CPU_SET_SIZE_LIMIT; size *= 2) {
199     gCode = syscall(__NR_sched_getaffinity, 0, size, buf);
200     KA_TRACE(30, ("__kmp_affinity_determine_capable: "
201                   "getaffinity for mask size %ld returned %ld errno = %d\n",
202                   size, gCode, errno));
203 
204     if (gCode < 0) {
205       if (errno == ENOSYS) {
206         // We shouldn't get here
207         KA_TRACE(30, ("__kmp_affinity_determine_capable: "
208                       "inconsistent OS call behavior: errno == ENOSYS for mask "
209                       "size %d\n",
210                       size));
211         if (verbose ||
212             (warnings && (type != affinity_none) &&
213              (type != affinity_default) && (type != affinity_disabled))) {
214           int error = errno;
215           kmp_msg_t err_code = KMP_ERR(error);
216           __kmp_msg(kmp_ms_warning, KMP_MSG(GetAffSysCallNotSupported, env_var),
217                     err_code, __kmp_msg_null);
218           if (__kmp_generate_warnings == kmp_warnings_off) {
219             __kmp_str_free(&err_code.str);
220           }
221         }
222         KMP_AFFINITY_DISABLE();
223         KMP_INTERNAL_FREE(buf);
224         return;
225       }
226       continue;
227     }
228 
229     KMP_AFFINITY_ENABLE(gCode);
230     KA_TRACE(10, ("__kmp_affinity_determine_capable: "
231                   "affinity supported (mask size %d)\n",
232                   (int)__kmp_affin_mask_size));
233     KMP_INTERNAL_FREE(buf);
234     return;
235   }
236 #elif KMP_OS_FREEBSD
237   long gCode;
238   unsigned char *buf;
239   buf = (unsigned char *)KMP_INTERNAL_MALLOC(KMP_CPU_SET_SIZE_LIMIT);
240   gCode = pthread_getaffinity_np(pthread_self(), KMP_CPU_SET_SIZE_LIMIT,
241                                  reinterpret_cast<cpuset_t *>(buf));
242   KA_TRACE(30, ("__kmp_affinity_determine_capable: "
243                 "initial getaffinity call returned %d errno = %d\n",
244                 gCode, errno));
245   if (gCode == 0) {
246     KMP_AFFINITY_ENABLE(KMP_CPU_SET_SIZE_LIMIT);
247     KA_TRACE(10, ("__kmp_affinity_determine_capable: "
248                   "affinity supported (mask size %d)\n",
249                   (int)__kmp_affin_mask_size));
250     KMP_INTERNAL_FREE(buf);
251     return;
252   }
253 #endif
254   KMP_INTERNAL_FREE(buf);
255 
256   // Affinity is not supported
257   KMP_AFFINITY_DISABLE();
258   KA_TRACE(10, ("__kmp_affinity_determine_capable: "
259                 "cannot determine mask size - affinity not supported\n"));
260   if (verbose || (warnings && (type != affinity_none) &&
261                   (type != affinity_default) && (type != affinity_disabled))) {
262     KMP_WARNING(AffCantGetMaskSize, env_var);
263   }
264 }
265 
266 #endif // KMP_OS_LINUX && KMP_AFFINITY_SUPPORTED
267 
268 #if KMP_USE_FUTEX
269 
270 int __kmp_futex_determine_capable() {
271   int loc = 0;
272   long rc = syscall(__NR_futex, &loc, FUTEX_WAKE, 1, NULL, NULL, 0);
273   int retval = (rc == 0) || (errno != ENOSYS);
274 
275   KA_TRACE(10,
276            ("__kmp_futex_determine_capable: rc = %d errno = %d\n", rc, errno));
277   KA_TRACE(10, ("__kmp_futex_determine_capable: futex syscall%s supported\n",
278                 retval ? "" : " not"));
279 
280   return retval;
281 }
282 
283 #endif // KMP_USE_FUTEX
284 
285 #if (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_WASM) && (!KMP_ASM_INTRINS)
286 /* Only 32-bit "add-exchange" instruction on IA-32 architecture causes us to
287    use compare_and_store for these routines */
288 
289 kmp_int8 __kmp_test_then_or8(volatile kmp_int8 *p, kmp_int8 d) {
290   kmp_int8 old_value, new_value;
291 
292   old_value = TCR_1(*p);
293   new_value = old_value | d;
294 
295   while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
296     KMP_CPU_PAUSE();
297     old_value = TCR_1(*p);
298     new_value = old_value | d;
299   }
300   return old_value;
301 }
302 
303 kmp_int8 __kmp_test_then_and8(volatile kmp_int8 *p, kmp_int8 d) {
304   kmp_int8 old_value, new_value;
305 
306   old_value = TCR_1(*p);
307   new_value = old_value & d;
308 
309   while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
310     KMP_CPU_PAUSE();
311     old_value = TCR_1(*p);
312     new_value = old_value & d;
313   }
314   return old_value;
315 }
316 
317 kmp_uint32 __kmp_test_then_or32(volatile kmp_uint32 *p, kmp_uint32 d) {
318   kmp_uint32 old_value, new_value;
319 
320   old_value = TCR_4(*p);
321   new_value = old_value | d;
322 
323   while (!KMP_COMPARE_AND_STORE_REL32(p, old_value, new_value)) {
324     KMP_CPU_PAUSE();
325     old_value = TCR_4(*p);
326     new_value = old_value | d;
327   }
328   return old_value;
329 }
330 
331 kmp_uint32 __kmp_test_then_and32(volatile kmp_uint32 *p, kmp_uint32 d) {
332   kmp_uint32 old_value, new_value;
333 
334   old_value = TCR_4(*p);
335   new_value = old_value & d;
336 
337   while (!KMP_COMPARE_AND_STORE_REL32(p, old_value, new_value)) {
338     KMP_CPU_PAUSE();
339     old_value = TCR_4(*p);
340     new_value = old_value & d;
341   }
342   return old_value;
343 }
344 
345 #if KMP_ARCH_X86 || KMP_ARCH_WASM
346 kmp_int8 __kmp_test_then_add8(volatile kmp_int8 *p, kmp_int8 d) {
347   kmp_int8 old_value, new_value;
348 
349   old_value = TCR_1(*p);
350   new_value = old_value + d;
351 
352   while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
353     KMP_CPU_PAUSE();
354     old_value = TCR_1(*p);
355     new_value = old_value + d;
356   }
357   return old_value;
358 }
359 
360 kmp_int64 __kmp_test_then_add64(volatile kmp_int64 *p, kmp_int64 d) {
361   kmp_int64 old_value, new_value;
362 
363   old_value = TCR_8(*p);
364   new_value = old_value + d;
365 
366   while (!KMP_COMPARE_AND_STORE_REL64(p, old_value, new_value)) {
367     KMP_CPU_PAUSE();
368     old_value = TCR_8(*p);
369     new_value = old_value + d;
370   }
371   return old_value;
372 }
373 #endif /* KMP_ARCH_X86 */
374 
375 kmp_uint64 __kmp_test_then_or64(volatile kmp_uint64 *p, kmp_uint64 d) {
376   kmp_uint64 old_value, new_value;
377 
378   old_value = TCR_8(*p);
379   new_value = old_value | d;
380   while (!KMP_COMPARE_AND_STORE_REL64(p, old_value, new_value)) {
381     KMP_CPU_PAUSE();
382     old_value = TCR_8(*p);
383     new_value = old_value | d;
384   }
385   return old_value;
386 }
387 
388 kmp_uint64 __kmp_test_then_and64(volatile kmp_uint64 *p, kmp_uint64 d) {
389   kmp_uint64 old_value, new_value;
390 
391   old_value = TCR_8(*p);
392   new_value = old_value & d;
393   while (!KMP_COMPARE_AND_STORE_REL64(p, old_value, new_value)) {
394     KMP_CPU_PAUSE();
395     old_value = TCR_8(*p);
396     new_value = old_value & d;
397   }
398   return old_value;
399 }
400 
401 #endif /* (KMP_ARCH_X86 || KMP_ARCH_X86_64) && (! KMP_ASM_INTRINS) */
402 
403 void __kmp_terminate_thread(int gtid) {
404   int status;
405   kmp_info_t *th = __kmp_threads[gtid];
406 
407   if (!th)
408     return;
409 
410 #ifdef KMP_CANCEL_THREADS
411   KA_TRACE(10, ("__kmp_terminate_thread: kill (%d)\n", gtid));
412   status = pthread_cancel(th->th.th_info.ds.ds_thread);
413   if (status != 0 && status != ESRCH) {
414     __kmp_fatal(KMP_MSG(CantTerminateWorkerThread), KMP_ERR(status),
415                 __kmp_msg_null);
416   }
417 #endif
418   KMP_YIELD(TRUE);
419 } //
420 
421 /* Set thread stack info according to values returned by pthread_getattr_np().
422    If values are unreasonable, assume call failed and use incremental stack
423    refinement method instead. Returns TRUE if the stack parameters could be
424    determined exactly, FALSE if incremental refinement is necessary. */
425 static kmp_int32 __kmp_set_stack_info(int gtid, kmp_info_t *th) {
426   int stack_data;
427 #if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||     \
428     KMP_OS_HURD || KMP_OS_SOLARIS || KMP_OS_AIX
429   pthread_attr_t attr;
430   int status;
431   size_t size = 0;
432   void *addr = 0;
433 
434   /* Always do incremental stack refinement for ubermaster threads since the
435      initial thread stack range can be reduced by sibling thread creation so
436      pthread_attr_getstack may cause thread gtid aliasing */
437   if (!KMP_UBER_GTID(gtid)) {
438 
439     /* Fetch the real thread attributes */
440     status = pthread_attr_init(&attr);
441     KMP_CHECK_SYSFAIL("pthread_attr_init", status);
442 #if KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD
443     status = pthread_attr_get_np(pthread_self(), &attr);
444     KMP_CHECK_SYSFAIL("pthread_attr_get_np", status);
445 #else
446     status = pthread_getattr_np(pthread_self(), &attr);
447     KMP_CHECK_SYSFAIL("pthread_getattr_np", status);
448 #endif
449     status = pthread_attr_getstack(&attr, &addr, &size);
450     KMP_CHECK_SYSFAIL("pthread_attr_getstack", status);
451     KA_TRACE(60,
452              ("__kmp_set_stack_info: T#%d pthread_attr_getstack returned size:"
453               " %lu, low addr: %p\n",
454               gtid, size, addr));
455     status = pthread_attr_destroy(&attr);
456     KMP_CHECK_SYSFAIL("pthread_attr_destroy", status);
457   }
458 
459   if (size != 0 && addr != 0) { // was stack parameter determination successful?
460     /* Store the correct base and size */
461     TCW_PTR(th->th.th_info.ds.ds_stackbase, (((char *)addr) + size));
462     TCW_PTR(th->th.th_info.ds.ds_stacksize, size);
463     TCW_4(th->th.th_info.ds.ds_stackgrow, FALSE);
464     return TRUE;
465   }
466 #endif /* KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD  \
467           || KMP_OS_HURD || KMP_OS_SOLARIS */
468   /* Use incremental refinement starting from initial conservative estimate */
469   TCW_PTR(th->th.th_info.ds.ds_stacksize, 0);
470   TCW_PTR(th->th.th_info.ds.ds_stackbase, &stack_data);
471   TCW_4(th->th.th_info.ds.ds_stackgrow, TRUE);
472   return FALSE;
473 }
474 
475 static void *__kmp_launch_worker(void *thr) {
476   int status, old_type, old_state;
477 #ifdef KMP_BLOCK_SIGNALS
478   sigset_t new_set, old_set;
479 #endif /* KMP_BLOCK_SIGNALS */
480   void *exit_val;
481 #if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||     \
482     KMP_OS_OPENBSD || KMP_OS_HURD || KMP_OS_SOLARIS
483   void *volatile padding = 0;
484 #endif
485   int gtid;
486 
487   gtid = ((kmp_info_t *)thr)->th.th_info.ds.ds_gtid;
488   __kmp_gtid_set_specific(gtid);
489 #ifdef KMP_TDATA_GTID
490   __kmp_gtid = gtid;
491 #endif
492 #if KMP_STATS_ENABLED
493   // set thread local index to point to thread-specific stats
494   __kmp_stats_thread_ptr = ((kmp_info_t *)thr)->th.th_stats;
495   __kmp_stats_thread_ptr->startLife();
496   KMP_SET_THREAD_STATE(IDLE);
497   KMP_INIT_PARTITIONED_TIMERS(OMP_idle);
498 #endif
499 
500 #if USE_ITT_BUILD
501   __kmp_itt_thread_name(gtid);
502 #endif /* USE_ITT_BUILD */
503 
504 #if KMP_AFFINITY_SUPPORTED
505   __kmp_affinity_bind_init_mask(gtid);
506 #endif
507 
508 #ifdef KMP_CANCEL_THREADS
509   status = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old_type);
510   KMP_CHECK_SYSFAIL("pthread_setcanceltype", status);
511   // josh todo: isn't PTHREAD_CANCEL_ENABLE default for newly-created threads?
512   status = pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old_state);
513   KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
514 #endif
515 
516 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
517   // Set FP control regs to be a copy of the parallel initialization thread's.
518   __kmp_clear_x87_fpu_status_word();
519   __kmp_load_x87_fpu_control_word(&__kmp_init_x87_fpu_control_word);
520   __kmp_load_mxcsr(&__kmp_init_mxcsr);
521 #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
522 
523 #ifdef KMP_BLOCK_SIGNALS
524   status = sigfillset(&new_set);
525   KMP_CHECK_SYSFAIL_ERRNO("sigfillset", status);
526   status = pthread_sigmask(SIG_BLOCK, &new_set, &old_set);
527   KMP_CHECK_SYSFAIL("pthread_sigmask", status);
528 #endif /* KMP_BLOCK_SIGNALS */
529 
530 #if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||     \
531     KMP_OS_OPENBSD || KMP_OS_HURD || KMP_OS_SOLARIS
532   if (__kmp_stkoffset > 0 && gtid > 0) {
533     padding = KMP_ALLOCA(gtid * __kmp_stkoffset);
534     (void)padding;
535   }
536 #endif
537 
538   KMP_MB();
539   __kmp_set_stack_info(gtid, (kmp_info_t *)thr);
540 
541   __kmp_check_stack_overlap((kmp_info_t *)thr);
542 
543   exit_val = __kmp_launch_thread((kmp_info_t *)thr);
544 
545 #ifdef KMP_BLOCK_SIGNALS
546   status = pthread_sigmask(SIG_SETMASK, &old_set, NULL);
547   KMP_CHECK_SYSFAIL("pthread_sigmask", status);
548 #endif /* KMP_BLOCK_SIGNALS */
549 
550   return exit_val;
551 }
552 
553 #if KMP_USE_MONITOR
554 /* The monitor thread controls all of the threads in the complex */
555 
556 static void *__kmp_launch_monitor(void *thr) {
557   int status, old_type, old_state;
558 #ifdef KMP_BLOCK_SIGNALS
559   sigset_t new_set;
560 #endif /* KMP_BLOCK_SIGNALS */
561   struct timespec interval;
562 
563   KMP_MB(); /* Flush all pending memory write invalidates.  */
564 
565   KA_TRACE(10, ("__kmp_launch_monitor: #1 launched\n"));
566 
567   /* register us as the monitor thread */
568   __kmp_gtid_set_specific(KMP_GTID_MONITOR);
569 #ifdef KMP_TDATA_GTID
570   __kmp_gtid = KMP_GTID_MONITOR;
571 #endif
572 
573   KMP_MB();
574 
575 #if USE_ITT_BUILD
576   // Instruct Intel(R) Threading Tools to ignore monitor thread.
577   __kmp_itt_thread_ignore();
578 #endif /* USE_ITT_BUILD */
579 
580   __kmp_set_stack_info(((kmp_info_t *)thr)->th.th_info.ds.ds_gtid,
581                        (kmp_info_t *)thr);
582 
583   __kmp_check_stack_overlap((kmp_info_t *)thr);
584 
585 #ifdef KMP_CANCEL_THREADS
586   status = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old_type);
587   KMP_CHECK_SYSFAIL("pthread_setcanceltype", status);
588   // josh todo: isn't PTHREAD_CANCEL_ENABLE default for newly-created threads?
589   status = pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old_state);
590   KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
591 #endif
592 
593 #if KMP_REAL_TIME_FIX
594   // This is a potential fix which allows application with real-time scheduling
595   // policy work. However, decision about the fix is not made yet, so it is
596   // disabled by default.
597   { // Are program started with real-time scheduling policy?
598     int sched = sched_getscheduler(0);
599     if (sched == SCHED_FIFO || sched == SCHED_RR) {
600       // Yes, we are a part of real-time application. Try to increase the
601       // priority of the monitor.
602       struct sched_param param;
603       int max_priority = sched_get_priority_max(sched);
604       int rc;
605       KMP_WARNING(RealTimeSchedNotSupported);
606       sched_getparam(0, &param);
607       if (param.sched_priority < max_priority) {
608         param.sched_priority += 1;
609         rc = sched_setscheduler(0, sched, &param);
610         if (rc != 0) {
611           int error = errno;
612           kmp_msg_t err_code = KMP_ERR(error);
613           __kmp_msg(kmp_ms_warning, KMP_MSG(CantChangeMonitorPriority),
614                     err_code, KMP_MSG(MonitorWillStarve), __kmp_msg_null);
615           if (__kmp_generate_warnings == kmp_warnings_off) {
616             __kmp_str_free(&err_code.str);
617           }
618         }
619       } else {
620         // We cannot abort here, because number of CPUs may be enough for all
621         // the threads, including the monitor thread, so application could
622         // potentially work...
623         __kmp_msg(kmp_ms_warning, KMP_MSG(RunningAtMaxPriority),
624                   KMP_MSG(MonitorWillStarve), KMP_HNT(RunningAtMaxPriority),
625                   __kmp_msg_null);
626       }
627     }
628     // AC: free thread that waits for monitor started
629     TCW_4(__kmp_global.g.g_time.dt.t_value, 0);
630   }
631 #endif // KMP_REAL_TIME_FIX
632 
633   KMP_MB(); /* Flush all pending memory write invalidates.  */
634 
635   if (__kmp_monitor_wakeups == 1) {
636     interval.tv_sec = 1;
637     interval.tv_nsec = 0;
638   } else {
639     interval.tv_sec = 0;
640     interval.tv_nsec = (KMP_NSEC_PER_SEC / __kmp_monitor_wakeups);
641   }
642 
643   KA_TRACE(10, ("__kmp_launch_monitor: #2 monitor\n"));
644 
645   while (!TCR_4(__kmp_global.g.g_done)) {
646     struct timespec now;
647     struct timeval tval;
648 
649     /*  This thread monitors the state of the system */
650 
651     KA_TRACE(15, ("__kmp_launch_monitor: update\n"));
652 
653     status = gettimeofday(&tval, NULL);
654     KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
655     TIMEVAL_TO_TIMESPEC(&tval, &now);
656 
657     now.tv_sec += interval.tv_sec;
658     now.tv_nsec += interval.tv_nsec;
659 
660     if (now.tv_nsec >= KMP_NSEC_PER_SEC) {
661       now.tv_sec += 1;
662       now.tv_nsec -= KMP_NSEC_PER_SEC;
663     }
664 
665     status = pthread_mutex_lock(&__kmp_wait_mx.m_mutex);
666     KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
667     // AC: the monitor should not fall asleep if g_done has been set
668     if (!TCR_4(__kmp_global.g.g_done)) { // check once more under mutex
669       status = pthread_cond_timedwait(&__kmp_wait_cv.c_cond,
670                                       &__kmp_wait_mx.m_mutex, &now);
671       if (status != 0) {
672         if (status != ETIMEDOUT && status != EINTR) {
673           KMP_SYSFAIL("pthread_cond_timedwait", status);
674         }
675       }
676     }
677     status = pthread_mutex_unlock(&__kmp_wait_mx.m_mutex);
678     KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
679 
680     TCW_4(__kmp_global.g.g_time.dt.t_value,
681           TCR_4(__kmp_global.g.g_time.dt.t_value) + 1);
682 
683     KMP_MB(); /* Flush all pending memory write invalidates.  */
684   }
685 
686   KA_TRACE(10, ("__kmp_launch_monitor: #3 cleanup\n"));
687 
688 #ifdef KMP_BLOCK_SIGNALS
689   status = sigfillset(&new_set);
690   KMP_CHECK_SYSFAIL_ERRNO("sigfillset", status);
691   status = pthread_sigmask(SIG_UNBLOCK, &new_set, NULL);
692   KMP_CHECK_SYSFAIL("pthread_sigmask", status);
693 #endif /* KMP_BLOCK_SIGNALS */
694 
695   KA_TRACE(10, ("__kmp_launch_monitor: #4 finished\n"));
696 
697   if (__kmp_global.g.g_abort != 0) {
698     /* now we need to terminate the worker threads  */
699     /* the value of t_abort is the signal we caught */
700 
701     int gtid;
702 
703     KA_TRACE(10, ("__kmp_launch_monitor: #5 terminate sig=%d\n",
704                   __kmp_global.g.g_abort));
705 
706     /* terminate the OpenMP worker threads */
707     /* TODO this is not valid for sibling threads!!
708      * the uber master might not be 0 anymore.. */
709     for (gtid = 1; gtid < __kmp_threads_capacity; ++gtid)
710       __kmp_terminate_thread(gtid);
711 
712     __kmp_cleanup();
713 
714     KA_TRACE(10, ("__kmp_launch_monitor: #6 raise sig=%d\n",
715                   __kmp_global.g.g_abort));
716 
717     if (__kmp_global.g.g_abort > 0)
718       raise(__kmp_global.g.g_abort);
719   }
720 
721   KA_TRACE(10, ("__kmp_launch_monitor: #7 exit\n"));
722 
723   return thr;
724 }
725 #endif // KMP_USE_MONITOR
726 
727 void __kmp_create_worker(int gtid, kmp_info_t *th, size_t stack_size) {
728   pthread_t handle;
729   pthread_attr_t thread_attr;
730   int status;
731 
732   th->th.th_info.ds.ds_gtid = gtid;
733 
734 #if KMP_STATS_ENABLED
735   // sets up worker thread stats
736   __kmp_acquire_tas_lock(&__kmp_stats_lock, gtid);
737 
738   // th->th.th_stats is used to transfer thread-specific stats-pointer to
739   // __kmp_launch_worker. So when thread is created (goes into
740   // __kmp_launch_worker) it will set its thread local pointer to
741   // th->th.th_stats
742   if (!KMP_UBER_GTID(gtid)) {
743     th->th.th_stats = __kmp_stats_list->push_back(gtid);
744   } else {
745     // For root threads, __kmp_stats_thread_ptr is set in __kmp_register_root(),
746     // so set the th->th.th_stats field to it.
747     th->th.th_stats = __kmp_stats_thread_ptr;
748   }
749   __kmp_release_tas_lock(&__kmp_stats_lock, gtid);
750 
751 #endif // KMP_STATS_ENABLED
752 
753   if (KMP_UBER_GTID(gtid)) {
754     KA_TRACE(10, ("__kmp_create_worker: uber thread (%d)\n", gtid));
755     th->th.th_info.ds.ds_thread = pthread_self();
756     __kmp_set_stack_info(gtid, th);
757     __kmp_check_stack_overlap(th);
758     return;
759   }
760 
761   KA_TRACE(10, ("__kmp_create_worker: try to create thread (%d)\n", gtid));
762 
763   KMP_MB(); /* Flush all pending memory write invalidates.  */
764 
765 #ifdef KMP_THREAD_ATTR
766   status = pthread_attr_init(&thread_attr);
767   if (status != 0) {
768     __kmp_fatal(KMP_MSG(CantInitThreadAttrs), KMP_ERR(status), __kmp_msg_null);
769   }
770   status = pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_JOINABLE);
771   if (status != 0) {
772     __kmp_fatal(KMP_MSG(CantSetWorkerState), KMP_ERR(status), __kmp_msg_null);
773   }
774 
775   /* Set stack size for this thread now.
776      The multiple of 2 is there because on some machines, requesting an unusual
777      stacksize causes the thread to have an offset before the dummy alloca()
778      takes place to create the offset.  Since we want the user to have a
779      sufficient stacksize AND support a stack offset, we alloca() twice the
780      offset so that the upcoming alloca() does not eliminate any premade offset,
781      and also gives the user the stack space they requested for all threads */
782   stack_size += gtid * __kmp_stkoffset * 2;
783 
784   KA_TRACE(10, ("__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
785                 "__kmp_stksize = %lu bytes, final stacksize = %lu bytes\n",
786                 gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size));
787 
788 #ifdef _POSIX_THREAD_ATTR_STACKSIZE
789   status = pthread_attr_setstacksize(&thread_attr, stack_size);
790 #ifdef KMP_BACKUP_STKSIZE
791   if (status != 0) {
792     if (!__kmp_env_stksize) {
793       stack_size = KMP_BACKUP_STKSIZE + gtid * __kmp_stkoffset;
794       __kmp_stksize = KMP_BACKUP_STKSIZE;
795       KA_TRACE(10, ("__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
796                     "__kmp_stksize = %lu bytes, (backup) final stacksize = %lu "
797                     "bytes\n",
798                     gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size));
799       status = pthread_attr_setstacksize(&thread_attr, stack_size);
800     }
801   }
802 #endif /* KMP_BACKUP_STKSIZE */
803   if (status != 0) {
804     __kmp_fatal(KMP_MSG(CantSetWorkerStackSize, stack_size), KMP_ERR(status),
805                 KMP_HNT(ChangeWorkerStackSize), __kmp_msg_null);
806   }
807 #endif /* _POSIX_THREAD_ATTR_STACKSIZE */
808 
809 #endif /* KMP_THREAD_ATTR */
810 
811   status =
812       pthread_create(&handle, &thread_attr, __kmp_launch_worker, (void *)th);
813   if (status != 0 || !handle) { // ??? Why do we check handle??
814 #ifdef _POSIX_THREAD_ATTR_STACKSIZE
815     if (status == EINVAL) {
816       __kmp_fatal(KMP_MSG(CantSetWorkerStackSize, stack_size), KMP_ERR(status),
817                   KMP_HNT(IncreaseWorkerStackSize), __kmp_msg_null);
818     }
819     if (status == ENOMEM) {
820       __kmp_fatal(KMP_MSG(CantSetWorkerStackSize, stack_size), KMP_ERR(status),
821                   KMP_HNT(DecreaseWorkerStackSize), __kmp_msg_null);
822     }
823 #endif /* _POSIX_THREAD_ATTR_STACKSIZE */
824     if (status == EAGAIN) {
825       __kmp_fatal(KMP_MSG(NoResourcesForWorkerThread), KMP_ERR(status),
826                   KMP_HNT(Decrease_NUM_THREADS), __kmp_msg_null);
827     }
828     KMP_SYSFAIL("pthread_create", status);
829   }
830 
831   th->th.th_info.ds.ds_thread = handle;
832 
833 #ifdef KMP_THREAD_ATTR
834   status = pthread_attr_destroy(&thread_attr);
835   if (status) {
836     kmp_msg_t err_code = KMP_ERR(status);
837     __kmp_msg(kmp_ms_warning, KMP_MSG(CantDestroyThreadAttrs), err_code,
838               __kmp_msg_null);
839     if (__kmp_generate_warnings == kmp_warnings_off) {
840       __kmp_str_free(&err_code.str);
841     }
842   }
843 #endif /* KMP_THREAD_ATTR */
844 
845   KMP_MB(); /* Flush all pending memory write invalidates.  */
846 
847   KA_TRACE(10, ("__kmp_create_worker: done creating thread (%d)\n", gtid));
848 
849 } // __kmp_create_worker
850 
851 #if KMP_USE_MONITOR
852 void __kmp_create_monitor(kmp_info_t *th) {
853   pthread_t handle;
854   pthread_attr_t thread_attr;
855   size_t size;
856   int status;
857   int auto_adj_size = FALSE;
858 
859   if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME) {
860     // We don't need monitor thread in case of MAX_BLOCKTIME
861     KA_TRACE(10, ("__kmp_create_monitor: skipping monitor thread because of "
862                   "MAX blocktime\n"));
863     th->th.th_info.ds.ds_tid = 0; // this makes reap_monitor no-op
864     th->th.th_info.ds.ds_gtid = 0;
865     return;
866   }
867   KA_TRACE(10, ("__kmp_create_monitor: try to create monitor\n"));
868 
869   KMP_MB(); /* Flush all pending memory write invalidates.  */
870 
871   th->th.th_info.ds.ds_tid = KMP_GTID_MONITOR;
872   th->th.th_info.ds.ds_gtid = KMP_GTID_MONITOR;
873 #if KMP_REAL_TIME_FIX
874   TCW_4(__kmp_global.g.g_time.dt.t_value,
875         -1); // Will use it for synchronization a bit later.
876 #else
877   TCW_4(__kmp_global.g.g_time.dt.t_value, 0);
878 #endif // KMP_REAL_TIME_FIX
879 
880 #ifdef KMP_THREAD_ATTR
881   if (__kmp_monitor_stksize == 0) {
882     __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE;
883     auto_adj_size = TRUE;
884   }
885   status = pthread_attr_init(&thread_attr);
886   if (status != 0) {
887     __kmp_fatal(KMP_MSG(CantInitThreadAttrs), KMP_ERR(status), __kmp_msg_null);
888   }
889   status = pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_JOINABLE);
890   if (status != 0) {
891     __kmp_fatal(KMP_MSG(CantSetMonitorState), KMP_ERR(status), __kmp_msg_null);
892   }
893 
894 #ifdef _POSIX_THREAD_ATTR_STACKSIZE
895   status = pthread_attr_getstacksize(&thread_attr, &size);
896   KMP_CHECK_SYSFAIL("pthread_attr_getstacksize", status);
897 #else
898   size = __kmp_sys_min_stksize;
899 #endif /* _POSIX_THREAD_ATTR_STACKSIZE */
900 #endif /* KMP_THREAD_ATTR */
901 
902   if (__kmp_monitor_stksize == 0) {
903     __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE;
904   }
905   if (__kmp_monitor_stksize < __kmp_sys_min_stksize) {
906     __kmp_monitor_stksize = __kmp_sys_min_stksize;
907   }
908 
909   KA_TRACE(10, ("__kmp_create_monitor: default stacksize = %lu bytes,"
910                 "requested stacksize = %lu bytes\n",
911                 size, __kmp_monitor_stksize));
912 
913 retry:
914 
915 /* Set stack size for this thread now. */
916 #ifdef _POSIX_THREAD_ATTR_STACKSIZE
917   KA_TRACE(10, ("__kmp_create_monitor: setting stacksize = %lu bytes,",
918                 __kmp_monitor_stksize));
919   status = pthread_attr_setstacksize(&thread_attr, __kmp_monitor_stksize);
920   if (status != 0) {
921     if (auto_adj_size) {
922       __kmp_monitor_stksize *= 2;
923       goto retry;
924     }
925     kmp_msg_t err_code = KMP_ERR(status);
926     __kmp_msg(kmp_ms_warning, // should this be fatal?  BB
927               KMP_MSG(CantSetMonitorStackSize, (long int)__kmp_monitor_stksize),
928               err_code, KMP_HNT(ChangeMonitorStackSize), __kmp_msg_null);
929     if (__kmp_generate_warnings == kmp_warnings_off) {
930       __kmp_str_free(&err_code.str);
931     }
932   }
933 #endif /* _POSIX_THREAD_ATTR_STACKSIZE */
934 
935   status =
936       pthread_create(&handle, &thread_attr, __kmp_launch_monitor, (void *)th);
937 
938   if (status != 0) {
939 #ifdef _POSIX_THREAD_ATTR_STACKSIZE
940     if (status == EINVAL) {
941       if (auto_adj_size && (__kmp_monitor_stksize < (size_t)0x40000000)) {
942         __kmp_monitor_stksize *= 2;
943         goto retry;
944       }
945       __kmp_fatal(KMP_MSG(CantSetMonitorStackSize, __kmp_monitor_stksize),
946                   KMP_ERR(status), KMP_HNT(IncreaseMonitorStackSize),
947                   __kmp_msg_null);
948     }
949     if (status == ENOMEM) {
950       __kmp_fatal(KMP_MSG(CantSetMonitorStackSize, __kmp_monitor_stksize),
951                   KMP_ERR(status), KMP_HNT(DecreaseMonitorStackSize),
952                   __kmp_msg_null);
953     }
954 #endif /* _POSIX_THREAD_ATTR_STACKSIZE */
955     if (status == EAGAIN) {
956       __kmp_fatal(KMP_MSG(NoResourcesForMonitorThread), KMP_ERR(status),
957                   KMP_HNT(DecreaseNumberOfThreadsInUse), __kmp_msg_null);
958     }
959     KMP_SYSFAIL("pthread_create", status);
960   }
961 
962   th->th.th_info.ds.ds_thread = handle;
963 
964 #if KMP_REAL_TIME_FIX
965   // Wait for the monitor thread is really started and set its *priority*.
966   KMP_DEBUG_ASSERT(sizeof(kmp_uint32) ==
967                    sizeof(__kmp_global.g.g_time.dt.t_value));
968   __kmp_wait_4((kmp_uint32 volatile *)&__kmp_global.g.g_time.dt.t_value, -1,
969                &__kmp_neq_4, NULL);
970 #endif // KMP_REAL_TIME_FIX
971 
972 #ifdef KMP_THREAD_ATTR
973   status = pthread_attr_destroy(&thread_attr);
974   if (status != 0) {
975     kmp_msg_t err_code = KMP_ERR(status);
976     __kmp_msg(kmp_ms_warning, KMP_MSG(CantDestroyThreadAttrs), err_code,
977               __kmp_msg_null);
978     if (__kmp_generate_warnings == kmp_warnings_off) {
979       __kmp_str_free(&err_code.str);
980     }
981   }
982 #endif
983 
984   KMP_MB(); /* Flush all pending memory write invalidates.  */
985 
986   KA_TRACE(10, ("__kmp_create_monitor: monitor created %#.8lx\n",
987                 th->th.th_info.ds.ds_thread));
988 
989 } // __kmp_create_monitor
990 #endif // KMP_USE_MONITOR
991 
992 void __kmp_exit_thread(int exit_status) {
993 #if KMP_OS_WASI
994 // TODO: the wasm32-wasi-threads target does not yet support pthread_exit.
995 #else
996   pthread_exit((void *)(intptr_t)exit_status);
997 #endif
998 } // __kmp_exit_thread
999 
1000 #if KMP_USE_MONITOR
1001 void __kmp_resume_monitor();
1002 
1003 extern "C" void __kmp_reap_monitor(kmp_info_t *th) {
1004   int status;
1005   void *exit_val;
1006 
1007   KA_TRACE(10, ("__kmp_reap_monitor: try to reap monitor thread with handle"
1008                 " %#.8lx\n",
1009                 th->th.th_info.ds.ds_thread));
1010 
1011   // If monitor has been created, its tid and gtid should be KMP_GTID_MONITOR.
1012   // If both tid and gtid are 0, it means the monitor did not ever start.
1013   // If both tid and gtid are KMP_GTID_DNE, the monitor has been shut down.
1014   KMP_DEBUG_ASSERT(th->th.th_info.ds.ds_tid == th->th.th_info.ds.ds_gtid);
1015   if (th->th.th_info.ds.ds_gtid != KMP_GTID_MONITOR) {
1016     KA_TRACE(10, ("__kmp_reap_monitor: monitor did not start, returning\n"));
1017     return;
1018   }
1019 
1020   KMP_MB(); /* Flush all pending memory write invalidates.  */
1021 
1022   /* First, check to see whether the monitor thread exists to wake it up. This
1023      is to avoid performance problem when the monitor sleeps during
1024      blocktime-size interval */
1025 
1026   status = pthread_kill(th->th.th_info.ds.ds_thread, 0);
1027   if (status != ESRCH) {
1028     __kmp_resume_monitor(); // Wake up the monitor thread
1029   }
1030   KA_TRACE(10, ("__kmp_reap_monitor: try to join with monitor\n"));
1031   status = pthread_join(th->th.th_info.ds.ds_thread, &exit_val);
1032   if (exit_val != th) {
1033     __kmp_fatal(KMP_MSG(ReapMonitorError), KMP_ERR(status), __kmp_msg_null);
1034   }
1035 
1036   th->th.th_info.ds.ds_tid = KMP_GTID_DNE;
1037   th->th.th_info.ds.ds_gtid = KMP_GTID_DNE;
1038 
1039   KA_TRACE(10, ("__kmp_reap_monitor: done reaping monitor thread with handle"
1040                 " %#.8lx\n",
1041                 th->th.th_info.ds.ds_thread));
1042 
1043   KMP_MB(); /* Flush all pending memory write invalidates.  */
1044 }
1045 #else
1046 // Empty symbol to export (see exports_so.txt) when
1047 // monitor thread feature is disabled
1048 extern "C" void __kmp_reap_monitor(kmp_info_t *th) { (void)th; }
1049 #endif // KMP_USE_MONITOR
1050 
1051 void __kmp_reap_worker(kmp_info_t *th) {
1052   int status;
1053   void *exit_val;
1054 
1055   KMP_MB(); /* Flush all pending memory write invalidates.  */
1056 
1057   KA_TRACE(
1058       10, ("__kmp_reap_worker: try to reap T#%d\n", th->th.th_info.ds.ds_gtid));
1059 
1060   status = pthread_join(th->th.th_info.ds.ds_thread, &exit_val);
1061 #ifdef KMP_DEBUG
1062   /* Don't expose these to the user until we understand when they trigger */
1063   if (status != 0) {
1064     __kmp_fatal(KMP_MSG(ReapWorkerError), KMP_ERR(status), __kmp_msg_null);
1065   }
1066   if (exit_val != th) {
1067     KA_TRACE(10, ("__kmp_reap_worker: worker T#%d did not reap properly, "
1068                   "exit_val = %p\n",
1069                   th->th.th_info.ds.ds_gtid, exit_val));
1070   }
1071 #else
1072   (void)status; // unused variable
1073 #endif /* KMP_DEBUG */
1074 
1075   KA_TRACE(10, ("__kmp_reap_worker: done reaping T#%d\n",
1076                 th->th.th_info.ds.ds_gtid));
1077 
1078   KMP_MB(); /* Flush all pending memory write invalidates.  */
1079 }
1080 
1081 #if KMP_HANDLE_SIGNALS
1082 
1083 static void __kmp_null_handler(int signo) {
1084   //  Do nothing, for doing SIG_IGN-type actions.
1085 } // __kmp_null_handler
1086 
1087 static void __kmp_team_handler(int signo) {
1088   if (__kmp_global.g.g_abort == 0) {
1089 /* Stage 1 signal handler, let's shut down all of the threads */
1090 #ifdef KMP_DEBUG
1091     __kmp_debug_printf("__kmp_team_handler: caught signal = %d\n", signo);
1092 #endif
1093     switch (signo) {
1094     case SIGHUP:
1095     case SIGINT:
1096     case SIGQUIT:
1097     case SIGILL:
1098     case SIGABRT:
1099     case SIGFPE:
1100     case SIGBUS:
1101     case SIGSEGV:
1102 #ifdef SIGSYS
1103     case SIGSYS:
1104 #endif
1105     case SIGTERM:
1106       if (__kmp_debug_buf) {
1107         __kmp_dump_debug_buffer();
1108       }
1109       __kmp_unregister_library(); // cleanup shared memory
1110       KMP_MB(); // Flush all pending memory write invalidates.
1111       TCW_4(__kmp_global.g.g_abort, signo);
1112       KMP_MB(); // Flush all pending memory write invalidates.
1113       TCW_4(__kmp_global.g.g_done, TRUE);
1114       KMP_MB(); // Flush all pending memory write invalidates.
1115       break;
1116     default:
1117 #ifdef KMP_DEBUG
1118       __kmp_debug_printf("__kmp_team_handler: unknown signal type");
1119 #endif
1120       break;
1121     }
1122   }
1123 } // __kmp_team_handler
1124 
1125 static void __kmp_sigaction(int signum, const struct sigaction *act,
1126                             struct sigaction *oldact) {
1127   int rc = sigaction(signum, act, oldact);
1128   KMP_CHECK_SYSFAIL_ERRNO("sigaction", rc);
1129 }
1130 
1131 static void __kmp_install_one_handler(int sig, sig_func_t handler_func,
1132                                       int parallel_init) {
1133   KMP_MB(); // Flush all pending memory write invalidates.
1134   KB_TRACE(60,
1135            ("__kmp_install_one_handler( %d, ..., %d )\n", sig, parallel_init));
1136   if (parallel_init) {
1137     struct sigaction new_action;
1138     struct sigaction old_action;
1139     new_action.sa_handler = handler_func;
1140     new_action.sa_flags = 0;
1141     sigfillset(&new_action.sa_mask);
1142     __kmp_sigaction(sig, &new_action, &old_action);
1143     if (old_action.sa_handler == __kmp_sighldrs[sig].sa_handler) {
1144       sigaddset(&__kmp_sigset, sig);
1145     } else {
1146       // Restore/keep user's handler if one previously installed.
1147       __kmp_sigaction(sig, &old_action, NULL);
1148     }
1149   } else {
1150     // Save initial/system signal handlers to see if user handlers installed.
1151     __kmp_sigaction(sig, NULL, &__kmp_sighldrs[sig]);
1152   }
1153   KMP_MB(); // Flush all pending memory write invalidates.
1154 } // __kmp_install_one_handler
1155 
1156 static void __kmp_remove_one_handler(int sig) {
1157   KB_TRACE(60, ("__kmp_remove_one_handler( %d )\n", sig));
1158   if (sigismember(&__kmp_sigset, sig)) {
1159     struct sigaction old;
1160     KMP_MB(); // Flush all pending memory write invalidates.
1161     __kmp_sigaction(sig, &__kmp_sighldrs[sig], &old);
1162     if ((old.sa_handler != __kmp_team_handler) &&
1163         (old.sa_handler != __kmp_null_handler)) {
1164       // Restore the users signal handler.
1165       KB_TRACE(10, ("__kmp_remove_one_handler: oops, not our handler, "
1166                     "restoring: sig=%d\n",
1167                     sig));
1168       __kmp_sigaction(sig, &old, NULL);
1169     }
1170     sigdelset(&__kmp_sigset, sig);
1171     KMP_MB(); // Flush all pending memory write invalidates.
1172   }
1173 } // __kmp_remove_one_handler
1174 
1175 void __kmp_install_signals(int parallel_init) {
1176   KB_TRACE(10, ("__kmp_install_signals( %d )\n", parallel_init));
1177   if (__kmp_handle_signals || !parallel_init) {
1178     // If ! parallel_init, we do not install handlers, just save original
1179     // handlers. Let us do it even __handle_signals is 0.
1180     sigemptyset(&__kmp_sigset);
1181     __kmp_install_one_handler(SIGHUP, __kmp_team_handler, parallel_init);
1182     __kmp_install_one_handler(SIGINT, __kmp_team_handler, parallel_init);
1183     __kmp_install_one_handler(SIGQUIT, __kmp_team_handler, parallel_init);
1184     __kmp_install_one_handler(SIGILL, __kmp_team_handler, parallel_init);
1185     __kmp_install_one_handler(SIGABRT, __kmp_team_handler, parallel_init);
1186     __kmp_install_one_handler(SIGFPE, __kmp_team_handler, parallel_init);
1187     __kmp_install_one_handler(SIGBUS, __kmp_team_handler, parallel_init);
1188     __kmp_install_one_handler(SIGSEGV, __kmp_team_handler, parallel_init);
1189 #ifdef SIGSYS
1190     __kmp_install_one_handler(SIGSYS, __kmp_team_handler, parallel_init);
1191 #endif // SIGSYS
1192     __kmp_install_one_handler(SIGTERM, __kmp_team_handler, parallel_init);
1193 #ifdef SIGPIPE
1194     __kmp_install_one_handler(SIGPIPE, __kmp_team_handler, parallel_init);
1195 #endif // SIGPIPE
1196   }
1197 } // __kmp_install_signals
1198 
1199 void __kmp_remove_signals(void) {
1200   int sig;
1201   KB_TRACE(10, ("__kmp_remove_signals()\n"));
1202   for (sig = 1; sig < NSIG; ++sig) {
1203     __kmp_remove_one_handler(sig);
1204   }
1205 } // __kmp_remove_signals
1206 
1207 #endif // KMP_HANDLE_SIGNALS
1208 
1209 void __kmp_enable(int new_state) {
1210 #ifdef KMP_CANCEL_THREADS
1211   int status, old_state;
1212   status = pthread_setcancelstate(new_state, &old_state);
1213   KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
1214   KMP_DEBUG_ASSERT(old_state == PTHREAD_CANCEL_DISABLE);
1215 #endif
1216 }
1217 
1218 void __kmp_disable(int *old_state) {
1219 #ifdef KMP_CANCEL_THREADS
1220   int status;
1221   status = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, old_state);
1222   KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
1223 #endif
1224 }
1225 
1226 static void __kmp_atfork_prepare(void) {
1227   __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);
1228   __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
1229 }
1230 
1231 static void __kmp_atfork_parent(void) {
1232   __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
1233   __kmp_release_bootstrap_lock(&__kmp_initz_lock);
1234 }
1235 
1236 /* Reset the library so execution in the child starts "all over again" with
1237    clean data structures in initial states.  Don't worry about freeing memory
1238    allocated by parent, just abandon it to be safe. */
1239 static void __kmp_atfork_child(void) {
1240   __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
1241   __kmp_release_bootstrap_lock(&__kmp_initz_lock);
1242   /* TODO make sure this is done right for nested/sibling */
1243   // ATT:  Memory leaks are here? TODO: Check it and fix.
1244   /* KMP_ASSERT( 0 ); */
1245 
1246   ++__kmp_fork_count;
1247 
1248 #if KMP_AFFINITY_SUPPORTED
1249 #if KMP_OS_LINUX || KMP_OS_FREEBSD
1250   // reset the affinity in the child to the initial thread
1251   // affinity in the parent
1252   kmp_set_thread_affinity_mask_initial();
1253 #endif
1254   // Set default not to bind threads tightly in the child (we're expecting
1255   // over-subscription after the fork and this can improve things for
1256   // scripting languages that use OpenMP inside process-parallel code).
1257   if (__kmp_nested_proc_bind.bind_types != NULL) {
1258     __kmp_nested_proc_bind.bind_types[0] = proc_bind_false;
1259   }
1260   for (kmp_affinity_t *affinity : __kmp_affinities)
1261     *affinity = KMP_AFFINITY_INIT(affinity->env_var);
1262   __kmp_affin_fullMask = nullptr;
1263   __kmp_affin_origMask = nullptr;
1264   __kmp_topology = nullptr;
1265 #endif // KMP_AFFINITY_SUPPORTED
1266 
1267 #if KMP_USE_MONITOR
1268   __kmp_init_monitor = 0;
1269 #endif
1270   __kmp_init_parallel = FALSE;
1271   __kmp_init_middle = FALSE;
1272   __kmp_init_serial = FALSE;
1273   TCW_4(__kmp_init_gtid, FALSE);
1274   __kmp_init_common = FALSE;
1275 
1276   TCW_4(__kmp_init_user_locks, FALSE);
1277 #if !KMP_USE_DYNAMIC_LOCK
1278   __kmp_user_lock_table.used = 1;
1279   __kmp_user_lock_table.allocated = 0;
1280   __kmp_user_lock_table.table = NULL;
1281   __kmp_lock_blocks = NULL;
1282 #endif
1283 
1284   __kmp_all_nth = 0;
1285   TCW_4(__kmp_nth, 0);
1286 
1287   __kmp_thread_pool = NULL;
1288   __kmp_thread_pool_insert_pt = NULL;
1289   __kmp_team_pool = NULL;
1290 
1291   /* Must actually zero all the *cache arguments passed to __kmpc_threadprivate
1292      here so threadprivate doesn't use stale data */
1293   KA_TRACE(10, ("__kmp_atfork_child: checking cache address list %p\n",
1294                 __kmp_threadpriv_cache_list));
1295 
1296   while (__kmp_threadpriv_cache_list != NULL) {
1297 
1298     if (*__kmp_threadpriv_cache_list->addr != NULL) {
1299       KC_TRACE(50, ("__kmp_atfork_child: zeroing cache at address %p\n",
1300                     &(*__kmp_threadpriv_cache_list->addr)));
1301 
1302       *__kmp_threadpriv_cache_list->addr = NULL;
1303     }
1304     __kmp_threadpriv_cache_list = __kmp_threadpriv_cache_list->next;
1305   }
1306 
1307   __kmp_init_runtime = FALSE;
1308 
1309   /* reset statically initialized locks */
1310   __kmp_init_bootstrap_lock(&__kmp_initz_lock);
1311   __kmp_init_bootstrap_lock(&__kmp_stdio_lock);
1312   __kmp_init_bootstrap_lock(&__kmp_console_lock);
1313   __kmp_init_bootstrap_lock(&__kmp_task_team_lock);
1314 
1315 #if USE_ITT_BUILD
1316   __kmp_itt_reset(); // reset ITT's global state
1317 #endif /* USE_ITT_BUILD */
1318 
1319   {
1320     // Child process often get terminated without any use of OpenMP. That might
1321     // cause mapped shared memory file to be left unattended. Thus we postpone
1322     // library registration till middle initialization in the child process.
1323     __kmp_need_register_serial = FALSE;
1324     __kmp_serial_initialize();
1325   }
1326 
1327   /* This is necessary to make sure no stale data is left around */
1328   /* AC: customers complain that we use unsafe routines in the atfork
1329      handler. Mathworks: dlsym() is unsafe. We call dlsym and dlopen
1330      in dynamic_link when check the presence of shared tbbmalloc library.
1331      Suggestion is to make the library initialization lazier, similar
1332      to what done for __kmpc_begin(). */
1333   // TODO: synchronize all static initializations with regular library
1334   //       startup; look at kmp_global.cpp and etc.
1335   //__kmp_internal_begin ();
1336 }
1337 
1338 void __kmp_register_atfork(void) {
1339   if (__kmp_need_register_atfork) {
1340 #if !KMP_OS_WASI
1341     int status = pthread_atfork(__kmp_atfork_prepare, __kmp_atfork_parent,
1342                                 __kmp_atfork_child);
1343     KMP_CHECK_SYSFAIL("pthread_atfork", status);
1344 #endif
1345     __kmp_need_register_atfork = FALSE;
1346   }
1347 }
1348 
1349 void __kmp_suspend_initialize(void) {
1350   int status;
1351   status = pthread_mutexattr_init(&__kmp_suspend_mutex_attr);
1352   KMP_CHECK_SYSFAIL("pthread_mutexattr_init", status);
1353   status = pthread_condattr_init(&__kmp_suspend_cond_attr);
1354   KMP_CHECK_SYSFAIL("pthread_condattr_init", status);
1355 }
1356 
1357 void __kmp_suspend_initialize_thread(kmp_info_t *th) {
1358   int old_value = KMP_ATOMIC_LD_RLX(&th->th.th_suspend_init_count);
1359   int new_value = __kmp_fork_count + 1;
1360   // Return if already initialized
1361   if (old_value == new_value)
1362     return;
1363   // Wait, then return if being initialized
1364   if (old_value == -1 || !__kmp_atomic_compare_store(
1365                              &th->th.th_suspend_init_count, old_value, -1)) {
1366     while (KMP_ATOMIC_LD_ACQ(&th->th.th_suspend_init_count) != new_value) {
1367       KMP_CPU_PAUSE();
1368     }
1369   } else {
1370     // Claim to be the initializer and do initializations
1371     int status;
1372     status = pthread_cond_init(&th->th.th_suspend_cv.c_cond,
1373                                &__kmp_suspend_cond_attr);
1374     KMP_CHECK_SYSFAIL("pthread_cond_init", status);
1375     status = pthread_mutex_init(&th->th.th_suspend_mx.m_mutex,
1376                                 &__kmp_suspend_mutex_attr);
1377     KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
1378     KMP_ATOMIC_ST_REL(&th->th.th_suspend_init_count, new_value);
1379   }
1380 }
1381 
1382 void __kmp_suspend_uninitialize_thread(kmp_info_t *th) {
1383   if (KMP_ATOMIC_LD_ACQ(&th->th.th_suspend_init_count) > __kmp_fork_count) {
1384     /* this means we have initialize the suspension pthread objects for this
1385        thread in this instance of the process */
1386     int status;
1387 
1388     status = pthread_cond_destroy(&th->th.th_suspend_cv.c_cond);
1389     if (status != 0 && status != EBUSY) {
1390       KMP_SYSFAIL("pthread_cond_destroy", status);
1391     }
1392     status = pthread_mutex_destroy(&th->th.th_suspend_mx.m_mutex);
1393     if (status != 0 && status != EBUSY) {
1394       KMP_SYSFAIL("pthread_mutex_destroy", status);
1395     }
1396     --th->th.th_suspend_init_count;
1397     KMP_DEBUG_ASSERT(KMP_ATOMIC_LD_RLX(&th->th.th_suspend_init_count) ==
1398                      __kmp_fork_count);
1399   }
1400 }
1401 
1402 // return true if lock obtained, false otherwise
1403 int __kmp_try_suspend_mx(kmp_info_t *th) {
1404   return (pthread_mutex_trylock(&th->th.th_suspend_mx.m_mutex) == 0);
1405 }
1406 
1407 void __kmp_lock_suspend_mx(kmp_info_t *th) {
1408   int status = pthread_mutex_lock(&th->th.th_suspend_mx.m_mutex);
1409   KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
1410 }
1411 
1412 void __kmp_unlock_suspend_mx(kmp_info_t *th) {
1413   int status = pthread_mutex_unlock(&th->th.th_suspend_mx.m_mutex);
1414   KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
1415 }
1416 
1417 /* This routine puts the calling thread to sleep after setting the
1418    sleep bit for the indicated flag variable to true. */
1419 template <class C>
1420 static inline void __kmp_suspend_template(int th_gtid, C *flag) {
1421   KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_suspend);
1422   kmp_info_t *th = __kmp_threads[th_gtid];
1423   int status;
1424   typename C::flag_t old_spin;
1425 
1426   KF_TRACE(30, ("__kmp_suspend_template: T#%d enter for flag = %p\n", th_gtid,
1427                 flag->get()));
1428 
1429   __kmp_suspend_initialize_thread(th);
1430 
1431   __kmp_lock_suspend_mx(th);
1432 
1433   KF_TRACE(10, ("__kmp_suspend_template: T#%d setting sleep bit for spin(%p)\n",
1434                 th_gtid, flag->get()));
1435 
1436   /* TODO: shouldn't this use release semantics to ensure that
1437      __kmp_suspend_initialize_thread gets called first? */
1438   old_spin = flag->set_sleeping();
1439   TCW_PTR(th->th.th_sleep_loc, (void *)flag);
1440   th->th.th_sleep_loc_type = flag->get_type();
1441   if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME &&
1442       __kmp_pause_status != kmp_soft_paused) {
1443     flag->unset_sleeping();
1444     TCW_PTR(th->th.th_sleep_loc, NULL);
1445     th->th.th_sleep_loc_type = flag_unset;
1446     __kmp_unlock_suspend_mx(th);
1447     return;
1448   }
1449   KF_TRACE(5, ("__kmp_suspend_template: T#%d set sleep bit for spin(%p)==%x,"
1450                " was %x\n",
1451                th_gtid, flag->get(), flag->load(), old_spin));
1452 
1453   if (flag->done_check_val(old_spin) || flag->done_check()) {
1454     flag->unset_sleeping();
1455     TCW_PTR(th->th.th_sleep_loc, NULL);
1456     th->th.th_sleep_loc_type = flag_unset;
1457     KF_TRACE(5, ("__kmp_suspend_template: T#%d false alarm, reset sleep bit "
1458                  "for spin(%p)\n",
1459                  th_gtid, flag->get()));
1460   } else {
1461     /* Encapsulate in a loop as the documentation states that this may
1462        "with low probability" return when the condition variable has
1463        not been signaled or broadcast */
1464     int deactivated = FALSE;
1465 
1466     while (flag->is_sleeping()) {
1467 #ifdef DEBUG_SUSPEND
1468       char buffer[128];
1469       __kmp_suspend_count++;
1470       __kmp_print_cond(buffer, &th->th.th_suspend_cv);
1471       __kmp_printf("__kmp_suspend_template: suspending T#%d: %s\n", th_gtid,
1472                    buffer);
1473 #endif
1474       // Mark the thread as no longer active (only in the first iteration of the
1475       // loop).
1476       if (!deactivated) {
1477         th->th.th_active = FALSE;
1478         if (th->th.th_active_in_pool) {
1479           th->th.th_active_in_pool = FALSE;
1480           KMP_ATOMIC_DEC(&__kmp_thread_pool_active_nth);
1481           KMP_DEBUG_ASSERT(TCR_4(__kmp_thread_pool_active_nth) >= 0);
1482         }
1483         deactivated = TRUE;
1484       }
1485 
1486       KMP_DEBUG_ASSERT(th->th.th_sleep_loc);
1487       KMP_DEBUG_ASSERT(flag->get_type() == th->th.th_sleep_loc_type);
1488 
1489 #if USE_SUSPEND_TIMEOUT
1490       struct timespec now;
1491       struct timeval tval;
1492       int msecs;
1493 
1494       status = gettimeofday(&tval, NULL);
1495       KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
1496       TIMEVAL_TO_TIMESPEC(&tval, &now);
1497 
1498       msecs = (4 * __kmp_dflt_blocktime) + 200;
1499       now.tv_sec += msecs / 1000;
1500       now.tv_nsec += (msecs % 1000) * 1000;
1501 
1502       KF_TRACE(15, ("__kmp_suspend_template: T#%d about to perform "
1503                     "pthread_cond_timedwait\n",
1504                     th_gtid));
1505       status = pthread_cond_timedwait(&th->th.th_suspend_cv.c_cond,
1506                                       &th->th.th_suspend_mx.m_mutex, &now);
1507 #else
1508       KF_TRACE(15, ("__kmp_suspend_template: T#%d about to perform"
1509                     " pthread_cond_wait\n",
1510                     th_gtid));
1511       status = pthread_cond_wait(&th->th.th_suspend_cv.c_cond,
1512                                  &th->th.th_suspend_mx.m_mutex);
1513 #endif // USE_SUSPEND_TIMEOUT
1514 
1515       if ((status != 0) && (status != EINTR) && (status != ETIMEDOUT)) {
1516         KMP_SYSFAIL("pthread_cond_wait", status);
1517       }
1518 
1519       KMP_DEBUG_ASSERT(flag->get_type() == flag->get_ptr_type());
1520 
1521       if (!flag->is_sleeping() &&
1522           ((status == EINTR) || (status == ETIMEDOUT))) {
1523         // if interrupt or timeout, and thread is no longer sleeping, we need to
1524         // make sure sleep_loc gets reset; however, this shouldn't be needed if
1525         // we woke up with resume
1526         flag->unset_sleeping();
1527         TCW_PTR(th->th.th_sleep_loc, NULL);
1528         th->th.th_sleep_loc_type = flag_unset;
1529       }
1530 #ifdef KMP_DEBUG
1531       if (status == ETIMEDOUT) {
1532         if (flag->is_sleeping()) {
1533           KF_TRACE(100,
1534                    ("__kmp_suspend_template: T#%d timeout wakeup\n", th_gtid));
1535         } else {
1536           KF_TRACE(2, ("__kmp_suspend_template: T#%d timeout wakeup, sleep bit "
1537                        "not set!\n",
1538                        th_gtid));
1539           TCW_PTR(th->th.th_sleep_loc, NULL);
1540           th->th.th_sleep_loc_type = flag_unset;
1541         }
1542       } else if (flag->is_sleeping()) {
1543         KF_TRACE(100,
1544                  ("__kmp_suspend_template: T#%d spurious wakeup\n", th_gtid));
1545       }
1546 #endif
1547     } // while
1548 
1549     // Mark the thread as active again (if it was previous marked as inactive)
1550     if (deactivated) {
1551       th->th.th_active = TRUE;
1552       if (TCR_4(th->th.th_in_pool)) {
1553         KMP_ATOMIC_INC(&__kmp_thread_pool_active_nth);
1554         th->th.th_active_in_pool = TRUE;
1555       }
1556     }
1557   }
1558   // We may have had the loop variable set before entering the loop body;
1559   // so we need to reset sleep_loc.
1560   TCW_PTR(th->th.th_sleep_loc, NULL);
1561   th->th.th_sleep_loc_type = flag_unset;
1562 
1563   KMP_DEBUG_ASSERT(!flag->is_sleeping());
1564   KMP_DEBUG_ASSERT(!th->th.th_sleep_loc);
1565 #ifdef DEBUG_SUSPEND
1566   {
1567     char buffer[128];
1568     __kmp_print_cond(buffer, &th->th.th_suspend_cv);
1569     __kmp_printf("__kmp_suspend_template: T#%d has awakened: %s\n", th_gtid,
1570                  buffer);
1571   }
1572 #endif
1573 
1574   __kmp_unlock_suspend_mx(th);
1575   KF_TRACE(30, ("__kmp_suspend_template: T#%d exit\n", th_gtid));
1576 }
1577 
1578 template <bool C, bool S>
1579 void __kmp_suspend_32(int th_gtid, kmp_flag_32<C, S> *flag) {
1580   __kmp_suspend_template(th_gtid, flag);
1581 }
1582 template <bool C, bool S>
1583 void __kmp_suspend_64(int th_gtid, kmp_flag_64<C, S> *flag) {
1584   __kmp_suspend_template(th_gtid, flag);
1585 }
1586 template <bool C, bool S>
1587 void __kmp_atomic_suspend_64(int th_gtid, kmp_atomic_flag_64<C, S> *flag) {
1588   __kmp_suspend_template(th_gtid, flag);
1589 }
1590 void __kmp_suspend_oncore(int th_gtid, kmp_flag_oncore *flag) {
1591   __kmp_suspend_template(th_gtid, flag);
1592 }
1593 
1594 template void __kmp_suspend_32<false, false>(int, kmp_flag_32<false, false> *);
1595 template void __kmp_suspend_64<false, true>(int, kmp_flag_64<false, true> *);
1596 template void __kmp_suspend_64<true, false>(int, kmp_flag_64<true, false> *);
1597 template void
1598 __kmp_atomic_suspend_64<false, true>(int, kmp_atomic_flag_64<false, true> *);
1599 template void
1600 __kmp_atomic_suspend_64<true, false>(int, kmp_atomic_flag_64<true, false> *);
1601 
1602 /* This routine signals the thread specified by target_gtid to wake up
1603    after setting the sleep bit indicated by the flag argument to FALSE.
1604    The target thread must already have called __kmp_suspend_template() */
1605 template <class C>
1606 static inline void __kmp_resume_template(int target_gtid, C *flag) {
1607   KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_resume);
1608   kmp_info_t *th = __kmp_threads[target_gtid];
1609   int status;
1610 
1611 #ifdef KMP_DEBUG
1612   int gtid = TCR_4(__kmp_init_gtid) ? __kmp_get_gtid() : -1;
1613 #endif
1614 
1615   KF_TRACE(30, ("__kmp_resume_template: T#%d wants to wakeup T#%d enter\n",
1616                 gtid, target_gtid));
1617   KMP_DEBUG_ASSERT(gtid != target_gtid);
1618 
1619   __kmp_suspend_initialize_thread(th);
1620 
1621   __kmp_lock_suspend_mx(th);
1622 
1623   if (!flag || flag != th->th.th_sleep_loc) {
1624     // coming from __kmp_null_resume_wrapper, or thread is now sleeping on a
1625     // different location; wake up at new location
1626     flag = (C *)CCAST(void *, th->th.th_sleep_loc);
1627   }
1628 
1629   // First, check if the flag is null or its type has changed. If so, someone
1630   // else woke it up.
1631   if (!flag) { // Thread doesn't appear to be sleeping on anything
1632     KF_TRACE(5, ("__kmp_resume_template: T#%d exiting, thread T#%d already "
1633                  "awake: flag(%p)\n",
1634                  gtid, target_gtid, (void *)NULL));
1635     __kmp_unlock_suspend_mx(th);
1636     return;
1637   } else if (flag->get_type() != th->th.th_sleep_loc_type) {
1638     // Flag type does not appear to match this function template; possibly the
1639     // thread is sleeping on something else. Try null resume again.
1640     KF_TRACE(
1641         5,
1642         ("__kmp_resume_template: T#%d retrying, thread T#%d Mismatch flag(%p), "
1643          "spin(%p) type=%d ptr_type=%d\n",
1644          gtid, target_gtid, flag, flag->get(), flag->get_type(),
1645          th->th.th_sleep_loc_type));
1646     __kmp_unlock_suspend_mx(th);
1647     __kmp_null_resume_wrapper(th);
1648     return;
1649   } else { // if multiple threads are sleeping, flag should be internally
1650     // referring to a specific thread here
1651     if (!flag->is_sleeping()) {
1652       KF_TRACE(5, ("__kmp_resume_template: T#%d exiting, thread T#%d already "
1653                    "awake: flag(%p): %u\n",
1654                    gtid, target_gtid, flag->get(), (unsigned int)flag->load()));
1655       __kmp_unlock_suspend_mx(th);
1656       return;
1657     }
1658   }
1659   KMP_DEBUG_ASSERT(flag);
1660   flag->unset_sleeping();
1661   TCW_PTR(th->th.th_sleep_loc, NULL);
1662   th->th.th_sleep_loc_type = flag_unset;
1663 
1664   KF_TRACE(5, ("__kmp_resume_template: T#%d about to wakeup T#%d, reset "
1665                "sleep bit for flag's loc(%p): %u\n",
1666                gtid, target_gtid, flag->get(), (unsigned int)flag->load()));
1667 
1668 #ifdef DEBUG_SUSPEND
1669   {
1670     char buffer[128];
1671     __kmp_print_cond(buffer, &th->th.th_suspend_cv);
1672     __kmp_printf("__kmp_resume_template: T#%d resuming T#%d: %s\n", gtid,
1673                  target_gtid, buffer);
1674   }
1675 #endif
1676   status = pthread_cond_signal(&th->th.th_suspend_cv.c_cond);
1677   KMP_CHECK_SYSFAIL("pthread_cond_signal", status);
1678   __kmp_unlock_suspend_mx(th);
1679   KF_TRACE(30, ("__kmp_resume_template: T#%d exiting after signaling wake up"
1680                 " for T#%d\n",
1681                 gtid, target_gtid));
1682 }
1683 
1684 template <bool C, bool S>
1685 void __kmp_resume_32(int target_gtid, kmp_flag_32<C, S> *flag) {
1686   __kmp_resume_template(target_gtid, flag);
1687 }
1688 template <bool C, bool S>
1689 void __kmp_resume_64(int target_gtid, kmp_flag_64<C, S> *flag) {
1690   __kmp_resume_template(target_gtid, flag);
1691 }
1692 template <bool C, bool S>
1693 void __kmp_atomic_resume_64(int target_gtid, kmp_atomic_flag_64<C, S> *flag) {
1694   __kmp_resume_template(target_gtid, flag);
1695 }
1696 void __kmp_resume_oncore(int target_gtid, kmp_flag_oncore *flag) {
1697   __kmp_resume_template(target_gtid, flag);
1698 }
1699 
1700 template void __kmp_resume_32<false, true>(int, kmp_flag_32<false, true> *);
1701 template void __kmp_resume_32<false, false>(int, kmp_flag_32<false, false> *);
1702 template void __kmp_resume_64<false, true>(int, kmp_flag_64<false, true> *);
1703 template void
1704 __kmp_atomic_resume_64<false, true>(int, kmp_atomic_flag_64<false, true> *);
1705 
1706 #if KMP_USE_MONITOR
1707 void __kmp_resume_monitor() {
1708   KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_resume);
1709   int status;
1710 #ifdef KMP_DEBUG
1711   int gtid = TCR_4(__kmp_init_gtid) ? __kmp_get_gtid() : -1;
1712   KF_TRACE(30, ("__kmp_resume_monitor: T#%d wants to wakeup T#%d enter\n", gtid,
1713                 KMP_GTID_MONITOR));
1714   KMP_DEBUG_ASSERT(gtid != KMP_GTID_MONITOR);
1715 #endif
1716   status = pthread_mutex_lock(&__kmp_wait_mx.m_mutex);
1717   KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
1718 #ifdef DEBUG_SUSPEND
1719   {
1720     char buffer[128];
1721     __kmp_print_cond(buffer, &__kmp_wait_cv.c_cond);
1722     __kmp_printf("__kmp_resume_monitor: T#%d resuming T#%d: %s\n", gtid,
1723                  KMP_GTID_MONITOR, buffer);
1724   }
1725 #endif
1726   status = pthread_cond_signal(&__kmp_wait_cv.c_cond);
1727   KMP_CHECK_SYSFAIL("pthread_cond_signal", status);
1728   status = pthread_mutex_unlock(&__kmp_wait_mx.m_mutex);
1729   KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
1730   KF_TRACE(30, ("__kmp_resume_monitor: T#%d exiting after signaling wake up"
1731                 " for T#%d\n",
1732                 gtid, KMP_GTID_MONITOR));
1733 }
1734 #endif // KMP_USE_MONITOR
1735 
1736 void __kmp_yield() { sched_yield(); }
1737 
1738 void __kmp_gtid_set_specific(int gtid) {
1739   if (__kmp_init_gtid) {
1740     int status;
1741     status = pthread_setspecific(__kmp_gtid_threadprivate_key,
1742                                  (void *)(intptr_t)(gtid + 1));
1743     KMP_CHECK_SYSFAIL("pthread_setspecific", status);
1744   } else {
1745     KA_TRACE(50, ("__kmp_gtid_set_specific: runtime shutdown, returning\n"));
1746   }
1747 }
1748 
1749 int __kmp_gtid_get_specific() {
1750   int gtid;
1751   if (!__kmp_init_gtid) {
1752     KA_TRACE(50, ("__kmp_gtid_get_specific: runtime shutdown, returning "
1753                   "KMP_GTID_SHUTDOWN\n"));
1754     return KMP_GTID_SHUTDOWN;
1755   }
1756   gtid = (int)(size_t)pthread_getspecific(__kmp_gtid_threadprivate_key);
1757   if (gtid == 0) {
1758     gtid = KMP_GTID_DNE;
1759   } else {
1760     gtid--;
1761   }
1762   KA_TRACE(50, ("__kmp_gtid_get_specific: key:%d gtid:%d\n",
1763                 __kmp_gtid_threadprivate_key, gtid));
1764   return gtid;
1765 }
1766 
1767 double __kmp_read_cpu_time(void) {
1768   /*clock_t   t;*/
1769   struct tms buffer;
1770 
1771   /*t =*/times(&buffer);
1772 
1773   return (double)(buffer.tms_utime + buffer.tms_cutime) /
1774          (double)CLOCKS_PER_SEC;
1775 }
1776 
1777 int __kmp_read_system_info(struct kmp_sys_info *info) {
1778   int status;
1779   struct rusage r_usage;
1780 
1781   memset(info, 0, sizeof(*info));
1782 
1783   status = getrusage(RUSAGE_SELF, &r_usage);
1784   KMP_CHECK_SYSFAIL_ERRNO("getrusage", status);
1785 
1786 #if !KMP_OS_WASI
1787   // The maximum resident set size utilized (in kilobytes)
1788   info->maxrss = r_usage.ru_maxrss;
1789   // The number of page faults serviced without any I/O
1790   info->minflt = r_usage.ru_minflt;
1791   // The number of page faults serviced that required I/O
1792   info->majflt = r_usage.ru_majflt;
1793   // The number of times a process was "swapped" out of memory
1794   info->nswap = r_usage.ru_nswap;
1795   // The number of times the file system had to perform input
1796   info->inblock = r_usage.ru_inblock;
1797   // The number of times the file system had to perform output
1798   info->oublock = r_usage.ru_oublock;
1799   // The number of times a context switch was voluntarily
1800   info->nvcsw = r_usage.ru_nvcsw;
1801   // The number of times a context switch was forced
1802   info->nivcsw = r_usage.ru_nivcsw;
1803 #endif
1804 
1805   return (status != 0);
1806 }
1807 
1808 void __kmp_read_system_time(double *delta) {
1809   double t_ns;
1810   struct timeval tval;
1811   struct timespec stop;
1812   int status;
1813 
1814   status = gettimeofday(&tval, NULL);
1815   KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
1816   TIMEVAL_TO_TIMESPEC(&tval, &stop);
1817   t_ns = (double)(TS2NS(stop) - TS2NS(__kmp_sys_timer_data.start));
1818   *delta = (t_ns * 1e-9);
1819 }
1820 
1821 void __kmp_clear_system_time(void) {
1822   struct timeval tval;
1823   int status;
1824   status = gettimeofday(&tval, NULL);
1825   KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
1826   TIMEVAL_TO_TIMESPEC(&tval, &__kmp_sys_timer_data.start);
1827 }
1828 
1829 static int __kmp_get_xproc(void) {
1830 
1831   int r = 0;
1832 
1833 #if KMP_OS_LINUX
1834 
1835   __kmp_type_convert(sysconf(_SC_NPROCESSORS_CONF), &(r));
1836 
1837 #elif KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_OPENBSD || \
1838     KMP_OS_HURD || KMP_OS_SOLARIS || KMP_OS_WASI || KMP_OS_AIX
1839 
1840   __kmp_type_convert(sysconf(_SC_NPROCESSORS_ONLN), &(r));
1841 
1842 #elif KMP_OS_DARWIN
1843 
1844   // Bug C77011 High "OpenMP Threads and number of active cores".
1845 
1846   // Find the number of available CPUs.
1847   kern_return_t rc;
1848   host_basic_info_data_t info;
1849   mach_msg_type_number_t num = HOST_BASIC_INFO_COUNT;
1850   rc = host_info(mach_host_self(), HOST_BASIC_INFO, (host_info_t)&info, &num);
1851   if (rc == 0 && num == HOST_BASIC_INFO_COUNT) {
1852     // Cannot use KA_TRACE() here because this code works before trace support
1853     // is initialized.
1854     r = info.avail_cpus;
1855   } else {
1856     KMP_WARNING(CantGetNumAvailCPU);
1857     KMP_INFORM(AssumedNumCPU);
1858   }
1859 
1860 #else
1861 
1862 #error "Unknown or unsupported OS."
1863 
1864 #endif
1865 
1866   return r > 0 ? r : 2; /* guess value of 2 if OS told us 0 */
1867 
1868 } // __kmp_get_xproc
1869 
1870 int __kmp_read_from_file(char const *path, char const *format, ...) {
1871   int result;
1872   va_list args;
1873 
1874   va_start(args, format);
1875   FILE *f = fopen(path, "rb");
1876   if (f == NULL) {
1877     va_end(args);
1878     return 0;
1879   }
1880   result = vfscanf(f, format, args);
1881   fclose(f);
1882   va_end(args);
1883 
1884   return result;
1885 }
1886 
1887 void __kmp_runtime_initialize(void) {
1888   int status;
1889   pthread_mutexattr_t mutex_attr;
1890   pthread_condattr_t cond_attr;
1891 
1892   if (__kmp_init_runtime) {
1893     return;
1894   }
1895 
1896 #if (KMP_ARCH_X86 || KMP_ARCH_X86_64)
1897   if (!__kmp_cpuinfo.initialized) {
1898     __kmp_query_cpuid(&__kmp_cpuinfo);
1899   }
1900 #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
1901 
1902   __kmp_xproc = __kmp_get_xproc();
1903 
1904 #if !KMP_32_BIT_ARCH
1905   struct rlimit rlim;
1906   // read stack size of calling thread, save it as default for worker threads;
1907   // this should be done before reading environment variables
1908   status = getrlimit(RLIMIT_STACK, &rlim);
1909   if (status == 0) { // success?
1910     __kmp_stksize = rlim.rlim_cur;
1911     __kmp_check_stksize(&__kmp_stksize); // check value and adjust if needed
1912   }
1913 #endif /* KMP_32_BIT_ARCH */
1914 
1915   if (sysconf(_SC_THREADS)) {
1916 
1917     /* Query the maximum number of threads */
1918     __kmp_type_convert(sysconf(_SC_THREAD_THREADS_MAX), &(__kmp_sys_max_nth));
1919 #ifdef __ve__
1920     if (__kmp_sys_max_nth == -1) {
1921       // VE's pthread supports only up to 64 threads per a VE process.
1922       // So we use that KMP_MAX_NTH (predefined as 64) here.
1923       __kmp_sys_max_nth = KMP_MAX_NTH;
1924     }
1925 #else
1926     if (__kmp_sys_max_nth == -1) {
1927       /* Unlimited threads for NPTL */
1928       __kmp_sys_max_nth = INT_MAX;
1929     } else if (__kmp_sys_max_nth <= 1) {
1930       /* Can't tell, just use PTHREAD_THREADS_MAX */
1931       __kmp_sys_max_nth = KMP_MAX_NTH;
1932     }
1933 #endif
1934 
1935     /* Query the minimum stack size */
1936     __kmp_sys_min_stksize = sysconf(_SC_THREAD_STACK_MIN);
1937     if (__kmp_sys_min_stksize <= 1) {
1938       __kmp_sys_min_stksize = KMP_MIN_STKSIZE;
1939     }
1940   }
1941 
1942   /* Set up minimum number of threads to switch to TLS gtid */
1943   __kmp_tls_gtid_min = KMP_TLS_GTID_MIN;
1944 
1945   status = pthread_key_create(&__kmp_gtid_threadprivate_key,
1946                               __kmp_internal_end_dest);
1947   KMP_CHECK_SYSFAIL("pthread_key_create", status);
1948   status = pthread_mutexattr_init(&mutex_attr);
1949   KMP_CHECK_SYSFAIL("pthread_mutexattr_init", status);
1950   status = pthread_mutex_init(&__kmp_wait_mx.m_mutex, &mutex_attr);
1951   KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
1952   status = pthread_mutexattr_destroy(&mutex_attr);
1953   KMP_CHECK_SYSFAIL("pthread_mutexattr_destroy", status);
1954   status = pthread_condattr_init(&cond_attr);
1955   KMP_CHECK_SYSFAIL("pthread_condattr_init", status);
1956   status = pthread_cond_init(&__kmp_wait_cv.c_cond, &cond_attr);
1957   KMP_CHECK_SYSFAIL("pthread_cond_init", status);
1958   status = pthread_condattr_destroy(&cond_attr);
1959   KMP_CHECK_SYSFAIL("pthread_condattr_destroy", status);
1960 #if USE_ITT_BUILD
1961   __kmp_itt_initialize();
1962 #endif /* USE_ITT_BUILD */
1963 
1964   __kmp_init_runtime = TRUE;
1965 }
1966 
1967 void __kmp_runtime_destroy(void) {
1968   int status;
1969 
1970   if (!__kmp_init_runtime) {
1971     return; // Nothing to do.
1972   }
1973 
1974 #if USE_ITT_BUILD
1975   __kmp_itt_destroy();
1976 #endif /* USE_ITT_BUILD */
1977 
1978   status = pthread_key_delete(__kmp_gtid_threadprivate_key);
1979   KMP_CHECK_SYSFAIL("pthread_key_delete", status);
1980 
1981   status = pthread_mutex_destroy(&__kmp_wait_mx.m_mutex);
1982   if (status != 0 && status != EBUSY) {
1983     KMP_SYSFAIL("pthread_mutex_destroy", status);
1984   }
1985   status = pthread_cond_destroy(&__kmp_wait_cv.c_cond);
1986   if (status != 0 && status != EBUSY) {
1987     KMP_SYSFAIL("pthread_cond_destroy", status);
1988   }
1989 #if KMP_AFFINITY_SUPPORTED
1990   __kmp_affinity_uninitialize();
1991 #endif
1992 
1993   __kmp_init_runtime = FALSE;
1994 }
1995 
1996 /* Put the thread to sleep for a time period */
1997 /* NOTE: not currently used anywhere */
1998 void __kmp_thread_sleep(int millis) { sleep((millis + 500) / 1000); }
1999 
2000 /* Calculate the elapsed wall clock time for the user */
2001 void __kmp_elapsed(double *t) {
2002   int status;
2003 #ifdef FIX_SGI_CLOCK
2004   struct timespec ts;
2005 
2006   status = clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts);
2007   KMP_CHECK_SYSFAIL_ERRNO("clock_gettime", status);
2008   *t =
2009       (double)ts.tv_nsec * (1.0 / (double)KMP_NSEC_PER_SEC) + (double)ts.tv_sec;
2010 #else
2011   struct timeval tv;
2012 
2013   status = gettimeofday(&tv, NULL);
2014   KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
2015   *t =
2016       (double)tv.tv_usec * (1.0 / (double)KMP_USEC_PER_SEC) + (double)tv.tv_sec;
2017 #endif
2018 }
2019 
2020 /* Calculate the elapsed wall clock tick for the user */
2021 void __kmp_elapsed_tick(double *t) { *t = 1 / (double)CLOCKS_PER_SEC; }
2022 
2023 /* Return the current time stamp in nsec */
2024 kmp_uint64 __kmp_now_nsec() {
2025   struct timeval t;
2026   gettimeofday(&t, NULL);
2027   kmp_uint64 nsec = (kmp_uint64)KMP_NSEC_PER_SEC * (kmp_uint64)t.tv_sec +
2028                     (kmp_uint64)1000 * (kmp_uint64)t.tv_usec;
2029   return nsec;
2030 }
2031 
2032 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
2033 /* Measure clock ticks per millisecond */
2034 void __kmp_initialize_system_tick() {
2035   kmp_uint64 now, nsec2, diff;
2036   kmp_uint64 delay = 1000000; // ~450 usec on most machines.
2037   kmp_uint64 nsec = __kmp_now_nsec();
2038   kmp_uint64 goal = __kmp_hardware_timestamp() + delay;
2039   while ((now = __kmp_hardware_timestamp()) < goal)
2040     ;
2041   nsec2 = __kmp_now_nsec();
2042   diff = nsec2 - nsec;
2043   if (diff > 0) {
2044     double tpus = 1000.0 * (double)(delay + (now - goal)) / (double)diff;
2045     if (tpus > 0.0) {
2046       __kmp_ticks_per_msec = (kmp_uint64)(tpus * 1000.0);
2047       __kmp_ticks_per_usec = (kmp_uint64)tpus;
2048     }
2049   }
2050 }
2051 #endif
2052 
2053 /* Determine whether the given address is mapped into the current address
2054    space. */
2055 
2056 int __kmp_is_address_mapped(void *addr) {
2057 
2058   int found = 0;
2059   int rc;
2060 
2061 #if KMP_OS_LINUX || KMP_OS_HURD
2062 
2063   /* On GNUish OSes, read the /proc/<pid>/maps pseudo-file to get all the
2064      address ranges mapped into the address space. */
2065 
2066   char *name = __kmp_str_format("/proc/%d/maps", getpid());
2067   FILE *file = NULL;
2068 
2069   file = fopen(name, "r");
2070   KMP_ASSERT(file != NULL);
2071 
2072   for (;;) {
2073 
2074     void *beginning = NULL;
2075     void *ending = NULL;
2076     char perms[5];
2077 
2078     rc = fscanf(file, "%p-%p %4s %*[^\n]\n", &beginning, &ending, perms);
2079     if (rc == EOF) {
2080       break;
2081     }
2082     KMP_ASSERT(rc == 3 &&
2083                KMP_STRLEN(perms) == 4); // Make sure all fields are read.
2084 
2085     // Ending address is not included in the region, but beginning is.
2086     if ((addr >= beginning) && (addr < ending)) {
2087       perms[2] = 0; // 3th and 4th character does not matter.
2088       if (strcmp(perms, "rw") == 0) {
2089         // Memory we are looking for should be readable and writable.
2090         found = 1;
2091       }
2092       break;
2093     }
2094   }
2095 
2096   // Free resources.
2097   fclose(file);
2098   KMP_INTERNAL_FREE(name);
2099 #elif KMP_OS_FREEBSD
2100   char *buf;
2101   size_t lstsz;
2102   int mib[] = {CTL_KERN, KERN_PROC, KERN_PROC_VMMAP, getpid()};
2103   rc = sysctl(mib, 4, NULL, &lstsz, NULL, 0);
2104   if (rc < 0)
2105     return 0;
2106   // We pass from number of vm entry's semantic
2107   // to size of whole entry map list.
2108   lstsz = lstsz * 4 / 3;
2109   buf = reinterpret_cast<char *>(kmpc_malloc(lstsz));
2110   rc = sysctl(mib, 4, buf, &lstsz, NULL, 0);
2111   if (rc < 0) {
2112     kmpc_free(buf);
2113     return 0;
2114   }
2115 
2116   char *lw = buf;
2117   char *up = buf + lstsz;
2118 
2119   while (lw < up) {
2120     struct kinfo_vmentry *cur = reinterpret_cast<struct kinfo_vmentry *>(lw);
2121     size_t cursz = cur->kve_structsize;
2122     if (cursz == 0)
2123       break;
2124     void *start = reinterpret_cast<void *>(cur->kve_start);
2125     void *end = reinterpret_cast<void *>(cur->kve_end);
2126     // Readable/Writable addresses within current map entry
2127     if ((addr >= start) && (addr < end)) {
2128       if ((cur->kve_protection & KVME_PROT_READ) != 0 &&
2129           (cur->kve_protection & KVME_PROT_WRITE) != 0) {
2130         found = 1;
2131         break;
2132       }
2133     }
2134     lw += cursz;
2135   }
2136   kmpc_free(buf);
2137 #elif KMP_OS_DRAGONFLY
2138   char err[_POSIX2_LINE_MAX];
2139   kinfo_proc *proc;
2140   vmspace sp;
2141   vm_map *cur;
2142   vm_map_entry entry, *c;
2143   struct proc p;
2144   kvm_t *fd;
2145   uintptr_t uaddr;
2146   int num;
2147 
2148   fd = kvm_openfiles(nullptr, nullptr, nullptr, O_RDONLY, err);
2149   if (!fd) {
2150     return 0;
2151   }
2152 
2153   proc = kvm_getprocs(fd, KERN_PROC_PID, getpid(), &num);
2154 
2155   if (kvm_read(fd, static_cast<uintptr_t>(proc->kp_paddr), &p, sizeof(p)) !=
2156           sizeof(p) ||
2157       kvm_read(fd, reinterpret_cast<uintptr_t>(p.p_vmspace), &sp, sizeof(sp)) !=
2158           sizeof(sp)) {
2159     kvm_close(fd);
2160     return 0;
2161   }
2162 
2163   (void)rc;
2164   cur = &sp.vm_map;
2165   uaddr = reinterpret_cast<uintptr_t>(addr);
2166   for (c = kvm_vm_map_entry_first(fd, cur, &entry); c;
2167        c = kvm_vm_map_entry_next(fd, c, &entry)) {
2168     if ((uaddr >= entry.ba.start) && (uaddr <= entry.ba.end)) {
2169       if ((entry.protection & VM_PROT_READ) != 0 &&
2170           (entry.protection & VM_PROT_WRITE) != 0) {
2171         found = 1;
2172         break;
2173       }
2174     }
2175   }
2176 
2177   kvm_close(fd);
2178 #elif KMP_OS_DARWIN
2179 
2180   /* On OS X*, /proc pseudo filesystem is not available. Try to read memory
2181      using vm interface. */
2182 
2183   int buffer;
2184   vm_size_t count;
2185   rc = vm_read_overwrite(
2186       mach_task_self(), // Task to read memory of.
2187       (vm_address_t)(addr), // Address to read from.
2188       1, // Number of bytes to be read.
2189       (vm_address_t)(&buffer), // Address of buffer to save read bytes in.
2190       &count // Address of var to save number of read bytes in.
2191   );
2192   if (rc == 0) {
2193     // Memory successfully read.
2194     found = 1;
2195   }
2196 
2197 #elif KMP_OS_NETBSD
2198 
2199   int mib[5];
2200   mib[0] = CTL_VM;
2201   mib[1] = VM_PROC;
2202   mib[2] = VM_PROC_MAP;
2203   mib[3] = getpid();
2204   mib[4] = sizeof(struct kinfo_vmentry);
2205 
2206   size_t size;
2207   rc = sysctl(mib, __arraycount(mib), NULL, &size, NULL, 0);
2208   KMP_ASSERT(!rc);
2209   KMP_ASSERT(size);
2210 
2211   size = size * 4 / 3;
2212   struct kinfo_vmentry *kiv = (struct kinfo_vmentry *)KMP_INTERNAL_MALLOC(size);
2213   KMP_ASSERT(kiv);
2214 
2215   rc = sysctl(mib, __arraycount(mib), kiv, &size, NULL, 0);
2216   KMP_ASSERT(!rc);
2217   KMP_ASSERT(size);
2218 
2219   for (size_t i = 0; i < size; i++) {
2220     if (kiv[i].kve_start >= (uint64_t)addr &&
2221         kiv[i].kve_end <= (uint64_t)addr) {
2222       found = 1;
2223       break;
2224     }
2225   }
2226   KMP_INTERNAL_FREE(kiv);
2227 #elif KMP_OS_OPENBSD
2228 
2229   int mib[3];
2230   mib[0] = CTL_KERN;
2231   mib[1] = KERN_PROC_VMMAP;
2232   mib[2] = getpid();
2233 
2234   size_t size;
2235   uint64_t end;
2236   rc = sysctl(mib, 3, NULL, &size, NULL, 0);
2237   KMP_ASSERT(!rc);
2238   KMP_ASSERT(size);
2239   end = size;
2240 
2241   struct kinfo_vmentry kiv = {.kve_start = 0};
2242 
2243   while ((rc = sysctl(mib, 3, &kiv, &size, NULL, 0)) == 0) {
2244     KMP_ASSERT(size);
2245     if (kiv.kve_end == end)
2246       break;
2247 
2248     if (kiv.kve_start >= (uint64_t)addr && kiv.kve_end <= (uint64_t)addr) {
2249       found = 1;
2250       break;
2251     }
2252     kiv.kve_start += 1;
2253   }
2254 #elif KMP_OS_WASI
2255   found = (int)addr < (__builtin_wasm_memory_size(0) * PAGESIZE);
2256 #elif KMP_OS_SOLARIS || KMP_OS_AIX
2257 
2258   // FIXME(Solaris, AIX): Implement this
2259   found = 1;
2260 
2261 #else
2262 
2263 #error "Unknown or unsupported OS"
2264 
2265 #endif
2266 
2267   return found;
2268 
2269 } // __kmp_is_address_mapped
2270 
2271 #ifdef USE_LOAD_BALANCE
2272 
2273 #if KMP_OS_DARWIN || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||    \
2274     KMP_OS_OPENBSD || KMP_OS_SOLARIS
2275 
2276 // The function returns the rounded value of the system load average
2277 // during given time interval which depends on the value of
2278 // __kmp_load_balance_interval variable (default is 60 sec, other values
2279 // may be 300 sec or 900 sec).
2280 // It returns -1 in case of error.
2281 int __kmp_get_load_balance(int max) {
2282   double averages[3];
2283   int ret_avg = 0;
2284 
2285   int res = getloadavg(averages, 3);
2286 
2287   // Check __kmp_load_balance_interval to determine which of averages to use.
2288   // getloadavg() may return the number of samples less than requested that is
2289   // less than 3.
2290   if (__kmp_load_balance_interval < 180 && (res >= 1)) {
2291     ret_avg = (int)averages[0]; // 1 min
2292   } else if ((__kmp_load_balance_interval >= 180 &&
2293               __kmp_load_balance_interval < 600) &&
2294              (res >= 2)) {
2295     ret_avg = (int)averages[1]; // 5 min
2296   } else if ((__kmp_load_balance_interval >= 600) && (res == 3)) {
2297     ret_avg = (int)averages[2]; // 15 min
2298   } else { // Error occurred
2299     return -1;
2300   }
2301 
2302   return ret_avg;
2303 }
2304 
2305 #else // Linux* OS
2306 
2307 // The function returns number of running (not sleeping) threads, or -1 in case
2308 // of error. Error could be reported if Linux* OS kernel too old (without
2309 // "/proc" support). Counting running threads stops if max running threads
2310 // encountered.
2311 int __kmp_get_load_balance(int max) {
2312   static int permanent_error = 0;
2313   static int glb_running_threads = 0; // Saved count of the running threads for
2314   // the thread balance algorithm
2315   static double glb_call_time = 0; /* Thread balance algorithm call time */
2316 
2317   int running_threads = 0; // Number of running threads in the system.
2318 
2319   DIR *proc_dir = NULL; // Handle of "/proc/" directory.
2320   struct dirent *proc_entry = NULL;
2321 
2322   kmp_str_buf_t task_path; // "/proc/<pid>/task/<tid>/" path.
2323   DIR *task_dir = NULL; // Handle of "/proc/<pid>/task/<tid>/" directory.
2324   struct dirent *task_entry = NULL;
2325   int task_path_fixed_len;
2326 
2327   kmp_str_buf_t stat_path; // "/proc/<pid>/task/<tid>/stat" path.
2328   int stat_file = -1;
2329   int stat_path_fixed_len;
2330 
2331 #ifdef KMP_DEBUG
2332   int total_processes = 0; // Total number of processes in system.
2333 #endif
2334 
2335   double call_time = 0.0;
2336 
2337   __kmp_str_buf_init(&task_path);
2338   __kmp_str_buf_init(&stat_path);
2339 
2340   __kmp_elapsed(&call_time);
2341 
2342   if (glb_call_time &&
2343       (call_time - glb_call_time < __kmp_load_balance_interval)) {
2344     running_threads = glb_running_threads;
2345     goto finish;
2346   }
2347 
2348   glb_call_time = call_time;
2349 
2350   // Do not spend time on scanning "/proc/" if we have a permanent error.
2351   if (permanent_error) {
2352     running_threads = -1;
2353     goto finish;
2354   }
2355 
2356   if (max <= 0) {
2357     max = INT_MAX;
2358   }
2359 
2360   // Open "/proc/" directory.
2361   proc_dir = opendir("/proc");
2362   if (proc_dir == NULL) {
2363     // Cannot open "/proc/". Probably the kernel does not support it. Return an
2364     // error now and in subsequent calls.
2365     running_threads = -1;
2366     permanent_error = 1;
2367     goto finish;
2368   }
2369 
2370   // Initialize fixed part of task_path. This part will not change.
2371   __kmp_str_buf_cat(&task_path, "/proc/", 6);
2372   task_path_fixed_len = task_path.used; // Remember number of used characters.
2373 
2374   proc_entry = readdir(proc_dir);
2375   while (proc_entry != NULL) {
2376 #if KMP_OS_AIX
2377     // Proc entry name starts with a digit. Assume it is a  process' directory.
2378     if (isdigit(proc_entry->d_name[0])) {
2379 #else
2380     // Proc entry is a directory and name starts with a digit. Assume it is a
2381     // process' directory.
2382     if (proc_entry->d_type == DT_DIR && isdigit(proc_entry->d_name[0])) {
2383 #endif
2384 
2385 #ifdef KMP_DEBUG
2386       ++total_processes;
2387 #endif
2388       // Make sure init process is the very first in "/proc", so we can replace
2389       // strcmp( proc_entry->d_name, "1" ) == 0 with simpler total_processes ==
2390       // 1. We are going to check that total_processes == 1 => d_name == "1" is
2391       // true (where "=>" is implication). Since C++ does not have => operator,
2392       // let us replace it with its equivalent: a => b == ! a || b.
2393       KMP_DEBUG_ASSERT(total_processes != 1 ||
2394                        strcmp(proc_entry->d_name, "1") == 0);
2395 
2396       // Construct task_path.
2397       task_path.used = task_path_fixed_len; // Reset task_path to "/proc/".
2398       __kmp_str_buf_cat(&task_path, proc_entry->d_name,
2399                         KMP_STRLEN(proc_entry->d_name));
2400       __kmp_str_buf_cat(&task_path, "/task", 5);
2401 
2402       task_dir = opendir(task_path.str);
2403       if (task_dir == NULL) {
2404         // Process can finish between reading "/proc/" directory entry and
2405         // opening process' "task/" directory. So, in general case we should not
2406         // complain, but have to skip this process and read the next one. But on
2407         // systems with no "task/" support we will spend lot of time to scan
2408         // "/proc/" tree again and again without any benefit. "init" process
2409         // (its pid is 1) should exist always, so, if we cannot open
2410         // "/proc/1/task/" directory, it means "task/" is not supported by
2411         // kernel. Report an error now and in the future.
2412         if (strcmp(proc_entry->d_name, "1") == 0) {
2413           running_threads = -1;
2414           permanent_error = 1;
2415           goto finish;
2416         }
2417       } else {
2418         // Construct fixed part of stat file path.
2419         __kmp_str_buf_clear(&stat_path);
2420         __kmp_str_buf_cat(&stat_path, task_path.str, task_path.used);
2421         __kmp_str_buf_cat(&stat_path, "/", 1);
2422         stat_path_fixed_len = stat_path.used;
2423 
2424         task_entry = readdir(task_dir);
2425         while (task_entry != NULL) {
2426           // It is a directory and name starts with a digit.
2427 #if KMP_OS_AIX
2428           if (isdigit(task_entry->d_name[0])) {
2429 #else
2430           if (proc_entry->d_type == DT_DIR && isdigit(task_entry->d_name[0])) {
2431 #endif
2432 
2433             // Construct complete stat file path. Easiest way would be:
2434             //  __kmp_str_buf_print( & stat_path, "%s/%s/stat", task_path.str,
2435             //  task_entry->d_name );
2436             // but seriae of __kmp_str_buf_cat works a bit faster.
2437             stat_path.used =
2438                 stat_path_fixed_len; // Reset stat path to its fixed part.
2439             __kmp_str_buf_cat(&stat_path, task_entry->d_name,
2440                               KMP_STRLEN(task_entry->d_name));
2441             __kmp_str_buf_cat(&stat_path, "/stat", 5);
2442 
2443             // Note: Low-level API (open/read/close) is used. High-level API
2444             // (fopen/fclose)  works ~ 30 % slower.
2445             stat_file = open(stat_path.str, O_RDONLY);
2446             if (stat_file == -1) {
2447               // We cannot report an error because task (thread) can terminate
2448               // just before reading this file.
2449             } else {
2450               /* Content of "stat" file looks like:
2451                  24285 (program) S ...
2452 
2453                  It is a single line (if program name does not include funny
2454                  symbols). First number is a thread id, then name of executable
2455                  file name in paretheses, then state of the thread. We need just
2456                  thread state.
2457 
2458                  Good news: Length of program name is 15 characters max. Longer
2459                  names are truncated.
2460 
2461                  Thus, we need rather short buffer: 15 chars for program name +
2462                  2 parenthesis, + 3 spaces + ~7 digits of pid = 37.
2463 
2464                  Bad news: Program name may contain special symbols like space,
2465                  closing parenthesis, or even new line. This makes parsing
2466                  "stat" file not 100 % reliable. In case of fanny program names
2467                  parsing may fail (report incorrect thread state).
2468 
2469                  Parsing "status" file looks more promissing (due to different
2470                  file structure and escaping special symbols) but reading and
2471                  parsing of "status" file works slower.
2472                   -- ln
2473               */
2474               char buffer[65];
2475               ssize_t len;
2476               len = read(stat_file, buffer, sizeof(buffer) - 1);
2477               if (len >= 0) {
2478                 buffer[len] = 0;
2479                 // Using scanf:
2480                 //     sscanf( buffer, "%*d (%*s) %c ", & state );
2481                 // looks very nice, but searching for a closing parenthesis
2482                 // works a bit faster.
2483                 char *close_parent = strstr(buffer, ") ");
2484                 if (close_parent != NULL) {
2485                   char state = *(close_parent + 2);
2486                   if (state == 'R') {
2487                     ++running_threads;
2488                     if (running_threads >= max) {
2489                       goto finish;
2490                     }
2491                   }
2492                 }
2493               }
2494               close(stat_file);
2495               stat_file = -1;
2496             }
2497           }
2498           task_entry = readdir(task_dir);
2499         }
2500         closedir(task_dir);
2501         task_dir = NULL;
2502       }
2503     }
2504     proc_entry = readdir(proc_dir);
2505   }
2506 
2507   // There _might_ be a timing hole where the thread executing this
2508   // code get skipped in the load balance, and running_threads is 0.
2509   // Assert in the debug builds only!!!
2510   KMP_DEBUG_ASSERT(running_threads > 0);
2511   if (running_threads <= 0) {
2512     running_threads = 1;
2513   }
2514 
2515 finish: // Clean up and exit.
2516   if (proc_dir != NULL) {
2517     closedir(proc_dir);
2518   }
2519   __kmp_str_buf_free(&task_path);
2520   if (task_dir != NULL) {
2521     closedir(task_dir);
2522   }
2523   __kmp_str_buf_free(&stat_path);
2524   if (stat_file != -1) {
2525     close(stat_file);
2526   }
2527 
2528   glb_running_threads = running_threads;
2529 
2530   return running_threads;
2531 
2532 } // __kmp_get_load_balance
2533 
2534 #endif // KMP_OS_DARWIN
2535 
2536 #endif // USE_LOAD_BALANCE
2537 
2538 #if !(KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_MIC ||                            \
2539       ((KMP_OS_LINUX || KMP_OS_DARWIN) && KMP_ARCH_AARCH64) ||                 \
2540       KMP_ARCH_PPC64 || KMP_ARCH_RISCV64 || KMP_ARCH_LOONGARCH64 ||            \
2541       KMP_ARCH_ARM || KMP_ARCH_VE || KMP_ARCH_S390X || KMP_ARCH_PPC_XCOFF)
2542 
2543 // we really only need the case with 1 argument, because CLANG always build
2544 // a struct of pointers to shared variables referenced in the outlined function
2545 int __kmp_invoke_microtask(microtask_t pkfn, int gtid, int tid, int argc,
2546                            void *p_argv[]
2547 #if OMPT_SUPPORT
2548                            ,
2549                            void **exit_frame_ptr
2550 #endif
2551 ) {
2552 #if OMPT_SUPPORT
2553   *exit_frame_ptr = OMPT_GET_FRAME_ADDRESS(0);
2554 #endif
2555 
2556   switch (argc) {
2557   default:
2558     fprintf(stderr, "Too many args to microtask: %d!\n", argc);
2559     fflush(stderr);
2560     exit(-1);
2561   case 0:
2562     (*pkfn)(&gtid, &tid);
2563     break;
2564   case 1:
2565     (*pkfn)(&gtid, &tid, p_argv[0]);
2566     break;
2567   case 2:
2568     (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1]);
2569     break;
2570   case 3:
2571     (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2]);
2572     break;
2573   case 4:
2574     (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3]);
2575     break;
2576   case 5:
2577     (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4]);
2578     break;
2579   case 6:
2580     (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2581             p_argv[5]);
2582     break;
2583   case 7:
2584     (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2585             p_argv[5], p_argv[6]);
2586     break;
2587   case 8:
2588     (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2589             p_argv[5], p_argv[6], p_argv[7]);
2590     break;
2591   case 9:
2592     (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2593             p_argv[5], p_argv[6], p_argv[7], p_argv[8]);
2594     break;
2595   case 10:
2596     (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2597             p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9]);
2598     break;
2599   case 11:
2600     (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2601             p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10]);
2602     break;
2603   case 12:
2604     (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2605             p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
2606             p_argv[11]);
2607     break;
2608   case 13:
2609     (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2610             p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
2611             p_argv[11], p_argv[12]);
2612     break;
2613   case 14:
2614     (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2615             p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
2616             p_argv[11], p_argv[12], p_argv[13]);
2617     break;
2618   case 15:
2619     (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2620             p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
2621             p_argv[11], p_argv[12], p_argv[13], p_argv[14]);
2622     break;
2623   }
2624 
2625   return 1;
2626 }
2627 
2628 #endif
2629 
2630 #if KMP_OS_LINUX
2631 // Functions for hidden helper task
2632 namespace {
2633 // Condition variable for initializing hidden helper team
2634 pthread_cond_t hidden_helper_threads_initz_cond_var;
2635 pthread_mutex_t hidden_helper_threads_initz_lock;
2636 volatile int hidden_helper_initz_signaled = FALSE;
2637 
2638 // Condition variable for deinitializing hidden helper team
2639 pthread_cond_t hidden_helper_threads_deinitz_cond_var;
2640 pthread_mutex_t hidden_helper_threads_deinitz_lock;
2641 volatile int hidden_helper_deinitz_signaled = FALSE;
2642 
2643 // Condition variable for the wrapper function of main thread
2644 pthread_cond_t hidden_helper_main_thread_cond_var;
2645 pthread_mutex_t hidden_helper_main_thread_lock;
2646 volatile int hidden_helper_main_thread_signaled = FALSE;
2647 
2648 // Semaphore for worker threads. We don't use condition variable here in case
2649 // that when multiple signals are sent at the same time, only one thread might
2650 // be waken.
2651 sem_t hidden_helper_task_sem;
2652 } // namespace
2653 
2654 void __kmp_hidden_helper_worker_thread_wait() {
2655   int status = sem_wait(&hidden_helper_task_sem);
2656   KMP_CHECK_SYSFAIL("sem_wait", status);
2657 }
2658 
2659 void __kmp_do_initialize_hidden_helper_threads() {
2660   // Initialize condition variable
2661   int status =
2662       pthread_cond_init(&hidden_helper_threads_initz_cond_var, nullptr);
2663   KMP_CHECK_SYSFAIL("pthread_cond_init", status);
2664 
2665   status = pthread_cond_init(&hidden_helper_threads_deinitz_cond_var, nullptr);
2666   KMP_CHECK_SYSFAIL("pthread_cond_init", status);
2667 
2668   status = pthread_cond_init(&hidden_helper_main_thread_cond_var, nullptr);
2669   KMP_CHECK_SYSFAIL("pthread_cond_init", status);
2670 
2671   status = pthread_mutex_init(&hidden_helper_threads_initz_lock, nullptr);
2672   KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
2673 
2674   status = pthread_mutex_init(&hidden_helper_threads_deinitz_lock, nullptr);
2675   KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
2676 
2677   status = pthread_mutex_init(&hidden_helper_main_thread_lock, nullptr);
2678   KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
2679 
2680   // Initialize the semaphore
2681   status = sem_init(&hidden_helper_task_sem, 0, 0);
2682   KMP_CHECK_SYSFAIL("sem_init", status);
2683 
2684   // Create a new thread to finish initialization
2685   pthread_t handle;
2686   status = pthread_create(
2687       &handle, nullptr,
2688       [](void *) -> void * {
2689         __kmp_hidden_helper_threads_initz_routine();
2690         return nullptr;
2691       },
2692       nullptr);
2693   KMP_CHECK_SYSFAIL("pthread_create", status);
2694 }
2695 
2696 void __kmp_hidden_helper_threads_initz_wait() {
2697   // Initial thread waits here for the completion of the initialization. The
2698   // condition variable will be notified by main thread of hidden helper teams.
2699   int status = pthread_mutex_lock(&hidden_helper_threads_initz_lock);
2700   KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
2701 
2702   if (!TCR_4(hidden_helper_initz_signaled)) {
2703     status = pthread_cond_wait(&hidden_helper_threads_initz_cond_var,
2704                                &hidden_helper_threads_initz_lock);
2705     KMP_CHECK_SYSFAIL("pthread_cond_wait", status);
2706   }
2707 
2708   status = pthread_mutex_unlock(&hidden_helper_threads_initz_lock);
2709   KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
2710 }
2711 
2712 void __kmp_hidden_helper_initz_release() {
2713   // After all initialization, reset __kmp_init_hidden_helper_threads to false.
2714   int status = pthread_mutex_lock(&hidden_helper_threads_initz_lock);
2715   KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
2716 
2717   status = pthread_cond_signal(&hidden_helper_threads_initz_cond_var);
2718   KMP_CHECK_SYSFAIL("pthread_cond_wait", status);
2719 
2720   TCW_SYNC_4(hidden_helper_initz_signaled, TRUE);
2721 
2722   status = pthread_mutex_unlock(&hidden_helper_threads_initz_lock);
2723   KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
2724 }
2725 
2726 void __kmp_hidden_helper_main_thread_wait() {
2727   // The main thread of hidden helper team will be blocked here. The
2728   // condition variable can only be signal in the destructor of RTL.
2729   int status = pthread_mutex_lock(&hidden_helper_main_thread_lock);
2730   KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
2731 
2732   if (!TCR_4(hidden_helper_main_thread_signaled)) {
2733     status = pthread_cond_wait(&hidden_helper_main_thread_cond_var,
2734                                &hidden_helper_main_thread_lock);
2735     KMP_CHECK_SYSFAIL("pthread_cond_wait", status);
2736   }
2737 
2738   status = pthread_mutex_unlock(&hidden_helper_main_thread_lock);
2739   KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
2740 }
2741 
2742 void __kmp_hidden_helper_main_thread_release() {
2743   // The initial thread of OpenMP RTL should call this function to wake up the
2744   // main thread of hidden helper team.
2745   int status = pthread_mutex_lock(&hidden_helper_main_thread_lock);
2746   KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
2747 
2748   status = pthread_cond_signal(&hidden_helper_main_thread_cond_var);
2749   KMP_CHECK_SYSFAIL("pthread_cond_signal", status);
2750 
2751   // The hidden helper team is done here
2752   TCW_SYNC_4(hidden_helper_main_thread_signaled, TRUE);
2753 
2754   status = pthread_mutex_unlock(&hidden_helper_main_thread_lock);
2755   KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
2756 }
2757 
2758 void __kmp_hidden_helper_worker_thread_signal() {
2759   int status = sem_post(&hidden_helper_task_sem);
2760   KMP_CHECK_SYSFAIL("sem_post", status);
2761 }
2762 
2763 void __kmp_hidden_helper_threads_deinitz_wait() {
2764   // Initial thread waits here for the completion of the deinitialization. The
2765   // condition variable will be notified by main thread of hidden helper teams.
2766   int status = pthread_mutex_lock(&hidden_helper_threads_deinitz_lock);
2767   KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
2768 
2769   if (!TCR_4(hidden_helper_deinitz_signaled)) {
2770     status = pthread_cond_wait(&hidden_helper_threads_deinitz_cond_var,
2771                                &hidden_helper_threads_deinitz_lock);
2772     KMP_CHECK_SYSFAIL("pthread_cond_wait", status);
2773   }
2774 
2775   status = pthread_mutex_unlock(&hidden_helper_threads_deinitz_lock);
2776   KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
2777 }
2778 
2779 void __kmp_hidden_helper_threads_deinitz_release() {
2780   int status = pthread_mutex_lock(&hidden_helper_threads_deinitz_lock);
2781   KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
2782 
2783   status = pthread_cond_signal(&hidden_helper_threads_deinitz_cond_var);
2784   KMP_CHECK_SYSFAIL("pthread_cond_wait", status);
2785 
2786   TCW_SYNC_4(hidden_helper_deinitz_signaled, TRUE);
2787 
2788   status = pthread_mutex_unlock(&hidden_helper_threads_deinitz_lock);
2789   KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
2790 }
2791 #else // KMP_OS_LINUX
2792 void __kmp_hidden_helper_worker_thread_wait() {
2793   KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2794 }
2795 
2796 void __kmp_do_initialize_hidden_helper_threads() {
2797   KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2798 }
2799 
2800 void __kmp_hidden_helper_threads_initz_wait() {
2801   KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2802 }
2803 
2804 void __kmp_hidden_helper_initz_release() {
2805   KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2806 }
2807 
2808 void __kmp_hidden_helper_main_thread_wait() {
2809   KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2810 }
2811 
2812 void __kmp_hidden_helper_main_thread_release() {
2813   KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2814 }
2815 
2816 void __kmp_hidden_helper_worker_thread_signal() {
2817   KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2818 }
2819 
2820 void __kmp_hidden_helper_threads_deinitz_wait() {
2821   KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2822 }
2823 
2824 void __kmp_hidden_helper_threads_deinitz_release() {
2825   KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2826 }
2827 #endif // KMP_OS_LINUX
2828 
2829 bool __kmp_detect_shm() {
2830   DIR *dir = opendir("/dev/shm");
2831   if (dir) { // /dev/shm exists
2832     closedir(dir);
2833     return true;
2834   } else if (ENOENT == errno) { // /dev/shm does not exist
2835     return false;
2836   } else { // opendir() failed
2837     return false;
2838   }
2839 }
2840 
2841 bool __kmp_detect_tmp() {
2842   DIR *dir = opendir("/tmp");
2843   if (dir) { // /tmp exists
2844     closedir(dir);
2845     return true;
2846   } else if (ENOENT == errno) { // /tmp does not exist
2847     return false;
2848   } else { // opendir() failed
2849     return false;
2850   }
2851 }
2852 
2853 // end of file //
2854