#include "kmp_affinity.h"
#include "kmp_wait_release.h"
#include "kmp_wrapper_getpid.h"
#if !KMP_OS_DRAGONFLY && !KMP_OS_FREEBSD && !KMP_OS_NETBSD && !KMP_OS_OPENBSD
#include <sys/resource.h>
#include <sys/syscall.h>
#include <sys/sysinfo.h>
#include <sys/sysctl.h>
#elif KMP_OS_DRAGONFLY || KMP_OS_FREEBSD
#include <sys/sysctl.h>
#include <pthread_np.h>
#elif KMP_OS_NETBSD || KMP_OS_OPENBSD
#include <sys/sysctl.h>
#include <sys/loadavg.h>
struct kmp_sys_timer {
  struct timespec start;
};

#ifndef TIMEVAL_TO_TIMESPEC
// Convert timeval to timespec.
#define TIMEVAL_TO_TIMESPEC(tv, ts)                                           \
  do {                                                                        \
    (ts)->tv_sec = (tv)->tv_sec;                                              \
    (ts)->tv_nsec = (tv)->tv_usec * 1000;                                     \
  } while (0)
#endif

// Convert timespec to nanoseconds.
#define TS2NS(timespec)                                                       \
  (((timespec).tv_sec * (long int)1e9) + (timespec).tv_nsec)

static struct kmp_sys_timer __kmp_sys_timer_data;
typedef void (*sig_func_t)(int);
STATIC_EFI2_WORKAROUND struct sigaction __kmp_sighldrs[NSIG];
static sigset_t __kmp_sigset;

static int __kmp_init_runtime = FALSE;

static int __kmp_fork_count = 0;

static pthread_condattr_t __kmp_suspend_cond_attr;
static pthread_mutexattr_t __kmp_suspend_mutex_attr;

static kmp_cond_align_t __kmp_wait_cv;
static kmp_mutex_align_t __kmp_wait_mx;

kmp_uint64 __kmp_ticks_per_msec = 1000000;
kmp_uint64 __kmp_ticks_per_usec = 1000;
static void __kmp_print_cond(char *buffer, kmp_cond_align_t *cond) {
  KMP_SNPRINTF(buffer, 128, "(cond (lock (%ld, %d)), (descr (%p)))",
               cond->c_cond.__c_lock.__status, cond->c_cond.__c_lock.__spinlock,
               cond->c_cond.__c_waiting);
}
#if ((KMP_OS_LINUX || KMP_OS_FREEBSD) && KMP_AFFINITY_SUPPORTED)

// Bind the current thread to the specified OS processor.
void __kmp_affinity_bind_thread(int which) {
  KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
              "Illegal set affinity operation when not capable");

  kmp_affin_mask_t *mask;
  KMP_CPU_ALLOC_ON_STACK(mask);
  KMP_CPU_ZERO(mask);
  KMP_CPU_SET(which, mask);
  __kmp_set_system_affinity(mask, TRUE);
  KMP_CPU_FREE_FROM_STACK(mask);
}
/* Determine if we can access affinity functionality on this version of
   Linux* OS by checking the __NR_sched_{get,set}affinity system calls, and
   set __kmp_affin_mask_size to the appropriate value (0 means "not
   capable"). */
void __kmp_affinity_determine_capable(const char *env_var) {
  // Check and see if the OS supports thread affinity.

#if KMP_OS_LINUX
#define KMP_CPU_SET_SIZE_LIMIT (1024 * 1024)
#define KMP_CPU_SET_TRY_SIZE CACHE_LINE
#elif KMP_OS_FREEBSD
#define KMP_CPU_SET_SIZE_LIMIT (sizeof(cpuset_t))
#endif

  int verbose = __kmp_affinity.flags.verbose;
  int warnings = __kmp_affinity.flags.warnings;
  enum affinity_type type = __kmp_affinity.type;

#if KMP_OS_LINUX
  long gCode;
  unsigned char *buf;
  buf = (unsigned char *)KMP_INTERNAL_MALLOC(KMP_CPU_SET_SIZE_LIMIT);

  // If the syscall returns a suggestion for the size, we don't have to
  // search for an appropriate size.
  gCode = syscall(__NR_sched_getaffinity, 0, KMP_CPU_SET_TRY_SIZE, buf);
  KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                "initial getaffinity call returned %ld errno = %d\n",
                gCode, errno));

  if (gCode < 0 && errno != EINVAL) {
    // System call not supported.
    if (verbose ||
        (warnings && (type != affinity_none) && (type != affinity_default) &&
         (type != affinity_disabled))) {
      int error = errno;
      kmp_msg_t err_code = KMP_ERR(error);
      __kmp_msg(kmp_ms_warning, KMP_MSG(GetAffSysCallNotSupported, env_var),
                err_code, __kmp_msg_null);
      if (__kmp_generate_warnings == kmp_warnings_off) {
        __kmp_str_free(&err_code.str);
      }
    }
    KMP_AFFINITY_DISABLE();
    KMP_INTERNAL_FREE(buf);
    return;
  } else if (gCode > 0) {
    // The optimal situation: the OS returns the size of the buffer it expects.
    KMP_AFFINITY_ENABLE(gCode);
    KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                  "affinity supported (mask size %d)\n",
                  (int)__kmp_affin_mask_size));
    KMP_INTERNAL_FREE(buf);
    return;
  }
  // Call the getaffinity system call repeatedly with increasing set sizes
  // until it succeeds.
  KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                "searching for proper set size\n"));
  long size;
  for (size = 1; size <= KMP_CPU_SET_SIZE_LIMIT; size *= 2) {
    gCode = syscall(__NR_sched_getaffinity, 0, size, buf);
    KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                  "getaffinity for mask size %ld returned %ld errno = %d\n",
                  size, gCode, errno));

    if (gCode < 0) {
      if (errno == ENOSYS) {
        // We shouldn't get here.
        KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                      "inconsistent OS call behavior: errno == ENOSYS for mask "
                      "size %ld\n",
                      size));
        if (verbose ||
            (warnings && (type != affinity_none) &&
             (type != affinity_default) && (type != affinity_disabled))) {
          int error = errno;
          kmp_msg_t err_code = KMP_ERR(error);
          __kmp_msg(kmp_ms_warning, KMP_MSG(GetAffSysCallNotSupported, env_var),
                    err_code, __kmp_msg_null);
          if (__kmp_generate_warnings == kmp_warnings_off) {
            __kmp_str_free(&err_code.str);
          }
        }
        KMP_AFFINITY_DISABLE();
        KMP_INTERNAL_FREE(buf);
        return;
      }
      continue;
    }

    KMP_AFFINITY_ENABLE(gCode);
    KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                  "affinity supported (mask size %d)\n",
                  (int)__kmp_affin_mask_size));
    KMP_INTERNAL_FREE(buf);
    return;
  }
#elif KMP_OS_FREEBSD
  int gCode;
  unsigned char *buf;
  buf = (unsigned char *)KMP_INTERNAL_MALLOC(KMP_CPU_SET_SIZE_LIMIT);
  gCode = pthread_getaffinity_np(pthread_self(), KMP_CPU_SET_SIZE_LIMIT,
                                 reinterpret_cast<cpuset_t *>(buf));
  KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                "initial getaffinity call returned %d errno = %d\n",
                gCode, errno));
  if (gCode == 0) {
    KMP_AFFINITY_ENABLE(KMP_CPU_SET_SIZE_LIMIT);
    KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                  "affinity supported (mask size %d)\n",
                  (int)__kmp_affin_mask_size));
    KMP_INTERNAL_FREE(buf);
    return;
  }
#endif
  // Failure path: no workable mask size was found.
  KMP_INTERNAL_FREE(buf);

  // Affinity is not supported.
  KMP_AFFINITY_DISABLE();
  KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                "cannot determine mask size - affinity not supported\n"));
  if (verbose || (warnings && (type != affinity_none) &&
                  (type != affinity_default) && (type != affinity_disabled))) {
    KMP_WARNING(AffCantGetMaskSize, env_var);
  }
}
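
// Illustrative sketch (not part of the runtime, excluded from the build):
// the probing strategy above can be reproduced standalone. The helper name
// is hypothetical; syscall(__NR_sched_getaffinity, ...) is the real kernel
// interface. The kernel rejects buffers smaller than its cpumask with
// EINVAL, so doubling the size until the call succeeds recovers the mask
// size it expects.
#if 0
#include <errno.h>
#include <sys/syscall.h>
#include <unistd.h>

// Returns the number of bytes the kernel wrote (a workable mask size), or -1
// if no size up to `limit` works.
static long probe_affinity_mask_size(long limit) {
  static unsigned char buf[1024 * 1024];
  for (long size = 1; size <= limit && (size_t)size <= sizeof(buf); size *= 2) {
    long rc = syscall(__NR_sched_getaffinity, 0, size, buf);
    if (rc > 0)
      return rc;
    if (errno == ENOSYS)
      return -1; // syscall not supported at all
  }
  return -1;
}
#endif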
int __kmp_futex_determine_capable() {
  int loc = 0;
  long rc = syscall(__NR_futex, &loc, FUTEX_WAKE, 1, NULL, NULL, 0);
  int retval = (rc == 0) || (errno != ENOSYS);

  KA_TRACE(10,
           ("__kmp_futex_determine_capable: rc = %d errno = %d\n", rc, errno));
  KA_TRACE(10, ("__kmp_futex_determine_capable: futex syscall%s supported\n",
                retval ? "" : " not"));

  return retval;
}
#if (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_WASM) && (!KMP_ASM_INTRINS)
/* Only 32-bit "add" and 64-bit "compare & store" are mandatory (flags); the
   remaining atomics are emulated here with compare-and-store retry loops. */

kmp_int8 __kmp_test_then_or8(volatile kmp_int8 *p, kmp_int8 d) {
  kmp_int8 old_value, new_value;

  old_value = TCR_1(*p);
  new_value = old_value | d;

  while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_1(*p);
    new_value = old_value | d;
  }
  return old_value;
}

kmp_int8 __kmp_test_then_and8(volatile kmp_int8 *p, kmp_int8 d) {
  kmp_int8 old_value, new_value;

  old_value = TCR_1(*p);
  new_value = old_value & d;

  while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_1(*p);
    new_value = old_value & d;
  }
  return old_value;
}

kmp_uint32 __kmp_test_then_or32(volatile kmp_uint32 *p, kmp_uint32 d) {
  kmp_uint32 old_value, new_value;

  old_value = TCR_4(*p);
  new_value = old_value | d;

  while (!KMP_COMPARE_AND_STORE_REL32(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_4(*p);
    new_value = old_value | d;
  }
  return old_value;
}

kmp_uint32 __kmp_test_then_and32(volatile kmp_uint32 *p, kmp_uint32 d) {
  kmp_uint32 old_value, new_value;

  old_value = TCR_4(*p);
  new_value = old_value & d;

  while (!KMP_COMPARE_AND_STORE_REL32(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_4(*p);
    new_value = old_value & d;
  }
  return old_value;
}

#if KMP_ARCH_X86 || KMP_ARCH_WASM
kmp_int8 __kmp_test_then_add8(volatile kmp_int8 *p, kmp_int8 d) {
  kmp_int8 old_value, new_value;

  old_value = TCR_1(*p);
  new_value = old_value + d;

  while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_1(*p);
    new_value = old_value + d;
  }
  return old_value;
}

kmp_int64 __kmp_test_then_add64(volatile kmp_int64 *p, kmp_int64 d) {
  kmp_int64 old_value, new_value;

  old_value = TCR_8(*p);
  new_value = old_value + d;

  while (!KMP_COMPARE_AND_STORE_REL64(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_8(*p);
    new_value = old_value + d;
  }
  return old_value;
}
#endif /* KMP_ARCH_X86 || KMP_ARCH_WASM */

kmp_uint64 __kmp_test_then_or64(volatile kmp_uint64 *p, kmp_uint64 d) {
  kmp_uint64 old_value, new_value;

  old_value = TCR_8(*p);
  new_value = old_value | d;
  while (!KMP_COMPARE_AND_STORE_REL64(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_8(*p);
    new_value = old_value | d;
  }
  return old_value;
}

kmp_uint64 __kmp_test_then_and64(volatile kmp_uint64 *p, kmp_uint64 d) {
  kmp_uint64 old_value, new_value;

  old_value = TCR_8(*p);
  new_value = old_value & d;
  while (!KMP_COMPARE_AND_STORE_REL64(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_8(*p);
    new_value = old_value & d;
  }
  return old_value;
}

#endif /* (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_WASM) && !KMP_ASM_INTRINS */
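
// Illustrative sketch (not part of the runtime, excluded from the build):
// the same read-modify-write emulation expressed with std::atomic.
// compare_exchange_weak reloads `expected` with the current value on
// failure, so only the new value needs recomputing each iteration.
#if 0
#include <atomic>
#include <cstdint>

static std::int64_t fetch_then_or64(std::atomic<std::int64_t> &a,
                                    std::int64_t d) {
  std::int64_t expected = a.load(std::memory_order_relaxed);
  while (!a.compare_exchange_weak(expected, expected | d,
                                  std::memory_order_release,
                                  std::memory_order_relaxed)) {
    // `expected` was refreshed with the observed value; retry.
  }
  return expected; // old value, matching the test_then_* contract
}
#endif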
void __kmp_terminate_thread(int gtid) {
  int status;
  kmp_info_t *th = __kmp_threads[gtid];

  if (!th)
    return;

#ifdef KMP_CANCEL_THREADS
  KA_TRACE(10, ("__kmp_terminate_thread: kill (%d)\n", gtid));
  status = pthread_cancel(th->th.th_info.ds.ds_thread);
  if (status != 0 && status != ESRCH) {
    __kmp_fatal(KMP_MSG(CantTerminateWorkerThread), KMP_ERR(status),
                __kmp_msg_null);
  }
#endif
  KMP_YIELD(TRUE);
}
/* Set thread stack info.
   Returns TRUE if the stack parameters could be determined exactly, FALSE if
   incremental refinement is necessary. */
static kmp_int32 __kmp_set_stack_info(int gtid, kmp_info_t *th) {
  int stack_data;
#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||    \
    KMP_OS_HURD || KMP_OS_SOLARIS || KMP_OS_AIX
  pthread_attr_t attr;
  int status;
  size_t size = 0;
  void *addr = 0;

  /* Always do incremental stack refinement for ubermaster threads since the
     initial thread stack range can be reduced by sibling thread creation so
     pthread_attr_getstack may cause thread gtid aliasing. */
  if (!KMP_UBER_GTID(gtid)) {

    /* Fetch the real thread attributes. */
    status = pthread_attr_init(&attr);
    KMP_CHECK_SYSFAIL("pthread_attr_init", status);
#if KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD
    status = pthread_attr_get_np(pthread_self(), &attr);
    KMP_CHECK_SYSFAIL("pthread_attr_get_np", status);
#else
    status = pthread_getattr_np(pthread_self(), &attr);
    KMP_CHECK_SYSFAIL("pthread_getattr_np", status);
#endif
    status = pthread_attr_getstack(&attr, &addr, &size);
    KMP_CHECK_SYSFAIL("pthread_attr_getstack", status);
    KA_TRACE(60,
             ("__kmp_set_stack_info: T#%d pthread_attr_getstack returned size:"
              " %lu, low addr: %p\n",
              gtid, size, addr));
    status = pthread_attr_destroy(&attr);
    KMP_CHECK_SYSFAIL("pthread_attr_destroy", status);
  }

  if (size != 0 && addr != 0) { // was stack parameter determination successful?
    /* Store the correct base and size. */
    TCW_PTR(th->th.th_info.ds.ds_stackbase, (((char *)addr) + size));
    TCW_PTR(th->th.th_info.ds.ds_stacksize, size);
    TCW_4(th->th.th_info.ds.ds_stackgrow, FALSE);
    return TRUE;
  }
#endif
  /* Use incremental refinement starting from an initial conservative
     estimate. */
  TCW_PTR(th->th.th_info.ds.ds_stacksize, 0);
  TCW_PTR(th->th.th_info.ds.ds_stackbase, &stack_data);
  TCW_4(th->th.th_info.ds.ds_stackgrow, TRUE);
  return FALSE;
}
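
// Illustrative sketch (not part of the runtime, excluded from the build):
// querying the current thread's stack placement with the glibc
// pthread_getattr_np extension, as the function above does on Linux. The
// attribute object reports the low address; the logical stack base is
// low address + size.
#if 0
#define _GNU_SOURCE
#include <pthread.h>
#include <stdio.h>

static void print_stack_bounds(void) {
  pthread_attr_t attr;
  void *addr = NULL;
  size_t size = 0;
  if (pthread_getattr_np(pthread_self(), &attr) == 0) {
    pthread_attr_getstack(&attr, &addr, &size);
    pthread_attr_destroy(&attr);
    printf("stack: low=%p size=%zu high=%p\n", addr, size,
           (void *)((char *)addr + size));
  }
}
#endif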
static void *__kmp_launch_worker(void *thr) {
  int status, old_type, old_state;
#ifdef KMP_BLOCK_SIGNALS
  sigset_t new_set, old_set;
#endif /* KMP_BLOCK_SIGNALS */
  void *exit_val;
#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||    \
    KMP_OS_OPENBSD || KMP_OS_HURD || KMP_OS_SOLARIS
  void *volatile padding = 0;
#endif
  int gtid;

  gtid = ((kmp_info_t *)thr)->th.th_info.ds.ds_gtid;
  __kmp_gtid_set_specific(gtid);
#if KMP_STATS_ENABLED
  // set thread local index to point to thread-specific stats
  __kmp_stats_thread_ptr = ((kmp_info_t *)thr)->th.th_stats;
  __kmp_stats_thread_ptr->startLife();
  KMP_SET_THREAD_STATE(IDLE);
#endif

#if USE_ITT_BUILD
  __kmp_itt_thread_name(gtid);
#endif /* USE_ITT_BUILD */

#if KMP_AFFINITY_SUPPORTED
  __kmp_affinity_bind_init_mask(gtid);
#endif

#ifdef KMP_CANCEL_THREADS
  status = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old_type);
  KMP_CHECK_SYSFAIL("pthread_setcanceltype", status);
  status = pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
#endif

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
  // Set FP control regs to be a copy of the parallel initialization thread's.
  __kmp_clear_x87_fpu_status_word();
  __kmp_load_x87_fpu_control_word(&__kmp_init_x87_fpu_control_word);
  __kmp_load_mxcsr(&__kmp_init_mxcsr);
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

#ifdef KMP_BLOCK_SIGNALS
  status = sigfillset(&new_set);
  KMP_CHECK_SYSFAIL_ERRNO("sigfillset", status);
  status = pthread_sigmask(SIG_BLOCK, &new_set, &old_set);
  KMP_CHECK_SYSFAIL("pthread_sigmask", status);
#endif /* KMP_BLOCK_SIGNALS */

#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||    \
    KMP_OS_OPENBSD || KMP_OS_HURD || KMP_OS_SOLARIS
  if (__kmp_stkoffset > 0 && gtid > 0) {
    padding = KMP_ALLOCA(gtid * __kmp_stkoffset);
  }
#endif

  __kmp_set_stack_info(gtid, (kmp_info_t *)thr);

  __kmp_check_stack_overlap((kmp_info_t *)thr);

  exit_val = __kmp_launch_thread((kmp_info_t *)thr);

#ifdef KMP_BLOCK_SIGNALS
  status = pthread_sigmask(SIG_SETMASK, &old_set, NULL);
  KMP_CHECK_SYSFAIL("pthread_sigmask", status);
#endif /* KMP_BLOCK_SIGNALS */

  return exit_val;
}
#if KMP_USE_MONITOR
/* The monitor thread controls all of the threads in the complex by waking
   them at regular intervals. */
static void *__kmp_launch_monitor(void *thr) {
  int status, old_type, old_state;
#ifdef KMP_BLOCK_SIGNALS
  sigset_t new_set;
#endif /* KMP_BLOCK_SIGNALS */
  struct timespec interval;

  KA_TRACE(10, ("__kmp_launch_monitor: #1 launched\n"));

  // Register this thread as the monitor thread.
  __kmp_gtid_set_specific(KMP_GTID_MONITOR);
#ifdef KMP_TDATA_GTID
  __kmp_gtid = KMP_GTID_MONITOR;
#endif

#if USE_ITT_BUILD
  // Instruct Intel(R) Threading Tools to ignore the monitor thread.
  __kmp_itt_thread_ignore();
#endif /* USE_ITT_BUILD */

  __kmp_set_stack_info(((kmp_info_t *)thr)->th.th_info.ds.ds_gtid,
                       (kmp_info_t *)thr);

  __kmp_check_stack_overlap((kmp_info_t *)thr);

#ifdef KMP_CANCEL_THREADS
  status = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old_type);
  KMP_CHECK_SYSFAIL("pthread_setcanceltype", status);
  status = pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
#endif

#if KMP_REAL_TIME_FIX
  // If the monitor runs at a real-time priority it can starve; bump its
  // priority above the workers, or warn if that isn't possible.
  int sched = sched_getscheduler(0);
  if (sched == SCHED_FIFO || sched == SCHED_RR) {
    int rc;
    struct sched_param param;
    int max_priority = sched_get_priority_max(sched);
    if (max_priority < 0) {
      KMP_WARNING(RealTimeSchedNotSupported);
    }
    sched_getparam(0, &param);
    if (param.sched_priority < max_priority) {
      param.sched_priority += 1;
      rc = sched_setscheduler(0, sched, &param);
      if (rc != 0) {
        int error = errno;
        kmp_msg_t err_code = KMP_ERR(error);
        __kmp_msg(kmp_ms_warning, KMP_MSG(CantChangeMonitorPriority),
                  err_code, KMP_MSG(MonitorWillStarve), __kmp_msg_null);
        if (__kmp_generate_warnings == kmp_warnings_off) {
          __kmp_str_free(&err_code.str);
        }
      }
    } else {
      // We cannot raise the priority any further.
      __kmp_msg(kmp_ms_warning, KMP_MSG(RunningAtMaxPriority),
                KMP_MSG(MonitorWillStarve), KMP_HNT(RunningAtMaxPriority),
                __kmp_msg_null);
    }
  }
  // Free the thread that waits for the monitor to start.
  TCW_4(__kmp_global.g.g_time.dt.t_value, 0);
#endif // KMP_REAL_TIME_FIX

  if (__kmp_monitor_wakeups == 1) {
    interval.tv_sec = 1;
    interval.tv_nsec = 0;
  } else {
    interval.tv_sec = 0;
    interval.tv_nsec = (KMP_NSEC_PER_SEC / __kmp_monitor_wakeups);
  }

  KA_TRACE(10, ("__kmp_launch_monitor: #2 monitor\n"));

  while (!TCR_4(__kmp_global.g.g_done)) {
    struct timespec now;
    struct timeval tval;

    KA_TRACE(15, ("__kmp_launch_monitor: update\n"));

    status = gettimeofday(&tval, NULL);
    KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
    TIMEVAL_TO_TIMESPEC(&tval, &now);

    now.tv_sec += interval.tv_sec;
    now.tv_nsec += interval.tv_nsec;

    if (now.tv_nsec >= KMP_NSEC_PER_SEC) {
      now.tv_sec += 1;
      now.tv_nsec -= KMP_NSEC_PER_SEC;
    }

    status = pthread_mutex_lock(&__kmp_wait_mx.m_mutex);
    KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
    // The monitor should not fall asleep if g_done has been set.
    if (!TCR_4(__kmp_global.g.g_done)) { // check once more under mutex
      status = pthread_cond_timedwait(&__kmp_wait_cv.c_cond,
                                      &__kmp_wait_mx.m_mutex, &now);
      if (status != 0) {
        if (status != ETIMEDOUT && status != EINTR) {
          KMP_SYSFAIL("pthread_cond_timedwait", status);
        }
      }
    }
    status = pthread_mutex_unlock(&__kmp_wait_mx.m_mutex);
    KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);

    TCW_4(__kmp_global.g.g_time.dt.t_value,
          TCR_4(__kmp_global.g.g_time.dt.t_value) + 1);
  }

  KA_TRACE(10, ("__kmp_launch_monitor: #3 cleanup\n"));

#ifdef KMP_BLOCK_SIGNALS
  status = sigfillset(&new_set);
  KMP_CHECK_SYSFAIL_ERRNO("sigfillset", status);
  status = pthread_sigmask(SIG_UNBLOCK, &new_set, NULL);
  KMP_CHECK_SYSFAIL("pthread_sigmask", status);
#endif /* KMP_BLOCK_SIGNALS */

  KA_TRACE(10, ("__kmp_launch_monitor: #4 finished\n"));

  if (__kmp_global.g.g_abort != 0) {
    /* Now we need to terminate the worker threads. */
    int gtid;

    KA_TRACE(10, ("__kmp_launch_monitor: #5 terminate sig=%d\n",
                  __kmp_global.g.g_abort));

    for (gtid = 1; gtid < __kmp_threads_capacity; ++gtid)
      __kmp_terminate_thread(gtid);

    KA_TRACE(10, ("__kmp_launch_monitor: #6 raise sig=%d\n",
                  __kmp_global.g.g_abort));

    if (__kmp_global.g.g_abort > 0)
      raise(__kmp_global.g.g_abort);
  }

  KA_TRACE(10, ("__kmp_launch_monitor: #7 exit\n"));

  return thr;
}
#endif // KMP_USE_MONITOR
void __kmp_create_worker(int gtid, kmp_info_t *th, size_t stack_size) {
  pthread_t handle;
  pthread_attr_t thread_attr;
  int status;

  th->th.th_info.ds.ds_gtid = gtid;

#if KMP_STATS_ENABLED
  // Set up worker thread stats before any possible use by the worker thread.
  __kmp_acquire_tas_lock(&__kmp_stats_lock, gtid);

  if (!KMP_UBER_GTID(gtid)) {
    th->th.th_stats = __kmp_stats_list->push_back(gtid);
  } else {
    // For root threads, __kmp_stats_thread_ptr is set in __kmp_register_root.
    th->th.th_stats = __kmp_stats_thread_ptr;
  }
  __kmp_release_tas_lock(&__kmp_stats_lock, gtid);
#endif // KMP_STATS_ENABLED

  if (KMP_UBER_GTID(gtid)) {
    KA_TRACE(10, ("__kmp_create_worker: uber thread (%d)\n", gtid));
    th->th.th_info.ds.ds_thread = pthread_self();
    __kmp_set_stack_info(gtid, th);
    __kmp_check_stack_overlap(th);
    return;
  }

  KA_TRACE(10, ("__kmp_create_worker: try to create thread (%d)\n", gtid));

#ifdef KMP_THREAD_ATTR
  status = pthread_attr_init(&thread_attr);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantInitThreadAttrs), KMP_ERR(status), __kmp_msg_null);
  }
  status = pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_JOINABLE);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantSetWorkerState), KMP_ERR(status), __kmp_msg_null);
  }

  /* Set stack size for this thread now. The multiple of 2 is there because
     we alloca() twice the offset so the upcoming alloca() does not eliminate
     any premade offset, and the user still gets the requested stack space. */
  stack_size += gtid * __kmp_stkoffset * 2;

  KA_TRACE(10, ("__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
                "__kmp_stksize = %lu bytes, final stacksize = %lu bytes\n",
                gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size));

#ifdef _POSIX_THREAD_ATTR_STACKSIZE
  status = pthread_attr_setstacksize(&thread_attr, stack_size);
#ifdef KMP_BACKUP_STKSIZE
  if (status != 0) {
    if (!__kmp_env_stksize) {
      stack_size = KMP_BACKUP_STKSIZE + gtid * __kmp_stkoffset;
      __kmp_stksize = KMP_BACKUP_STKSIZE;
      KA_TRACE(10, ("__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
                    "__kmp_stksize = %lu bytes, (backup) final stacksize = %lu "
                    "bytes\n",
                    gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size));
      status = pthread_attr_setstacksize(&thread_attr, stack_size);
    }
  }
#endif /* KMP_BACKUP_STKSIZE */
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantSetWorkerStackSize, stack_size), KMP_ERR(status),
                KMP_HNT(ChangeWorkerStackSize), __kmp_msg_null);
  }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
#endif /* KMP_THREAD_ATTR */

  status =
      pthread_create(&handle, &thread_attr, __kmp_launch_worker, (void *)th);
  if (status != 0 || !handle) {
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
    if (status == EINVAL) {
      __kmp_fatal(KMP_MSG(CantSetWorkerStackSize, stack_size), KMP_ERR(status),
                  KMP_HNT(IncreaseWorkerStackSize), __kmp_msg_null);
    }
    if (status == ENOMEM) {
      __kmp_fatal(KMP_MSG(CantSetWorkerStackSize, stack_size), KMP_ERR(status),
                  KMP_HNT(DecreaseWorkerStackSize), __kmp_msg_null);
    }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
    if (status == EAGAIN) {
      __kmp_fatal(KMP_MSG(NoResourcesForWorkerThread), KMP_ERR(status),
                  KMP_HNT(Decrease_NUM_THREADS), __kmp_msg_null);
    }
    KMP_SYSFAIL("pthread_create", status);
  }

  th->th.th_info.ds.ds_thread = handle;

#ifdef KMP_THREAD_ATTR
  status = pthread_attr_destroy(&thread_attr);
  if (status != 0) {
    kmp_msg_t err_code = KMP_ERR(status);
    __kmp_msg(kmp_ms_warning, KMP_MSG(CantDestroyThreadAttrs), err_code,
              __kmp_msg_null);
    if (__kmp_generate_warnings == kmp_warnings_off) {
      __kmp_str_free(&err_code.str);
    }
  }
#endif /* KMP_THREAD_ATTR */

  KA_TRACE(10, ("__kmp_create_worker: done creating thread (%d)\n", gtid));
}
#if KMP_USE_MONITOR
void __kmp_create_monitor(kmp_info_t *th) {
  pthread_t handle;
  pthread_attr_t thread_attr;
  size_t size;
  int status;
  int auto_adj_size = FALSE;

  if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME) {
    // We don't need the monitor thread in case of MAX_BLOCKTIME.
    KA_TRACE(10, ("__kmp_create_monitor: skipping monitor thread because of "
                  "MAX blocktime\n"));
    th->th.th_info.ds.ds_tid = 0; // this makes reap_monitor a no-op
    th->th.th_info.ds.ds_gtid = 0;
    return;
  }
  KA_TRACE(10, ("__kmp_create_monitor: try to create monitor\n"));

  th->th.th_info.ds.ds_tid = KMP_GTID_MONITOR;
  th->th.th_info.ds.ds_gtid = KMP_GTID_MONITOR;
#if KMP_REAL_TIME_FIX
  TCW_4(__kmp_global.g.g_time.dt.t_value,
        -1); // Will use it for synchronization a bit later.
#else
  TCW_4(__kmp_global.g.g_time.dt.t_value, 0);
#endif // KMP_REAL_TIME_FIX

#ifdef KMP_THREAD_ATTR
  if (__kmp_monitor_stksize == 0) {
    __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE;
    auto_adj_size = TRUE;
  }
  status = pthread_attr_init(&thread_attr);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantInitThreadAttrs), KMP_ERR(status), __kmp_msg_null);
  }
  status = pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_JOINABLE);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantSetMonitorState), KMP_ERR(status), __kmp_msg_null);
  }

#ifdef _POSIX_THREAD_ATTR_STACKSIZE
  status = pthread_attr_getstacksize(&thread_attr, &size);
  KMP_CHECK_SYSFAIL("pthread_attr_getstacksize", status);
#else
  size = __kmp_sys_min_stksize;
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
#endif /* KMP_THREAD_ATTR */

  if (__kmp_monitor_stksize == 0) {
    __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE;
  }
  if (__kmp_monitor_stksize < __kmp_sys_min_stksize) {
    __kmp_monitor_stksize = __kmp_sys_min_stksize;
  }

  KA_TRACE(10, ("__kmp_create_monitor: default stacksize = %lu bytes, "
                "requested stacksize = %lu bytes\n",
                size, __kmp_monitor_stksize));

retry:

#ifdef _POSIX_THREAD_ATTR_STACKSIZE
  KA_TRACE(10, ("__kmp_create_monitor: setting stacksize = %lu bytes\n",
                __kmp_monitor_stksize));
  status = pthread_attr_setstacksize(&thread_attr, __kmp_monitor_stksize);
  if (status != 0) {
    if (auto_adj_size) {
      __kmp_monitor_stksize *= 2;
      goto retry;
    }
    kmp_msg_t err_code = KMP_ERR(status);
    __kmp_msg(kmp_ms_warning,
              KMP_MSG(CantSetMonitorStackSize, (long int)__kmp_monitor_stksize),
              err_code, KMP_HNT(ChangeMonitorStackSize), __kmp_msg_null);
    if (__kmp_generate_warnings == kmp_warnings_off) {
      __kmp_str_free(&err_code.str);
    }
  }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */

  status =
      pthread_create(&handle, &thread_attr, __kmp_launch_monitor, (void *)th);

  if (status != 0) {
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
    if (status == EINVAL) {
      if (auto_adj_size && (__kmp_monitor_stksize < (size_t)0x40000000)) {
        __kmp_monitor_stksize *= 2;
        goto retry;
      }
      __kmp_fatal(KMP_MSG(CantSetMonitorStackSize, __kmp_monitor_stksize),
                  KMP_ERR(status), KMP_HNT(IncreaseMonitorStackSize),
                  __kmp_msg_null);
    }
    if (status == ENOMEM) {
      __kmp_fatal(KMP_MSG(CantSetMonitorStackSize, __kmp_monitor_stksize),
                  KMP_ERR(status), KMP_HNT(DecreaseMonitorStackSize),
                  __kmp_msg_null);
    }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
    if (status == EAGAIN) {
      __kmp_fatal(KMP_MSG(NoResourcesForMonitorThread), KMP_ERR(status),
                  KMP_HNT(DecreaseNumberOfThreadsInUse), __kmp_msg_null);
    }
    KMP_SYSFAIL("pthread_create", status);
  }

  th->th.th_info.ds.ds_thread = handle;

#if KMP_REAL_TIME_FIX
  // Wait until the monitor thread has really started and set its priority.
  KMP_DEBUG_ASSERT(sizeof(kmp_uint32) ==
                   sizeof(__kmp_global.g.g_time.dt.t_value));
  __kmp_wait_4((kmp_uint32 volatile *)&__kmp_global.g.g_time.dt.t_value, -1,
               &__kmp_neq_4, NULL);
#endif // KMP_REAL_TIME_FIX

#ifdef KMP_THREAD_ATTR
  status = pthread_attr_destroy(&thread_attr);
  if (status != 0) {
    kmp_msg_t err_code = KMP_ERR(status);
    __kmp_msg(kmp_ms_warning, KMP_MSG(CantDestroyThreadAttrs), err_code,
              __kmp_msg_null);
    if (__kmp_generate_warnings == kmp_warnings_off) {
      __kmp_str_free(&err_code.str);
    }
  }
#endif

  KA_TRACE(10, ("__kmp_create_monitor: monitor created %#.8lx\n",
                th->th.th_info.ds.ds_thread));
}
#endif // KMP_USE_MONITOR
void __kmp_exit_thread(int exit_status) {
  pthread_exit((void *)(intptr_t)exit_status);
}
#if KMP_USE_MONITOR
void __kmp_resume_monitor();

extern "C" void __kmp_reap_monitor(kmp_info_t *th) {
  int status;
  void *exit_val;

  KA_TRACE(10, ("__kmp_reap_monitor: try to reap monitor thread with handle"
                " %#.8lx\n",
                th->th.th_info.ds.ds_thread));

  // If monitor has been created, its tid and gtid should be KMP_GTID_MONITOR.
  // If both tid and gtid are 0, it means the monitor did not ever start.
  // If both tid and gtid are KMP_GTID_DNE, the monitor has been shut down.
  KMP_DEBUG_ASSERT(th->th.th_info.ds.ds_tid == th->th.th_info.ds.ds_gtid);
  if (th->th.th_info.ds.ds_gtid != KMP_GTID_MONITOR) {
    KA_TRACE(10, ("__kmp_reap_monitor: monitor did not start, returning\n"));
    return;
  }

  /* First, check to see whether the monitor thread exists to wake it up. This
     avoids performance problems when the monitor sleeps during a
     blocktime-size interval. */
  status = pthread_kill(th->th.th_info.ds.ds_thread, 0);
  if (status != ESRCH) {
    __kmp_resume_monitor(); // Wake up the monitor thread.
  }
  KA_TRACE(10, ("__kmp_reap_monitor: try to join with monitor\n"));
  status = pthread_join(th->th.th_info.ds.ds_thread, &exit_val);
  if (exit_val != th) {
    __kmp_fatal(KMP_MSG(ReapMonitorError), KMP_ERR(status), __kmp_msg_null);
  }

  th->th.th_info.ds.ds_tid = KMP_GTID_DNE;
  th->th.th_info.ds.ds_gtid = KMP_GTID_DNE;

  KA_TRACE(10, ("__kmp_reap_monitor: done reaping monitor thread with handle"
                " %#.8lx\n",
                th->th.th_info.ds.ds_thread));
}
#else
// Empty symbol to export (see exports_so.txt) when monitor thread is disabled.
extern "C" void __kmp_reap_monitor(kmp_info_t *th) { (void)th; }
#endif // KMP_USE_MONITOR
void __kmp_reap_worker(kmp_info_t *th) {
  int status;
  void *exit_val;

  KA_TRACE(
      10, ("__kmp_reap_worker: try to reap T#%d\n", th->th.th_info.ds.ds_gtid));

  status = pthread_join(th->th.th_info.ds.ds_thread, &exit_val);
#ifdef KMP_DEBUG
  /* Don't expose these to the user until we understand when they trigger. */
  if (status != 0) {
    __kmp_fatal(KMP_MSG(ReapWorkerError), KMP_ERR(status), __kmp_msg_null);
  }
  if (exit_val != th) {
    KA_TRACE(10, ("__kmp_reap_worker: worker T#%d did not reap properly, "
                  "exit_val = %p\n",
                  th->th.th_info.ds.ds_gtid, exit_val));
  }
#endif /* KMP_DEBUG */

  KA_TRACE(10, ("__kmp_reap_worker: done reaping T#%d\n",
                th->th.th_info.ds.ds_gtid));
}
#if KMP_HANDLE_SIGNALS

static void __kmp_null_handler(int signo) {
  // Do nothing; used for SIG_IGN-type actions.
}

static void __kmp_team_handler(int signo) {
  if (__kmp_global.g.g_abort == 0) {
/* Stage 1 signal handler: shut down all of the threads. */
#ifdef KMP_DEBUG
    __kmp_debug_printf("__kmp_team_handler: caught signal = %d\n", signo);
#endif
    switch (signo) {
    case SIGHUP:
    case SIGINT:
    case SIGQUIT:
    case SIGILL:
    case SIGABRT:
    case SIGFPE:
    case SIGBUS:
    case SIGSEGV:
#ifdef SIGSYS
    case SIGSYS:
#endif
    case SIGTERM:
      if (__kmp_debug_buf) {
        __kmp_dump_debug_buffer();
      }
      __kmp_unregister_library(); // cleanup shared memory
      TCW_4(__kmp_global.g.g_abort, signo);
      TCW_4(__kmp_global.g.g_done, TRUE);
      break;
    default:
#ifdef KMP_DEBUG
      __kmp_debug_printf("__kmp_team_handler: unknown signal type");
#endif
      break;
    }
  }
}

static void __kmp_sigaction(int signum, const struct sigaction *act,
                            struct sigaction *oldact) {
  int rc = sigaction(signum, act, oldact);
  KMP_CHECK_SYSFAIL_ERRNO("sigaction", rc);
}

static void __kmp_install_one_handler(int sig, sig_func_t handler_func,
                                      int parallel_init) {
  KB_TRACE(60,
           ("__kmp_install_one_handler( %d, ..., %d )\n", sig, parallel_init));
  if (parallel_init) {
    struct sigaction new_action;
    struct sigaction old_action;
    new_action.sa_handler = handler_func;
    new_action.sa_flags = 0;
    sigfillset(&new_action.sa_mask);
    __kmp_sigaction(sig, &new_action, &old_action);
    if (old_action.sa_handler == __kmp_sighldrs[sig].sa_handler) {
      sigaddset(&__kmp_sigset, sig);
    } else {
      // Restore/keep a user handler if one was installed in the meantime.
      __kmp_sigaction(sig, &old_action, NULL);
    }
  } else {
    // Save initial/system signal handlers to see if user handlers are
    // installed later.
    __kmp_sigaction(sig, NULL, &__kmp_sighldrs[sig]);
  }
}

static void __kmp_remove_one_handler(int sig) {
  KB_TRACE(60, ("__kmp_remove_one_handler( %d )\n", sig));
  if (sigismember(&__kmp_sigset, sig)) {
    struct sigaction old;
    __kmp_sigaction(sig, &__kmp_sighldrs[sig], &old);
    if ((old.sa_handler != __kmp_team_handler) &&
        (old.sa_handler != __kmp_null_handler)) {
      // Restore the user's signal handler.
      KB_TRACE(10, ("__kmp_remove_one_handler: oops, not our handler, "
                    "restoring: sig=%d\n",
                    sig));
      __kmp_sigaction(sig, &old, NULL);
    }
    sigdelset(&__kmp_sigset, sig);
  }
}
void __kmp_install_signals(int parallel_init) {
  KB_TRACE(10, ("__kmp_install_signals( %d )\n", parallel_init));
  if (__kmp_handle_signals || !parallel_init) {
    /* If !parallel_init, we do not install handlers, just save the original
       handlers. Let us handle signals in any case. */
    sigemptyset(&__kmp_sigset);
    __kmp_install_one_handler(SIGHUP, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGINT, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGQUIT, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGILL, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGABRT, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGFPE, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGBUS, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGSEGV, __kmp_team_handler, parallel_init);
#ifdef SIGSYS
    __kmp_install_one_handler(SIGSYS, __kmp_team_handler, parallel_init);
#endif // SIGSYS
    __kmp_install_one_handler(SIGTERM, __kmp_team_handler, parallel_init);
#ifdef SIGPIPE
    __kmp_install_one_handler(SIGPIPE, __kmp_team_handler, parallel_init);
#endif // SIGPIPE
  }
}

void __kmp_remove_signals(void) {
  int sig;
  KB_TRACE(10, ("__kmp_remove_signals()\n"));
  for (sig = 1; sig < NSIG; ++sig) {
    __kmp_remove_one_handler(sig);
  }
}

#endif // KMP_HANDLE_SIGNALS
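
// Illustrative sketch (not part of the runtime, excluded from the build):
// the install/remove pairing above saves the previous sigaction so it can
// be restored verbatim, and only removes a handler it installed itself.
#if 0
#include <signal.h>

static struct sigaction saved_action;

static void install_handler(int sig, void (*fn)(int)) {
  struct sigaction sa;
  sa.sa_handler = fn;
  sa.sa_flags = 0;
  sigfillset(&sa.sa_mask); // block everything while the handler runs
  sigaction(sig, &sa, &saved_action);
}

static void remove_handler(int sig) {
  sigaction(sig, &saved_action, NULL); // restore whatever was there before
}
#endif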
void __kmp_enable(int new_state) {
#ifdef KMP_CANCEL_THREADS
  int status, old_state;
  status = pthread_setcancelstate(new_state, &old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
  KMP_DEBUG_ASSERT(old_state == PTHREAD_CANCEL_DISABLE);
#endif
}

void __kmp_disable(int *old_state) {
#ifdef KMP_CANCEL_THREADS
  int status;
  status = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
#endif
}
static void __kmp_atfork_prepare(void) {
  __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);
  __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
}

static void __kmp_atfork_parent(void) {
  __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
  __kmp_release_bootstrap_lock(&__kmp_initz_lock);
}

/* Reset the library so execution in the child starts "all over again" with
   clean data structures in initial states. Don't worry about freeing memory
   allocated by the parent; it will all be reclaimed by the OS anyway. */
static void __kmp_atfork_child(void) {
  __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
  __kmp_release_bootstrap_lock(&__kmp_initz_lock);

#if KMP_AFFINITY_SUPPORTED
#if KMP_OS_LINUX || KMP_OS_FREEBSD
  // Reset the affinity in the child to the initial thread affinity in the
  // parent.
  kmp_set_thread_affinity_mask_initial();
#endif
  // Set default so the child does not bind threads tightly (we expect
  // over-subscription after the fork, and this improves things for scripting
  // languages that use OpenMP inside process-parallel code).
  if (__kmp_nested_proc_bind.bind_types != NULL) {
    __kmp_nested_proc_bind.bind_types[0] = proc_bind_false;
  }
  for (kmp_affinity_t *affinity : __kmp_affinities)
    *affinity = KMP_AFFINITY_INIT(affinity->env_var);
  __kmp_affin_fullMask = nullptr;
  __kmp_affin_origMask = nullptr;
  __kmp_topology = nullptr;
#endif // KMP_AFFINITY_SUPPORTED

#if KMP_USE_MONITOR
  __kmp_init_monitor = 0;
#endif
  __kmp_init_parallel = FALSE;
  __kmp_init_middle = FALSE;
  __kmp_init_serial = FALSE;
  TCW_4(__kmp_init_gtid, FALSE);
  __kmp_init_common = FALSE;

  TCW_4(__kmp_init_user_locks, FALSE);
#if !KMP_USE_DYNAMIC_LOCK
  __kmp_user_lock_table.used = 1;
  __kmp_user_lock_table.allocated = 0;
  __kmp_user_lock_table.table = NULL;
  __kmp_lock_blocks = NULL;
#endif

  TCW_4(__kmp_nth, 0);

  __kmp_thread_pool = NULL;
  __kmp_thread_pool_insert_pt = NULL;
  __kmp_team_pool = NULL;

  /* Must actually zero all the *cache arguments passed to __kmpc_threadprivate
     here so threadprivate doesn't use stale data. */
  KA_TRACE(10, ("__kmp_atfork_child: checking cache address list %p\n",
                __kmp_threadpriv_cache_list));

  while (__kmp_threadpriv_cache_list != NULL) {
    if (*__kmp_threadpriv_cache_list->addr != NULL) {
      KC_TRACE(50, ("__kmp_atfork_child: zeroing cache at address %p\n",
                    &(*__kmp_threadpriv_cache_list->addr)));
      *__kmp_threadpriv_cache_list->addr = NULL;
    }
    __kmp_threadpriv_cache_list = __kmp_threadpriv_cache_list->next;
  }

  __kmp_init_runtime = FALSE;

  /* Reset statically initialized locks. */
  __kmp_init_bootstrap_lock(&__kmp_initz_lock);
  __kmp_init_bootstrap_lock(&__kmp_stdio_lock);
  __kmp_init_bootstrap_lock(&__kmp_console_lock);
  __kmp_init_bootstrap_lock(&__kmp_task_team_lock);

  /* This is necessary to make sure no stale data is left around. */
  __kmp_need_register_serial = FALSE;
  __kmp_serial_initialize();
}
void __kmp_register_atfork(void) {
  if (__kmp_need_register_atfork) {
    int status = pthread_atfork(__kmp_atfork_prepare, __kmp_atfork_parent,
                                __kmp_atfork_child);
    KMP_CHECK_SYSFAIL("pthread_atfork", status);
    __kmp_need_register_atfork = FALSE;
  }
}
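
// Illustrative sketch (not part of the runtime, excluded from the build):
// pthread_atfork registers three callbacks. The prepare hook acquires locks
// before fork() so the child never inherits them in a locked state; parent
// and child both release.
#if 0
#include <pthread.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

static void prepare(void) { pthread_mutex_lock(&big_lock); }
static void parent(void) { pthread_mutex_unlock(&big_lock); }
static void child(void) { pthread_mutex_unlock(&big_lock); }

static void register_fork_hooks(void) {
  pthread_atfork(prepare, parent, child);
}
#endif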
void __kmp_suspend_initialize(void) {
  int status;
  status = pthread_mutexattr_init(&__kmp_suspend_mutex_attr);
  KMP_CHECK_SYSFAIL("pthread_mutexattr_init", status);
  status = pthread_condattr_init(&__kmp_suspend_cond_attr);
  KMP_CHECK_SYSFAIL("pthread_condattr_init", status);
}
void __kmp_suspend_initialize_thread(kmp_info_t *th) {
  int old_value = KMP_ATOMIC_LD_RLX(&th->th.th_suspend_init_count);
  int new_value = __kmp_fork_count + 1;
  // Return if already initialized.
  if (old_value == new_value)
    return;
  // Wait, then return if being initialized by another thread.
  if (old_value == -1 || !__kmp_atomic_compare_store(
                             &th->th.th_suspend_init_count, old_value, -1)) {
    while (KMP_ATOMIC_LD_ACQ(&th->th.th_suspend_init_count) != new_value) {
      KMP_CPU_PAUSE();
    }
  } else {
    // Claim to be the initializer and do the initializations.
    int status;
    status = pthread_cond_init(&th->th.th_suspend_cv.c_cond,
                               &__kmp_suspend_cond_attr);
    KMP_CHECK_SYSFAIL("pthread_cond_init", status);
    status = pthread_mutex_init(&th->th.th_suspend_mx.m_mutex,
                                &__kmp_suspend_mutex_attr);
    KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
    KMP_ATOMIC_ST_REL(&th->th.th_suspend_init_count, new_value);
  }
}
void __kmp_suspend_uninitialize_thread(kmp_info_t *th) {
  if (KMP_ATOMIC_LD_ACQ(&th->th.th_suspend_init_count) > __kmp_fork_count) {
    /* This means we have initialized the suspension pthread objects for this
       thread in this instance of the process. */
    int status;

    status = pthread_cond_destroy(&th->th.th_suspend_cv.c_cond);
    if (status != 0 && status != EBUSY) {
      KMP_SYSFAIL("pthread_cond_destroy", status);
    }
    status = pthread_mutex_destroy(&th->th.th_suspend_mx.m_mutex);
    if (status != 0 && status != EBUSY) {
      KMP_SYSFAIL("pthread_mutex_destroy", status);
    }
    --th->th.th_suspend_init_count;
    KMP_DEBUG_ASSERT(KMP_ATOMIC_LD_RLX(&th->th.th_suspend_init_count) ==
                     __kmp_fork_count);
  }
}
// Return true if the lock was obtained, false otherwise.
int __kmp_try_suspend_mx(kmp_info_t *th) {
  return (pthread_mutex_trylock(&th->th.th_suspend_mx.m_mutex) == 0);
}

void __kmp_lock_suspend_mx(kmp_info_t *th) {
  int status = pthread_mutex_lock(&th->th.th_suspend_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
}

void __kmp_unlock_suspend_mx(kmp_info_t *th) {
  int status = pthread_mutex_unlock(&th->th.th_suspend_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
}
/* This routine puts the calling thread to sleep after setting the sleep bit
   for the indicated flag variable to true. */
template <class C>
static inline void __kmp_suspend_template(int th_gtid, C *flag) {
  KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_suspend);
  kmp_info_t *th = __kmp_threads[th_gtid];
  int status;
  typename C::flag_t old_spin;

  KF_TRACE(30, ("__kmp_suspend_template: T#%d enter for flag = %p\n", th_gtid,
                flag->get()));

  __kmp_suspend_initialize_thread(th);

  __kmp_lock_suspend_mx(th);

  KF_TRACE(10, ("__kmp_suspend_template: T#%d setting sleep bit for spin(%p)\n",
                th_gtid, flag->get()));

  /* TODO: shouldn't this use release semantics to ensure that
     __kmp_suspend_initialize_thread gets called first? */
  old_spin = flag->set_sleeping();
  TCW_PTR(th->th.th_sleep_loc, (void *)flag);
  th->th.th_sleep_loc_type = flag->get_type();
  if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME &&
      __kmp_pause_status != kmp_soft_paused) {
    flag->unset_sleeping();
    TCW_PTR(th->th.th_sleep_loc, NULL);
    th->th.th_sleep_loc_type = flag_unset;
    __kmp_unlock_suspend_mx(th);
    return;
  }
  KF_TRACE(5, ("__kmp_suspend_template: T#%d set sleep bit for spin(%p)==%x,"
               " was %x\n",
               th_gtid, flag->get(), flag->load(), old_spin));

  if (flag->done_check_val(old_spin) || flag->done_check()) {
    flag->unset_sleeping();
    TCW_PTR(th->th.th_sleep_loc, NULL);
    th->th.th_sleep_loc_type = flag_unset;
    KF_TRACE(5, ("__kmp_suspend_template: T#%d false alarm, reset sleep bit "
                 "for spin(%p)\n",
                 th_gtid, flag->get()));
  } else {
    /* Encapsulate in a loop as the documentation states that this may "with
       low probability" return when the condition variable has not been
       signaled or broadcast. */
    int deactivated = FALSE;

    while (flag->is_sleeping()) {
#ifdef DEBUG_SUSPEND
      char buffer[128];
      __kmp_suspend_count++;
      __kmp_print_cond(buffer, &th->th.th_suspend_cv);
      __kmp_printf("__kmp_suspend_template: suspending T#%d: %s\n", th_gtid,
                   buffer);
#endif
      // Mark the thread as no longer active (only in the first iteration of
      // the loop).
      if (!deactivated) {
        th->th.th_active = FALSE;
        if (th->th.th_active_in_pool) {
          th->th.th_active_in_pool = FALSE;
          KMP_ATOMIC_DEC(&__kmp_thread_pool_active_nth);
          KMP_DEBUG_ASSERT(TCR_4(__kmp_thread_pool_active_nth) >= 0);
        }
        deactivated = TRUE;
      }

      KMP_DEBUG_ASSERT(th->th.th_sleep_loc);
      KMP_DEBUG_ASSERT(flag->get_type() == th->th.th_sleep_loc_type);

#if USE_SUSPEND_TIMEOUT
      struct timespec now;
      struct timeval tval;
      int msecs;

      status = gettimeofday(&tval, NULL);
      KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
      TIMEVAL_TO_TIMESPEC(&tval, &now);

      msecs = (4 * __kmp_dflt_blocktime) + 200;
      now.tv_sec += msecs / 1000;
      now.tv_nsec += (msecs % 1000) * 1000;

      KF_TRACE(15, ("__kmp_suspend_template: T#%d about to perform "
                    "pthread_cond_timedwait\n",
                    th_gtid));
      status = pthread_cond_timedwait(&th->th.th_suspend_cv.c_cond,
                                      &th->th.th_suspend_mx.m_mutex, &now);
#else
      KF_TRACE(15, ("__kmp_suspend_template: T#%d about to perform"
                    " pthread_cond_wait\n",
                    th_gtid));
      status = pthread_cond_wait(&th->th.th_suspend_cv.c_cond,
                                 &th->th.th_suspend_mx.m_mutex);
#endif // USE_SUSPEND_TIMEOUT

      if ((status != 0) && (status != EINTR) && (status != ETIMEDOUT)) {
        KMP_SYSFAIL("pthread_cond_wait", status);
      }

      KMP_DEBUG_ASSERT(flag->get_type() == flag->get_ptr_type());

      if (!flag->is_sleeping() &&
          ((status == EINTR) || (status == ETIMEDOUT))) {
        // If interrupt or timeout, and the thread is no longer sleeping, we
        // need to make sure sleep_loc gets reset; this shouldn't be needed if
        // we woke up via resume.
        flag->unset_sleeping();
        TCW_PTR(th->th.th_sleep_loc, NULL);
        th->th.th_sleep_loc_type = flag_unset;
      }
#ifdef KMP_DEBUG
      if (status == ETIMEDOUT) {
        if (flag->is_sleeping()) {
          KF_TRACE(100,
                   ("__kmp_suspend_template: T#%d timeout wakeup\n", th_gtid));
        } else {
          KF_TRACE(2, ("__kmp_suspend_template: T#%d timeout wakeup, sleep bit "
                       "not set!\n",
                       th_gtid));
          TCW_PTR(th->th.th_sleep_loc, NULL);
          th->th.th_sleep_loc_type = flag_unset;
        }
      } else if (flag->is_sleeping()) {
        KF_TRACE(100,
                 ("__kmp_suspend_template: T#%d spurious wakeup\n", th_gtid));
      }
#endif
    } // while

    // Mark the thread as active again (if it was previously deactivated).
    if (deactivated) {
      th->th.th_active = TRUE;
      if (TCR_4(th->th.th_in_pool)) {
        KMP_ATOMIC_INC(&__kmp_thread_pool_active_nth);
        th->th.th_active_in_pool = TRUE;
      }
    }
  }
  // We may have had the loop variable set before entering the loop body, so
  // we need to reset sleep_loc.
  TCW_PTR(th->th.th_sleep_loc, NULL);
  th->th.th_sleep_loc_type = flag_unset;

  KMP_DEBUG_ASSERT(!flag->is_sleeping());
  KMP_DEBUG_ASSERT(!th->th.th_sleep_loc);
#ifdef DEBUG_SUSPEND
  {
    char buffer[128];
    __kmp_print_cond(buffer, &th->th.th_suspend_cv);
    __kmp_printf("__kmp_suspend_template: T#%d has awakened: %s\n", th_gtid,
                 buffer);
  }
#endif

  __kmp_unlock_suspend_mx(th);
  KF_TRACE(30, ("__kmp_suspend_template: T#%d exit\n", th_gtid));
}
template <bool C, bool S>
void __kmp_suspend_32(int th_gtid, kmp_flag_32<C, S> *flag) {
  __kmp_suspend_template(th_gtid, flag);
}
template <bool C, bool S>
void __kmp_suspend_64(int th_gtid, kmp_flag_64<C, S> *flag) {
  __kmp_suspend_template(th_gtid, flag);
}
template <bool C, bool S>
void __kmp_atomic_suspend_64(int th_gtid, kmp_atomic_flag_64<C, S> *flag) {
  __kmp_suspend_template(th_gtid, flag);
}
void __kmp_suspend_oncore(int th_gtid, kmp_flag_oncore *flag) {
  __kmp_suspend_template(th_gtid, flag);
}

template void __kmp_suspend_32<false, false>(int, kmp_flag_32<false, false> *);
template void __kmp_suspend_64<false, true>(int, kmp_flag_64<false, true> *);
template void __kmp_suspend_64<true, false>(int, kmp_flag_64<true, false> *);
template void
__kmp_atomic_suspend_64<false, true>(int, kmp_atomic_flag_64<false, true> *);
template void
__kmp_atomic_suspend_64<true, false>(int, kmp_atomic_flag_64<true, false> *);
/* This routine signals the thread specified by target_gtid to wake up after
   setting the sleep bit indicated by the flag argument to FALSE. The target
   thread must already have called __kmp_suspend_template(). */
template <class C>
static inline void __kmp_resume_template(int target_gtid, C *flag) {
  KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_resume);
  kmp_info_t *th = __kmp_threads[target_gtid];
  int status;

#ifdef KMP_DEBUG
  int gtid = TCR_4(__kmp_init_gtid) ? __kmp_get_gtid() : -1;
#endif

  KF_TRACE(30, ("__kmp_resume_template: T#%d wants to wakeup T#%d enter\n",
                gtid, target_gtid));
  KMP_DEBUG_ASSERT(gtid != target_gtid);

  __kmp_suspend_initialize_thread(th);

  __kmp_lock_suspend_mx(th);

  if (!flag || flag != th->th.th_sleep_loc) {
    // coming from __kmp_null_resume_wrapper, or the thread is now sleeping on
    // a different location; wake up at the new location
    flag = (C *)CCAST(void *, th->th.th_sleep_loc);
  }

  // First, check if the flag is null or its type has changed. If so, someone
  // else woke it up.
  if (!flag) { // Thread doesn't appear to be sleeping on anything
    KF_TRACE(5, ("__kmp_resume_template: T#%d exiting, thread T#%d already "
                 "awake: flag(%p)\n",
                 gtid, target_gtid, (void *)NULL));
    __kmp_unlock_suspend_mx(th);
    return;
  } else if (flag->get_type() != th->th.th_sleep_loc_type) {
    // Flag type does not appear to match this function template; possibly the
    // thread is sleeping on something else. Try null resume again.
    KF_TRACE(
        5,
        ("__kmp_resume_template: T#%d retrying, thread T#%d Mismatch flag(%p), "
         "spin(%p) type=%d ptr_type=%d\n",
         gtid, target_gtid, flag, flag->get(), flag->get_type(),
         th->th.th_sleep_loc_type));
    __kmp_unlock_suspend_mx(th);
    __kmp_null_resume_wrapper(th);
    return;
  } else { // if multiple threads are sleeping, the flag should be internally
    // referring to a specific thread here
    if (!flag->is_sleeping()) {
      KF_TRACE(5, ("__kmp_resume_template: T#%d exiting, thread T#%d already "
                   "awake: flag(%p): %u\n",
                   gtid, target_gtid, flag->get(), (unsigned int)flag->load()));
      __kmp_unlock_suspend_mx(th);
      return;
    }
  }
  KMP_DEBUG_ASSERT(flag);
  flag->unset_sleeping();
  TCW_PTR(th->th.th_sleep_loc, NULL);
  th->th.th_sleep_loc_type = flag_unset;

  KF_TRACE(5, ("__kmp_resume_template: T#%d about to wakeup T#%d, reset "
               "sleep bit for flag's loc(%p): %u\n",
               gtid, target_gtid, flag->get(), (unsigned int)flag->load()));

#ifdef DEBUG_SUSPEND
  {
    char buffer[128];
    __kmp_print_cond(buffer, &th->th.th_suspend_cv);
    __kmp_printf("__kmp_resume_template: T#%d resuming T#%d: %s\n", gtid,
                 target_gtid, buffer);
  }
#endif

  status = pthread_cond_signal(&th->th.th_suspend_cv.c_cond);
  KMP_CHECK_SYSFAIL("pthread_cond_signal", status);
  __kmp_unlock_suspend_mx(th);
  KF_TRACE(30, ("__kmp_resume_template: T#%d exiting after signaling wake up"
                " for T#%d\n",
                gtid, target_gtid));
}
template <bool C, bool S>
void __kmp_resume_32(int target_gtid, kmp_flag_32<C, S> *flag) {
  __kmp_resume_template(target_gtid, flag);
}
template <bool C, bool S>
void __kmp_resume_64(int target_gtid, kmp_flag_64<C, S> *flag) {
  __kmp_resume_template(target_gtid, flag);
}
template <bool C, bool S>
void __kmp_atomic_resume_64(int target_gtid, kmp_atomic_flag_64<C, S> *flag) {
  __kmp_resume_template(target_gtid, flag);
}
void __kmp_resume_oncore(int target_gtid, kmp_flag_oncore *flag) {
  __kmp_resume_template(target_gtid, flag);
}

template void __kmp_resume_32<false, true>(int, kmp_flag_32<false, true> *);
template void __kmp_resume_32<false, false>(int, kmp_flag_32<false, false> *);
template void __kmp_resume_64<false, true>(int, kmp_flag_64<false, true> *);
template void
__kmp_atomic_resume_64<false, true>(int, kmp_atomic_flag_64<false, true> *);
#if KMP_USE_MONITOR
void __kmp_resume_monitor() {
  KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_resume);
  int status;
#ifdef KMP_DEBUG
  int gtid = TCR_4(__kmp_init_gtid) ? __kmp_get_gtid() : -1;
  KF_TRACE(30, ("__kmp_resume_monitor: T#%d wants to wakeup T#%d enter\n", gtid,
                KMP_GTID_MONITOR));
  KMP_DEBUG_ASSERT(gtid != KMP_GTID_MONITOR);
#endif
  status = pthread_mutex_lock(&__kmp_wait_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
#ifdef DEBUG_SUSPEND
  {
    char buffer[128];
    __kmp_print_cond(buffer, &__kmp_wait_cv.c_cond);
    __kmp_printf("__kmp_resume_monitor: T#%d resuming T#%d: %s\n", gtid,
                 KMP_GTID_MONITOR, buffer);
  }
#endif
  status = pthread_cond_signal(&__kmp_wait_cv.c_cond);
  KMP_CHECK_SYSFAIL("pthread_cond_signal", status);
  status = pthread_mutex_unlock(&__kmp_wait_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
  KF_TRACE(30, ("__kmp_resume_monitor: T#%d exiting after signaling wake up"
                " for T#%d\n",
                gtid, KMP_GTID_MONITOR));
}
#endif // KMP_USE_MONITOR
void __kmp_yield() { sched_yield(); }

void __kmp_gtid_set_specific(int gtid) {
  if (__kmp_init_gtid) {
    int status;
    status = pthread_setspecific(__kmp_gtid_threadprivate_key,
                                 (void *)(intptr_t)(gtid + 1));
    KMP_CHECK_SYSFAIL("pthread_setspecific", status);
  } else {
    KA_TRACE(50, ("__kmp_gtid_set_specific: runtime shutdown, returning\n"));
  }
}

int __kmp_gtid_get_specific() {
  int gtid;
  if (!__kmp_init_gtid) {
    KA_TRACE(50, ("__kmp_gtid_get_specific: runtime shutdown, returning "
                  "KMP_GTID_SHUTDOWN\n"));
    return KMP_GTID_SHUTDOWN;
  }
  gtid = (int)(size_t)pthread_getspecific(__kmp_gtid_threadprivate_key);
  if (gtid == 0) {
    gtid = KMP_GTID_DNE;
  } else {
    gtid--;
  }
  KA_TRACE(50, ("__kmp_gtid_get_specific: key:%d gtid:%d\n",
                __kmp_gtid_threadprivate_key, gtid));
  return gtid;
}
double __kmp_read_cpu_time(void) {
  struct tms buffer;

  times(&buffer);

  return (double)(buffer.tms_utime + buffer.tms_cutime) /
         (double)CLOCKS_PER_SEC;
}

int __kmp_read_system_info(struct kmp_sys_info *info) {
  int status;
  struct rusage r_usage;

  memset(info, 0, sizeof(*info));

  status = getrusage(RUSAGE_SELF, &r_usage);
  KMP_CHECK_SYSFAIL_ERRNO("getrusage", status);

  // Maximum resident set size utilized (in kilobytes).
  info->maxrss = r_usage.ru_maxrss;
  // Page faults serviced without any I/O.
  info->minflt = r_usage.ru_minflt;
  // Page faults serviced that required I/O.
  info->majflt = r_usage.ru_majflt;
  // Times a process was "swapped" out of memory.
  info->nswap = r_usage.ru_nswap;
  // Times the file system had to perform input.
  info->inblock = r_usage.ru_inblock;
  // Times the file system had to perform output.
  info->oublock = r_usage.ru_oublock;
  // Voluntary context switches.
  info->nvcsw = r_usage.ru_nvcsw;
  // Involuntary context switches.
  info->nivcsw = r_usage.ru_nivcsw;

  return (status != 0);
}
void __kmp_read_system_time(double *delta) {
  double t_ns;
  struct timeval tval;
  struct timespec stop;
  int status;

  status = gettimeofday(&tval, NULL);
  KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
  TIMEVAL_TO_TIMESPEC(&tval, &stop);
  t_ns = (double)(TS2NS(stop) - TS2NS(__kmp_sys_timer_data.start));
  *delta = (t_ns * 1e-9);
}

void __kmp_clear_system_time(void) {
  struct timeval tval;
  int status;
  status = gettimeofday(&tval, NULL);
  KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
  TIMEVAL_TO_TIMESPEC(&tval, &__kmp_sys_timer_data.start);
}
static int __kmp_get_xproc(void) {
  int r = 0;

#if KMP_OS_LINUX
  __kmp_type_convert(sysconf(_SC_NPROCESSORS_CONF), &(r));
#elif KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_OPENBSD || \
    KMP_OS_HURD || KMP_OS_SOLARIS || KMP_OS_WASI || KMP_OS_AIX
  __kmp_type_convert(sysconf(_SC_NPROCESSORS_ONLN), &(r));
#elif KMP_OS_DARWIN
  kern_return_t rc;
  host_basic_info_data_t info;
  mach_msg_type_number_t num = HOST_BASIC_INFO_COUNT;
  rc = host_info(mach_host_self(), HOST_BASIC_INFO, (host_info_t)&info, &num);
  if (rc == 0 && num == HOST_BASIC_INFO_COUNT) {
    // Cannot use KA_TRACE() here because this code works before trace support
    // is initialized.
    r = info.avail_cpus;
  } else {
    KMP_WARNING(CantGetNumAvailCPU);
    KMP_INFORM(AssumedNumCPU);
  }
#else
#error "Unknown or unsupported OS."
#endif

  return r > 0 ? r : 2; /* guess a value of 2 if the OS told us 0 */
}
int __kmp_read_from_file(char const *path, char const *format, ...) {
  int result;
  va_list args;

  va_start(args, format);
  FILE *f = fopen(path, "rb");
  if (f == NULL) {
    va_end(args);
    return 0;
  }
  result = vfscanf(f, format, args);
  fclose(f);
  va_end(args);

  return result;
}
void __kmp_runtime_initialize(void) {
  int status;
  pthread_mutexattr_t mutex_attr;
  pthread_condattr_t cond_attr;

  if (__kmp_init_runtime) {
    return;
  }

#if (KMP_ARCH_X86 || KMP_ARCH_X86_64)
  if (!__kmp_cpuinfo.initialized) {
    __kmp_query_cpuid(&__kmp_cpuinfo);
  }
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

  __kmp_xproc = __kmp_get_xproc();

  // Read the stack size of the calling thread and save it as the default for
  // worker threads; this should be done before reading environment variables.
  struct rlimit rlim;
  status = getrlimit(RLIMIT_STACK, &rlim);
  if (status == 0) { // success?
    __kmp_stksize = rlim.rlim_cur;
    __kmp_check_stksize(&__kmp_stksize); // check value and adjust if needed
  }

  if (sysconf(_SC_THREADS)) {

    /* Query the maximum number of threads. */
    __kmp_type_convert(sysconf(_SC_THREAD_THREADS_MAX), &(__kmp_sys_max_nth));
#ifdef __ve__
    if (__kmp_sys_max_nth == -1) {
      // VE's pthread supports only up to 64 threads per VE process.
      __kmp_sys_max_nth = KMP_MAX_NTH;
    }
#else
    if (__kmp_sys_max_nth == -1) {
      /* Unlimited threads for NPTL. */
      __kmp_sys_max_nth = INT_MAX;
    } else if (__kmp_sys_max_nth <= 1) {
      /* Can't tell, just use PTHREAD_THREADS_MAX. */
      __kmp_sys_max_nth = KMP_MAX_NTH;
    }
#endif

    /* Query the minimum stack size. */
    __kmp_sys_min_stksize = sysconf(_SC_THREAD_STACK_MIN);
    if (__kmp_sys_min_stksize <= 1) {
      __kmp_sys_min_stksize = KMP_MIN_STKSIZE;
    }
  }

  /* Set up the minimum number of threads to switch to TLS gtid. */
  __kmp_tls_gtid_min = KMP_TLS_GTID_MIN;

  status = pthread_key_create(&__kmp_gtid_threadprivate_key,
                              __kmp_internal_end_dest);
  KMP_CHECK_SYSFAIL("pthread_key_create", status);
  status = pthread_mutexattr_init(&mutex_attr);
  KMP_CHECK_SYSFAIL("pthread_mutexattr_init", status);
  status = pthread_mutex_init(&__kmp_wait_mx.m_mutex, &mutex_attr);
  KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
  status = pthread_mutexattr_destroy(&mutex_attr);
  KMP_CHECK_SYSFAIL("pthread_mutexattr_destroy", status);
  status = pthread_condattr_init(&cond_attr);
  KMP_CHECK_SYSFAIL("pthread_condattr_init", status);
  status = pthread_cond_init(&__kmp_wait_cv.c_cond, &cond_attr);
  KMP_CHECK_SYSFAIL("pthread_cond_init", status);
  status = pthread_condattr_destroy(&cond_attr);
  KMP_CHECK_SYSFAIL("pthread_condattr_destroy", status);
#if USE_ITT_BUILD
  __kmp_itt_initialize();
#endif /* USE_ITT_BUILD */

  __kmp_init_runtime = TRUE;
}
void __kmp_runtime_destroy(void) {
  int status;

  if (!__kmp_init_runtime) {
    return; // Nothing to do.
  }

#if USE_ITT_BUILD
  __kmp_itt_destroy();
#endif /* USE_ITT_BUILD */

  status = pthread_key_delete(__kmp_gtid_threadprivate_key);
  KMP_CHECK_SYSFAIL("pthread_key_delete", status);

  status = pthread_mutex_destroy(&__kmp_wait_mx.m_mutex);
  if (status != 0 && status != EBUSY) {
    KMP_SYSFAIL("pthread_mutex_destroy", status);
  }
  status = pthread_cond_destroy(&__kmp_wait_cv.c_cond);
  if (status != 0 && status != EBUSY) {
    KMP_SYSFAIL("pthread_cond_destroy", status);
  }
#if KMP_AFFINITY_SUPPORTED
  __kmp_affinity_uninitialize();
#endif

  __kmp_init_runtime = FALSE;
}
/* Put the thread to sleep for a time period. */
void __kmp_thread_sleep(int millis) { sleep((millis + 500) / 1000); }

/* Calculate the elapsed wall clock time for the user. */
void __kmp_elapsed(double *t) {
  int status;
#ifdef FIX_SGI_CLOCK
  struct timespec ts;

  status = clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts);
  KMP_CHECK_SYSFAIL_ERRNO("clock_gettime", status);
  *t =
      (double)ts.tv_nsec * (1.0 / (double)KMP_NSEC_PER_SEC) + (double)ts.tv_sec;
#else
  struct timeval tv;

  status = gettimeofday(&tv, NULL);
  KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
  *t =
      (double)tv.tv_usec * (1.0 / (double)KMP_USEC_PER_SEC) + (double)tv.tv_sec;
#endif
}

/* Calculate the elapsed wall clock tick for the user. */
void __kmp_elapsed_tick(double *t) { *t = 1 / (double)CLOCKS_PER_SEC; }
/* Return the current timestamp in nsec. */
kmp_uint64 __kmp_now_nsec() {
  struct timeval t;
  gettimeofday(&t, NULL);
  kmp_uint64 nsec = (kmp_uint64)KMP_NSEC_PER_SEC * (kmp_uint64)t.tv_sec +
                    (kmp_uint64)1000 * (kmp_uint64)t.tv_usec;
  return nsec;
}
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
/* Measure clock ticks per millisecond. */
void __kmp_initialize_system_tick() {
  kmp_uint64 now, nsec2, diff;
  kmp_uint64 delay = 1000000; // ~450 usec on most machines
  kmp_uint64 nsec = __kmp_now_nsec();
  kmp_uint64 goal = __kmp_hardware_timestamp() + delay;
  while ((now = __kmp_hardware_timestamp()) < goal)
    ;
  nsec2 = __kmp_now_nsec();
  diff = nsec2 - nsec;
  if (diff > 0) {
    double tpus = 1000.0 * (double)(delay + (now - goal)) / (double)diff;
    if (tpus > 0.0) {
      __kmp_ticks_per_msec = (kmp_uint64)(tpus * 1000.0);
      __kmp_ticks_per_usec = (kmp_uint64)tpus;
    }
  }
}
#endif
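
// Illustrative sketch (not part of the runtime, excluded from the build):
// the calibration above measures a raw hardware timestamp counter against a
// wall clock. __rdtsc() is the x86 intrinsic; the helper names are
// hypothetical.
#if 0
#include <stdint.h>
#include <sys/time.h>
#include <x86intrin.h>

static uint64_t now_nsec(void) {
  struct timeval t;
  gettimeofday(&t, NULL);
  return (uint64_t)t.tv_sec * 1000000000u + (uint64_t)t.tv_usec * 1000u;
}

// Spin for ~1 ms of wall time, then divide elapsed ticks by elapsed ns.
static double ticks_per_nsec(void) {
  uint64_t ns0 = now_nsec();
  uint64_t t0 = __rdtsc();
  while (now_nsec() - ns0 < 1000000u)
    ;
  return (double)(__rdtsc() - t0) / (double)(now_nsec() - ns0);
}
#endif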
/* Determine whether the given address is mapped into the current address
   space. */
int __kmp_is_address_mapped(void *addr) {
  int found = 0;
  int rc;

#if KMP_OS_LINUX || KMP_OS_HURD

  /* On GNUish OSes, read the /proc/<pid>/maps pseudo-file to get all the
     address ranges mapped into the address space. */

  char *name = __kmp_str_format("/proc/%d/maps", getpid());
  FILE *file = NULL;

  file = fopen(name, "r");
  KMP_ASSERT(file != NULL);

  for (;;) {

    void *beginning = NULL;
    void *ending = NULL;
    char perms[5];

    rc = fscanf(file, "%p-%p %4s %*[^\n]\n", &beginning, &ending, perms);
    if (rc == EOF) {
      break;
    }
    KMP_ASSERT(rc == 3 &&
               KMP_STRLEN(perms) == 4); // Make sure all fields are read.

    // Ending address is not included in the region, but beginning is.
    if ((addr >= beginning) && (addr < ending)) {
      perms[2] = 0; // 3rd and 4th character does not matter.
      if (strcmp(perms, "rw") == 0) {
        // Memory we are looking for should be readable and writable.
        found = 1;
      }
      break;
    }
  }

  fclose(file);
  KMP_INTERNAL_FREE(name);
#elif KMP_OS_FREEBSD
  char *buf;
  size_t lstsz;
  int mib[] = {CTL_KERN, KERN_PROC, KERN_PROC_VMMAP, getpid()};
  rc = sysctl(mib, 4, NULL, &lstsz, NULL, 0);
  if (rc < 0)
    return 0;
  // We pass the size
  lstsz = lstsz * 4 / 3;
  buf = reinterpret_cast<char *>(kmpc_malloc(lstsz));
  rc = sysctl(mib, 4, buf, &lstsz, NULL, 0);
  if (rc < 0) {
    kmpc_free(buf);
    return 0;
  }

  char *lw = buf;
  char *up = buf + lstsz;

  while (lw < up) {
    struct kinfo_vmentry *cur = reinterpret_cast<struct kinfo_vmentry *>(lw);
    size_t cursz = cur->kve_structsize;
    if (cursz == 0)
      break;
    void *start = reinterpret_cast<void *>(cur->kve_start);
    void *end = reinterpret_cast<void *>(cur->kve_end);
    // Readable/writable addresses within current map entry.
    if ((addr >= start) && (addr < end)) {
      if ((cur->kve_protection & KVME_PROT_READ) != 0 &&
          (cur->kve_protection & KVME_PROT_WRITE) != 0) {
        found = 1;
        break;
      }
    }
    lw += cursz;
  }
  kmpc_free(buf);
#elif KMP_OS_DRAGONFLY
  char err[_POSIX2_LINE_MAX];
  kinfo_proc *proc;
  vmspace sp;
  vm_map_entry entry, *c;
  struct proc p;
  kvm_t *fd;
  uintptr_t uaddr;
  int num;

  fd = kvm_openfiles(nullptr, nullptr, nullptr, O_RDONLY, err);
  if (!fd) {
    return 0;
  }

  proc = kvm_getprocs(fd, KERN_PROC_PID, getpid(), &num);

  if (kvm_read(fd, static_cast<uintptr_t>(proc->kp_paddr), &p, sizeof(p)) !=
          sizeof(p) ||
      kvm_read(fd, reinterpret_cast<uintptr_t>(p.p_vmspace), &sp, sizeof(sp)) !=
          sizeof(sp)) {
    kvm_close(fd);
    return 0;
  }

  uaddr = reinterpret_cast<uintptr_t>(addr);
  for (c = kvm_vm_map_entry_first(fd, cur, &entry); c;
       c = kvm_vm_map_entry_next(fd, c, &entry)) {
    if ((uaddr >= entry.ba.start) && (uaddr <= entry.ba.end)) {
      if ((entry.protection & VM_PROT_READ) != 0 &&
          (entry.protection & VM_PROT_WRITE) != 0) {
        found = 1;
        break;
      }
    }
  }

  kvm_close(fd);
#elif KMP_OS_DARWIN

  /* On OS X*, /proc pseudo filesystem is not available. Try to read memory
     using the vm interface. */

  int buffer;
  vm_size_t count;
  rc = vm_read_overwrite(
      mach_task_self(), // Task to read memory of.
      (vm_address_t)(addr), // Address to read from.
      1, // Number of bytes to be read.
      (vm_address_t)(&buffer), // Address of buffer to save read bytes in.
      &count // Address of var to save number of read bytes in.
  );
  if (rc == 0) {
    // Memory successfully read.
    found = 1;
  }

#elif KMP_OS_NETBSD

  int mib[5];
  mib[0] = CTL_VM;
  mib[1] = VM_PROC;
  mib[2] = VM_PROC_MAP;
  mib[3] = getpid();
  mib[4] = sizeof(struct kinfo_vmentry);

  size_t size;
  rc = sysctl(mib, __arraycount(mib), NULL, &size, NULL, 0);
  KMP_ASSERT(!rc);
  KMP_ASSERT(size);

  size = size * 4 / 3;
  struct kinfo_vmentry *kiv = (struct kinfo_vmentry *)KMP_INTERNAL_MALLOC(size);
  KMP_ASSERT(kiv);

  rc = sysctl(mib, __arraycount(mib), kiv, &size, NULL, 0);
  KMP_ASSERT(!rc);
  KMP_ASSERT(size);

  for (size_t i = 0; i < size; i++) {
    if (kiv[i].kve_start >= (uint64_t)addr &&
        kiv[i].kve_end <= (uint64_t)addr) {
      found = 1;
      break;
    }
  }
  KMP_INTERNAL_FREE(kiv);
#elif KMP_OS_OPENBSD

  int mib[3];
  mib[0] = CTL_KERN;
  mib[1] = KERN_PROC_VMMAP;
  mib[2] = getpid();

  size_t size;
  uint64_t end;
  rc = sysctl(mib, 3, NULL, &size, NULL, 0);
  KMP_ASSERT(!rc);
  KMP_ASSERT(size);
  end = size;

  struct kinfo_vmentry kiv = {.kve_start = 0};

  while ((rc = sysctl(mib, 3, &kiv, &size, NULL, 0)) == 0) {
    KMP_ASSERT(size);
    if (kiv.kve_end == end)
      break;

    if (kiv.kve_start >= (uint64_t)addr && kiv.kve_end <= (uint64_t)addr) {
      found = 1;
      break;
    }
    kiv.kve_start += 1;
  }
#elif KMP_OS_WASI
  found = (int)addr < (__builtin_wasm_memory_size(0) * PAGESIZE);
#elif KMP_OS_SOLARIS || KMP_OS_AIX

  (void)rc;
  // FIXME(Solaris, AIX): Implement this
  found = 1;

#else

#error "Unknown or unsupported OS"

#endif

  return found;
}
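
// Illustrative sketch (not part of the runtime, excluded from the build):
// the Linux branch above reduced to a standalone predicate over
// /proc/self/maps. Each line is "low-high perms ..."; a hit requires the
// address to fall in the range and the first two permission bits to be rw.
#if 0
#include <stdio.h>
#include <string.h>

static int address_is_mapped_rw(const void *addr) {
  FILE *f = fopen("/proc/self/maps", "r");
  void *lo, *hi;
  char perms[5];
  int found = 0;
  if (!f)
    return 0;
  while (fscanf(f, "%p-%p %4s %*[^\n]\n", &lo, &hi, perms) == 3) {
    if (addr >= lo && addr < hi) {
      found = (perms[0] == 'r' && perms[1] == 'w');
      break;
    }
  }
  fclose(f);
  return found;
}
#endif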
#ifdef USE_LOAD_BALANCE

#if KMP_OS_DARWIN || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||   \
    KMP_OS_OPENBSD || KMP_OS_SOLARIS

// The function returns the rounded value of the system load average during
// the given time interval, which depends on __kmp_load_balance_interval
// (default is 60 sec, other values may be 300 sec or 900 sec). It returns -1
// in case of error.
int __kmp_get_load_balance(int max) {
  double averages[3];
  int ret_avg = 0;

  int res = getloadavg(averages, 3);

  // Check __kmp_load_balance_interval to determine which of the averages to
  // use. getloadavg() may return fewer samples than requested (less than 3).
  if (__kmp_load_balance_interval < 180 && (res >= 1)) {
    ret_avg = (int)averages[0]; // 1 min
  } else if ((__kmp_load_balance_interval >= 180 &&
              __kmp_load_balance_interval < 600) &&
             (res >= 2)) {
    ret_avg = (int)averages[1]; // 5 min
  } else if ((__kmp_load_balance_interval >= 600) && (res == 3)) {
    ret_avg = (int)averages[2]; // 15 min
  } else { // Error occurred.
    return -1;
  }

  return ret_avg;
}

#else // Linux* OS
// The function returns the number of running (not sleeping) threads, or -1
// in case of error (e.g. a Linux* OS kernel too old to have "/proc"
// support). Counting stops once `max` running threads have been seen.
int __kmp_get_load_balance(int max) {
  static int permanent_error = 0;
  static int glb_running_threads = 0; // Saved count of running threads.
  static double glb_call_time = 0; // Thread balance algorithm call time.

  int running_threads = 0; // Number of running threads in the system.

  DIR *proc_dir = NULL; // Handle of "/proc/" directory.
  struct dirent *proc_entry = NULL;

  kmp_str_buf_t task_path; // "/proc/<pid>/task/" path.
  DIR *task_dir = NULL; // Handle of "/proc/<pid>/task/" directory.
  struct dirent *task_entry = NULL;
  int task_path_fixed_len;

  kmp_str_buf_t stat_path; // "/proc/<pid>/task/<tid>/stat" path.
  int stat_file = -1;
  int stat_path_fixed_len;

#ifdef KMP_DEBUG
  int total_processes = 0; // Total number of processes in the system.
#endif

  double call_time = 0.0;

  __kmp_str_buf_init(&task_path);
  __kmp_str_buf_init(&stat_path);

  __kmp_elapsed(&call_time);

  if (glb_call_time &&
      (call_time - glb_call_time < __kmp_load_balance_interval)) {
    running_threads = glb_running_threads;
    goto finish;
  }

  glb_call_time = call_time;

  // Do not spend time on scanning "/proc/" if we have a permanent error.
  if (permanent_error) {
    running_threads = -1;
    goto finish;
  }

  if (max <= 0) {
    max = INT_MAX;
  }

  // Open "/proc/" directory.
  proc_dir = opendir("/proc");
  if (proc_dir == NULL) {
    // Cannot open "/proc/". Probably the kernel does not support it. Return
    // an error now and in subsequent calls.
    running_threads = -1;
    permanent_error = 1;
    goto finish;
  }

  // Initialize the fixed part of task_path. This part will not change.
  __kmp_str_buf_cat(&task_path, "/proc/", 6);
  task_path_fixed_len = task_path.used; // Remember number of used characters.

  proc_entry = readdir(proc_dir);
  while (proc_entry != NULL) {
#if KMP_OS_AIX
    // Proc entry name starts with a digit: assume it is a process directory.
    if (isdigit(proc_entry->d_name[0])) {
#else
    // Proc entry is a directory and name starts with a digit: assume it is a
    // process directory.
    if (proc_entry->d_type == DT_DIR && isdigit(proc_entry->d_name[0])) {
#endif

#ifdef KMP_DEBUG
      ++total_processes;
#endif
      // "init" (pid 1) should be the very first entry in "/proc"; verify
      // total_processes == 1 implies d_name == "1" (a => b == !a || b).
      KMP_DEBUG_ASSERT(total_processes != 1 ||
                       strcmp(proc_entry->d_name, "1") == 0);

      // Construct task_path.
      task_path.used = task_path_fixed_len; // Reset task_path to "/proc/".
      __kmp_str_buf_cat(&task_path, proc_entry->d_name,
                        KMP_STRLEN(proc_entry->d_name));
      __kmp_str_buf_cat(&task_path, "/task", 5);

      task_dir = opendir(task_path.str);
      if (task_dir == NULL) {
        // A process can finish between reading the "/proc/" entry and opening
        // its "task/" directory, so skip it. But "init" (pid 1) always
        // exists; if we cannot open "/proc/1/task/", the kernel does not
        // support "task/" at all; report a permanent error.
        if (strcmp(proc_entry->d_name, "1") == 0) {
          running_threads = -1;
          permanent_error = 1;
          goto finish;
        }
      } else {
        // Construct the fixed part of the stat file path.
        __kmp_str_buf_clear(&stat_path);
        __kmp_str_buf_cat(&stat_path, task_path.str, task_path.used);
        __kmp_str_buf_cat(&stat_path, "/", 1);
        stat_path_fixed_len = stat_path.used;

        task_entry = readdir(task_dir);
        while (task_entry != NULL) {
          // It is a directory and the name starts with a digit.
#if KMP_OS_AIX
          if (isdigit(task_entry->d_name[0])) {
#else
          if (proc_entry->d_type == DT_DIR && isdigit(task_entry->d_name[0])) {
#endif
            // Construct the complete stat file path.
            stat_path.used =
                stat_path_fixed_len; // Reset stat path to its fixed part.
            __kmp_str_buf_cat(&stat_path, task_entry->d_name,
                              KMP_STRLEN(task_entry->d_name));
            __kmp_str_buf_cat(&stat_path, "/stat", 5);

            // Note: the low-level API (open/read/close) is used; the
            // high-level API (fopen/fclose) works ~30% slower.
            stat_file = open(stat_path.str, O_RDONLY);
            if (stat_file == -1) {
              // No error reported: the task (thread) can terminate just
              // before reading this file.
            } else {
              /* Content of "stat" file looks like:
                 24285 (program name) S ...
                 The state field is the character right after the closing
                 parenthesis, skipping one space. */
              char buffer[65];
              ssize_t len;
              len = read(stat_file, buffer, sizeof(buffer) - 1);
              if (len >= 0) {
                buffer[len] = 0;
                // Searching for the closing parenthesis works a bit faster
                // than sscanf( buffer, "%*d (%*s) %c ", &state ).
                char *close_parent = strstr(buffer, ") ");
                if (close_parent != NULL) {
                  char state = *(close_parent + 2);
                  if (state == 'R') {
                    ++running_threads;
                    if (running_threads >= max) {
                      goto finish;
                    }
                  }
                }
              }
              close(stat_file);
              stat_file = -1;
            }
          }
          task_entry = readdir(task_dir);
        }
        closedir(task_dir);
        task_dir = NULL;
      }
    }
    proc_entry = readdir(proc_dir);
  }

  // There _might_ be a timing hole where the thread executing this code gets
  // skipped in the load balance and running_threads is 0. Assert in debug
  // builds only!
  KMP_DEBUG_ASSERT(running_threads > 0);
  if (running_threads <= 0) {
    running_threads = 1;
  }

finish: // Clean up and exit.
  if (proc_dir != NULL) {
    closedir(proc_dir);
  }
  __kmp_str_buf_free(&task_path);
  if (task_dir != NULL) {
    closedir(task_dir);
  }
  __kmp_str_buf_free(&stat_path);
  if (stat_file != -1) {
    close(stat_file);
  }

  glb_running_threads = running_threads;

  return running_threads;
} // __kmp_get_load_balance

#endif // KMP_OS_DARWIN etc.
#endif // USE_LOAD_BALANCE
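
// Illustrative sketch (not part of the runtime, excluded from the build):
// on systems with getloadavg(3), the BSD/Darwin variant above reduces to a
// single call; the 1-minute average is a coarse proxy for running threads.
#if 0
#include <stdlib.h>

static int coarse_load(void) {
  double avg[3];
  if (getloadavg(avg, 3) < 1)
    return -1; // error, or no samples available yet
  return (int)avg[0]; // 1-minute load average, truncated
}
#endif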
#if !(KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_MIC ||                           \
      ((KMP_OS_LINUX || KMP_OS_DARWIN) && KMP_ARCH_AARCH64) ||                \
      KMP_ARCH_PPC64 || KMP_ARCH_RISCV64 || KMP_ARCH_LOONGARCH64 ||           \
      KMP_ARCH_ARM || KMP_ARCH_VE || KMP_ARCH_S390X || KMP_ARCH_PPC_XCOFF)

// Generic (non-assembly) microtask invoker: dispatch on the argument count.
int __kmp_invoke_microtask(microtask_t pkfn, int gtid, int tid, int argc,
                           void *p_argv[]
#if OMPT_SUPPORT
                           ,
                           void **exit_frame_ptr
#endif
) {
#if OMPT_SUPPORT
  *exit_frame_ptr = OMPT_GET_FRAME_ADDRESS(0);
#endif

  switch (argc) {
  default:
    fprintf(stderr, "Too many args to microtask: %d!\n", argc);
    fflush(stderr);
    exit(-1);
  case 0:
    (*pkfn)(&gtid, &tid);
    break;
  case 1:
    (*pkfn)(&gtid, &tid, p_argv[0]);
    break;
  case 2:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1]);
    break;
  case 3:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2]);
    break;
  case 4:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3]);
    break;
  case 5:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4]);
    break;
  case 6:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5]);
    break;
  case 7:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6]);
    break;
  case 8:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7]);
    break;
  case 9:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8]);
    break;
  case 10:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9]);
    break;
  case 11:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10]);
    break;
  case 12:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
            p_argv[11]);
    break;
  case 13:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
            p_argv[11], p_argv[12]);
    break;
  case 14:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
            p_argv[11], p_argv[12], p_argv[13]);
    break;
  case 15:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
            p_argv[11], p_argv[12], p_argv[13], p_argv[14]);
    break;
  }

  return 1;
}

#endif
// Functions for hidden helper tasks (POSIX implementation).

// Condition variable on which the initial thread waits until all hidden
// helper threads are initialized.
pthread_cond_t hidden_helper_threads_initz_cond_var;
pthread_mutex_t hidden_helper_threads_initz_lock;
volatile int hidden_helper_initz_signaled = FALSE;

// Condition variable for deinitialization of hidden helper threads.
pthread_cond_t hidden_helper_threads_deinitz_cond_var;
pthread_mutex_t hidden_helper_threads_deinitz_lock;
volatile int hidden_helper_deinitz_signaled = FALSE;

// Condition variable on which the main thread waits until the first hidden
// helper thread is up.
pthread_cond_t hidden_helper_main_thread_cond_var;
pthread_mutex_t hidden_helper_main_thread_lock;
volatile int hidden_helper_main_thread_signaled = FALSE;

// Semaphore on which hidden helper worker threads wait for tasks.
sem_t hidden_helper_task_sem;

void __kmp_hidden_helper_worker_thread_wait() {
  int status = sem_wait(&hidden_helper_task_sem);
  KMP_CHECK_SYSFAIL("sem_wait", status);
}

void __kmp_do_initialize_hidden_helper_threads() {
  // Initialize condition variables.
  int status =
      pthread_cond_init(&hidden_helper_threads_initz_cond_var, nullptr);
  KMP_CHECK_SYSFAIL("pthread_cond_init", status);

  status = pthread_cond_init(&hidden_helper_threads_deinitz_cond_var, nullptr);
  KMP_CHECK_SYSFAIL("pthread_cond_init", status);

  status = pthread_cond_init(&hidden_helper_main_thread_cond_var, nullptr);
  KMP_CHECK_SYSFAIL("pthread_cond_init", status);

  status = pthread_mutex_init(&hidden_helper_threads_initz_lock, nullptr);
  KMP_CHECK_SYSFAIL("pthread_mutex_init", status);

  status = pthread_mutex_init(&hidden_helper_threads_deinitz_lock, nullptr);
  KMP_CHECK_SYSFAIL("pthread_mutex_init", status);

  status = pthread_mutex_init(&hidden_helper_main_thread_lock, nullptr);
  KMP_CHECK_SYSFAIL("pthread_mutex_init", status);

  // Initialize the semaphore.
  status = sem_init(&hidden_helper_task_sem, 0, 0);
  KMP_CHECK_SYSFAIL("sem_init", status);

  // Create a new thread to finish the initialization.
  pthread_t handle;
  status = pthread_create(
      &handle, nullptr,
      [](void *) -> void * {
        __kmp_hidden_helper_threads_initz_routine();
        return nullptr;
      },
      nullptr);
  KMP_CHECK_SYSFAIL("pthread_create", status);
}
void __kmp_hidden_helper_threads_initz_wait() {
  // The initial thread waits here for the completion of the initialization.
  // The condition variable will be notified by the main thread of the hidden
  // helper threads.
  int status = pthread_mutex_lock(&hidden_helper_threads_initz_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);

  if (!TCR_4(hidden_helper_initz_signaled)) {
    status = pthread_cond_wait(&hidden_helper_threads_initz_cond_var,
                               &hidden_helper_threads_initz_lock);
    KMP_CHECK_SYSFAIL("pthread_cond_wait", status);
  }

  status = pthread_mutex_unlock(&hidden_helper_threads_initz_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
}

void __kmp_hidden_helper_initz_release() {
  // Notify the initial thread that the initialization has finished.
  int status = pthread_mutex_lock(&hidden_helper_threads_initz_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);

  status = pthread_cond_signal(&hidden_helper_threads_initz_cond_var);
  KMP_CHECK_SYSFAIL("pthread_cond_signal", status);

  TCW_SYNC_4(hidden_helper_initz_signaled, TRUE);

  status = pthread_mutex_unlock(&hidden_helper_threads_initz_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
}
void __kmp_hidden_helper_main_thread_wait() {
  // The main thread of hidden helper threads waits here until all other
  // hidden helper threads are ready.
  int status = pthread_mutex_lock(&hidden_helper_main_thread_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);

  if (!TCR_4(hidden_helper_main_thread_signaled)) {
    status = pthread_cond_wait(&hidden_helper_main_thread_cond_var,
                               &hidden_helper_main_thread_lock);
    KMP_CHECK_SYSFAIL("pthread_cond_wait", status);
  }

  status = pthread_mutex_unlock(&hidden_helper_main_thread_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
}

void __kmp_hidden_helper_main_thread_release() {
  // The initial thread of the OpenMP RTL is blocked; wake it up.
  int status = pthread_mutex_lock(&hidden_helper_main_thread_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);

  status = pthread_cond_signal(&hidden_helper_main_thread_cond_var);
  KMP_CHECK_SYSFAIL("pthread_cond_signal", status);

  TCW_SYNC_4(hidden_helper_main_thread_signaled, TRUE);

  status = pthread_mutex_unlock(&hidden_helper_main_thread_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
}

void __kmp_hidden_helper_worker_thread_signal() {
  int status = sem_post(&hidden_helper_task_sem);
  KMP_CHECK_SYSFAIL("sem_post", status);
}

void __kmp_hidden_helper_threads_deinitz_wait() {
  // The initial thread waits here for the completion of deinitialization.
  int status = pthread_mutex_lock(&hidden_helper_threads_deinitz_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);

  if (!TCR_4(hidden_helper_deinitz_signaled)) {
    status = pthread_cond_wait(&hidden_helper_threads_deinitz_cond_var,
                               &hidden_helper_threads_deinitz_lock);
    KMP_CHECK_SYSFAIL("pthread_cond_wait", status);
  }

  status = pthread_mutex_unlock(&hidden_helper_threads_deinitz_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
}

void __kmp_hidden_helper_threads_deinitz_release() {
  int status = pthread_mutex_lock(&hidden_helper_threads_deinitz_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);

  status = pthread_cond_signal(&hidden_helper_threads_deinitz_cond_var);
  KMP_CHECK_SYSFAIL("pthread_cond_signal", status);

  TCW_SYNC_4(hidden_helper_deinitz_signaled, TRUE);

  status = pthread_mutex_unlock(&hidden_helper_threads_deinitz_lock);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
}
// Stub versions for platforms where hidden helper tasks are not supported.
void __kmp_hidden_helper_worker_thread_wait() {
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
}

void __kmp_do_initialize_hidden_helper_threads() {
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
}

void __kmp_hidden_helper_threads_initz_wait() {
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
}

void __kmp_hidden_helper_initz_release() {
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
}

void __kmp_hidden_helper_main_thread_wait() {
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
}

void __kmp_hidden_helper_main_thread_release() {
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
}

void __kmp_hidden_helper_worker_thread_signal() {
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
}

void __kmp_hidden_helper_threads_deinitz_wait() {
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
}

void __kmp_hidden_helper_threads_deinitz_release() {
  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
}
bool __kmp_detect_shm() {
  DIR *dir = opendir("/dev/shm");
  if (dir) { // /dev/shm exists
    closedir(dir);
    return true;
  } else if (ENOENT == errno) { // /dev/shm does not exist
    return false;
  } else { // opendir() failed for some other reason
    return false;
  }
}

bool __kmp_detect_tmp() {
  DIR *dir = opendir("/tmp");
  if (dir) { // /tmp exists
    closedir(dir);
    return true;
  } else if (ENOENT == errno) { // /tmp does not exist
    return false;
  } else { // opendir() failed for some other reason
    return false;
  }
}