Jim Cownie | 5e8470a | 2013-09-27 10:38:44 +0000 | [diff] [blame] | 1 | /* |
| 2 | * kmp_global.c -- KPTS global variables for runtime support library |
Jim Cownie | 5e8470a | 2013-09-27 10:38:44 +0000 | [diff] [blame] | 3 | */ |
| 4 | |
| 5 | |
| 6 | //===----------------------------------------------------------------------===// |
| 7 | // |
| 8 | // The LLVM Compiler Infrastructure |
| 9 | // |
| 10 | // This file is dual licensed under the MIT and the University of Illinois Open |
| 11 | // Source Licenses. See LICENSE.txt for details. |
| 12 | // |
| 13 | //===----------------------------------------------------------------------===// |
| 14 | |
| 15 | |
| 16 | #include "kmp.h" |
| 17 | |
/* OS thread-local-storage key used to look up the current thread's gtid
   (global thread id) when faster methods are unavailable. */
kmp_key_t __kmp_gtid_threadprivate_key;

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
kmp_cpuinfo_t __kmp_cpuinfo = { 0 }; // Not initialized; filled in lazily at runtime
#endif

#if KMP_STATS_ENABLED
#include "kmp_stats.h"
// lock for modifying the global __kmp_stats_list
kmp_tas_lock_t __kmp_stats_lock;

// global list of per-thread stats; the head is a sentinel node which
// accumulates all stats produced before __kmp_create_worker is called.
kmp_stats_list __kmp_stats_list;

// thread-local pointer to this thread's stats node within the list
__thread kmp_stats_list* __kmp_stats_thread_ptr = &__kmp_stats_list;

// gives reference tick for all events (considered the 0 tick)
tsc_tick_count __kmp_stats_start_time;
#endif

#if KMP_USE_HWLOC
int __kmp_hwloc_error = FALSE;
hwloc_topology_t __kmp_hwloc_topology = NULL;
#endif

/* ----------------------------------------------------- */
/* INITIALIZATION VARIABLES */
/* writes are synchronized during init, but they may be read at any time */
volatile int __kmp_init_serial = FALSE;
volatile int __kmp_init_gtid = FALSE;
volatile int __kmp_init_common = FALSE;
volatile int __kmp_init_middle = FALSE;
volatile int __kmp_init_parallel = FALSE;
volatile int __kmp_init_monitor = 0; /* 1 - launched, 2 - actually started (Windows* OS only) */
volatile int __kmp_init_user_locks = FALSE;

/* list of addresses of allocated caches for commons (threadprivate data) */
kmp_cached_addr_t *__kmp_threadpriv_cache_list = NULL;

int __kmp_init_counter = 0; /* number of library initializations performed */
int __kmp_root_counter = 0; /* number of root threads registered */
int __kmp_version = 0;      /* set when the version string is built */

volatile kmp_uint32 __kmp_team_counter = 0;
volatile kmp_uint32 __kmp_task_counter = 0;

unsigned int __kmp_init_wait = KMP_DEFAULT_INIT_WAIT; /* initial number of spin-tests */
unsigned int __kmp_next_wait = KMP_DEFAULT_NEXT_WAIT; /* subsequent number of spin-tests */

/* Worker/monitor stack geometry defaults (overridable via environment). */
size_t __kmp_stksize = KMP_DEFAULT_STKSIZE;
size_t __kmp_monitor_stksize = 0; // 0 means auto-adjust
size_t __kmp_stkoffset = KMP_DEFAULT_STKOFFSET;
int __kmp_stkpadding = KMP_MIN_STKPADDING;

size_t __kmp_malloc_pool_incr = KMP_DEFAULT_MALLOC_POOL_INCR;
| 73 | |
/* Barrier method defaults, settings, and strings */
/* branch factor = 2^branch_bits (only relevant for tree and hyper barrier types) */
#if KMP_ARCH_X86_64
kmp_uint32 __kmp_barrier_gather_bb_dflt = 2; /* branch_factor = 4 */ /* hyper2: C78980 */
kmp_uint32 __kmp_barrier_release_bb_dflt = 2; /* branch_factor = 4 */ /* hyper2: C78980 */
#else
kmp_uint32 __kmp_barrier_gather_bb_dflt = 2; /* branch_factor = 4 */ /* communication in core for MIC */
kmp_uint32 __kmp_barrier_release_bb_dflt = 2; /* branch_factor = 4 */ /* communication in core for MIC */
#endif // KMP_ARCH_X86_64
#if KMP_ARCH_X86_64
kmp_bar_pat_e __kmp_barrier_gather_pat_dflt = bp_hyper_bar; /* hyper2: C78980 */
kmp_bar_pat_e __kmp_barrier_release_pat_dflt = bp_hyper_bar; /* hyper2: C78980 */
#else
kmp_bar_pat_e __kmp_barrier_gather_pat_dflt = bp_linear_bar;
kmp_bar_pat_e __kmp_barrier_release_pat_dflt = bp_linear_bar;
#endif

/* Per-barrier-type settings; unspecified array entries are value-initialized
   (0 / bp_linear_bar) by the aggregate initializer. */
kmp_uint32 __kmp_barrier_gather_branch_bits [ bs_last_barrier ] = { 0 };
kmp_uint32 __kmp_barrier_release_branch_bits [ bs_last_barrier ] = { 0 };
kmp_bar_pat_e __kmp_barrier_gather_pattern [ bs_last_barrier ] = { bp_linear_bar };
kmp_bar_pat_e __kmp_barrier_release_pattern [ bs_last_barrier ] = { bp_linear_bar };

/* Environment variable names used to override the per-barrier-type settings above. */
char const *__kmp_barrier_branch_bit_env_name [ bs_last_barrier ] =
    { "KMP_PLAIN_BARRIER", "KMP_FORKJOIN_BARRIER"
#if KMP_FAST_REDUCTION_BARRIER
    , "KMP_REDUCTION_BARRIER"
#endif // KMP_FAST_REDUCTION_BARRIER
    };
char const *__kmp_barrier_pattern_env_name [ bs_last_barrier ] =
    { "KMP_PLAIN_BARRIER_PATTERN", "KMP_FORKJOIN_BARRIER_PATTERN"
#if KMP_FAST_REDUCTION_BARRIER
    , "KMP_REDUCTION_BARRIER_PATTERN"
#endif // KMP_FAST_REDUCTION_BARRIER
    };
/* Human-readable names for the barrier types and patterns (used in messages). */
char const *__kmp_barrier_type_name [ bs_last_barrier ] =
    { "plain", "forkjoin"
#if KMP_FAST_REDUCTION_BARRIER
    , "reduction"
#endif // KMP_FAST_REDUCTION_BARRIER
    };
char const *__kmp_barrier_pattern_name[bp_last_bar] = {"linear","tree","hyper","hierarchical"};

int __kmp_allThreadsSpecified = 0;
size_t __kmp_align_alloc = CACHE_LINE;
| 116 | |
/* Warning / diagnostic behavior. */
int __kmp_generate_warnings = kmp_warnings_low;
int __kmp_reserve_warn = 0;

/* Processor and thread-count bookkeeping. */
int __kmp_xproc = 0;       /* number of processors in the system */
int __kmp_avail_proc = 0;  /* number of processors available to the process */
size_t __kmp_sys_min_stksize = KMP_MIN_STKSIZE;
int __kmp_sys_max_nth = KMP_MAX_NTH;
int __kmp_max_nth = 0;
int __kmp_threads_capacity = 0;
int __kmp_dflt_team_nth = 0;
int __kmp_dflt_team_nth_ub = 0;
int __kmp_tp_capacity = 0;
int __kmp_tp_cached = 0;
int __kmp_dflt_nested = FALSE;
int __kmp_dispatch_num_buffers = KMP_DFLT_DISP_NUM_BUFF;
int __kmp_dflt_max_active_levels = KMP_MAX_ACTIVE_LEVELS_LIMIT; /* max_active_levels limit */
#if KMP_NESTED_HOT_TEAMS
int __kmp_hot_teams_mode = 0; /* 0 - free extra threads when reduced */
                              /* 1 - keep extra threads when reduced */
int __kmp_hot_teams_max_level = 1; /* nesting level of hot teams */
#endif

/* Scheduling defaults. */
enum library_type __kmp_library = library_none;
enum sched_type __kmp_sched = kmp_sch_default; /* scheduling method for runtime scheduling */
enum sched_type __kmp_static = kmp_sch_static_greedy; /* default static scheduling method */
enum sched_type __kmp_guided = kmp_sch_guided_iterative_chunked; /* default guided scheduling method */
enum sched_type __kmp_auto = kmp_sch_guided_analytical_chunked; /* default auto scheduling method */

/* Blocktime / monitor-thread tuning. */
int __kmp_dflt_blocktime = KMP_DEFAULT_BLOCKTIME;
int __kmp_monitor_wakeups = KMP_MIN_MONITOR_WAKEUPS;
int __kmp_bt_intervals = KMP_INTERVALS_FROM_BLOCKTIME( KMP_DEFAULT_BLOCKTIME, KMP_MIN_MONITOR_WAKEUPS );
#ifdef KMP_ADJUST_BLOCKTIME
int __kmp_zero_bt = FALSE;
#endif /* KMP_ADJUST_BLOCKTIME */
#ifdef KMP_DFLT_NTH_CORES
int __kmp_ncores = 0;
#endif
int __kmp_chunk = 0;
int __kmp_abort_delay = 0;

/* Method for obtaining the gtid of the calling thread (see __kmp_gtid mode values). */
#if KMP_OS_LINUX && defined(KMP_TDATA_GTID)
int __kmp_gtid_mode = 3; /* use compiler TLS (__thread) to store gtid */
int __kmp_adjust_gtid_mode = FALSE;
#elif KMP_OS_WINDOWS
int __kmp_gtid_mode = 2; /* use TLS functions to store gtid */
int __kmp_adjust_gtid_mode = FALSE;
#else
int __kmp_gtid_mode = 0; /* select method to get gtid based on #threads */
int __kmp_adjust_gtid_mode = TRUE;
#endif /* KMP_OS_LINUX && defined(KMP_TDATA_GTID) */
#ifdef KMP_TDATA_GTID
#if KMP_OS_WINDOWS
__declspec(thread) int __kmp_gtid = KMP_GTID_DNE;
#else
__thread int __kmp_gtid = KMP_GTID_DNE;
#endif /* KMP_OS_WINDOWS - workaround because Intel(R) Many Integrated Core compiler 20110316 doesn't accept __declspec */
#endif /* KMP_TDATA_GTID */
int __kmp_tls_gtid_min = INT_MAX;
int __kmp_foreign_tp = TRUE;
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
/* Saved floating-point control state inherited by worker threads. */
int __kmp_inherit_fp_control = TRUE;
kmp_int16 __kmp_init_x87_fpu_control_word = 0;
kmp_uint32 __kmp_init_mxcsr = 0;
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

#ifdef USE_LOAD_BALANCE
double __kmp_load_balance_interval = 1.0;
#endif /* USE_LOAD_BALANCE */

kmp_nested_nthreads_t __kmp_nested_nth = { NULL, 0, 0 };

#if KMP_USE_ADAPTIVE_LOCKS

/* { max_soft_retries, max_badness } */
kmp_adaptive_backoff_params_t __kmp_adaptive_backoff_params = { 1, 1024 }; // TODO: tune it!

#if KMP_DEBUG_ADAPTIVE_LOCKS
char * __kmp_speculative_statsfile = "-"; /* "-" means write stats to stderr */
#endif

#endif // KMP_USE_ADAPTIVE_LOCKS

#if OMP_40_ENABLED
int __kmp_display_env = FALSE;
int __kmp_display_env_verbose = FALSE;
int __kmp_omp_cancellation = FALSE;
#endif
| 200 | |
/* map OMP 3.0 schedule types to our internal schedule types */
enum sched_type __kmp_sch_map[ kmp_sched_upper - kmp_sched_lower_ext + kmp_sched_upper_std - kmp_sched_lower - 2 ] = {
    kmp_sch_static_chunked,     // ==> kmp_sched_static      = 1
    kmp_sch_dynamic_chunked,    // ==> kmp_sched_dynamic     = 2
    kmp_sch_guided_chunked,     // ==> kmp_sched_guided      = 3
    kmp_sch_auto,               // ==> kmp_sched_auto        = 4
    kmp_sch_trapezoidal         // ==> kmp_sched_trapezoidal = 101
                                // will likely not be used, introduced here just to debug the code
                                // of public intel extension schedules
};

#if KMP_OS_LINUX
enum clock_function_type __kmp_clock_function;
int __kmp_clock_function_param;
#endif /* KMP_OS_LINUX */

#if KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_WINDOWS)
enum mic_type __kmp_mic_type = non_mic;
#endif

#if KMP_AFFINITY_SUPPORTED

# if KMP_GROUP_AFFINITY

int __kmp_num_proc_groups = 1;

/* Dynamically resolved Windows* OS processor-group API entry points
   (NULL until looked up at runtime). */
kmp_GetActiveProcessorCount_t __kmp_GetActiveProcessorCount = NULL;
kmp_GetActiveProcessorGroupCount_t __kmp_GetActiveProcessorGroupCount = NULL;
kmp_GetThreadGroupAffinity_t __kmp_GetThreadGroupAffinity = NULL;
kmp_SetThreadGroupAffinity_t __kmp_SetThreadGroupAffinity = NULL;

# endif /* KMP_GROUP_AFFINITY */

/* Affinity configuration, mostly set from KMP_AFFINITY / OMP_* env vars. */
size_t __kmp_affin_mask_size = 0;
enum affinity_type __kmp_affinity_type = affinity_default;
enum affinity_gran __kmp_affinity_gran = affinity_gran_default;
int __kmp_affinity_gran_levels = -1;
int __kmp_affinity_dups = TRUE;
enum affinity_top_method __kmp_affinity_top_method = affinity_top_method_default;
int __kmp_affinity_compact = 0;
int __kmp_affinity_offset = 0;
int __kmp_affinity_verbose = FALSE;
int __kmp_affinity_warnings = TRUE;
int __kmp_affinity_respect_mask = affinity_respect_mask_default;
char * __kmp_affinity_proclist = NULL;
kmp_affin_mask_t *__kmp_affinity_masks = NULL;
unsigned __kmp_affinity_num_masks = 0;

char const * __kmp_cpuinfo_file = NULL;

#endif /* KMP_AFFINITY_SUPPORTED */

#if OMP_40_ENABLED
kmp_nested_proc_bind_t __kmp_nested_proc_bind = { NULL, 0, 0 };
int __kmp_affinity_num_places = 0;
#endif

/* KMP_PLACE_THREADS-style topology restriction (0 means "not set"). */
int __kmp_place_num_sockets = 0;
int __kmp_place_socket_offset = 0;
int __kmp_place_num_cores = 0;
int __kmp_place_core_offset = 0;
int __kmp_place_num_threads_per_core = 0;

kmp_tasking_mode_t __kmp_tasking_mode = tskm_task_teams;
#if OMP_45_ENABLED
kmp_int32 __kmp_max_task_priority = 0;
#endif

/* This check ensures that the compiler is passing the correct data type
 * for the flags formal parameter of the function kmpc_omp_task_alloc().
 * If the type is not a 4-byte type, then give an error message about
 * a non-positive length array pointing here. If that happens, the
 * kmp_tasking_flags_t structure must be redefined to have exactly 32 bits.
 */
KMP_BUILD_ASSERT( sizeof(kmp_tasking_flags_t) == 4 );

kmp_int32 __kmp_task_stealing_constraint = 1; /* Constrain task stealing by default */
| 278 | |
#ifdef DEBUG_SUSPEND
int __kmp_suspend_count = 0; /* debug-only counter of thread suspends */
#endif

int __kmp_settings = FALSE;        /* KMP_SETTINGS: print settings at startup */
int __kmp_duplicate_library_ok = 0;
#if USE_ITT_BUILD
int __kmp_forkjoin_frames = 1;
int __kmp_forkjoin_frames_mode = 3;
#endif
PACKED_REDUCTION_METHOD_T __kmp_force_reduction_method = reduction_method_not_defined;
int __kmp_determ_red = FALSE;

#ifdef KMP_DEBUG
/* Per-category debug trace levels (0 = off), set via KMP_*_DEBUG env vars. */
int kmp_a_debug = 0;
int kmp_b_debug = 0;
int kmp_c_debug = 0;
int kmp_d_debug = 0;
int kmp_e_debug = 0;
int kmp_f_debug = 0;
int kmp_diag = 0;
#endif

/* For debug information logging using a rotating buffer */
int __kmp_debug_buf = FALSE; /* TRUE means use buffer, FALSE means print to stderr */
int __kmp_debug_buf_lines = KMP_DEBUG_BUF_LINES_INIT; /* Lines of debug stored in buffer */
int __kmp_debug_buf_chars = KMP_DEBUG_BUF_CHARS_INIT; /* Characters allowed per line in buffer */
int __kmp_debug_buf_atomic = FALSE; /* TRUE means use atomic update of buffer entry pointer */

char *__kmp_debug_buffer = NULL; /* Debug buffer itself */
int __kmp_debug_count = 0; /* Counter for number of lines printed in buffer so far */
int __kmp_debug_buf_warn_chars = 0; /* Keep track of char increase recommended in warnings */
/* end rotating debug buffer */

#ifdef KMP_DEBUG
int __kmp_par_range; /* +1 => only go parallel for constructs in range */
                     /* -1 => only go parallel for constructs outside range */
char __kmp_par_range_routine[KMP_PAR_RANGE_ROUTINE_LEN] = { '\0' };
char __kmp_par_range_filename[KMP_PAR_RANGE_FILENAME_LEN] = { '\0' };
int __kmp_par_range_lb = 0;
int __kmp_par_range_ub = INT_MAX;
#endif /* KMP_DEBUG */

/* For printing out dynamic storage map for threads and teams */
int __kmp_storage_map = FALSE; /* True means print storage map for threads and teams */
int __kmp_storage_map_verbose = FALSE; /* True means storage map includes placement info */
int __kmp_storage_map_verbose_specified = FALSE;
/* Initialize the library data structures when we fork a child process, defaults to TRUE */
int __kmp_need_register_atfork = TRUE; /* At initialization, call pthread_atfork to install fork handler */
int __kmp_need_register_atfork_specified = TRUE;

/* Flags recording whether the corresponding environment variable was set. */
int __kmp_env_chunk = FALSE;       /* KMP_CHUNK specified? */
int __kmp_env_stksize = FALSE;     /* KMP_STACKSIZE specified? */
int __kmp_env_omp_stksize = FALSE; /* OMP_STACKSIZE specified? */
int __kmp_env_all_threads = FALSE; /* KMP_ALL_THREADS or KMP_MAX_THREADS specified? */
int __kmp_env_omp_all_threads = FALSE; /* OMP_THREAD_LIMIT specified? */
int __kmp_env_blocktime = FALSE;   /* KMP_BLOCKTIME specified? */
int __kmp_env_checks = FALSE;      /* KMP_CHECKS specified? */
int __kmp_env_consistency_check = FALSE; /* KMP_CONSISTENCY_CHECK specified? */

/* Spin-wait / yield tuning. */
kmp_uint32 __kmp_yield_init = KMP_INIT_WAIT;
kmp_uint32 __kmp_yield_next = KMP_NEXT_WAIT;
kmp_uint32 __kmp_yielding_on = 1;
#if KMP_OS_CNK
kmp_uint32 __kmp_yield_cycle = 0; /* yield-cycle hurts performance on BG/Q (CNK) */
#else
kmp_uint32 __kmp_yield_cycle = 1; /* Yield-cycle is on by default */
#endif
kmp_int32 __kmp_yield_on_count = 10; /* By default, yielding is on for 10 monitor periods. */
kmp_int32 __kmp_yield_off_count = 1; /* By default, yielding is off for 1 monitor period. */
/* ----------------------------------------------------- */
| 349 | /* ----------------------------------------------------- */ |
| 350 | |
| 351 | |
/* ------------------------------------------------------ */
/* STATE mostly synchronized with the global lock */
/* data written to rarely by masters, read often by workers */
/*
 * TODO: None of this global padding stuff works consistently because
 * the order of declaration is not necessarily correlated to storage order.
 * To fix this, all the important globals must be put in a big structure
 * instead.
 */
KMP_ALIGN_CACHE
kmp_info_t **__kmp_threads = NULL; /* array of all registered threads, indexed by gtid */
kmp_root_t **__kmp_root = NULL;    /* array of root (initial) threads */

/* data read/written to often by masters */
KMP_ALIGN_CACHE
volatile int __kmp_nth = 0;             /* number of active threads */
volatile int __kmp_all_nth = 0;         /* total threads, including those in the pool */
int __kmp_thread_pool_nth = 0;
volatile kmp_info_t *__kmp_thread_pool = NULL; /* free list of idle threads */
volatile kmp_team_t *__kmp_team_pool = NULL;   /* free list of idle teams */

KMP_ALIGN_CACHE
volatile int __kmp_thread_pool_active_nth = 0;

/* -------------------------------------------------
 * GLOBAL/ROOT STATE */
KMP_ALIGN_CACHE
kmp_global_t __kmp_global = {{ 0 }};
| 381 | |
/* ----------------------------------------------- */
/* GLOBAL SYNCHRONIZATION LOCKS */
/* TODO verify the need for these locks and if they need to be global */

#if KMP_USE_INTERNODE_ALIGNMENT
/* Multinode systems have larger cache-line granularity which can cause
 * false sharing if the alignment is not large enough for these locks */
KMP_ALIGN_CACHE_INTERNODE

kmp_bootstrap_lock_t __kmp_initz_lock = KMP_BOOTSTRAP_LOCK_INITIALIZER( __kmp_initz_lock ); /* Control initializations */
KMP_ALIGN_CACHE_INTERNODE
kmp_bootstrap_lock_t __kmp_forkjoin_lock; /* control fork/join access */
KMP_ALIGN_CACHE_INTERNODE
kmp_bootstrap_lock_t __kmp_exit_lock; /* exit() is not always thread-safe */
KMP_ALIGN_CACHE_INTERNODE
kmp_bootstrap_lock_t __kmp_monitor_lock; /* control monitor thread creation */
KMP_ALIGN_CACHE_INTERNODE
kmp_bootstrap_lock_t __kmp_tp_cached_lock; /* used for the hack to allow threadprivate cache and __kmp_threads expansion to co-exist */

KMP_ALIGN_CACHE_INTERNODE
kmp_lock_t __kmp_global_lock; /* Control OS/global access */
KMP_ALIGN_CACHE_INTERNODE
kmp_queuing_lock_t __kmp_dispatch_lock; /* Control dispatch access */
KMP_ALIGN_CACHE_INTERNODE
kmp_lock_t __kmp_debug_lock; /* Control I/O access for KMP_DEBUG */
#else
KMP_ALIGN_CACHE

kmp_bootstrap_lock_t __kmp_initz_lock = KMP_BOOTSTRAP_LOCK_INITIALIZER( __kmp_initz_lock ); /* Control initializations */
kmp_bootstrap_lock_t __kmp_forkjoin_lock; /* control fork/join access */
kmp_bootstrap_lock_t __kmp_exit_lock; /* exit() is not always thread-safe */
kmp_bootstrap_lock_t __kmp_monitor_lock; /* control monitor thread creation */
kmp_bootstrap_lock_t __kmp_tp_cached_lock; /* used for the hack to allow threadprivate cache and __kmp_threads expansion to co-exist */

KMP_ALIGN(128)
kmp_lock_t __kmp_global_lock; /* Control OS/global access */
KMP_ALIGN(128)
kmp_queuing_lock_t __kmp_dispatch_lock; /* Control dispatch access */
KMP_ALIGN(128)
kmp_lock_t __kmp_debug_lock; /* Control I/O access for KMP_DEBUG */
#endif
Jim Cownie | 5e8470a | 2013-09-27 10:38:44 +0000 | [diff] [blame] | 423 | |
| 424 | /* ----------------------------------------------- */ |
| 425 | |
#if KMP_HANDLE_SIGNALS
/*
   Signal handling is disabled by default, because it confuses users: in case of sigsegv
   (or other trouble) in user code, the signal handler catches the signal, which then "appears" in
   the monitor thread (when the monitor executes raise()). Users see the signal in the
   monitor thread and blame the OpenMP RTL.

   Grant said signal handling was required on some older OSes (Irix?) supported by KAI, because
   bad applications hung but did not abort. Currently it is not a problem for Linux* OS, OS X* and
   Windows* OS.

   Grant: Found new hangs for EL4, EL5, and a Fedora Core machine. So I'm putting
   the default back for now to see if that fixes hangs on those machines.

   2010-04-13 Lev: It was a bug in Fortran RTL. Fortran RTL prints a kind of stack backtrace
   when the program is aborting, but the code is not signal-safe. When multiple signals are raised at
   the same time (which occurs in dynamic negative tests because all the worker threads detect
   the same error), Fortran RTL may hang. The bug was finally fixed in the Fortran RTL library provided
   by Steve R., and will be available soon.
*/
int __kmp_handle_signals = FALSE;
#endif

/* ----------------------------------------------- */
#ifdef BUILD_TV
kmp_key_t __kmp_tv_key = 0; /* TLS key for TotalView debugger support */
#endif
| 453 | |
| 454 | /* ------------------------------------------------------------------------ */ |
| 455 | /* ------------------------------------------------------------------------ */ |
| 456 | |
#ifdef DEBUG_SUSPEND
/* Return the number of suspends recorded since the last query and reset the
   counter to zero.  NOTE(review): the read-then-clear is not atomic; that is
   acceptable for a debug-only statistic.  The trailing underscore and
   pointer-argument style suggest these are Fortran-callable — confirm. */
int
get_suspend_count_( void ) {
    int snapshot = __kmp_suspend_count;
    __kmp_suspend_count = 0;
    return snapshot;
}

/* Overwrite the debug suspend counter with *value. */
void
set_suspend_count_( int * value ) {
    __kmp_suspend_count = *value;
}
#endif
| 469 | |
// Symbols for MS mutual detection: these named globals exist so the linker
// reports a duplicate-symbol error if two different OpenMP runtimes are
// linked into the same program.
int _You_must_link_with_exactly_one_OpenMP_library = 1;
int _You_must_link_with_Intel_OpenMP_library = 1;
#if KMP_OS_WINDOWS && ( KMP_VERSION_MAJOR > 4 )
int _You_must_link_with_Microsoft_OpenMP_library = 1;
#endif
| 476 | |
| 477 | // end of file // |