blob: 78cada4c3c04a3dd8a35d40ecfdd3e753be7dc60 [file] [log] [blame]
Jim Cownie5e8470a2013-09-27 10:38:44 +00001/*
2 * z_Linux_util.c -- platform specific routines.
Jim Cownie5e8470a2013-09-27 10:38:44 +00003 */
4
5
6//===----------------------------------------------------------------------===//
7//
8// The LLVM Compiler Infrastructure
9//
10// This file is dual licensed under the MIT and the University of Illinois Open
11// Source Licenses. See LICENSE.txt for details.
12//
13//===----------------------------------------------------------------------===//
14
15
16#include "kmp.h"
17#include "kmp_wrapper_getpid.h"
18#include "kmp_itt.h"
19#include "kmp_str.h"
20#include "kmp_i18n.h"
21#include "kmp_io.h"
Jim Cownie4cc4bb42014-10-07 16:25:50 +000022#include "kmp_stats.h"
23#include "kmp_wait_release.h"
Jim Cownie5e8470a2013-09-27 10:38:44 +000024
Joerg Sonnenberger1564f3c2015-09-21 20:02:45 +000025#if !KMP_OS_FREEBSD && !KMP_OS_NETBSD
Alp Toker763b9392014-02-28 09:42:41 +000026# include <alloca.h>
27#endif
Jim Cownie5e8470a2013-09-27 10:38:44 +000028#include <unistd.h>
29#include <math.h> // HUGE_VAL.
30#include <sys/time.h>
31#include <sys/times.h>
32#include <sys/resource.h>
33#include <sys/syscall.h>
34
Jim Cownie3051f972014-08-07 10:12:54 +000035#if KMP_OS_LINUX && !KMP_OS_CNK
Jim Cownie5e8470a2013-09-27 10:38:44 +000036# include <sys/sysinfo.h>
Andrey Churbanovcbda8682015-01-13 14:43:35 +000037# if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
Jim Cownie5e8470a2013-09-27 10:38:44 +000038// We should really include <futex.h>, but that causes compatibility problems on different
39// Linux* OS distributions that either require that you include (or break when you try to include)
40// <pci/types.h>.
41// Since all we need is the two macros below (which are part of the kernel ABI, so can't change)
42// we just define the constants here and don't include <futex.h>
43# ifndef FUTEX_WAIT
44# define FUTEX_WAIT 0
45# endif
46# ifndef FUTEX_WAKE
47# define FUTEX_WAKE 1
48# endif
49# endif
50#elif KMP_OS_DARWIN
51# include <sys/sysctl.h>
52# include <mach/mach.h>
Alp Toker763b9392014-02-28 09:42:41 +000053#elif KMP_OS_FREEBSD
Alp Toker763b9392014-02-28 09:42:41 +000054# include <pthread_np.h>
Jim Cownie5e8470a2013-09-27 10:38:44 +000055#endif
56
57
58#include <dirent.h>
59#include <ctype.h>
60#include <fcntl.h>
61
62/* ------------------------------------------------------------------------ */
63/* ------------------------------------------------------------------------ */
64
// One-shot timer state: the moment the runtime's clock was started.
struct kmp_sys_timer {
    struct timespec     start;
};

// Convert timespec to nanoseconds.
// NOTE(review): tv_sec * 1e9 promotes the arithmetic to double, which cannot
// represent nanosecond counts exactly beyond ~2^53 ns — confirm callers do
// not require exact integer nanoseconds here.
#define TS2NS(timespec) (((timespec).tv_sec * 1e9) + (timespec).tv_nsec)

static struct kmp_sys_timer __kmp_sys_timer_data;

#if KMP_HANDLE_SIGNALS
    typedef void                            (* sig_func_t )( int );
    // Saved previous signal handlers (presumably restored at shutdown —
    // the install/restore code is elsewhere in this file).
    STATIC_EFI2_WORKAROUND struct sigaction __kmp_sighldrs[ NSIG ];
    static sigset_t                         __kmp_sigset;
#endif

// Set to TRUE once the OS-specific runtime layer has been initialized.
static int __kmp_init_runtime   = FALSE;

// Counts fork()s observed by the runtime (used to detect child processes).
static int __kmp_fork_count = 0;

// Shared attributes for the per-thread suspend condition variables/mutexes.
static pthread_condattr_t  __kmp_suspend_cond_attr;
static pthread_mutexattr_t __kmp_suspend_mutex_attr;

// Condition variable / mutex pair used by the monitor thread's timed wait
// (see __kmp_launch_monitor below).
static kmp_cond_align_t    __kmp_wait_cv;
static kmp_mutex_align_t   __kmp_wait_mx;
89
90/* ------------------------------------------------------------------------ */
91/* ------------------------------------------------------------------------ */
92
93#ifdef DEBUG_SUSPEND
// Debug helper: format the internal state of a condition variable into
// "buffer" (which must hold at least 128 bytes).
// NOTE(review): this reads glibc-internal pthread_cond_t fields
// (__c_lock.__status, __c_lock.__spinlock, __c_waiting), so it only
// compiles/works on C libraries exposing that layout.
static void
__kmp_print_cond( char *buffer, kmp_cond_align_t *cond )
{
    KMP_SNPRINTF( buffer, 128, "(cond (lock (%ld, %d)), (descr (%p)))",
                  cond->c_cond.__c_lock.__status, cond->c_cond.__c_lock.__spinlock,
                  cond->c_cond.__c_waiting );
}
101#endif
102
103/* ------------------------------------------------------------------------ */
104/* ------------------------------------------------------------------------ */
105
Jim Cownie3051f972014-08-07 10:12:54 +0000106#if ( KMP_OS_LINUX && KMP_AFFINITY_SUPPORTED)
Jim Cownie5e8470a2013-09-27 10:38:44 +0000107
108/*
109 * Affinity support
110 */
111
/*
 * On some of the older OS's that we build on, these constants aren't present
 * in <asm/unistd.h> #included from <sys/syscall.h>. They must be the same on
 * all systems of the same arch where they are defined, and they cannot
 * change: they are set in stone forever.
 */
118
Jim Cownie181b4bb2013-12-23 17:28:57 +0000119# if KMP_ARCH_X86 || KMP_ARCH_ARM
Jim Cownie5e8470a2013-09-27 10:38:44 +0000120# ifndef __NR_sched_setaffinity
121# define __NR_sched_setaffinity 241
122# elif __NR_sched_setaffinity != 241
123# error Wrong code for setaffinity system call.
124# endif /* __NR_sched_setaffinity */
125# ifndef __NR_sched_getaffinity
126# define __NR_sched_getaffinity 242
127# elif __NR_sched_getaffinity != 242
128# error Wrong code for getaffinity system call.
129# endif /* __NR_sched_getaffinity */
130
Andrey Churbanovcbda8682015-01-13 14:43:35 +0000131# elif KMP_ARCH_AARCH64
132# ifndef __NR_sched_setaffinity
133# define __NR_sched_setaffinity 122
134# elif __NR_sched_setaffinity != 122
135# error Wrong code for setaffinity system call.
136# endif /* __NR_sched_setaffinity */
137# ifndef __NR_sched_getaffinity
138# define __NR_sched_getaffinity 123
139# elif __NR_sched_getaffinity != 123
140# error Wrong code for getaffinity system call.
141# endif /* __NR_sched_getaffinity */
142
Jim Cownie5e8470a2013-09-27 10:38:44 +0000143# elif KMP_ARCH_X86_64
144# ifndef __NR_sched_setaffinity
145# define __NR_sched_setaffinity 203
146# elif __NR_sched_setaffinity != 203
147# error Wrong code for setaffinity system call.
148# endif /* __NR_sched_setaffinity */
149# ifndef __NR_sched_getaffinity
150# define __NR_sched_getaffinity 204
151# elif __NR_sched_getaffinity != 204
152# error Wrong code for getaffinity system call.
153# endif /* __NR_sched_getaffinity */
154
Jim Cownie3051f972014-08-07 10:12:54 +0000155# elif KMP_ARCH_PPC64
156# ifndef __NR_sched_setaffinity
157# define __NR_sched_setaffinity 222
158# elif __NR_sched_setaffinity != 222
159# error Wrong code for setaffinity system call.
160# endif /* __NR_sched_setaffinity */
161# ifndef __NR_sched_getaffinity
162# define __NR_sched_getaffinity 223
163# elif __NR_sched_getaffinity != 223
164# error Wrong code for getaffinity system call.
165# endif /* __NR_sched_getaffinity */
166
167
Jim Cownie5e8470a2013-09-27 10:38:44 +0000168# else
169# error Unknown or unsupported architecture
170
171# endif /* KMP_ARCH_* */
172
173int
174__kmp_set_system_affinity( kmp_affin_mask_t const *mask, int abort_on_error )
175{
176 KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
177 "Illegal set affinity operation when not capable");
178
179 int retval = syscall( __NR_sched_setaffinity, 0, __kmp_affin_mask_size, mask );
180 if (retval >= 0) {
181 return 0;
182 }
183 int error = errno;
184 if (abort_on_error) {
185 __kmp_msg(
186 kmp_ms_fatal,
187 KMP_MSG( FatalSysError ),
188 KMP_ERR( error ),
189 __kmp_msg_null
190 );
191 }
192 return error;
193}
194
195int
196__kmp_get_system_affinity( kmp_affin_mask_t *mask, int abort_on_error )
197{
198 KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
199 "Illegal get affinity operation when not capable");
200
201 int retval = syscall( __NR_sched_getaffinity, 0, __kmp_affin_mask_size, mask );
202 if (retval >= 0) {
203 return 0;
204 }
205 int error = errno;
206 if (abort_on_error) {
207 __kmp_msg(
208 kmp_ms_fatal,
209 KMP_MSG( FatalSysError ),
210 KMP_ERR( error ),
211 __kmp_msg_null
212 );
213 }
214 return error;
215}
216
217void
218__kmp_affinity_bind_thread( int which )
219{
220 KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
221 "Illegal set affinity operation when not capable");
222
Andrey Churbanov74bf17b2015-04-02 13:27:08 +0000223 kmp_affin_mask_t *mask = (kmp_affin_mask_t *)KMP_ALLOCA(__kmp_affin_mask_size);
Jim Cownie5e8470a2013-09-27 10:38:44 +0000224 KMP_CPU_ZERO(mask);
225 KMP_CPU_SET(which, mask);
226 __kmp_set_system_affinity(mask, TRUE);
227}
228
229/*
230 * Determine if we can access affinity functionality on this version of
231 * Linux* OS by checking __NR_sched_{get,set}affinity system calls, and set
232 * __kmp_affin_mask_size to the appropriate value (0 means not capable).
233 */
234void
235__kmp_affinity_determine_capable(const char *env_var)
236{
237 //
238 // Check and see if the OS supports thread affinity.
239 //
240
241# define KMP_CPU_SET_SIZE_LIMIT (1024*1024)
242
243 int gCode;
244 int sCode;
245 kmp_affin_mask_t *buf;
246 buf = ( kmp_affin_mask_t * ) KMP_INTERNAL_MALLOC( KMP_CPU_SET_SIZE_LIMIT );
247
248 // If Linux* OS:
249 // If the syscall fails or returns a suggestion for the size,
250 // then we don't have to search for an appropriate size.
251 gCode = syscall( __NR_sched_getaffinity, 0, KMP_CPU_SET_SIZE_LIMIT, buf );
252 KA_TRACE(30, ( "__kmp_affinity_determine_capable: "
Alp Toker8f2d3f02014-02-24 10:40:15 +0000253 "initial getaffinity call returned %d errno = %d\n",
Jim Cownie5e8470a2013-09-27 10:38:44 +0000254 gCode, errno));
255
256 //if ((gCode < 0) && (errno == ENOSYS))
257 if (gCode < 0) {
258 //
259 // System call not supported
260 //
261 if (__kmp_affinity_verbose || (__kmp_affinity_warnings
262 && (__kmp_affinity_type != affinity_none)
263 && (__kmp_affinity_type != affinity_default)
264 && (__kmp_affinity_type != affinity_disabled))) {
265 int error = errno;
266 __kmp_msg(
267 kmp_ms_warning,
268 KMP_MSG( GetAffSysCallNotSupported, env_var ),
269 KMP_ERR( error ),
270 __kmp_msg_null
271 );
272 }
Andrey Churbanov1f037e42015-03-10 09:15:26 +0000273 KMP_AFFINITY_DISABLE();
Jim Cownie5e8470a2013-09-27 10:38:44 +0000274 KMP_INTERNAL_FREE(buf);
275 return;
276 }
277 if (gCode > 0) { // Linux* OS only
278 // The optimal situation: the OS returns the size of the buffer
279 // it expects.
280 //
281 // A verification of correct behavior is that Isetaffinity on a NULL
282 // buffer with the same size fails with errno set to EFAULT.
283 sCode = syscall( __NR_sched_setaffinity, 0, gCode, NULL );
284 KA_TRACE(30, ( "__kmp_affinity_determine_capable: "
285 "setaffinity for mask size %d returned %d errno = %d\n",
286 gCode, sCode, errno));
287 if (sCode < 0) {
288 if (errno == ENOSYS) {
289 if (__kmp_affinity_verbose || (__kmp_affinity_warnings
290 && (__kmp_affinity_type != affinity_none)
291 && (__kmp_affinity_type != affinity_default)
292 && (__kmp_affinity_type != affinity_disabled))) {
293 int error = errno;
294 __kmp_msg(
295 kmp_ms_warning,
296 KMP_MSG( SetAffSysCallNotSupported, env_var ),
297 KMP_ERR( error ),
298 __kmp_msg_null
299 );
300 }
Andrey Churbanov1f037e42015-03-10 09:15:26 +0000301 KMP_AFFINITY_DISABLE();
Jim Cownie5e8470a2013-09-27 10:38:44 +0000302 KMP_INTERNAL_FREE(buf);
303 }
304 if (errno == EFAULT) {
Andrey Churbanov1f037e42015-03-10 09:15:26 +0000305 KMP_AFFINITY_ENABLE(gCode);
Jim Cownie5e8470a2013-09-27 10:38:44 +0000306 KA_TRACE(10, ( "__kmp_affinity_determine_capable: "
307 "affinity supported (mask size %d)\n",
308 (int)__kmp_affin_mask_size));
309 KMP_INTERNAL_FREE(buf);
310 return;
311 }
312 }
313 }
314
315 //
316 // Call the getaffinity system call repeatedly with increasing set sizes
317 // until we succeed, or reach an upper bound on the search.
318 //
319 KA_TRACE(30, ( "__kmp_affinity_determine_capable: "
320 "searching for proper set size\n"));
321 int size;
322 for (size = 1; size <= KMP_CPU_SET_SIZE_LIMIT; size *= 2) {
323 gCode = syscall( __NR_sched_getaffinity, 0, size, buf );
324 KA_TRACE(30, ( "__kmp_affinity_determine_capable: "
325 "getaffinity for mask size %d returned %d errno = %d\n", size,
326 gCode, errno));
327
328 if (gCode < 0) {
329 if ( errno == ENOSYS )
330 {
331 //
332 // We shouldn't get here
333 //
334 KA_TRACE(30, ( "__kmp_affinity_determine_capable: "
335 "inconsistent OS call behavior: errno == ENOSYS for mask size %d\n",
336 size));
337 if (__kmp_affinity_verbose || (__kmp_affinity_warnings
338 && (__kmp_affinity_type != affinity_none)
339 && (__kmp_affinity_type != affinity_default)
340 && (__kmp_affinity_type != affinity_disabled))) {
341 int error = errno;
342 __kmp_msg(
343 kmp_ms_warning,
344 KMP_MSG( GetAffSysCallNotSupported, env_var ),
345 KMP_ERR( error ),
346 __kmp_msg_null
347 );
348 }
Andrey Churbanov1f037e42015-03-10 09:15:26 +0000349 KMP_AFFINITY_DISABLE();
Jim Cownie5e8470a2013-09-27 10:38:44 +0000350 KMP_INTERNAL_FREE(buf);
351 return;
352 }
353 continue;
354 }
355
356 sCode = syscall( __NR_sched_setaffinity, 0, gCode, NULL );
357 KA_TRACE(30, ( "__kmp_affinity_determine_capable: "
358 "setaffinity for mask size %d returned %d errno = %d\n",
359 gCode, sCode, errno));
360 if (sCode < 0) {
361 if (errno == ENOSYS) { // Linux* OS only
362 //
363 // We shouldn't get here
364 //
365 KA_TRACE(30, ( "__kmp_affinity_determine_capable: "
366 "inconsistent OS call behavior: errno == ENOSYS for mask size %d\n",
367 size));
368 if (__kmp_affinity_verbose || (__kmp_affinity_warnings
369 && (__kmp_affinity_type != affinity_none)
370 && (__kmp_affinity_type != affinity_default)
371 && (__kmp_affinity_type != affinity_disabled))) {
372 int error = errno;
373 __kmp_msg(
374 kmp_ms_warning,
375 KMP_MSG( SetAffSysCallNotSupported, env_var ),
376 KMP_ERR( error ),
377 __kmp_msg_null
378 );
379 }
Andrey Churbanov1f037e42015-03-10 09:15:26 +0000380 KMP_AFFINITY_DISABLE();
Jim Cownie5e8470a2013-09-27 10:38:44 +0000381 KMP_INTERNAL_FREE(buf);
382 return;
383 }
384 if (errno == EFAULT) {
Andrey Churbanov1f037e42015-03-10 09:15:26 +0000385 KMP_AFFINITY_ENABLE(gCode);
Jim Cownie5e8470a2013-09-27 10:38:44 +0000386 KA_TRACE(10, ( "__kmp_affinity_determine_capable: "
387 "affinity supported (mask size %d)\n",
388 (int)__kmp_affin_mask_size));
389 KMP_INTERNAL_FREE(buf);
390 return;
391 }
392 }
393 }
394 //int error = errno; // save uncaught error code
395 KMP_INTERNAL_FREE(buf);
396 // errno = error; // restore uncaught error code, will be printed at the next KMP_WARNING below
397
398 //
399 // Affinity is not supported
400 //
Andrey Churbanov1f037e42015-03-10 09:15:26 +0000401 KMP_AFFINITY_DISABLE();
Jim Cownie5e8470a2013-09-27 10:38:44 +0000402 KA_TRACE(10, ( "__kmp_affinity_determine_capable: "
403 "cannot determine mask size - affinity not supported\n"));
404 if (__kmp_affinity_verbose || (__kmp_affinity_warnings
405 && (__kmp_affinity_type != affinity_none)
406 && (__kmp_affinity_type != affinity_default)
407 && (__kmp_affinity_type != affinity_disabled))) {
408 KMP_WARNING( AffCantGetMaskSize, env_var );
409 }
410}
411
Andrey Churbanovd39f11c2015-03-10 10:14:57 +0000412#endif // KMP_OS_LINUX && KMP_AFFINITY_SUPPORTED
Jim Cownie5e8470a2013-09-27 10:38:44 +0000413
Andrey Churbanovd39f11c2015-03-10 10:14:57 +0000414/* ------------------------------------------------------------------------ */
415/* ------------------------------------------------------------------------ */
416
417#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && !KMP_OS_CNK
418
419int
420__kmp_futex_determine_capable()
421{
422 int loc = 0;
423 int rc = syscall( __NR_futex, &loc, FUTEX_WAKE, 1, NULL, NULL, 0 );
424 int retval = ( rc == 0 ) || ( errno != ENOSYS );
425
426 KA_TRACE(10, ( "__kmp_futex_determine_capable: rc = %d errno = %d\n", rc,
427 errno ) );
428 KA_TRACE(10, ( "__kmp_futex_determine_capable: futex syscall%s supported\n",
429 retval ? "" : " not" ) );
430
431 return retval;
432}
433
#endif // KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && !KMP_OS_CNK
435
436/* ------------------------------------------------------------------------ */
437/* ------------------------------------------------------------------------ */
438
439#if (KMP_ARCH_X86 || KMP_ARCH_X86_64) && (! KMP_ASM_INTRINS)
Jim Cownie5e8470a2013-09-27 10:38:44 +0000440/*
Andrey Churbanovd39f11c2015-03-10 10:14:57 +0000441 * Only 32-bit "add-exchange" instruction on IA-32 architecture causes us to
442 * use compare_and_store for these routines
Jim Cownie5e8470a2013-09-27 10:38:44 +0000443 */
444
Andrey Churbanov7b2ab712015-03-10 09:03:42 +0000445kmp_int8
446__kmp_test_then_or8( volatile kmp_int8 *p, kmp_int8 d )
447{
448 kmp_int8 old_value, new_value;
449
450 old_value = TCR_1( *p );
451 new_value = old_value | d;
452
453 while ( ! KMP_COMPARE_AND_STORE_REL8 ( p, old_value, new_value ) )
454 {
455 KMP_CPU_PAUSE();
456 old_value = TCR_1( *p );
457 new_value = old_value | d;
458 }
459 return old_value;
460}
461
462kmp_int8
463__kmp_test_then_and8( volatile kmp_int8 *p, kmp_int8 d )
464{
465 kmp_int8 old_value, new_value;
466
467 old_value = TCR_1( *p );
468 new_value = old_value & d;
469
470 while ( ! KMP_COMPARE_AND_STORE_REL8 ( p, old_value, new_value ) )
471 {
472 KMP_CPU_PAUSE();
473 old_value = TCR_1( *p );
474 new_value = old_value & d;
475 }
476 return old_value;
477}
478
Jim Cownie5e8470a2013-09-27 10:38:44 +0000479kmp_int32
480__kmp_test_then_or32( volatile kmp_int32 *p, kmp_int32 d )
481{
482 kmp_int32 old_value, new_value;
483
484 old_value = TCR_4( *p );
485 new_value = old_value | d;
486
Jim Cownie3051f972014-08-07 10:12:54 +0000487 while ( ! KMP_COMPARE_AND_STORE_REL32 ( p, old_value, new_value ) )
Jim Cownie5e8470a2013-09-27 10:38:44 +0000488 {
489 KMP_CPU_PAUSE();
490 old_value = TCR_4( *p );
491 new_value = old_value | d;
492 }
493 return old_value;
494}
495
496kmp_int32
497__kmp_test_then_and32( volatile kmp_int32 *p, kmp_int32 d )
498{
499 kmp_int32 old_value, new_value;
500
501 old_value = TCR_4( *p );
502 new_value = old_value & d;
503
Jim Cownie3051f972014-08-07 10:12:54 +0000504 while ( ! KMP_COMPARE_AND_STORE_REL32 ( p, old_value, new_value ) )
Jim Cownie5e8470a2013-09-27 10:38:44 +0000505 {
506 KMP_CPU_PAUSE();
507 old_value = TCR_4( *p );
508 new_value = old_value & d;
509 }
510 return old_value;
511}
512
Andrey Churbanovcbda8682015-01-13 14:43:35 +0000513# if KMP_ARCH_X86 || KMP_ARCH_PPC64 || KMP_ARCH_AARCH64
Andrey Churbanovd39f11c2015-03-10 10:14:57 +0000514kmp_int8
515__kmp_test_then_add8( volatile kmp_int8 *p, kmp_int8 d )
516{
517 kmp_int8 old_value, new_value;
518
519 old_value = TCR_1( *p );
520 new_value = old_value + d;
521
522 while ( ! KMP_COMPARE_AND_STORE_REL8 ( p, old_value, new_value ) )
523 {
524 KMP_CPU_PAUSE();
525 old_value = TCR_1( *p );
526 new_value = old_value + d;
527 }
528 return old_value;
529}
530
Jim Cownie5e8470a2013-09-27 10:38:44 +0000531kmp_int64
532__kmp_test_then_add64( volatile kmp_int64 *p, kmp_int64 d )
533{
534 kmp_int64 old_value, new_value;
535
536 old_value = TCR_8( *p );
537 new_value = old_value + d;
538
Jim Cownie3051f972014-08-07 10:12:54 +0000539 while ( ! KMP_COMPARE_AND_STORE_REL64 ( p, old_value, new_value ) )
Jim Cownie5e8470a2013-09-27 10:38:44 +0000540 {
541 KMP_CPU_PAUSE();
542 old_value = TCR_8( *p );
543 new_value = old_value + d;
544 }
545 return old_value;
546}
# endif /* KMP_ARCH_X86 || KMP_ARCH_PPC64 || KMP_ARCH_AARCH64 */
548
549kmp_int64
550__kmp_test_then_or64( volatile kmp_int64 *p, kmp_int64 d )
551{
552 kmp_int64 old_value, new_value;
553
554 old_value = TCR_8( *p );
555 new_value = old_value | d;
Jim Cownie3051f972014-08-07 10:12:54 +0000556 while ( ! KMP_COMPARE_AND_STORE_REL64 ( p, old_value, new_value ) )
Jim Cownie5e8470a2013-09-27 10:38:44 +0000557 {
558 KMP_CPU_PAUSE();
559 old_value = TCR_8( *p );
560 new_value = old_value | d;
561 }
562 return old_value;
563}
564
565kmp_int64
566__kmp_test_then_and64( volatile kmp_int64 *p, kmp_int64 d )
567{
568 kmp_int64 old_value, new_value;
569
570 old_value = TCR_8( *p );
571 new_value = old_value & d;
Jim Cownie3051f972014-08-07 10:12:54 +0000572 while ( ! KMP_COMPARE_AND_STORE_REL64 ( p, old_value, new_value ) )
Jim Cownie5e8470a2013-09-27 10:38:44 +0000573 {
574 KMP_CPU_PAUSE();
575 old_value = TCR_8( *p );
576 new_value = old_value & d;
577 }
578 return old_value;
579}
580
581#endif /* (KMP_ARCH_X86 || KMP_ARCH_X86_64) && (! KMP_ASM_INTRINS) */
582
583void
584__kmp_terminate_thread( int gtid )
585{
586 int status;
587 kmp_info_t *th = __kmp_threads[ gtid ];
588
589 if ( !th ) return;
590
591 #ifdef KMP_CANCEL_THREADS
592 KA_TRACE( 10, ("__kmp_terminate_thread: kill (%d)\n", gtid ) );
593 status = pthread_cancel( th->th.th_info.ds.ds_thread );
594 if ( status != 0 && status != ESRCH ) {
595 __kmp_msg(
596 kmp_ms_fatal,
597 KMP_MSG( CantTerminateWorkerThread ),
598 KMP_ERR( status ),
599 __kmp_msg_null
600 );
601 }; // if
602 #endif
603 __kmp_yield( TRUE );
604} //
605
606/* ------------------------------------------------------------------------ */
607/* ------------------------------------------------------------------------ */
608
609/* ------------------------------------------------------------------------ */
610/* ------------------------------------------------------------------------ */
611
612/*
613 * Set thread stack info according to values returned by
614 * pthread_getattr_np().
615 * If values are unreasonable, assume call failed and use
616 * incremental stack refinement method instead.
617 * Returns TRUE if the stack parameters could be determined exactly,
618 * FALSE if incremental refinement is necessary.
619 */
// Record the thread's stack base/size in its kmp_info_t, using
// pthread_getattr_np()/pthread_attr_get_np() where available.  Falls back
// to incremental stack refinement (stackgrow=TRUE, base = address of a
// local) when the attributes are unavailable or unreasonable.
// Returns TRUE if the stack parameters were determined exactly, FALSE if
// incremental refinement is necessary.
static kmp_int32
__kmp_set_stack_info( int gtid, kmp_info_t *th )
{
    int stack_data;     // local whose address seeds the fallback stack base
#if KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD
    /* Linux* OS only -- no pthread_getattr_np support on OS X* */
    pthread_attr_t attr;
    int status;
    size_t size = 0;
    void * addr = 0;

    /* Always do incremental stack refinement for ubermaster threads since the initial
       thread stack range can be reduced by sibling thread creation so pthread_attr_getstack
       may cause thread gtid aliasing */
    if ( ! KMP_UBER_GTID(gtid) ) {

        /* Fetch the real thread attributes */
        status = pthread_attr_init( &attr );
        KMP_CHECK_SYSFAIL( "pthread_attr_init", status );
#if KMP_OS_FREEBSD || KMP_OS_NETBSD
        // BSDs spell the "get attributes of a live thread" call differently.
        status = pthread_attr_get_np( pthread_self(), &attr );
        KMP_CHECK_SYSFAIL( "pthread_attr_get_np", status );
#else
        status = pthread_getattr_np( pthread_self(), &attr );
        KMP_CHECK_SYSFAIL( "pthread_getattr_np", status );
#endif
        status = pthread_attr_getstack( &attr, &addr, &size );
        KMP_CHECK_SYSFAIL( "pthread_attr_getstack", status );
        KA_TRACE( 60, ( "__kmp_set_stack_info: T#%d pthread_attr_getstack returned size: %lu, "
                        "low addr: %p\n",
                        gtid, size, addr ));

        status = pthread_attr_destroy( &attr );
        KMP_CHECK_SYSFAIL( "pthread_attr_destroy", status );
    }

    if ( size != 0 && addr != 0 ) {     /* was stack parameter determination successful? */
        /* Store the correct base and size.  The base is the HIGH end:
           low address + size (stacks grow downward). */
        TCW_PTR(th->th.th_info.ds.ds_stackbase, (((char *)addr) + size));
        TCW_PTR(th->th.th_info.ds.ds_stacksize, size);
        TCW_4(th->th.th_info.ds.ds_stackgrow, FALSE);
        return TRUE;
    }
#endif /* KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD */
    /* Use incremental refinement starting from initial conservative estimate */
    TCW_PTR(th->th.th_info.ds.ds_stacksize, 0);
    TCW_PTR(th -> th.th_info.ds.ds_stackbase, &stack_data);
    TCW_4(th->th.th_info.ds.ds_stackgrow, TRUE);
    return FALSE;
}
670
// pthread entry point for OpenMP worker threads.  "thr" is the worker's
// kmp_info_t.  Performs per-thread setup (gtid TLS, stats pointer, ITT name,
// affinity mask, cancellation mode, FP control registers, signal mask,
// stack-offset padding, stack info), then runs __kmp_launch_thread() and
// returns its result as the pthread exit value.
static void*
__kmp_launch_worker( void *thr )
{
    int status, old_type, old_state;
#ifdef KMP_BLOCK_SIGNALS
    sigset_t new_set, old_set;
#endif /* KMP_BLOCK_SIGNALS */
    void *exit_val;
#if KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD
    // Keeps the alloca'd stack padding live for the thread's lifetime;
    // volatile so the compiler cannot discard the allocation.
    void * volatile padding = 0;
#endif
    int gtid;

    // Publish this thread's global thread id into thread-local storage.
    gtid = ((kmp_info_t*)thr) -> th.th_info.ds.ds_gtid;
    __kmp_gtid_set_specific( gtid );
#ifdef KMP_TDATA_GTID
    __kmp_gtid = gtid;
#endif
#if KMP_STATS_ENABLED
    // set __thread local index to point to thread-specific stats
    __kmp_stats_thread_ptr = ((kmp_info_t*)thr)->th.th_stats;
#endif

#if USE_ITT_BUILD
    __kmp_itt_thread_name( gtid );
#endif /* USE_ITT_BUILD */

#if KMP_AFFINITY_SUPPORTED
    __kmp_affinity_set_init_mask( gtid, FALSE );
#endif

#ifdef KMP_CANCEL_THREADS
    status = pthread_setcanceltype( PTHREAD_CANCEL_ASYNCHRONOUS, & old_type );
    KMP_CHECK_SYSFAIL( "pthread_setcanceltype", status );
    /* josh todo: isn't PTHREAD_CANCEL_ENABLE default for newly-created threads? */
    status = pthread_setcancelstate( PTHREAD_CANCEL_ENABLE, & old_state );
    KMP_CHECK_SYSFAIL( "pthread_setcancelstate", status );
#endif

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
    //
    // Set the FP control regs to be a copy of
    // the parallel initialization thread's.
    //
    __kmp_clear_x87_fpu_status_word();
    __kmp_load_x87_fpu_control_word( &__kmp_init_x87_fpu_control_word );
    __kmp_load_mxcsr( &__kmp_init_mxcsr );
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

#ifdef KMP_BLOCK_SIGNALS
    // Block all signals in workers; the old mask is restored before exit.
    status = sigfillset( & new_set );
    KMP_CHECK_SYSFAIL_ERRNO( "sigfillset", status );
    status = pthread_sigmask( SIG_BLOCK, & new_set, & old_set );
    KMP_CHECK_SYSFAIL( "pthread_sigmask", status );
#endif /* KMP_BLOCK_SIGNALS */

#if KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD
    if ( __kmp_stkoffset > 0 && gtid > 0 ) {
        // Stagger each worker's stack frame start by gtid * KMP_STKOFFSET;
        // presumably to avoid cache/page aliasing between worker stacks --
        // must happen before __kmp_set_stack_info records the stack layout.
        padding = KMP_ALLOCA( gtid * __kmp_stkoffset );
    }
#endif

    KMP_MB();
    __kmp_set_stack_info( gtid, (kmp_info_t*)thr );

    __kmp_check_stack_overlap( (kmp_info_t*)thr );

    // Run the worker's main loop; its return value becomes our exit value.
    exit_val = __kmp_launch_thread( (kmp_info_t *) thr );

#ifdef KMP_BLOCK_SIGNALS
    status = pthread_sigmask( SIG_SETMASK, & old_set, NULL );
    KMP_CHECK_SYSFAIL( "pthread_sigmask", status );
#endif /* KMP_BLOCK_SIGNALS */

    return exit_val;
}
747
748
749/* The monitor thread controls all of the threads in the complex */
750
// pthread entry point for the monitor thread, which periodically bumps the
// global time stamp (__kmp_global.g.g_time.dt.t_value) so blocked threads
// can measure elapsed intervals, and toggles the yield duty cycle.  Wakes
// __kmp_monitor_wakeups times per second via pthread_cond_timedwait on
// __kmp_wait_cv / __kmp_wait_mx.  On abnormal shutdown (g_abort set) it
// cancels all workers, cleans up, and re-raises the caught signal.
static void*
__kmp_launch_monitor( void *thr )
{
    int status, old_type, old_state;
#ifdef KMP_BLOCK_SIGNALS
    sigset_t new_set;
#endif /* KMP_BLOCK_SIGNALS */
    struct timespec interval;    // time between monitor wakeups
    int yield_count;             // wakeups per on/off phase of the yield cycle
    int yield_cycles = 0;

    KMP_MB(); /* Flush all pending memory write invalidates. */

    KA_TRACE( 10, ("__kmp_launch_monitor: #1 launched\n" ) );

    /* register us as the monitor thread */
    __kmp_gtid_set_specific( KMP_GTID_MONITOR );
#ifdef KMP_TDATA_GTID
    __kmp_gtid = KMP_GTID_MONITOR;
#endif

    KMP_MB();

#if USE_ITT_BUILD
    __kmp_itt_thread_ignore(); // Instruct Intel(R) Threading Tools to ignore monitor thread.
#endif /* USE_ITT_BUILD */

    __kmp_set_stack_info( ((kmp_info_t*)thr)->th.th_info.ds.ds_gtid, (kmp_info_t*)thr );

    __kmp_check_stack_overlap( (kmp_info_t*)thr );

#ifdef KMP_CANCEL_THREADS
    status = pthread_setcanceltype( PTHREAD_CANCEL_ASYNCHRONOUS, & old_type );
    KMP_CHECK_SYSFAIL( "pthread_setcanceltype", status );
    /* josh todo: isn't PTHREAD_CANCEL_ENABLE default for newly-created threads? */
    status = pthread_setcancelstate( PTHREAD_CANCEL_ENABLE, & old_state );
    KMP_CHECK_SYSFAIL( "pthread_setcancelstate", status );
#endif

    #if KMP_REAL_TIME_FIX
    // This is a potential fix which allows application with real-time scheduling policy work.
    // However, decision about the fix is not made yet, so it is disabled by default.
    { // Are program started with real-time scheduling policy?
        int sched = sched_getscheduler( 0 );
        if ( sched == SCHED_FIFO || sched == SCHED_RR ) {
            // Yes, we are a part of real-time application. Try to increase the priority of the
            // monitor.
            struct sched_param param;
            int max_priority = sched_get_priority_max( sched );
            int rc;
            KMP_WARNING( RealTimeSchedNotSupported );
            sched_getparam( 0, & param );
            if ( param.sched_priority < max_priority ) {
                // Bump monitor priority by one so workers cannot starve it.
                param.sched_priority += 1;
                rc = sched_setscheduler( 0, sched, & param );
                if ( rc != 0 ) {
                    int error = errno;
                    __kmp_msg(
                        kmp_ms_warning,
                        KMP_MSG( CantChangeMonitorPriority ),
                        KMP_ERR( error ),
                        KMP_MSG( MonitorWillStarve ),
                        __kmp_msg_null
                    );
                }; // if
            } else {
                // We cannot abort here, because number of CPUs may be enough for all the threads,
                // including the monitor thread, so application could potentially work...
                __kmp_msg(
                    kmp_ms_warning,
                    KMP_MSG( RunningAtMaxPriority ),
                    KMP_MSG( MonitorWillStarve ),
                    KMP_HNT( RunningAtMaxPriority ),
                    __kmp_msg_null
                );
            }; // if
        }; // if
        TCW_4( __kmp_global.g.g_time.dt.t_value, 0 ); // AC: free thread that waits for monitor started
    }
    #endif // KMP_REAL_TIME_FIX

    KMP_MB(); /* Flush all pending memory write invalidates. */

    // Compute the wakeup interval from the configured wakeups-per-second.
    if ( __kmp_monitor_wakeups == 1 ) {
        interval.tv_sec = 1;
        interval.tv_nsec = 0;
    } else {
        interval.tv_sec = 0;
        interval.tv_nsec = (KMP_NSEC_PER_SEC / __kmp_monitor_wakeups);
    }

    KA_TRACE( 10, ("__kmp_launch_monitor: #2 monitor\n" ) );

    if (__kmp_yield_cycle) {
        __kmp_yielding_on = 0;  /* Start out with yielding shut off */
        yield_count = __kmp_yield_off_count;
    } else {
        __kmp_yielding_on = 1;  /* Yielding is on permanently */
    }

    while( ! TCR_4( __kmp_global.g.g_done ) ) {
        struct timespec  now;
        struct timeval   tval;

        /*  This thread monitors the state of the system */

        KA_TRACE( 15, ( "__kmp_launch_monitor: update\n" ) );

        // Compute the absolute deadline for this wakeup: now + interval.
        status = gettimeofday( &tval, NULL );
        KMP_CHECK_SYSFAIL_ERRNO( "gettimeofday", status );
        TIMEVAL_TO_TIMESPEC( &tval, &now );

        now.tv_sec  += interval.tv_sec;
        now.tv_nsec += interval.tv_nsec;

        // Normalize tv_nsec overflow into tv_sec.
        if (now.tv_nsec >= KMP_NSEC_PER_SEC) {
            now.tv_sec  += 1;
            now.tv_nsec -= KMP_NSEC_PER_SEC;
        }

        status = pthread_mutex_lock( & __kmp_wait_mx.m_mutex );
        KMP_CHECK_SYSFAIL( "pthread_mutex_lock", status );
        // AC: the monitor should not fall asleep if g_done has been set
        if ( !TCR_4(__kmp_global.g.g_done) ) {  // check once more under mutex
            status = pthread_cond_timedwait( &__kmp_wait_cv.c_cond, &__kmp_wait_mx.m_mutex, &now );
            if ( status != 0 ) {
                // ETIMEDOUT is the normal wakeup; EINTR is a benign interrupt.
                if ( status != ETIMEDOUT && status != EINTR ) {
                    KMP_SYSFAIL( "pthread_cond_timedwait", status );
                };
            };
        };
        status = pthread_mutex_unlock( & __kmp_wait_mx.m_mutex );
        KMP_CHECK_SYSFAIL( "pthread_mutex_unlock", status );

        // Alternate __kmp_yielding_on between on/off phases every
        // yield_count wakeups when duty-cycled yielding is configured.
        if (__kmp_yield_cycle) {
            yield_cycles++;
            if ( (yield_cycles % yield_count) == 0 ) {
                if (__kmp_yielding_on) {
                    __kmp_yielding_on = 0;  /* Turn it off now */
                    yield_count = __kmp_yield_off_count;
                } else {
                    __kmp_yielding_on = 1;  /* Turn it on now */
                    yield_count = __kmp_yield_on_count;
                }
                yield_cycles = 0;
            }
        } else {
            __kmp_yielding_on = 1;
        }

        // Advance the global time stamp that blocked threads observe.
        TCW_4( __kmp_global.g.g_time.dt.t_value,
          TCR_4( __kmp_global.g.g_time.dt.t_value ) + 1 );

        KMP_MB(); /* Flush all pending memory write invalidates. */
    }

    KA_TRACE( 10, ("__kmp_launch_monitor: #3 cleanup\n" ) );

#ifdef KMP_BLOCK_SIGNALS
    // Unblock all signals so the raise() below can be delivered.
    status = sigfillset( & new_set );
    KMP_CHECK_SYSFAIL_ERRNO( "sigfillset", status );
    status = pthread_sigmask( SIG_UNBLOCK, & new_set, NULL );
    KMP_CHECK_SYSFAIL( "pthread_sigmask", status );
#endif /* KMP_BLOCK_SIGNALS */

    KA_TRACE( 10, ("__kmp_launch_monitor: #4 finished\n" ) );

    if( __kmp_global.g.g_abort != 0 ) {
        /* now we need to terminate the worker threads  */
        /* the value of t_abort is the signal we caught */

        int gtid;

        KA_TRACE( 10, ("__kmp_launch_monitor: #5 terminate sig=%d\n", __kmp_global.g.g_abort ) );

        /* terminate the OpenMP worker threads */
        /* TODO this is not valid for sibling threads!!
         * the uber master might not be 0 anymore.. */
        for (gtid = 1; gtid < __kmp_threads_capacity; ++gtid)
            __kmp_terminate_thread( gtid );

        __kmp_cleanup();

        KA_TRACE( 10, ("__kmp_launch_monitor: #6 raise sig=%d\n", __kmp_global.g.g_abort ) );

        if (__kmp_global.g.g_abort > 0)
            raise( __kmp_global.g.g_abort );

    }

    KA_TRACE( 10, ("__kmp_launch_monitor: #7 exit\n" ) );

    return thr;
}
945
/*
 * Create the OS thread that will run worker "gtid", or — for an uber (master)
 * thread — adopt the calling thread instead of creating a new one.
 *
 * gtid        global thread id of the worker being created.
 * th          the worker's kmp_info_t descriptor; ds_gtid and ds_thread are
 *             filled in here (th_stats too when KMP_STATS_ENABLED).
 * stack_size  requested stack size; a per-gtid offset is added below so that
 *             thread stacks do not alias in the cache.
 *
 * Any unrecoverable pthread failure is reported via __kmp_msg(kmp_ms_fatal,..)
 * or KMP_SYSFAIL and does not return.
 */
void
__kmp_create_worker( int gtid, kmp_info_t *th, size_t stack_size )
{
    pthread_t handle;
    pthread_attr_t thread_attr;
    int status;


    th->th.th_info.ds.ds_gtid = gtid;

#if KMP_STATS_ENABLED
    // sets up worker thread stats
    __kmp_acquire_tas_lock(&__kmp_stats_lock, gtid);

    // th->th.th_stats is used to transfer thread specific stats-pointer to __kmp_launch_worker
    // So when thread is created (goes into __kmp_launch_worker) it will
    // set it's __thread local pointer to th->th.th_stats
    th->th.th_stats = __kmp_stats_list.push_back(gtid);
    if(KMP_UBER_GTID(gtid)) {
        // The uber thread never goes through __kmp_launch_worker, so its
        // stats bookkeeping is done right here instead.
        __kmp_stats_start_time = tsc_tick_count::now();
        __kmp_stats_thread_ptr = th->th.th_stats;
        __kmp_stats_init();
        KMP_START_EXPLICIT_TIMER(OMP_serial);
        KMP_START_EXPLICIT_TIMER(OMP_start_end);
    }
    __kmp_release_tas_lock(&__kmp_stats_lock, gtid);

#endif // KMP_STATS_ENABLED

    if ( KMP_UBER_GTID(gtid) ) {
        // Uber thread: no pthread_create — record the current thread's handle
        // and stack geometry, then return.
        KA_TRACE( 10, ("__kmp_create_worker: uber thread (%d)\n", gtid ) );
        th -> th.th_info.ds.ds_thread = pthread_self();
        __kmp_set_stack_info( gtid, th );
        __kmp_check_stack_overlap( th );
        return;
    }; // if

    KA_TRACE( 10, ("__kmp_create_worker: try to create thread (%d)\n", gtid ) );

    KMP_MB();       /* Flush all pending memory write invalidates. */

#ifdef KMP_THREAD_ATTR
    // NOTE(review): if KMP_THREAD_ATTR is NOT defined, thread_attr below is
    // passed to pthread_create() uninitialized — presumably every supported
    // build defines KMP_THREAD_ATTR; confirm before relying on the other path.
    {
        status = pthread_attr_init( &thread_attr );
        if ( status != 0 ) {
            __kmp_msg(
                kmp_ms_fatal,
                KMP_MSG( CantInitThreadAttrs ),
                KMP_ERR( status ),
                __kmp_msg_null
            );
        }; // if
        status = pthread_attr_setdetachstate( & thread_attr, PTHREAD_CREATE_JOINABLE );
        if ( status != 0 ) {
            __kmp_msg(
                kmp_ms_fatal,
                KMP_MSG( CantSetWorkerState ),
                KMP_ERR( status ),
                __kmp_msg_null
            );
        }; // if

        /* Set stack size for this thread now.
         * The multiple of 2 is there because on some machines, requesting an unusual stacksize
         * causes the thread to have an offset before the dummy alloca() takes place to create the
         * offset.  Since we want the user to have a sufficient stacksize AND support a stack offset, we
         * alloca() twice the offset so that the upcoming alloca() does not eliminate any premade
         * offset, and also gives the user the stack space they requested for all threads */
        stack_size += gtid * __kmp_stkoffset * 2;

        KA_TRACE( 10, ( "__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
                        "__kmp_stksize = %lu bytes, final stacksize = %lu bytes\n",
                        gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size ) );

# ifdef _POSIX_THREAD_ATTR_STACKSIZE
        status = pthread_attr_setstacksize( & thread_attr, stack_size );
#  ifdef KMP_BACKUP_STKSIZE
        // If the requested size was rejected and the user did not set a size
        // explicitly, fall back to the (smaller) backup size and retry once.
        if ( status != 0 ) {
            if ( ! __kmp_env_stksize ) {
                stack_size = KMP_BACKUP_STKSIZE + gtid * __kmp_stkoffset;
                __kmp_stksize = KMP_BACKUP_STKSIZE;
                KA_TRACE( 10, ("__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
                               "__kmp_stksize = %lu bytes, (backup) final stacksize = %lu "
                               "bytes\n",
                               gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size )
                          );
                status = pthread_attr_setstacksize( &thread_attr, stack_size );
            }; // if
        }; // if
#  endif /* KMP_BACKUP_STKSIZE */
        if ( status != 0 ) {
            __kmp_msg(
                kmp_ms_fatal,
                KMP_MSG( CantSetWorkerStackSize, stack_size ),
                KMP_ERR( status ),
                KMP_HNT( ChangeWorkerStackSize ),
                __kmp_msg_null
            );
        }; // if
# endif /* _POSIX_THREAD_ATTR_STACKSIZE */
    }
#endif /* KMP_THREAD_ATTR */

    {
        status = pthread_create( & handle, & thread_attr, __kmp_launch_worker, (void *) th );
        if ( status != 0 || ! handle ) { // ??? Why do we check handle??
            // Map the common pthread_create failures to specific user-facing
            // diagnostics before the generic KMP_SYSFAIL abort below.
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
            if ( status == EINVAL ) {
                __kmp_msg(
                    kmp_ms_fatal,
                    KMP_MSG( CantSetWorkerStackSize, stack_size ),
                    KMP_ERR( status ),
                    KMP_HNT( IncreaseWorkerStackSize ),
                    __kmp_msg_null
                );
            };
            if ( status == ENOMEM ) {
                __kmp_msg(
                    kmp_ms_fatal,
                    KMP_MSG( CantSetWorkerStackSize, stack_size ),
                    KMP_ERR( status ),
                    KMP_HNT( DecreaseWorkerStackSize ),
                    __kmp_msg_null
                );
            };
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
            if ( status == EAGAIN ) {
                __kmp_msg(
                    kmp_ms_fatal,
                    KMP_MSG( NoResourcesForWorkerThread ),
                    KMP_ERR( status ),
                    KMP_HNT( Decrease_NUM_THREADS ),
                    __kmp_msg_null
                );
            }; // if
            KMP_SYSFAIL( "pthread_create", status );
        }; // if

        th->th.th_info.ds.ds_thread = handle;
    }

#ifdef KMP_THREAD_ATTR
    {
        // Attr destruction failure is only a warning: the thread is already
        // running at this point.
        status = pthread_attr_destroy( & thread_attr );
        if ( status ) {
            __kmp_msg(
                kmp_ms_warning,
                KMP_MSG( CantDestroyThreadAttrs ),
                KMP_ERR( status ),
                __kmp_msg_null
            );
        }; // if
    }
#endif /* KMP_THREAD_ATTR */

    KMP_MB();       /* Flush all pending memory write invalidates. */

    KA_TRACE( 10, ("__kmp_create_worker: done creating thread (%d)\n", gtid ) );

} // __kmp_create_worker
1106
1107
/*
 * Create the monitor thread, which periodically updates the global time stamp
 * and watches for shutdown/abort (see __kmp_launch_monitor above).
 *
 * th  descriptor to use for the monitor; ds_tid/ds_gtid are set to
 *     KMP_GTID_MONITOR and ds_thread to the new pthread handle.
 *
 * Stack sizing: if the user did not request a size (auto_adj_size), the
 * default is used and doubled on EINVAL-style failures via the retry: label.
 * Fatal pthread errors are reported through __kmp_msg / KMP_SYSFAIL.
 */
void
__kmp_create_monitor( kmp_info_t *th )
{
    pthread_t handle;
    pthread_attr_t thread_attr;
    size_t size;
    int status;
    int auto_adj_size = FALSE;  // TRUE => stack size came from the default, may be doubled on retry

    KA_TRACE( 10, ("__kmp_create_monitor: try to create monitor\n" ) );

    KMP_MB();       /* Flush all pending memory write invalidates. */

    th->th.th_info.ds.ds_tid  = KMP_GTID_MONITOR;
    th->th.th_info.ds.ds_gtid = KMP_GTID_MONITOR;
    #if KMP_REAL_TIME_FIX
        TCW_4( __kmp_global.g.g_time.dt.t_value, -1 ); // Will use it for synchronization a bit later.
    #else
        TCW_4( __kmp_global.g.g_time.dt.t_value, 0 );
    #endif // KMP_REAL_TIME_FIX

    #ifdef KMP_THREAD_ATTR
        if ( __kmp_monitor_stksize == 0 ) {
            __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE;
            auto_adj_size = TRUE;
        }
        status = pthread_attr_init( &thread_attr );
        if ( status != 0 ) {
            __kmp_msg(
                kmp_ms_fatal,
                KMP_MSG( CantInitThreadAttrs ),
                KMP_ERR( status ),
                __kmp_msg_null
            );
        }; // if
        status = pthread_attr_setdetachstate( & thread_attr, PTHREAD_CREATE_JOINABLE );
        if ( status != 0 ) {
            __kmp_msg(
                kmp_ms_fatal,
                KMP_MSG( CantSetMonitorState ),
                KMP_ERR( status ),
                __kmp_msg_null
            );
        }; // if

        #ifdef _POSIX_THREAD_ATTR_STACKSIZE
            status = pthread_attr_getstacksize( & thread_attr, & size );
            KMP_CHECK_SYSFAIL( "pthread_attr_getstacksize", status );
        #else
            size = __kmp_sys_min_stksize;
        #endif /* _POSIX_THREAD_ATTR_STACKSIZE */
    #endif /* KMP_THREAD_ATTR */

    // Clamp the requested size to at least the system minimum.
    if ( __kmp_monitor_stksize == 0 ) {
        __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE;
    }
    if ( __kmp_monitor_stksize < __kmp_sys_min_stksize ) {
        __kmp_monitor_stksize = __kmp_sys_min_stksize;
    }

    KA_TRACE( 10, ( "__kmp_create_monitor: default stacksize = %lu bytes,"
                    "requested stacksize = %lu bytes\n",
                    size, __kmp_monitor_stksize ) );

    retry:

    /* Set stack size for this thread now. */

    #ifdef _POSIX_THREAD_ATTR_STACKSIZE
        KA_TRACE( 10, ( "__kmp_create_monitor: setting stacksize = %lu bytes,",
                        __kmp_monitor_stksize ) );
        status = pthread_attr_setstacksize( & thread_attr, __kmp_monitor_stksize );
        if ( status != 0 ) {
            // NOTE(review): unlike the pthread_create retry below, this retry
            // has no 0x40000000 upper bound, so a persistently failing
            // setstacksize could loop while doubling — confirm whether a cap
            // should be added here too.
            if ( auto_adj_size ) {
                __kmp_monitor_stksize *= 2;
                goto retry;
            }
            __kmp_msg(
                kmp_ms_warning,  // should this be fatal?  BB
                KMP_MSG( CantSetMonitorStackSize, (long int) __kmp_monitor_stksize ),
                KMP_ERR( status ),
                KMP_HNT( ChangeMonitorStackSize ),
                __kmp_msg_null
            );
        }; // if
    #endif /* _POSIX_THREAD_ATTR_STACKSIZE */

    status = pthread_create( &handle, & thread_attr, __kmp_launch_monitor, (void *) th );

    if ( status != 0 ) {
        #ifdef _POSIX_THREAD_ATTR_STACKSIZE
            if ( status == EINVAL ) {
                // Retry with a doubled stack, but only up to 1 GiB.
                if ( auto_adj_size && ( __kmp_monitor_stksize < (size_t)0x40000000 ) ) {
                    __kmp_monitor_stksize *= 2;
                    goto retry;
                }
                __kmp_msg(
                    kmp_ms_fatal,
                    KMP_MSG( CantSetMonitorStackSize, __kmp_monitor_stksize ),
                    KMP_ERR( status ),
                    KMP_HNT( IncreaseMonitorStackSize ),
                    __kmp_msg_null
                );
            }; // if
            if ( status == ENOMEM ) {
                __kmp_msg(
                    kmp_ms_fatal,
                    KMP_MSG( CantSetMonitorStackSize, __kmp_monitor_stksize ),
                    KMP_ERR( status ),
                    KMP_HNT( DecreaseMonitorStackSize ),
                    __kmp_msg_null
                );
            }; // if
        #endif /* _POSIX_THREAD_ATTR_STACKSIZE */
        if ( status == EAGAIN ) {
            __kmp_msg(
                kmp_ms_fatal,
                KMP_MSG( NoResourcesForMonitorThread ),
                KMP_ERR( status ),
                KMP_HNT( DecreaseNumberOfThreadsInUse ),
                __kmp_msg_null
            );
        }; // if
        KMP_SYSFAIL( "pthread_create", status );
    }; // if

    th->th.th_info.ds.ds_thread = handle;

    #if KMP_REAL_TIME_FIX
        // Wait for the monitor thread is really started and set its *priority*.
        // (t_value was seeded to -1 above; the monitor changes it once running.)
        KMP_DEBUG_ASSERT( sizeof( kmp_uint32 ) == sizeof( __kmp_global.g.g_time.dt.t_value ) );
        __kmp_wait_yield_4(
            (kmp_uint32 volatile *) & __kmp_global.g.g_time.dt.t_value, -1, & __kmp_neq_4, NULL
        );
    #endif // KMP_REAL_TIME_FIX

    #ifdef KMP_THREAD_ATTR
        status = pthread_attr_destroy( & thread_attr );
        if ( status != 0 ) {
            __kmp_msg( //
                kmp_ms_warning,
                KMP_MSG( CantDestroyThreadAttrs ),
                KMP_ERR( status ),
                __kmp_msg_null
            );
        }; // if
    #endif

    KMP_MB();       /* Flush all pending memory write invalidates. */

    KA_TRACE( 10, ( "__kmp_create_monitor: monitor created %#.8lx\n", th->th.th_info.ds.ds_thread ) );

} // __kmp_create_monitor
1261
/*
 * Terminate the calling thread, encoding exit_status as the value returned
 * to any joiner via pthread_join().
 */
void
__kmp_exit_thread(
    int exit_status
) {
    void *retval = (void *)(intptr_t) exit_status;  // widen int -> pointer-sized exit value
    pthread_exit( retval );
} // __kmp_exit_thread
1268
Jim Cownie07ea89f2014-09-03 11:10:54 +00001269void __kmp_resume_monitor();
1270
Jim Cownie5e8470a2013-09-27 10:38:44 +00001271void
1272__kmp_reap_monitor( kmp_info_t *th )
1273{
Jonathan Peyton7c4d66d2015-06-08 20:01:14 +00001274 int status;
Jim Cownie5e8470a2013-09-27 10:38:44 +00001275 void *exit_val;
1276
1277 KA_TRACE( 10, ("__kmp_reap_monitor: try to reap monitor thread with handle %#.8lx\n",
1278 th->th.th_info.ds.ds_thread ) );
1279
1280 // If monitor has been created, its tid and gtid should be KMP_GTID_MONITOR.
1281 // If both tid and gtid are 0, it means the monitor did not ever start.
1282 // If both tid and gtid are KMP_GTID_DNE, the monitor has been shut down.
1283 KMP_DEBUG_ASSERT( th->th.th_info.ds.ds_tid == th->th.th_info.ds.ds_gtid );
1284 if ( th->th.th_info.ds.ds_gtid != KMP_GTID_MONITOR ) {
1285 return;
1286 }; // if
1287
1288 KMP_MB(); /* Flush all pending memory write invalidates. */
1289
1290
1291 /* First, check to see whether the monitor thread exists. This could prevent a hang,
1292 but if the monitor dies after the pthread_kill call and before the pthread_join
1293 call, it will still hang. */
1294
1295 status = pthread_kill( th->th.th_info.ds.ds_thread, 0 );
1296 if (status == ESRCH) {
1297
1298 KA_TRACE( 10, ("__kmp_reap_monitor: monitor does not exist, returning\n") );
1299
1300 } else
1301 {
Jim Cownie07ea89f2014-09-03 11:10:54 +00001302 __kmp_resume_monitor(); // Wake up the monitor thread
Jim Cownie5e8470a2013-09-27 10:38:44 +00001303 status = pthread_join( th->th.th_info.ds.ds_thread, & exit_val);
1304 if (exit_val != th) {
1305 __kmp_msg(
1306 kmp_ms_fatal,
1307 KMP_MSG( ReapMonitorError ),
1308 KMP_ERR( status ),
1309 __kmp_msg_null
1310 );
1311 }
1312 }
1313
1314 th->th.th_info.ds.ds_tid = KMP_GTID_DNE;
1315 th->th.th_info.ds.ds_gtid = KMP_GTID_DNE;
1316
1317 KA_TRACE( 10, ("__kmp_reap_monitor: done reaping monitor thread with handle %#.8lx\n",
1318 th->th.th_info.ds.ds_thread ) );
1319
1320 KMP_MB(); /* Flush all pending memory write invalidates. */
1321
1322}
1323
1324void
1325__kmp_reap_worker( kmp_info_t *th )
1326{
1327 int status;
1328 void *exit_val;
1329
1330 KMP_MB(); /* Flush all pending memory write invalidates. */
1331
1332 KA_TRACE( 10, ("__kmp_reap_worker: try to reap T#%d\n", th->th.th_info.ds.ds_gtid ) );
1333
1334 /* First, check to see whether the worker thread exists. This could prevent a hang,
1335 but if the worker dies after the pthread_kill call and before the pthread_join
1336 call, it will still hang. */
1337
1338 {
1339 status = pthread_kill( th->th.th_info.ds.ds_thread, 0 );
1340 if (status == ESRCH) {
1341 KA_TRACE( 10, ("__kmp_reap_worker: worker T#%d does not exist, returning\n",
1342 th->th.th_info.ds.ds_gtid ) );
1343 }
1344 else {
1345 KA_TRACE( 10, ("__kmp_reap_worker: try to join with worker T#%d\n",
1346 th->th.th_info.ds.ds_gtid ) );
1347
1348 status = pthread_join( th->th.th_info.ds.ds_thread, & exit_val);
1349#ifdef KMP_DEBUG
1350 /* Don't expose these to the user until we understand when they trigger */
1351 if ( status != 0 ) {
1352 __kmp_msg(
1353 kmp_ms_fatal,
1354 KMP_MSG( ReapWorkerError ),
1355 KMP_ERR( status ),
1356 __kmp_msg_null
1357 );
1358 }
1359 if ( exit_val != th ) {
1360 KA_TRACE( 10, ( "__kmp_reap_worker: worker T#%d did not reap properly, "
1361 "exit_val = %p\n",
1362 th->th.th_info.ds.ds_gtid, exit_val ) );
1363 }
1364#endif /* KMP_DEBUG */
1365 }
1366 }
1367
1368 KA_TRACE( 10, ("__kmp_reap_worker: done reaping T#%d\n", th->th.th_info.ds.ds_gtid ) );
1369
1370 KMP_MB(); /* Flush all pending memory write invalidates. */
1371}
1372
1373
1374/* ------------------------------------------------------------------------ */
1375/* ------------------------------------------------------------------------ */
1376
1377#if KMP_HANDLE_SIGNALS
1378
1379
/*
 * Deliberately empty signal handler.  Installed where a SIG_IGN-like effect
 * is wanted but a real function pointer is needed so it can be recognized
 * later (see __kmp_remove_one_handler).
 */
static void
__kmp_null_handler( int signo )
{
    // Do nothing, for doing SIG_IGN-type actions.
} // __kmp_null_handler
1385
1386
/*
 * Stage-1 termination signal handler installed by __kmp_install_signals().
 * For known fatal signals it records the signal number in g_abort and raises
 * g_done so the monitor/workers shut the library down; the actual re-raise of
 * the signal happens later in __kmp_launch_monitor.  Runs in signal context,
 * so it only touches flag words (via TCW_4) and memory barriers.
 */
static void
__kmp_team_handler( int signo )
{
    if ( __kmp_global.g.g_abort == 0 ) {
        /* Stage 1 signal handler, let's shut down all of the threads */
        #ifdef KMP_DEBUG
            __kmp_debug_printf( "__kmp_team_handler: caught signal = %d\n", signo );
        #endif
        switch ( signo ) {
            case SIGHUP  :
            case SIGINT  :
            case SIGQUIT :
            case SIGILL  :
            case SIGABRT :
            case SIGFPE  :
            case SIGBUS  :
            case SIGSEGV :
            #ifdef SIGSYS
            case SIGSYS :
            #endif
            case SIGTERM :
                if ( __kmp_debug_buf ) {
                    __kmp_dump_debug_buffer( );
                }; // if
                // Order matters: publish the abort reason before the done
                // flag, with a barrier after each store.
                KMP_MB();       // Flush all pending memory write invalidates.
                TCW_4( __kmp_global.g.g_abort, signo );
                KMP_MB();       // Flush all pending memory write invalidates.
                TCW_4( __kmp_global.g.g_done, TRUE );
                KMP_MB();       // Flush all pending memory write invalidates.
                break;
            default:
                #ifdef KMP_DEBUG
                    __kmp_debug_printf( "__kmp_team_handler: unknown signal type" );
                #endif
                break;
        }; // switch
    }; // if
} // __kmp_team_handler
1425
1426
1427static
1428void __kmp_sigaction( int signum, const struct sigaction * act, struct sigaction * oldact ) {
1429 int rc = sigaction( signum, act, oldact );
1430 KMP_CHECK_SYSFAIL_ERRNO( "sigaction", rc );
1431}
1432
1433
/*
 * Install (or just record) the handler for one signal.
 *
 * sig           signal number.
 * handler_func  handler to install (normally __kmp_team_handler).
 * parallel_init non-zero => actually install; zero => only save the current
 *               (initial/system) handler into __kmp_sighldrs[sig] so a later
 *               install can detect whether the user overrode it.
 *
 * A signal is added to __kmp_sigset only when our handler was really
 * installed; if the user had already installed their own handler, theirs is
 * put back untouched.
 */
static void
__kmp_install_one_handler( int sig, sig_func_t handler_func, int parallel_init )
{
    KMP_MB();       // Flush all pending memory write invalidates.
    KB_TRACE( 60, ( "__kmp_install_one_handler( %d, ..., %d )\n", sig, parallel_init ) );
    if ( parallel_init ) {
        struct sigaction new_action;
        struct sigaction old_action;
        new_action.sa_handler = handler_func;
        new_action.sa_flags = 0;
        // Block every signal while the handler runs.
        sigfillset( & new_action.sa_mask );
        __kmp_sigaction( sig, & new_action, & old_action );
        if ( old_action.sa_handler == __kmp_sighldrs[ sig ].sa_handler ) {
            // Handler was still the one saved at startup: ours is now active.
            sigaddset( & __kmp_sigset, sig );
        } else {
            // Restore/keep user's handler if one previously installed.
            __kmp_sigaction( sig, & old_action, NULL );
        }; // if
    } else {
        // Save initial/system signal handlers to see if user handlers installed.
        __kmp_sigaction( sig, NULL, & __kmp_sighldrs[ sig ] );
    }; // if
    KMP_MB();       // Flush all pending memory write invalidates.
} // __kmp_install_one_handler
1458
1459
1460static void
1461__kmp_remove_one_handler( int sig )
1462{
1463 KB_TRACE( 60, ( "__kmp_remove_one_handler( %d )\n", sig ) );
1464 if ( sigismember( & __kmp_sigset, sig ) ) {
1465 struct sigaction old;
1466 KMP_MB(); // Flush all pending memory write invalidates.
1467 __kmp_sigaction( sig, & __kmp_sighldrs[ sig ], & old );
1468 if ( ( old.sa_handler != __kmp_team_handler ) && ( old.sa_handler != __kmp_null_handler ) ) {
1469 // Restore the users signal handler.
1470 KB_TRACE( 10, ( "__kmp_remove_one_handler: oops, not our handler, restoring: sig=%d\n", sig ) );
1471 __kmp_sigaction( sig, & old, NULL );
1472 }; // if
1473 sigdelset( & __kmp_sigset, sig );
1474 KMP_MB(); // Flush all pending memory write invalidates.
1475 }; // if
1476} // __kmp_remove_one_handler
1477
1478
1479void
1480__kmp_install_signals( int parallel_init )
1481{
1482 KB_TRACE( 10, ( "__kmp_install_signals( %d )\n", parallel_init ) );
1483 if ( __kmp_handle_signals || ! parallel_init ) {
1484 // If ! parallel_init, we do not install handlers, just save original handlers.
1485 // Let us do it even __handle_signals is 0.
1486 sigemptyset( & __kmp_sigset );
1487 __kmp_install_one_handler( SIGHUP, __kmp_team_handler, parallel_init );
1488 __kmp_install_one_handler( SIGINT, __kmp_team_handler, parallel_init );
1489 __kmp_install_one_handler( SIGQUIT, __kmp_team_handler, parallel_init );
1490 __kmp_install_one_handler( SIGILL, __kmp_team_handler, parallel_init );
1491 __kmp_install_one_handler( SIGABRT, __kmp_team_handler, parallel_init );
1492 __kmp_install_one_handler( SIGFPE, __kmp_team_handler, parallel_init );
1493 __kmp_install_one_handler( SIGBUS, __kmp_team_handler, parallel_init );
1494 __kmp_install_one_handler( SIGSEGV, __kmp_team_handler, parallel_init );
1495 #ifdef SIGSYS
1496 __kmp_install_one_handler( SIGSYS, __kmp_team_handler, parallel_init );
1497 #endif // SIGSYS
1498 __kmp_install_one_handler( SIGTERM, __kmp_team_handler, parallel_init );
1499 #ifdef SIGPIPE
1500 __kmp_install_one_handler( SIGPIPE, __kmp_team_handler, parallel_init );
1501 #endif // SIGPIPE
1502 }; // if
1503} // __kmp_install_signals
1504
1505
1506void
1507__kmp_remove_signals( void )
1508{
1509 int sig;
1510 KB_TRACE( 10, ( "__kmp_remove_signals()\n" ) );
1511 for ( sig = 1; sig < NSIG; ++ sig ) {
1512 __kmp_remove_one_handler( sig );
1513 }; // for sig
1514} // __kmp_remove_signals
1515
1516
1517#endif // KMP_HANDLE_SIGNALS
1518
1519/* ------------------------------------------------------------------------ */
1520/* ------------------------------------------------------------------------ */
1521
/*
 * Restore the thread's cancelability to new_state.  No-op unless
 * KMP_CANCEL_THREADS is defined.  Debug builds assert that the thread was
 * previously disabled (i.e. __kmp_disable was called first).
 */
void
__kmp_enable( int new_state )
{
    #ifdef KMP_CANCEL_THREADS
        int old_state;
        int status = pthread_setcancelstate( new_state, & old_state );
        KMP_CHECK_SYSFAIL( "pthread_setcancelstate", status );
        KMP_DEBUG_ASSERT( old_state == PTHREAD_CANCEL_DISABLE );
    #endif
}
1532
/*
 * Disable pthread cancellation for the calling thread; the prior state is
 * returned through *old_state so __kmp_enable can restore it.  No-op unless
 * KMP_CANCEL_THREADS is defined.
 */
void
__kmp_disable( int * old_state )
{
    #ifdef KMP_CANCEL_THREADS
        int status = pthread_setcancelstate( PTHREAD_CANCEL_DISABLE, old_state );
        KMP_CHECK_SYSFAIL( "pthread_setcancelstate", status );
    #endif
}
1542
1543/* ------------------------------------------------------------------------ */
1544/* ------------------------------------------------------------------------ */
1545
/* pthread_atfork() "prepare" hook: runs in the parent just before fork().
   The library intentionally does nothing here. */
static void
__kmp_atfork_prepare (void)
{
    /* nothing to do */
}
1551
/* pthread_atfork() "parent" hook: runs in the parent after fork().
   The library intentionally does nothing here. */
static void
__kmp_atfork_parent (void)
{
    /* nothing to do */
}
1557
1558/*
1559 Reset the library so execution in the child starts "all over again" with
1560 clean data structures in initial states. Don't worry about freeing memory
1561 allocated by parent, just abandon it to be safe.
1562*/
1563static void
1564__kmp_atfork_child (void)
1565{
1566 /* TODO make sure this is done right for nested/sibling */
1567 // ATT: Memory leaks are here? TODO: Check it and fix.
1568 /* KMP_ASSERT( 0 ); */
1569
1570 ++__kmp_fork_count;
1571
1572 __kmp_init_runtime = FALSE;
1573 __kmp_init_monitor = 0;
1574 __kmp_init_parallel = FALSE;
1575 __kmp_init_middle = FALSE;
1576 __kmp_init_serial = FALSE;
1577 TCW_4(__kmp_init_gtid, FALSE);
1578 __kmp_init_common = FALSE;
1579
1580 TCW_4(__kmp_init_user_locks, FALSE);
Andrey Churbanov5c56fb52015-02-20 18:05:17 +00001581#if ! KMP_USE_DYNAMIC_LOCK
Jim Cownie07ea89f2014-09-03 11:10:54 +00001582 __kmp_user_lock_table.used = 1;
Jim Cownie5e8470a2013-09-27 10:38:44 +00001583 __kmp_user_lock_table.allocated = 0;
1584 __kmp_user_lock_table.table = NULL;
1585 __kmp_lock_blocks = NULL;
Andrey Churbanov5c56fb52015-02-20 18:05:17 +00001586#endif
Jim Cownie5e8470a2013-09-27 10:38:44 +00001587
1588 __kmp_all_nth = 0;
1589 TCW_4(__kmp_nth, 0);
1590
1591 /* Must actually zero all the *cache arguments passed to __kmpc_threadprivate here
1592 so threadprivate doesn't use stale data */
1593 KA_TRACE( 10, ( "__kmp_atfork_child: checking cache address list %p\n",
1594 __kmp_threadpriv_cache_list ) );
1595
1596 while ( __kmp_threadpriv_cache_list != NULL ) {
1597
1598 if ( *__kmp_threadpriv_cache_list -> addr != NULL ) {
1599 KC_TRACE( 50, ( "__kmp_atfork_child: zeroing cache at address %p\n",
1600 &(*__kmp_threadpriv_cache_list -> addr) ) );
1601
1602 *__kmp_threadpriv_cache_list -> addr = NULL;
1603 }
1604 __kmp_threadpriv_cache_list = __kmp_threadpriv_cache_list -> next;
1605 }
1606
1607 __kmp_init_runtime = FALSE;
1608
1609 /* reset statically initialized locks */
1610 __kmp_init_bootstrap_lock( &__kmp_initz_lock );
1611 __kmp_init_bootstrap_lock( &__kmp_stdio_lock );
1612 __kmp_init_bootstrap_lock( &__kmp_console_lock );
1613
1614 /* This is necessary to make sure no stale data is left around */
1615 /* AC: customers complain that we use unsafe routines in the atfork
1616 handler. Mathworks: dlsym() is unsafe. We call dlsym and dlopen
1617 in dynamic_link when check the presence of shared tbbmalloc library.
1618 Suggestion is to make the library initialization lazier, similar
1619 to what done for __kmpc_begin(). */
1620 // TODO: synchronize all static initializations with regular library
1621 // startup; look at kmp_global.c and etc.
1622 //__kmp_internal_begin ();
1623
1624}
1625
1626void
1627__kmp_register_atfork(void) {
1628 if ( __kmp_need_register_atfork ) {
1629 int status = pthread_atfork( __kmp_atfork_prepare, __kmp_atfork_parent, __kmp_atfork_child );
1630 KMP_CHECK_SYSFAIL( "pthread_atfork", status );
1631 __kmp_need_register_atfork = FALSE;
1632 }
1633}
1634
1635void
1636__kmp_suspend_initialize( void )
1637{
1638 int status;
1639 status = pthread_mutexattr_init( &__kmp_suspend_mutex_attr );
1640 KMP_CHECK_SYSFAIL( "pthread_mutexattr_init", status );
1641 status = pthread_condattr_init( &__kmp_suspend_cond_attr );
1642 KMP_CHECK_SYSFAIL( "pthread_condattr_init", status );
1643}
1644
/*
 * Lazily create the per-thread suspension condvar/mutex pair.
 * th_suspend_init_count is compared against __kmp_fork_count so that a forked
 * child (which bumps __kmp_fork_count in __kmp_atfork_child) re-creates the
 * objects instead of reusing the parent's.
 */
static void
__kmp_suspend_initialize_thread( kmp_info_t *th )
{
    if ( th->th.th_suspend_init_count <= __kmp_fork_count ) {
        /* this means we haven't initialized the suspension pthread objects for this thread
           in this instance of the process */
        int status;
        status = pthread_cond_init( &th->th.th_suspend_cv.c_cond, &__kmp_suspend_cond_attr );
        KMP_CHECK_SYSFAIL( "pthread_cond_init", status );
        status = pthread_mutex_init( &th->th.th_suspend_mx.m_mutex, & __kmp_suspend_mutex_attr );
        KMP_CHECK_SYSFAIL( "pthread_mutex_init", status );
        // Volatile store last, so the count is only published after both
        // objects are fully initialized.
        *(volatile int*)&th->th.th_suspend_init_count = __kmp_fork_count + 1;
    };
}
1659
1660void
1661__kmp_suspend_uninitialize_thread( kmp_info_t *th )
1662{
1663 if(th->th.th_suspend_init_count > __kmp_fork_count) {
1664 /* this means we have initialize the suspension pthread objects for this thread
1665 in this instance of the process */
1666 int status;
1667
1668 status = pthread_cond_destroy( &th->th.th_suspend_cv.c_cond );
1669 if ( status != 0 && status != EBUSY ) {
1670 KMP_SYSFAIL( "pthread_cond_destroy", status );
1671 };
1672 status = pthread_mutex_destroy( &th->th.th_suspend_mx.m_mutex );
1673 if ( status != 0 && status != EBUSY ) {
1674 KMP_SYSFAIL( "pthread_mutex_destroy", status );
1675 };
1676 --th->th.th_suspend_init_count;
1677 KMP_DEBUG_ASSERT(th->th.th_suspend_init_count == __kmp_fork_count);
1678 }
1679}
1680
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001681/* This routine puts the calling thread to sleep after setting the
1682 * sleep bit for the indicated flag variable to true.
Jim Cownie5e8470a2013-09-27 10:38:44 +00001683 */
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001684template <class C>
1685static inline void __kmp_suspend_template( int th_gtid, C *flag )
Jim Cownie5e8470a2013-09-27 10:38:44 +00001686{
Jonathan Peyton45be4502015-08-11 21:36:41 +00001687 KMP_TIME_DEVELOPER_BLOCK(USER_suspend);
Jim Cownie5e8470a2013-09-27 10:38:44 +00001688 kmp_info_t *th = __kmp_threads[th_gtid];
1689 int status;
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001690 typename C::flag_t old_spin;
Jim Cownie5e8470a2013-09-27 10:38:44 +00001691
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001692 KF_TRACE( 30, ("__kmp_suspend_template: T#%d enter for flag = %p\n", th_gtid, flag->get() ) );
Jim Cownie5e8470a2013-09-27 10:38:44 +00001693
1694 __kmp_suspend_initialize_thread( th );
1695
1696 status = pthread_mutex_lock( &th->th.th_suspend_mx.m_mutex );
1697 KMP_CHECK_SYSFAIL( "pthread_mutex_lock", status );
1698
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001699 KF_TRACE( 10, ( "__kmp_suspend_template: T#%d setting sleep bit for spin(%p)\n",
1700 th_gtid, flag->get() ) );
Jim Cownie5e8470a2013-09-27 10:38:44 +00001701
1702 /* TODO: shouldn't this use release semantics to ensure that __kmp_suspend_initialize_thread
1703 gets called first?
1704 */
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001705 old_spin = flag->set_sleeping();
Jim Cownie5e8470a2013-09-27 10:38:44 +00001706
Jonathan Peytone03b62f2015-10-08 18:49:40 +00001707 KF_TRACE( 5, ( "__kmp_suspend_template: T#%d set sleep bit for spin(%p)==%x, was %x\n",
1708 th_gtid, flag->get(), *(flag->get()), old_spin ) );
Jim Cownie5e8470a2013-09-27 10:38:44 +00001709
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001710 if ( flag->done_check_val(old_spin) ) {
1711 old_spin = flag->unset_sleeping();
1712 KF_TRACE( 5, ( "__kmp_suspend_template: T#%d false alarm, reset sleep bit for spin(%p)\n",
1713 th_gtid, flag->get()) );
Jim Cownie5e8470a2013-09-27 10:38:44 +00001714 } else {
Jim Cownie5e8470a2013-09-27 10:38:44 +00001715 /* Encapsulate in a loop as the documentation states that this may
1716 * "with low probability" return when the condition variable has
1717 * not been signaled or broadcast
1718 */
1719 int deactivated = FALSE;
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001720 TCW_PTR(th->th.th_sleep_loc, (void *)flag);
1721 while ( flag->is_sleeping() ) {
Jim Cownie5e8470a2013-09-27 10:38:44 +00001722#ifdef DEBUG_SUSPEND
1723 char buffer[128];
1724 __kmp_suspend_count++;
1725 __kmp_print_cond( buffer, &th->th.th_suspend_cv );
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001726 __kmp_printf( "__kmp_suspend_template: suspending T#%d: %s\n", th_gtid, buffer );
Jim Cownie5e8470a2013-09-27 10:38:44 +00001727#endif
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001728 // Mark the thread as no longer active (only in the first iteration of the loop).
Jim Cownie5e8470a2013-09-27 10:38:44 +00001729 if ( ! deactivated ) {
1730 th->th.th_active = FALSE;
1731 if ( th->th.th_active_in_pool ) {
1732 th->th.th_active_in_pool = FALSE;
1733 KMP_TEST_THEN_DEC32(
1734 (kmp_int32 *) &__kmp_thread_pool_active_nth );
1735 KMP_DEBUG_ASSERT( TCR_4(__kmp_thread_pool_active_nth) >= 0 );
1736 }
1737 deactivated = TRUE;
1738
1739
1740 }
1741
1742#if USE_SUSPEND_TIMEOUT
1743 struct timespec now;
1744 struct timeval tval;
1745 int msecs;
1746
1747 status = gettimeofday( &tval, NULL );
1748 KMP_CHECK_SYSFAIL_ERRNO( "gettimeofday", status );
1749 TIMEVAL_TO_TIMESPEC( &tval, &now );
1750
1751 msecs = (4*__kmp_dflt_blocktime) + 200;
1752 now.tv_sec += msecs / 1000;
1753 now.tv_nsec += (msecs % 1000)*1000;
1754
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001755 KF_TRACE( 15, ( "__kmp_suspend_template: T#%d about to perform pthread_cond_timedwait\n",
Jim Cownie5e8470a2013-09-27 10:38:44 +00001756 th_gtid ) );
1757 status = pthread_cond_timedwait( &th->th.th_suspend_cv.c_cond, &th->th.th_suspend_mx.m_mutex, & now );
1758#else
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001759 KF_TRACE( 15, ( "__kmp_suspend_template: T#%d about to perform pthread_cond_wait\n",
Jonathan Peyton1bd61b42015-10-08 19:44:16 +00001760 th_gtid ) );
Jim Cownie5e8470a2013-09-27 10:38:44 +00001761 status = pthread_cond_wait( &th->th.th_suspend_cv.c_cond, &th->th.th_suspend_mx.m_mutex );
1762#endif
1763
1764 if ( (status != 0) && (status != EINTR) && (status != ETIMEDOUT) ) {
1765 KMP_SYSFAIL( "pthread_cond_wait", status );
1766 }
1767#ifdef KMP_DEBUG
1768 if (status == ETIMEDOUT) {
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001769 if ( flag->is_sleeping() ) {
1770 KF_TRACE( 100, ( "__kmp_suspend_template: T#%d timeout wakeup\n", th_gtid ) );
Jim Cownie5e8470a2013-09-27 10:38:44 +00001771 } else {
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001772 KF_TRACE( 2, ( "__kmp_suspend_template: T#%d timeout wakeup, sleep bit not set!\n",
Jim Cownie5e8470a2013-09-27 10:38:44 +00001773 th_gtid ) );
1774 }
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001775 } else if ( flag->is_sleeping() ) {
1776 KF_TRACE( 100, ( "__kmp_suspend_template: T#%d spurious wakeup\n", th_gtid ) );
Jim Cownie5e8470a2013-09-27 10:38:44 +00001777 }
1778#endif
Jim Cownie5e8470a2013-09-27 10:38:44 +00001779 } // while
1780
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001781 // Mark the thread as active again (if it was previous marked as inactive)
Jim Cownie5e8470a2013-09-27 10:38:44 +00001782 if ( deactivated ) {
1783 th->th.th_active = TRUE;
1784 if ( TCR_4(th->th.th_in_pool) ) {
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001785 KMP_TEST_THEN_INC32( (kmp_int32 *) &__kmp_thread_pool_active_nth );
Jim Cownie5e8470a2013-09-27 10:38:44 +00001786 th->th.th_active_in_pool = TRUE;
1787 }
1788 }
1789 }
1790
1791#ifdef DEBUG_SUSPEND
1792 {
1793 char buffer[128];
1794 __kmp_print_cond( buffer, &th->th.th_suspend_cv);
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001795 __kmp_printf( "__kmp_suspend_template: T#%d has awakened: %s\n", th_gtid, buffer );
Jim Cownie5e8470a2013-09-27 10:38:44 +00001796 }
1797#endif
1798
1799
1800 status = pthread_mutex_unlock( &th->th.th_suspend_mx.m_mutex );
1801 KMP_CHECK_SYSFAIL( "pthread_mutex_unlock", status );
1802
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001803 KF_TRACE( 30, ("__kmp_suspend_template: T#%d exit\n", th_gtid ) );
1804}
1805
1806void __kmp_suspend_32(int th_gtid, kmp_flag_32 *flag) {
1807 __kmp_suspend_template(th_gtid, flag);
1808}
1809void __kmp_suspend_64(int th_gtid, kmp_flag_64 *flag) {
1810 __kmp_suspend_template(th_gtid, flag);
1811}
1812void __kmp_suspend_oncore(int th_gtid, kmp_flag_oncore *flag) {
1813 __kmp_suspend_template(th_gtid, flag);
Jim Cownie5e8470a2013-09-27 10:38:44 +00001814}
1815
1816
/* This routine signals the thread specified by target_gtid to wake up
 * after setting the sleep bit indicated by the flag argument to FALSE.
 * The target thread must already have called __kmp_suspend_template()
 *
 * The whole operation is performed while holding the target thread's
 * suspend mutex so it cannot race with the suspend path.  If no flag is
 * passed, the flag the sleeping thread registered in th_sleep_loc is used.
 */
template <class C>
static inline void __kmp_resume_template( int target_gtid, C *flag )
{
    KMP_TIME_DEVELOPER_BLOCK(USER_resume);
    kmp_info_t *th = __kmp_threads[target_gtid];
    int status;

#ifdef KMP_DEBUG
    int gtid = TCR_4(__kmp_init_gtid) ? __kmp_get_gtid() : -1;
#endif

    KF_TRACE( 30, ( "__kmp_resume_template: T#%d wants to wakeup T#%d enter\n", gtid, target_gtid ) );
    KMP_DEBUG_ASSERT( gtid != target_gtid );

    __kmp_suspend_initialize_thread( th );

    status = pthread_mutex_lock( &th->th.th_suspend_mx.m_mutex );
    KMP_CHECK_SYSFAIL( "pthread_mutex_lock", status );

    // No flag supplied: fall back to the location the target thread stored
    // when it went to sleep.
    if (!flag) {
        flag = (C *)th->th.th_sleep_loc;
    }

    // Still no flag: the target never registered a sleep location, so it is
    // already awake; release the mutex and leave.
    if (!flag) {
        KF_TRACE( 5, ( "__kmp_resume_template: T#%d exiting, thread T#%d already awake: flag(%p)\n",
                       gtid, target_gtid, NULL ) );
        status = pthread_mutex_unlock( &th->th.th_suspend_mx.m_mutex );
        KMP_CHECK_SYSFAIL( "pthread_mutex_unlock", status );
        return;
    }
    else { // if multiple threads are sleeping, flag should be internally referring to a specific thread here
        // Atomically clear the sleep bit; if it was already clear, the target
        // woke up on its own and no signal is needed.
        typename C::flag_t old_spin = flag->unset_sleeping();
        if ( ! flag->is_sleeping_val(old_spin) ) {
            KF_TRACE( 5, ( "__kmp_resume_template: T#%d exiting, thread T#%d already awake: flag(%p): "
                           "%u => %u\n",
                           gtid, target_gtid, flag->get(), old_spin, *flag->get() ) );

            status = pthread_mutex_unlock( &th->th.th_suspend_mx.m_mutex );
            KMP_CHECK_SYSFAIL( "pthread_mutex_unlock", status );
            return;
        }
        KF_TRACE( 5, ( "__kmp_resume_template: T#%d about to wakeup T#%d, reset sleep bit for flag's loc(%p): "
                       "%u => %u\n",
                       gtid, target_gtid, flag->get(), old_spin, *flag->get() ) );
    }
    // The target is committed to waking; drop its registered sleep location.
    TCW_PTR(th->th.th_sleep_loc, NULL);


#ifdef DEBUG_SUSPEND
    {
        char buffer[128];
        __kmp_print_cond( buffer, &th->th.th_suspend_cv );
        __kmp_printf( "__kmp_resume_template: T#%d resuming T#%d: %s\n", gtid, target_gtid, buffer );
    }
#endif


    // Signal while still holding the mutex so the wakeup cannot be lost
    // between the flag update and the cond wait in the suspend path.
    status = pthread_cond_signal( &th->th.th_suspend_cv.c_cond );
    KMP_CHECK_SYSFAIL( "pthread_cond_signal", status );
    status = pthread_mutex_unlock( &th->th.th_suspend_mx.m_mutex );
    KMP_CHECK_SYSFAIL( "pthread_mutex_unlock", status );
    KF_TRACE( 30, ( "__kmp_resume_template: T#%d exiting after signaling wake up for T#%d\n",
                    gtid, target_gtid ) );
}
1885
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001886void __kmp_resume_32(int target_gtid, kmp_flag_32 *flag) {
1887 __kmp_resume_template(target_gtid, flag);
1888}
1889void __kmp_resume_64(int target_gtid, kmp_flag_64 *flag) {
1890 __kmp_resume_template(target_gtid, flag);
1891}
1892void __kmp_resume_oncore(int target_gtid, kmp_flag_oncore *flag) {
1893 __kmp_resume_template(target_gtid, flag);
1894}
1895
/* Wake the monitor thread, which waits on the global __kmp_wait_cv /
   __kmp_wait_mx pair rather than on a per-thread suspend condition.
   The signal is issued while holding the mutex so it cannot be lost. */
void
__kmp_resume_monitor()
{
    int status;
#ifdef KMP_DEBUG
    int gtid = TCR_4(__kmp_init_gtid) ? __kmp_get_gtid() : -1;
    KF_TRACE( 30, ( "__kmp_resume_monitor: T#%d wants to wakeup T#%d enter\n",
                    gtid, KMP_GTID_MONITOR ) );
    KMP_DEBUG_ASSERT( gtid != KMP_GTID_MONITOR );
#endif
    status = pthread_mutex_lock( &__kmp_wait_mx.m_mutex );
    KMP_CHECK_SYSFAIL( "pthread_mutex_lock", status );
#ifdef DEBUG_SUSPEND
    {
        // NOTE(review): this block uses 'gtid', which is only declared under
        // KMP_DEBUG above -- presumably DEBUG_SUSPEND builds also define
        // KMP_DEBUG; confirm before enabling DEBUG_SUSPEND alone.
        char buffer[128];
        __kmp_print_cond( buffer, &__kmp_wait_cv.c_cond );
        __kmp_printf( "__kmp_resume_monitor: T#%d resuming T#%d: %s\n", gtid, KMP_GTID_MONITOR, buffer );
    }
#endif
    status = pthread_cond_signal( &__kmp_wait_cv.c_cond );
    KMP_CHECK_SYSFAIL( "pthread_cond_signal", status );
    status = pthread_mutex_unlock( &__kmp_wait_mx.m_mutex );
    KMP_CHECK_SYSFAIL( "pthread_mutex_unlock", status );
    KF_TRACE( 30, ( "__kmp_resume_monitor: T#%d exiting after signaling wake up for T#%d\n",
                    gtid, KMP_GTID_MONITOR ) );
}
Jim Cownie5e8470a2013-09-27 10:38:44 +00001922
1923/* ------------------------------------------------------------------------ */
1924/* ------------------------------------------------------------------------ */
1925
1926void
1927__kmp_yield( int cond )
1928{
1929 if (cond && __kmp_yielding_on) {
1930 sched_yield();
1931 }
1932}
1933
1934/* ------------------------------------------------------------------------ */
1935/* ------------------------------------------------------------------------ */
1936
/* Store this thread's global thread id (gtid) in the pthread TLS slot.
   The stored value is biased by +1 so that pthread_getspecific() returning
   0 ("no value set") is distinguishable from a legitimate gtid of 0. */
void
__kmp_gtid_set_specific( int gtid )
{
    int status;
    KMP_ASSERT( __kmp_init_runtime );
    status = pthread_setspecific( __kmp_gtid_threadprivate_key, (void*)(intptr_t)(gtid+1) );
    KMP_CHECK_SYSFAIL( "pthread_setspecific", status );
}
1945
1946int
1947__kmp_gtid_get_specific()
1948{
1949 int gtid;
1950 if ( !__kmp_init_runtime ) {
1951 KA_TRACE( 50, ("__kmp_get_specific: runtime shutdown, returning KMP_GTID_SHUTDOWN\n" ) );
1952 return KMP_GTID_SHUTDOWN;
1953 }
1954 gtid = (int)(size_t)pthread_getspecific( __kmp_gtid_threadprivate_key );
1955 if ( gtid == 0 ) {
1956 gtid = KMP_GTID_DNE;
1957 }
1958 else {
1959 gtid--;
1960 }
1961 KA_TRACE( 50, ("__kmp_gtid_get_specific: key:%d gtid:%d\n",
1962 __kmp_gtid_threadprivate_key, gtid ));
1963 return gtid;
1964}
1965
1966/* ------------------------------------------------------------------------ */
1967/* ------------------------------------------------------------------------ */
1968
/* Return the CPU time (user + children-user) consumed by this process,
   in seconds.
   Bug fix: times() reports values in clock ticks of 1/sysconf(_SC_CLK_TCK)
   seconds.  The previous code divided by CLOCKS_PER_SEC, which POSIX defines
   only for clock() (and fixes at 1,000,000), so the result was wrong by a
   factor of CLOCKS_PER_SEC / _SC_CLK_TCK (typically 10000x too small). */
double
__kmp_read_cpu_time( void )
{
    struct tms buffer;

    times( & buffer );

    return (buffer.tms_utime + buffer.tms_cutime) / (double) sysconf( _SC_CLK_TCK );
}
1979
/* Populate *info with per-process resource-usage counters obtained from
   getrusage(RUSAGE_SELF).  The struct is zeroed first.
   Returns 0 on success, nonzero if getrusage reported failure. */
int
__kmp_read_system_info( struct kmp_sys_info *info )
{
    int status;
    struct rusage r_usage;

    memset( info, 0, sizeof( *info ) );

    status = getrusage( RUSAGE_SELF, &r_usage);
    KMP_CHECK_SYSFAIL_ERRNO( "getrusage", status );

    info->maxrss = r_usage.ru_maxrss; /* the maximum resident set size utilized (in kilobytes) */
    info->minflt = r_usage.ru_minflt; /* the number of page faults serviced without any I/O */
    info->majflt = r_usage.ru_majflt; /* the number of page faults serviced that required I/O */
    info->nswap = r_usage.ru_nswap; /* the number of times a process was "swapped" out of memory */
    info->inblock = r_usage.ru_inblock; /* the number of times the file system had to perform input */
    info->oublock = r_usage.ru_oublock; /* the number of times the file system had to perform output */
    info->nvcsw = r_usage.ru_nvcsw; /* the number of times a context switch was voluntarily */
    info->nivcsw = r_usage.ru_nivcsw; /* the number of times a context switch was forced */

    return (status != 0);
}
2002
2003/* ------------------------------------------------------------------------ */
2004/* ------------------------------------------------------------------------ */
2005
2006
/* Store in *delta the wall-clock seconds elapsed since the start timestamp
   recorded in __kmp_sys_timer_data.start (set by __kmp_clear_system_time).
   The subtraction is done in nanoseconds (TS2NS) and scaled back to
   seconds. */
void
__kmp_read_system_time( double *delta )
{
    double t_ns;
    struct timeval tval;
    struct timespec stop;
    int status;

    status = gettimeofday( &tval, NULL );
    KMP_CHECK_SYSFAIL_ERRNO( "gettimeofday", status );
    TIMEVAL_TO_TIMESPEC( &tval, &stop );
    t_ns = TS2NS(stop) - TS2NS(__kmp_sys_timer_data.start);
    *delta = (t_ns * 1e-9);
}
2021
/* Reset the system-time baseline: record the current wall-clock time in
   __kmp_sys_timer_data.start for later use by __kmp_read_system_time(). */
void
__kmp_clear_system_time( void )
{
    struct timeval tval;
    int status;
    status = gettimeofday( &tval, NULL );
    KMP_CHECK_SYSFAIL_ERRNO( "gettimeofday", status );
    TIMEVAL_TO_TIMESPEC( &tval, &__kmp_sys_timer_data.start );
}
2031
2032/* ------------------------------------------------------------------------ */
2033/* ------------------------------------------------------------------------ */
2034
2035#ifdef BUILD_TV
2036
/* Record a threadprivate mapping (global address -> per-thread copy) for
   the given thread by pushing a new tv_data node onto the thread's list.
   The first node pushed is also published via the __kmp_tv_key TLS slot.
   NOTE(review): p->type = (void *)1 appears to tag the entry kind --
   confirm the meaning of the constant against the tv_data definition. */
void
__kmp_tv_threadprivate_store( kmp_info_t *th, void *global_addr, void *thread_addr )
{
    struct tv_data *p;

    p = (struct tv_data *) __kmp_allocate( sizeof( *p ) );

    p->u.tp.global_addr = global_addr;
    p->u.tp.thread_addr = thread_addr;

    p->type = (void *) 1;

    // Push onto the head of the thread-local list.
    p->next = th->th.th_local.tv_data;
    th->th.th_local.tv_data = p;

    // First entry for this thread: expose the list head through TLS.
    if ( p->next == 0 ) {
        int rc = pthread_setspecific( __kmp_tv_key, p );
        KMP_CHECK_SYSFAIL( "pthread_setspecific", rc );
    }
}
2057
2058#endif /* BUILD_TV */
2059
2060/* ------------------------------------------------------------------------ */
2061/* ------------------------------------------------------------------------ */
2062
/* Query the number of online logical processors.
   Uses sysconf on Linux/FreeBSD/NetBSD and the Mach host_info API on
   Darwin.  Returns a guess of 2 when the OS reports zero or an error. */
static int
__kmp_get_xproc( void ) {

    int r = 0;

    #if KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD

        r = sysconf( _SC_NPROCESSORS_ONLN );

    #elif KMP_OS_DARWIN

        // Bug C77011 High "OpenMP Threads and number of active cores".

        // Find the number of available CPUs.
        kern_return_t rc;
        host_basic_info_data_t info;
        mach_msg_type_number_t num = HOST_BASIC_INFO_COUNT;
        rc = host_info( mach_host_self(), HOST_BASIC_INFO, (host_info_t) & info, & num );
        if ( rc == 0 && num == HOST_BASIC_INFO_COUNT ) {
            // Cannot use KA_TRACE() here because this code works before trace support is
            // initialized.
            r = info.avail_cpus;
        } else {
            KMP_WARNING( CantGetNumAvailCPU );
            KMP_INFORM( AssumedNumCPU );
        }; // if

    #else

        #error "Unknown or unsupported OS."

    #endif

    return r > 0 ? r : 2; /* guess value of 2 if OS told us 0 */

} // __kmp_get_xproc
2099
/* Open the file at 'path' and scan it with the given scanf-style format.
   Returns the number of items successfully matched (vfscanf's result),
   or 0 if the file cannot be opened.
   Bug fix: the original called va_start() without ever calling va_end() --
   including on the early return when fopen fails -- which is undefined
   behavior per the C standard (7.16.1). */
int
__kmp_read_from_file( char const *path, char const *format, ... )
{
    int result;
    va_list args;

    va_start(args, format);
    FILE *f = fopen(path, "rb");
    if ( f == NULL ) {
        va_end(args);
        return 0;
    }
    result = vfscanf(f, format, args);
    va_end(args);
    fclose(f);

    return result;
}
Jim Cownie5e8470a2013-09-27 10:38:44 +00002115
/* One-time initialization of OS-level runtime state: CPU info, system
   limits (max threads, min stack size), the gtid TLS key, and the global
   monitor wait mutex/condvar pair.  Idempotent: returns immediately if
   already initialized. */
void
__kmp_runtime_initialize( void )
{
    int status;
    pthread_mutexattr_t mutex_attr;
    pthread_condattr_t cond_attr;

    if ( __kmp_init_runtime ) {
        return;
    }; // if

    #if ( KMP_ARCH_X86 || KMP_ARCH_X86_64 )
    if ( ! __kmp_cpuinfo.initialized ) {
        __kmp_query_cpuid( &__kmp_cpuinfo );
    }; // if
    #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

    __kmp_xproc = __kmp_get_xproc();

    if ( sysconf( _SC_THREADS ) ) {

        /* Query the maximum number of threads */
        __kmp_sys_max_nth = sysconf( _SC_THREAD_THREADS_MAX );
        if ( __kmp_sys_max_nth == -1 ) {
            /* Unlimited threads for NPTL */
            __kmp_sys_max_nth = INT_MAX;
        }
        else if ( __kmp_sys_max_nth <= 1 ) {
            /* Can't tell, just use PTHREAD_THREADS_MAX */
            __kmp_sys_max_nth = KMP_MAX_NTH;
        }

        /* Query the minimum stack size */
        __kmp_sys_min_stksize = sysconf( _SC_THREAD_STACK_MIN );
        if ( __kmp_sys_min_stksize <= 1 ) {
            __kmp_sys_min_stksize = KMP_MIN_STKSIZE;
        }
    }

    /* Set up minimum number of threads to switch to TLS gtid */
    __kmp_tls_gtid_min = KMP_TLS_GTID_MIN;


    #ifdef BUILD_TV
    {
        int rc = pthread_key_create( & __kmp_tv_key, 0 );
        KMP_CHECK_SYSFAIL( "pthread_key_create", rc );
    }
    #endif

    // The key destructor (__kmp_internal_end_dest) runs at thread exit to
    // tear down runtime state for the exiting thread.
    status = pthread_key_create( &__kmp_gtid_threadprivate_key, __kmp_internal_end_dest );
    KMP_CHECK_SYSFAIL( "pthread_key_create", status );
    status = pthread_mutexattr_init( & mutex_attr );
    KMP_CHECK_SYSFAIL( "pthread_mutexattr_init", status );
    status = pthread_mutex_init( & __kmp_wait_mx.m_mutex, & mutex_attr );
    KMP_CHECK_SYSFAIL( "pthread_mutex_init", status );
    status = pthread_condattr_init( & cond_attr );
    KMP_CHECK_SYSFAIL( "pthread_condattr_init", status );
    status = pthread_cond_init( & __kmp_wait_cv.c_cond, & cond_attr );
    KMP_CHECK_SYSFAIL( "pthread_cond_init", status );
#if USE_ITT_BUILD
    __kmp_itt_initialize();
#endif /* USE_ITT_BUILD */

    __kmp_init_runtime = TRUE;
}
2182
/* Tear down the OS-level runtime state created by __kmp_runtime_initialize:
   TLS keys, the monitor wait mutex/condvar, ITT and affinity subsystems.
   EBUSY from the destroy calls is tolerated (another thread may still hold
   the object during shutdown). */
void
__kmp_runtime_destroy( void )
{
    int status;

    if ( ! __kmp_init_runtime ) {
        return; // Nothing to do.
    };

#if USE_ITT_BUILD
    __kmp_itt_destroy();
#endif /* USE_ITT_BUILD */

    status = pthread_key_delete( __kmp_gtid_threadprivate_key );
    KMP_CHECK_SYSFAIL( "pthread_key_delete", status );
    #ifdef BUILD_TV
    status = pthread_key_delete( __kmp_tv_key );
    KMP_CHECK_SYSFAIL( "pthread_key_delete", status );
    #endif

    status = pthread_mutex_destroy( & __kmp_wait_mx.m_mutex );
    if ( status != 0 && status != EBUSY ) {
        KMP_SYSFAIL( "pthread_mutex_destroy", status );
    }
    status = pthread_cond_destroy( & __kmp_wait_cv.c_cond );
    if ( status != 0 && status != EBUSY ) {
        KMP_SYSFAIL( "pthread_cond_destroy", status );
    }
    #if KMP_AFFINITY_SUPPORTED
    __kmp_affinity_uninitialize();
    #endif

    __kmp_init_runtime = FALSE;
}
2217
2218
/* Put the thread to sleep for a time period */
/* NOTE: not currently used anywhere */
void
__kmp_thread_sleep( int millis )
{
    // sleep() only has whole-second resolution, so round to nearest second.
    int seconds = ( millis + 500 ) / 1000;
    sleep( seconds );
}
2226
/* Calculate the elapsed wall clock time for the user */
/* Stores the current time in *t as seconds (a double).  The FIX_SGI_CLOCK
   branch uses clock_gettime with nanosecond resolution; the default branch
   uses gettimeofday with microsecond resolution. */
void
__kmp_elapsed( double *t )
{
    int status;
# ifdef FIX_SGI_CLOCK
    struct timespec ts;

    status = clock_gettime( CLOCK_PROCESS_CPUTIME_ID, &ts );
    KMP_CHECK_SYSFAIL_ERRNO( "clock_gettime", status );
    *t = (double) ts.tv_nsec * (1.0 / (double) KMP_NSEC_PER_SEC) +
        (double) ts.tv_sec;
# else
    struct timeval tv;

    status = gettimeofday( & tv, NULL );
    KMP_CHECK_SYSFAIL_ERRNO( "gettimeofday", status );
    *t = (double) tv.tv_usec * (1.0 / (double) KMP_USEC_PER_SEC) +
        (double) tv.tv_sec;
# endif
}
2248
/* Calculate the elapsed wall clock tick for the user */
/* Reports the assumed timer granularity, in seconds, through *t. */
void
__kmp_elapsed_tick( double *t )
{
    const double ticks_per_second = (double) CLOCKS_PER_SEC;
    *t = 1.0 / ticks_per_second;
}
2255
/*
    Determine whether the given address is mapped into the current address space.
    Returns 1 only if the address lies in a readable+writable region
    (Linux/FreeBSD: per /proc/<pid>/maps; Darwin: per a successful vm read).
*/

int
__kmp_is_address_mapped( void * addr ) {

    int found = 0;
    int rc;

    #if KMP_OS_LINUX || KMP_OS_FREEBSD

        /*
            On Linux* OS, read the /proc/<pid>/maps pseudo-file to get all the address ranges mapped
            into the address space.
        */

        char * name = __kmp_str_format( "/proc/%d/maps", getpid() );
        FILE * file = NULL;

        file = fopen( name, "r" );
        KMP_ASSERT( file != NULL );

        for ( ; ; ) {

            void * beginning = NULL;
            void * ending = NULL;
            char perms[ 5 ];

            // Each maps line looks like "start-end perms ..."; parse one line.
            rc = fscanf( file, "%p-%p %4s %*[^\n]\n", & beginning, & ending, perms );
            if ( rc == EOF ) {
                break;
            }; // if
            KMP_ASSERT( rc == 3 && KMP_STRLEN( perms ) == 4 ); // Make sure all fields are read.

            // Ending address is not included in the region, but beginning is.
            if ( ( addr >= beginning ) && ( addr < ending ) ) {
                perms[ 2 ] = 0; // 3th and 4th character does not matter.
                if ( strcmp( perms, "rw" ) == 0 ) {
                    // Memory we are looking for should be readable and writable.
                    found = 1;
                }; // if
                break;
            }; // if

        }; // forever

        // Free resources.
        fclose( file );
        KMP_INTERNAL_FREE( name );

    #elif KMP_OS_DARWIN

        /*
            On OS X*, /proc pseudo filesystem is not available. Try to read memory using vm
            interface.
        */

        int buffer;
        vm_size_t count;
        rc =
            vm_read_overwrite(
                mach_task_self(), // Task to read memory of.
                (vm_address_t)( addr ), // Address to read from.
                1, // Number of bytes to be read.
                (vm_address_t)( & buffer ), // Address of buffer to save read bytes in.
                & count // Address of var to save number of read bytes in.
            );
        if ( rc == 0 ) {
            // Memory successfully read.
            found = 1;
        }; // if

    #elif KMP_OS_FREEBSD || KMP_OS_NETBSD

        // FIXME(FreeBSD, NetBSD): Implement this
        found = 1;

    #else

        #error "Unknown or unsupported OS"

    #endif

    return found;

} // __kmp_is_address_mapped
2343
2344#ifdef USE_LOAD_BALANCE
2345
2346
2347# if KMP_OS_DARWIN
2348
// The function returns the rounded value of the system load average
// during given time interval which depends on the value of
// __kmp_load_balance_interval variable (default is 60 sec, other values
// may be 300 sec or 900 sec).
// It returns -1 in case of error.
// Note: the 'max' parameter is unused in this (Darwin) variant; it exists
// to keep the signature identical to the Linux implementation.
int
__kmp_get_load_balance( int max )
{
    double averages[3];
    int ret_avg = 0;

    int res = getloadavg( averages, 3 );

    //Check __kmp_load_balance_interval to determine which of averages to use.
    // getloadavg() may return the number of samples less than requested that is
    // less than 3.
    if ( __kmp_load_balance_interval < 180 && ( res >= 1 ) ) {
        ret_avg = averages[0];// 1 min
    } else if ( ( __kmp_load_balance_interval >= 180
                  && __kmp_load_balance_interval < 600 ) && ( res >= 2 ) ) {
        ret_avg = averages[1];// 5 min
    } else if ( ( __kmp_load_balance_interval >= 600 ) && ( res == 3 ) ) {
        ret_avg = averages[2];// 15 min
    } else {// Error occurred
        return -1;
    }

    return ret_avg;
}
2378
2379# else // Linux* OS
2380
2381// The fuction returns number of running (not sleeping) threads, or -1 in case of error.
2382// Error could be reported if Linux* OS kernel too old (without "/proc" support).
2383// Counting running threads stops if max running threads encountered.
2384int
2385__kmp_get_load_balance( int max )
2386{
2387 static int permanent_error = 0;
2388
2389 static int glb_running_threads = 0; /* Saved count of the running threads for the thread balance algortihm */
2390 static double glb_call_time = 0; /* Thread balance algorithm call time */
2391
2392 int running_threads = 0; // Number of running threads in the system.
2393
2394 DIR * proc_dir = NULL; // Handle of "/proc/" directory.
2395 struct dirent * proc_entry = NULL;
2396
2397 kmp_str_buf_t task_path; // "/proc/<pid>/task/<tid>/" path.
2398 DIR * task_dir = NULL; // Handle of "/proc/<pid>/task/<tid>/" directory.
2399 struct dirent * task_entry = NULL;
2400 int task_path_fixed_len;
2401
2402 kmp_str_buf_t stat_path; // "/proc/<pid>/task/<tid>/stat" path.
2403 int stat_file = -1;
2404 int stat_path_fixed_len;
2405
2406 int total_processes = 0; // Total number of processes in system.
2407 int total_threads = 0; // Total number of threads in system.
2408
2409 double call_time = 0.0;
2410
2411 __kmp_str_buf_init( & task_path );
2412 __kmp_str_buf_init( & stat_path );
2413
2414 __kmp_elapsed( & call_time );
2415
2416 if ( glb_call_time &&
2417 ( call_time - glb_call_time < __kmp_load_balance_interval ) ) {
2418 running_threads = glb_running_threads;
2419 goto finish;
2420 }
2421
2422 glb_call_time = call_time;
2423
2424 // Do not spend time on scanning "/proc/" if we have a permanent error.
2425 if ( permanent_error ) {
2426 running_threads = -1;
2427 goto finish;
2428 }; // if
2429
2430 if ( max <= 0 ) {
2431 max = INT_MAX;
2432 }; // if
2433
2434 // Open "/proc/" directory.
2435 proc_dir = opendir( "/proc" );
2436 if ( proc_dir == NULL ) {
2437 // Cannot open "/prroc/". Probably the kernel does not support it. Return an error now and
2438 // in subsequent calls.
2439 running_threads = -1;
2440 permanent_error = 1;
2441 goto finish;
2442 }; // if
2443
2444 // Initialize fixed part of task_path. This part will not change.
2445 __kmp_str_buf_cat( & task_path, "/proc/", 6 );
2446 task_path_fixed_len = task_path.used; // Remember number of used characters.
2447
2448 proc_entry = readdir( proc_dir );
2449 while ( proc_entry != NULL ) {
2450 // Proc entry is a directory and name starts with a digit. Assume it is a process'
2451 // directory.
2452 if ( proc_entry->d_type == DT_DIR && isdigit( proc_entry->d_name[ 0 ] ) ) {
2453
2454 ++ total_processes;
2455 // Make sure init process is the very first in "/proc", so we can replace
2456 // strcmp( proc_entry->d_name, "1" ) == 0 with simpler total_processes == 1.
2457 // We are going to check that total_processes == 1 => d_name == "1" is true (where
2458 // "=>" is implication). Since C++ does not have => operator, let us replace it with its
2459 // equivalent: a => b == ! a || b.
2460 KMP_DEBUG_ASSERT( total_processes != 1 || strcmp( proc_entry->d_name, "1" ) == 0 );
2461
2462 // Construct task_path.
2463 task_path.used = task_path_fixed_len; // Reset task_path to "/proc/".
Andrey Churbanov74bf17b2015-04-02 13:27:08 +00002464 __kmp_str_buf_cat( & task_path, proc_entry->d_name, KMP_STRLEN( proc_entry->d_name ) );
Jim Cownie5e8470a2013-09-27 10:38:44 +00002465 __kmp_str_buf_cat( & task_path, "/task", 5 );
2466
2467 task_dir = opendir( task_path.str );
2468 if ( task_dir == NULL ) {
2469 // Process can finish between reading "/proc/" directory entry and opening process'
2470 // "task/" directory. So, in general case we should not complain, but have to skip
2471 // this process and read the next one.
2472 // But on systems with no "task/" support we will spend lot of time to scan "/proc/"
2473 // tree again and again without any benefit. "init" process (its pid is 1) should
2474 // exist always, so, if we cannot open "/proc/1/task/" directory, it means "task/"
2475 // is not supported by kernel. Report an error now and in the future.
2476 if ( strcmp( proc_entry->d_name, "1" ) == 0 ) {
2477 running_threads = -1;
2478 permanent_error = 1;
2479 goto finish;
2480 }; // if
2481 } else {
2482 // Construct fixed part of stat file path.
2483 __kmp_str_buf_clear( & stat_path );
2484 __kmp_str_buf_cat( & stat_path, task_path.str, task_path.used );
2485 __kmp_str_buf_cat( & stat_path, "/", 1 );
2486 stat_path_fixed_len = stat_path.used;
2487
2488 task_entry = readdir( task_dir );
2489 while ( task_entry != NULL ) {
2490 // It is a directory and name starts with a digit.
2491 if ( proc_entry->d_type == DT_DIR && isdigit( task_entry->d_name[ 0 ] ) ) {
2492
2493 ++ total_threads;
2494
2495 // Consruct complete stat file path. Easiest way would be:
2496 // __kmp_str_buf_print( & stat_path, "%s/%s/stat", task_path.str, task_entry->d_name );
2497 // but seriae of __kmp_str_buf_cat works a bit faster.
2498 stat_path.used = stat_path_fixed_len; // Reset stat path to its fixed part.
Andrey Churbanov74bf17b2015-04-02 13:27:08 +00002499 __kmp_str_buf_cat( & stat_path, task_entry->d_name, KMP_STRLEN( task_entry->d_name ) );
Jim Cownie5e8470a2013-09-27 10:38:44 +00002500 __kmp_str_buf_cat( & stat_path, "/stat", 5 );
2501
2502 // Note: Low-level API (open/read/close) is used. High-level API
2503 // (fopen/fclose) works ~ 30 % slower.
2504 stat_file = open( stat_path.str, O_RDONLY );
2505 if ( stat_file == -1 ) {
2506 // We cannot report an error because task (thread) can terminate just
2507 // before reading this file.
2508 } else {
2509 /*
2510 Content of "stat" file looks like:
2511
2512 24285 (program) S ...
2513
2514 It is a single line (if program name does not include fanny
2515 symbols). First number is a thread id, then name of executable file
2516 name in paretheses, then state of the thread. We need just thread
2517 state.
2518
2519 Good news: Length of program name is 15 characters max. Longer
2520 names are truncated.
2521
2522 Thus, we need rather short buffer: 15 chars for program name +
2523 2 parenthesis, + 3 spaces + ~7 digits of pid = 37.
2524
2525 Bad news: Program name may contain special symbols like space,
2526 closing parenthesis, or even new line. This makes parsing "stat"
2527 file not 100 % reliable. In case of fanny program names parsing
2528 may fail (report incorrect thread state).
2529
2530 Parsing "status" file looks more promissing (due to different
2531 file structure and escaping special symbols) but reading and
2532 parsing of "status" file works slower.
2533
2534 -- ln
2535 */
2536 char buffer[ 65 ];
2537 int len;
2538 len = read( stat_file, buffer, sizeof( buffer ) - 1 );
2539 if ( len >= 0 ) {
2540 buffer[ len ] = 0;
2541 // Using scanf:
2542 // sscanf( buffer, "%*d (%*s) %c ", & state );
2543 // looks very nice, but searching for a closing parenthesis works a
2544 // bit faster.
2545 char * close_parent = strstr( buffer, ") " );
2546 if ( close_parent != NULL ) {
2547 char state = * ( close_parent + 2 );
2548 if ( state == 'R' ) {
2549 ++ running_threads;
2550 if ( running_threads >= max ) {
2551 goto finish;
2552 }; // if
2553 }; // if
2554 }; // if
2555 }; // if
2556 close( stat_file );
2557 stat_file = -1;
2558 }; // if
2559 }; // if
2560 task_entry = readdir( task_dir );
2561 }; // while
2562 closedir( task_dir );
2563 task_dir = NULL;
2564 }; // if
2565 }; // if
2566 proc_entry = readdir( proc_dir );
2567 }; // while
2568
2569 //
2570 // There _might_ be a timing hole where the thread executing this
2571 // code get skipped in the load balance, and running_threads is 0.
2572 // Assert in the debug builds only!!!
2573 //
2574 KMP_DEBUG_ASSERT( running_threads > 0 );
2575 if ( running_threads <= 0 ) {
2576 running_threads = 1;
2577 }
2578
2579 finish: // Clean up and exit.
2580 if ( proc_dir != NULL ) {
2581 closedir( proc_dir );
2582 }; // if
2583 __kmp_str_buf_free( & task_path );
2584 if ( task_dir != NULL ) {
2585 closedir( task_dir );
2586 }; // if
2587 __kmp_str_buf_free( & stat_path );
2588 if ( stat_file != -1 ) {
2589 close( stat_file );
2590 }; // if
2591
2592 glb_running_threads = running_threads;
2593
2594 return running_threads;
2595
2596} // __kmp_get_load_balance
2597
2598# endif // KMP_OS_DARWIN
2599
2600#endif // USE_LOAD_BALANCE
2601
Andrey Churbanovedc370e2015-08-05 11:23:10 +00002602#if !(KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_MIC)
Jim Cownie3051f972014-08-07 10:12:54 +00002603
// we really only need the case with 1 argument, because CLANG always build
// a struct of pointers to shared variables referenced in the outlined function
/* Invoke the compiler-outlined microtask 'pkfn' with gtid/tid plus up to 15
   user arguments taken from p_argv.  Aborts the process if argc exceeds the
   supported maximum.  Always returns 1 on the paths that return.
   When OMPT is enabled, publishes the caller's frame address through
   exit_frame_ptr for the duration of the call. */
int
__kmp_invoke_microtask( microtask_t pkfn,
                        int gtid, int tid,
                        int argc, void *p_argv[]
#if OMPT_SUPPORT
                        , void **exit_frame_ptr
#endif
)
{
#if OMPT_SUPPORT
    *exit_frame_ptr = __builtin_frame_address(0);
#endif

    // Dispatch on argument count; each case forwards the exact number of
    // pointer arguments the outlined function expects.
    switch (argc) {
    default:
        fprintf(stderr, "Too many args to microtask: %d!\n", argc);
        fflush(stderr);
        exit(-1);
    case 0:
        (*pkfn)(&gtid, &tid);
        break;
    case 1:
        (*pkfn)(&gtid, &tid, p_argv[0]);
        break;
    case 2:
        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1]);
        break;
    case 3:
        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2]);
        break;
    case 4:
        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3]);
        break;
    case 5:
        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4]);
        break;
    case 6:
        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
                p_argv[5]);
        break;
    case 7:
        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
                p_argv[5], p_argv[6]);
        break;
    case 8:
        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
                p_argv[5], p_argv[6], p_argv[7]);
        break;
    case 9:
        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
                p_argv[5], p_argv[6], p_argv[7], p_argv[8]);
        break;
    case 10:
        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
                p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9]);
        break;
    case 11:
        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
                p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10]);
        break;
    case 12:
        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
                p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
                p_argv[11]);
        break;
    case 13:
        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
                p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
                p_argv[11], p_argv[12]);
        break;
    case 14:
        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
                p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
                p_argv[11], p_argv[12], p_argv[13]);
        break;
    case 15:
        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
                p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
                p_argv[11], p_argv[12], p_argv[13], p_argv[14]);
        break;
    }

#if OMPT_SUPPORT
    *exit_frame_ptr = 0;
#endif

    return 1;
}
2694
2695#endif
Jim Cownie181b4bb2013-12-23 17:28:57 +00002696
Jim Cownie5e8470a2013-09-27 10:38:44 +00002697// end of file //
2698