blob: 890ec5cfaaf1b5d015bd544c312f9ccaf0fdf108 [file] [log] [blame]
Jim Cownie5e8470a2013-09-27 10:38:44 +00001/*
2 * z_Linux_util.c -- platform specific routines.
Jim Cownie5e8470a2013-09-27 10:38:44 +00003 */
4
5
6//===----------------------------------------------------------------------===//
7//
8// The LLVM Compiler Infrastructure
9//
10// This file is dual licensed under the MIT and the University of Illinois Open
11// Source Licenses. See LICENSE.txt for details.
12//
13//===----------------------------------------------------------------------===//
14
15
16#include "kmp.h"
17#include "kmp_wrapper_getpid.h"
18#include "kmp_itt.h"
19#include "kmp_str.h"
20#include "kmp_i18n.h"
21#include "kmp_io.h"
Jim Cownie4cc4bb42014-10-07 16:25:50 +000022#include "kmp_stats.h"
23#include "kmp_wait_release.h"
Jim Cownie5e8470a2013-09-27 10:38:44 +000024
Joerg Sonnenberger1564f3c2015-09-21 20:02:45 +000025#if !KMP_OS_FREEBSD && !KMP_OS_NETBSD
Alp Toker763b9392014-02-28 09:42:41 +000026# include <alloca.h>
27#endif
Jim Cownie5e8470a2013-09-27 10:38:44 +000028#include <unistd.h>
29#include <math.h> // HUGE_VAL.
30#include <sys/time.h>
31#include <sys/times.h>
32#include <sys/resource.h>
33#include <sys/syscall.h>
34
Jim Cownie3051f972014-08-07 10:12:54 +000035#if KMP_OS_LINUX && !KMP_OS_CNK
Jim Cownie5e8470a2013-09-27 10:38:44 +000036# include <sys/sysinfo.h>
Andrey Churbanovcbda8682015-01-13 14:43:35 +000037# if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
Jim Cownie5e8470a2013-09-27 10:38:44 +000038// We should really include <futex.h>, but that causes compatibility problems on different
39// Linux* OS distributions that either require that you include (or break when you try to include)
40// <pci/types.h>.
41// Since all we need is the two macros below (which are part of the kernel ABI, so can't change)
42// we just define the constants here and don't include <futex.h>
43# ifndef FUTEX_WAIT
44# define FUTEX_WAIT 0
45# endif
46# ifndef FUTEX_WAKE
47# define FUTEX_WAKE 1
48# endif
49# endif
50#elif KMP_OS_DARWIN
51# include <sys/sysctl.h>
52# include <mach/mach.h>
Alp Toker763b9392014-02-28 09:42:41 +000053#elif KMP_OS_FREEBSD
Alp Toker763b9392014-02-28 09:42:41 +000054# include <pthread_np.h>
Jim Cownie5e8470a2013-09-27 10:38:44 +000055#endif
56
57
58#include <dirent.h>
59#include <ctype.h>
60#include <fcntl.h>
61
62/* ------------------------------------------------------------------------ */
63/* ------------------------------------------------------------------------ */
64
65struct kmp_sys_timer {
66 struct timespec start;
67};
68
69// Convert timespec to nanoseconds.
70#define TS2NS(timespec) (((timespec).tv_sec * 1e9) + (timespec).tv_nsec)
71
72static struct kmp_sys_timer __kmp_sys_timer_data;
73
74#if KMP_HANDLE_SIGNALS
75 typedef void (* sig_func_t )( int );
76 STATIC_EFI2_WORKAROUND struct sigaction __kmp_sighldrs[ NSIG ];
77 static sigset_t __kmp_sigset;
78#endif
79
80static int __kmp_init_runtime = FALSE;
81
82static int __kmp_fork_count = 0;
83
84static pthread_condattr_t __kmp_suspend_cond_attr;
85static pthread_mutexattr_t __kmp_suspend_mutex_attr;
86
87static kmp_cond_align_t __kmp_wait_cv;
88static kmp_mutex_align_t __kmp_wait_mx;
89
90/* ------------------------------------------------------------------------ */
91/* ------------------------------------------------------------------------ */
92
#ifdef DEBUG_SUSPEND
// Debug-only helper: format the internal state of a condition variable into
// `buffer` (caller must supply at least 128 bytes).
// NOTE(review): this reaches into glibc-internal pthread_cond_t fields
// (__c_lock.__status/__spinlock, __c_waiting), so it only compiles/means
// anything on a matching glibc — confirm before enabling DEBUG_SUSPEND.
static void
__kmp_print_cond( char *buffer, kmp_cond_align_t *cond )
{
    KMP_SNPRINTF( buffer, 128, "(cond (lock (%ld, %d)), (descr (%p)))",
                  cond->c_cond.__c_lock.__status, cond->c_cond.__c_lock.__spinlock,
                  cond->c_cond.__c_waiting );
}
#endif
102
103/* ------------------------------------------------------------------------ */
104/* ------------------------------------------------------------------------ */
105
Jim Cownie3051f972014-08-07 10:12:54 +0000106#if ( KMP_OS_LINUX && KMP_AFFINITY_SUPPORTED)
Jim Cownie5e8470a2013-09-27 10:38:44 +0000107
108/*
109 * Affinity support
110 */
111
/*
 * On some of the older OS's that we build on, these constants aren't present
 * in <asm/unistd.h> #included from <sys/syscall.h>. They must be the same on
 * all systems of the same arch where they are defined, and they cannot change:
 * they are set in stone forever.
 */
118
Jim Cownie181b4bb2013-12-23 17:28:57 +0000119# if KMP_ARCH_X86 || KMP_ARCH_ARM
Jim Cownie5e8470a2013-09-27 10:38:44 +0000120# ifndef __NR_sched_setaffinity
121# define __NR_sched_setaffinity 241
122# elif __NR_sched_setaffinity != 241
123# error Wrong code for setaffinity system call.
124# endif /* __NR_sched_setaffinity */
125# ifndef __NR_sched_getaffinity
126# define __NR_sched_getaffinity 242
127# elif __NR_sched_getaffinity != 242
128# error Wrong code for getaffinity system call.
129# endif /* __NR_sched_getaffinity */
130
Andrey Churbanovcbda8682015-01-13 14:43:35 +0000131# elif KMP_ARCH_AARCH64
132# ifndef __NR_sched_setaffinity
133# define __NR_sched_setaffinity 122
134# elif __NR_sched_setaffinity != 122
135# error Wrong code for setaffinity system call.
136# endif /* __NR_sched_setaffinity */
137# ifndef __NR_sched_getaffinity
138# define __NR_sched_getaffinity 123
139# elif __NR_sched_getaffinity != 123
140# error Wrong code for getaffinity system call.
141# endif /* __NR_sched_getaffinity */
142
Jim Cownie5e8470a2013-09-27 10:38:44 +0000143# elif KMP_ARCH_X86_64
144# ifndef __NR_sched_setaffinity
145# define __NR_sched_setaffinity 203
146# elif __NR_sched_setaffinity != 203
147# error Wrong code for setaffinity system call.
148# endif /* __NR_sched_setaffinity */
149# ifndef __NR_sched_getaffinity
150# define __NR_sched_getaffinity 204
151# elif __NR_sched_getaffinity != 204
152# error Wrong code for getaffinity system call.
153# endif /* __NR_sched_getaffinity */
154
Jim Cownie3051f972014-08-07 10:12:54 +0000155# elif KMP_ARCH_PPC64
156# ifndef __NR_sched_setaffinity
157# define __NR_sched_setaffinity 222
158# elif __NR_sched_setaffinity != 222
159# error Wrong code for setaffinity system call.
160# endif /* __NR_sched_setaffinity */
161# ifndef __NR_sched_getaffinity
162# define __NR_sched_getaffinity 223
163# elif __NR_sched_getaffinity != 223
164# error Wrong code for getaffinity system call.
165# endif /* __NR_sched_getaffinity */
166
167
Jim Cownie5e8470a2013-09-27 10:38:44 +0000168# else
169# error Unknown or unsupported architecture
170
171# endif /* KMP_ARCH_* */
172
173int
174__kmp_set_system_affinity( kmp_affin_mask_t const *mask, int abort_on_error )
175{
176 KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
177 "Illegal set affinity operation when not capable");
178
179 int retval = syscall( __NR_sched_setaffinity, 0, __kmp_affin_mask_size, mask );
180 if (retval >= 0) {
181 return 0;
182 }
183 int error = errno;
184 if (abort_on_error) {
185 __kmp_msg(
186 kmp_ms_fatal,
187 KMP_MSG( FatalSysError ),
188 KMP_ERR( error ),
189 __kmp_msg_null
190 );
191 }
192 return error;
193}
194
195int
196__kmp_get_system_affinity( kmp_affin_mask_t *mask, int abort_on_error )
197{
198 KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
199 "Illegal get affinity operation when not capable");
200
201 int retval = syscall( __NR_sched_getaffinity, 0, __kmp_affin_mask_size, mask );
202 if (retval >= 0) {
203 return 0;
204 }
205 int error = errno;
206 if (abort_on_error) {
207 __kmp_msg(
208 kmp_ms_fatal,
209 KMP_MSG( FatalSysError ),
210 KMP_ERR( error ),
211 __kmp_msg_null
212 );
213 }
214 return error;
215}
216
217void
218__kmp_affinity_bind_thread( int which )
219{
220 KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
221 "Illegal set affinity operation when not capable");
222
Andrey Churbanov74bf17b2015-04-02 13:27:08 +0000223 kmp_affin_mask_t *mask = (kmp_affin_mask_t *)KMP_ALLOCA(__kmp_affin_mask_size);
Jim Cownie5e8470a2013-09-27 10:38:44 +0000224 KMP_CPU_ZERO(mask);
225 KMP_CPU_SET(which, mask);
226 __kmp_set_system_affinity(mask, TRUE);
227}
228
229/*
230 * Determine if we can access affinity functionality on this version of
231 * Linux* OS by checking __NR_sched_{get,set}affinity system calls, and set
232 * __kmp_affin_mask_size to the appropriate value (0 means not capable).
233 */
234void
235__kmp_affinity_determine_capable(const char *env_var)
236{
237 //
238 // Check and see if the OS supports thread affinity.
239 //
240
241# define KMP_CPU_SET_SIZE_LIMIT (1024*1024)
242
243 int gCode;
244 int sCode;
245 kmp_affin_mask_t *buf;
246 buf = ( kmp_affin_mask_t * ) KMP_INTERNAL_MALLOC( KMP_CPU_SET_SIZE_LIMIT );
247
248 // If Linux* OS:
249 // If the syscall fails or returns a suggestion for the size,
250 // then we don't have to search for an appropriate size.
251 gCode = syscall( __NR_sched_getaffinity, 0, KMP_CPU_SET_SIZE_LIMIT, buf );
252 KA_TRACE(30, ( "__kmp_affinity_determine_capable: "
Alp Toker8f2d3f02014-02-24 10:40:15 +0000253 "initial getaffinity call returned %d errno = %d\n",
Jim Cownie5e8470a2013-09-27 10:38:44 +0000254 gCode, errno));
255
256 //if ((gCode < 0) && (errno == ENOSYS))
257 if (gCode < 0) {
258 //
259 // System call not supported
260 //
261 if (__kmp_affinity_verbose || (__kmp_affinity_warnings
262 && (__kmp_affinity_type != affinity_none)
263 && (__kmp_affinity_type != affinity_default)
264 && (__kmp_affinity_type != affinity_disabled))) {
265 int error = errno;
266 __kmp_msg(
267 kmp_ms_warning,
268 KMP_MSG( GetAffSysCallNotSupported, env_var ),
269 KMP_ERR( error ),
270 __kmp_msg_null
271 );
272 }
Andrey Churbanov1f037e42015-03-10 09:15:26 +0000273 KMP_AFFINITY_DISABLE();
Jim Cownie5e8470a2013-09-27 10:38:44 +0000274 KMP_INTERNAL_FREE(buf);
275 return;
276 }
277 if (gCode > 0) { // Linux* OS only
278 // The optimal situation: the OS returns the size of the buffer
279 // it expects.
280 //
281 // A verification of correct behavior is that Isetaffinity on a NULL
282 // buffer with the same size fails with errno set to EFAULT.
283 sCode = syscall( __NR_sched_setaffinity, 0, gCode, NULL );
284 KA_TRACE(30, ( "__kmp_affinity_determine_capable: "
285 "setaffinity for mask size %d returned %d errno = %d\n",
286 gCode, sCode, errno));
287 if (sCode < 0) {
288 if (errno == ENOSYS) {
289 if (__kmp_affinity_verbose || (__kmp_affinity_warnings
290 && (__kmp_affinity_type != affinity_none)
291 && (__kmp_affinity_type != affinity_default)
292 && (__kmp_affinity_type != affinity_disabled))) {
293 int error = errno;
294 __kmp_msg(
295 kmp_ms_warning,
296 KMP_MSG( SetAffSysCallNotSupported, env_var ),
297 KMP_ERR( error ),
298 __kmp_msg_null
299 );
300 }
Andrey Churbanov1f037e42015-03-10 09:15:26 +0000301 KMP_AFFINITY_DISABLE();
Jim Cownie5e8470a2013-09-27 10:38:44 +0000302 KMP_INTERNAL_FREE(buf);
303 }
304 if (errno == EFAULT) {
Andrey Churbanov1f037e42015-03-10 09:15:26 +0000305 KMP_AFFINITY_ENABLE(gCode);
Jim Cownie5e8470a2013-09-27 10:38:44 +0000306 KA_TRACE(10, ( "__kmp_affinity_determine_capable: "
307 "affinity supported (mask size %d)\n",
308 (int)__kmp_affin_mask_size));
309 KMP_INTERNAL_FREE(buf);
310 return;
311 }
312 }
313 }
314
315 //
316 // Call the getaffinity system call repeatedly with increasing set sizes
317 // until we succeed, or reach an upper bound on the search.
318 //
319 KA_TRACE(30, ( "__kmp_affinity_determine_capable: "
320 "searching for proper set size\n"));
321 int size;
322 for (size = 1; size <= KMP_CPU_SET_SIZE_LIMIT; size *= 2) {
323 gCode = syscall( __NR_sched_getaffinity, 0, size, buf );
324 KA_TRACE(30, ( "__kmp_affinity_determine_capable: "
325 "getaffinity for mask size %d returned %d errno = %d\n", size,
326 gCode, errno));
327
328 if (gCode < 0) {
329 if ( errno == ENOSYS )
330 {
331 //
332 // We shouldn't get here
333 //
334 KA_TRACE(30, ( "__kmp_affinity_determine_capable: "
335 "inconsistent OS call behavior: errno == ENOSYS for mask size %d\n",
336 size));
337 if (__kmp_affinity_verbose || (__kmp_affinity_warnings
338 && (__kmp_affinity_type != affinity_none)
339 && (__kmp_affinity_type != affinity_default)
340 && (__kmp_affinity_type != affinity_disabled))) {
341 int error = errno;
342 __kmp_msg(
343 kmp_ms_warning,
344 KMP_MSG( GetAffSysCallNotSupported, env_var ),
345 KMP_ERR( error ),
346 __kmp_msg_null
347 );
348 }
Andrey Churbanov1f037e42015-03-10 09:15:26 +0000349 KMP_AFFINITY_DISABLE();
Jim Cownie5e8470a2013-09-27 10:38:44 +0000350 KMP_INTERNAL_FREE(buf);
351 return;
352 }
353 continue;
354 }
355
356 sCode = syscall( __NR_sched_setaffinity, 0, gCode, NULL );
357 KA_TRACE(30, ( "__kmp_affinity_determine_capable: "
358 "setaffinity for mask size %d returned %d errno = %d\n",
359 gCode, sCode, errno));
360 if (sCode < 0) {
361 if (errno == ENOSYS) { // Linux* OS only
362 //
363 // We shouldn't get here
364 //
365 KA_TRACE(30, ( "__kmp_affinity_determine_capable: "
366 "inconsistent OS call behavior: errno == ENOSYS for mask size %d\n",
367 size));
368 if (__kmp_affinity_verbose || (__kmp_affinity_warnings
369 && (__kmp_affinity_type != affinity_none)
370 && (__kmp_affinity_type != affinity_default)
371 && (__kmp_affinity_type != affinity_disabled))) {
372 int error = errno;
373 __kmp_msg(
374 kmp_ms_warning,
375 KMP_MSG( SetAffSysCallNotSupported, env_var ),
376 KMP_ERR( error ),
377 __kmp_msg_null
378 );
379 }
Andrey Churbanov1f037e42015-03-10 09:15:26 +0000380 KMP_AFFINITY_DISABLE();
Jim Cownie5e8470a2013-09-27 10:38:44 +0000381 KMP_INTERNAL_FREE(buf);
382 return;
383 }
384 if (errno == EFAULT) {
Andrey Churbanov1f037e42015-03-10 09:15:26 +0000385 KMP_AFFINITY_ENABLE(gCode);
Jim Cownie5e8470a2013-09-27 10:38:44 +0000386 KA_TRACE(10, ( "__kmp_affinity_determine_capable: "
387 "affinity supported (mask size %d)\n",
388 (int)__kmp_affin_mask_size));
389 KMP_INTERNAL_FREE(buf);
390 return;
391 }
392 }
393 }
394 //int error = errno; // save uncaught error code
395 KMP_INTERNAL_FREE(buf);
396 // errno = error; // restore uncaught error code, will be printed at the next KMP_WARNING below
397
398 //
399 // Affinity is not supported
400 //
Andrey Churbanov1f037e42015-03-10 09:15:26 +0000401 KMP_AFFINITY_DISABLE();
Jim Cownie5e8470a2013-09-27 10:38:44 +0000402 KA_TRACE(10, ( "__kmp_affinity_determine_capable: "
403 "cannot determine mask size - affinity not supported\n"));
404 if (__kmp_affinity_verbose || (__kmp_affinity_warnings
405 && (__kmp_affinity_type != affinity_none)
406 && (__kmp_affinity_type != affinity_default)
407 && (__kmp_affinity_type != affinity_disabled))) {
408 KMP_WARNING( AffCantGetMaskSize, env_var );
409 }
410}
411
Andrey Churbanovd39f11c2015-03-10 10:14:57 +0000412#endif // KMP_OS_LINUX && KMP_AFFINITY_SUPPORTED
Jim Cownie5e8470a2013-09-27 10:38:44 +0000413
Andrey Churbanovd39f11c2015-03-10 10:14:57 +0000414/* ------------------------------------------------------------------------ */
415/* ------------------------------------------------------------------------ */
416
417#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && !KMP_OS_CNK
418
419int
420__kmp_futex_determine_capable()
421{
422 int loc = 0;
423 int rc = syscall( __NR_futex, &loc, FUTEX_WAKE, 1, NULL, NULL, 0 );
424 int retval = ( rc == 0 ) || ( errno != ENOSYS );
425
426 KA_TRACE(10, ( "__kmp_futex_determine_capable: rc = %d errno = %d\n", rc,
427 errno ) );
428 KA_TRACE(10, ( "__kmp_futex_determine_capable: futex syscall%s supported\n",
429 retval ? "" : " not" ) );
430
431 return retval;
432}
433
#endif // KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && !KMP_OS_CNK
435
436/* ------------------------------------------------------------------------ */
437/* ------------------------------------------------------------------------ */
438
439#if (KMP_ARCH_X86 || KMP_ARCH_X86_64) && (! KMP_ASM_INTRINS)
Jim Cownie5e8470a2013-09-27 10:38:44 +0000440/*
Andrey Churbanovd39f11c2015-03-10 10:14:57 +0000441 * Only 32-bit "add-exchange" instruction on IA-32 architecture causes us to
442 * use compare_and_store for these routines
Jim Cownie5e8470a2013-09-27 10:38:44 +0000443 */
444
Andrey Churbanov7b2ab712015-03-10 09:03:42 +0000445kmp_int8
446__kmp_test_then_or8( volatile kmp_int8 *p, kmp_int8 d )
447{
448 kmp_int8 old_value, new_value;
449
450 old_value = TCR_1( *p );
451 new_value = old_value | d;
452
453 while ( ! KMP_COMPARE_AND_STORE_REL8 ( p, old_value, new_value ) )
454 {
455 KMP_CPU_PAUSE();
456 old_value = TCR_1( *p );
457 new_value = old_value | d;
458 }
459 return old_value;
460}
461
462kmp_int8
463__kmp_test_then_and8( volatile kmp_int8 *p, kmp_int8 d )
464{
465 kmp_int8 old_value, new_value;
466
467 old_value = TCR_1( *p );
468 new_value = old_value & d;
469
470 while ( ! KMP_COMPARE_AND_STORE_REL8 ( p, old_value, new_value ) )
471 {
472 KMP_CPU_PAUSE();
473 old_value = TCR_1( *p );
474 new_value = old_value & d;
475 }
476 return old_value;
477}
478
Jim Cownie5e8470a2013-09-27 10:38:44 +0000479kmp_int32
480__kmp_test_then_or32( volatile kmp_int32 *p, kmp_int32 d )
481{
482 kmp_int32 old_value, new_value;
483
484 old_value = TCR_4( *p );
485 new_value = old_value | d;
486
Jim Cownie3051f972014-08-07 10:12:54 +0000487 while ( ! KMP_COMPARE_AND_STORE_REL32 ( p, old_value, new_value ) )
Jim Cownie5e8470a2013-09-27 10:38:44 +0000488 {
489 KMP_CPU_PAUSE();
490 old_value = TCR_4( *p );
491 new_value = old_value | d;
492 }
493 return old_value;
494}
495
496kmp_int32
497__kmp_test_then_and32( volatile kmp_int32 *p, kmp_int32 d )
498{
499 kmp_int32 old_value, new_value;
500
501 old_value = TCR_4( *p );
502 new_value = old_value & d;
503
Jim Cownie3051f972014-08-07 10:12:54 +0000504 while ( ! KMP_COMPARE_AND_STORE_REL32 ( p, old_value, new_value ) )
Jim Cownie5e8470a2013-09-27 10:38:44 +0000505 {
506 KMP_CPU_PAUSE();
507 old_value = TCR_4( *p );
508 new_value = old_value & d;
509 }
510 return old_value;
511}
512
Andrey Churbanovcbda8682015-01-13 14:43:35 +0000513# if KMP_ARCH_X86 || KMP_ARCH_PPC64 || KMP_ARCH_AARCH64
Andrey Churbanovd39f11c2015-03-10 10:14:57 +0000514kmp_int8
515__kmp_test_then_add8( volatile kmp_int8 *p, kmp_int8 d )
516{
517 kmp_int8 old_value, new_value;
518
519 old_value = TCR_1( *p );
520 new_value = old_value + d;
521
522 while ( ! KMP_COMPARE_AND_STORE_REL8 ( p, old_value, new_value ) )
523 {
524 KMP_CPU_PAUSE();
525 old_value = TCR_1( *p );
526 new_value = old_value + d;
527 }
528 return old_value;
529}
530
Jim Cownie5e8470a2013-09-27 10:38:44 +0000531kmp_int64
532__kmp_test_then_add64( volatile kmp_int64 *p, kmp_int64 d )
533{
534 kmp_int64 old_value, new_value;
535
536 old_value = TCR_8( *p );
537 new_value = old_value + d;
538
Jim Cownie3051f972014-08-07 10:12:54 +0000539 while ( ! KMP_COMPARE_AND_STORE_REL64 ( p, old_value, new_value ) )
Jim Cownie5e8470a2013-09-27 10:38:44 +0000540 {
541 KMP_CPU_PAUSE();
542 old_value = TCR_8( *p );
543 new_value = old_value + d;
544 }
545 return old_value;
546}
# endif /* KMP_ARCH_X86 || KMP_ARCH_PPC64 || KMP_ARCH_AARCH64 */
548
549kmp_int64
550__kmp_test_then_or64( volatile kmp_int64 *p, kmp_int64 d )
551{
552 kmp_int64 old_value, new_value;
553
554 old_value = TCR_8( *p );
555 new_value = old_value | d;
Jim Cownie3051f972014-08-07 10:12:54 +0000556 while ( ! KMP_COMPARE_AND_STORE_REL64 ( p, old_value, new_value ) )
Jim Cownie5e8470a2013-09-27 10:38:44 +0000557 {
558 KMP_CPU_PAUSE();
559 old_value = TCR_8( *p );
560 new_value = old_value | d;
561 }
562 return old_value;
563}
564
565kmp_int64
566__kmp_test_then_and64( volatile kmp_int64 *p, kmp_int64 d )
567{
568 kmp_int64 old_value, new_value;
569
570 old_value = TCR_8( *p );
571 new_value = old_value & d;
Jim Cownie3051f972014-08-07 10:12:54 +0000572 while ( ! KMP_COMPARE_AND_STORE_REL64 ( p, old_value, new_value ) )
Jim Cownie5e8470a2013-09-27 10:38:44 +0000573 {
574 KMP_CPU_PAUSE();
575 old_value = TCR_8( *p );
576 new_value = old_value & d;
577 }
578 return old_value;
579}
580
581#endif /* (KMP_ARCH_X86 || KMP_ARCH_X86_64) && (! KMP_ASM_INTRINS) */
582
583void
584__kmp_terminate_thread( int gtid )
585{
586 int status;
587 kmp_info_t *th = __kmp_threads[ gtid ];
588
589 if ( !th ) return;
590
591 #ifdef KMP_CANCEL_THREADS
592 KA_TRACE( 10, ("__kmp_terminate_thread: kill (%d)\n", gtid ) );
593 status = pthread_cancel( th->th.th_info.ds.ds_thread );
594 if ( status != 0 && status != ESRCH ) {
595 __kmp_msg(
596 kmp_ms_fatal,
597 KMP_MSG( CantTerminateWorkerThread ),
598 KMP_ERR( status ),
599 __kmp_msg_null
600 );
601 }; // if
602 #endif
603 __kmp_yield( TRUE );
604} //
605
606/* ------------------------------------------------------------------------ */
607/* ------------------------------------------------------------------------ */
608
609/* ------------------------------------------------------------------------ */
610/* ------------------------------------------------------------------------ */
611
612/*
613 * Set thread stack info according to values returned by
614 * pthread_getattr_np().
615 * If values are unreasonable, assume call failed and use
616 * incremental stack refinement method instead.
617 * Returns TRUE if the stack parameters could be determined exactly,
618 * FALSE if incremental refinement is necessary.
619 */
static kmp_int32
__kmp_set_stack_info( int gtid, kmp_info_t *th )
{
    // Local used only for its address: a conservative "somewhere on this
    // thread's stack" marker for the incremental-refinement fallback.
    int stack_data;
#if KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD
    /* Linux* OS only -- no pthread_getattr_np support on OS X* */
    pthread_attr_t attr;
    int status;
    size_t size = 0;
    void * addr = 0;

    /* Always do incremental stack refinement for ubermaster threads since the initial
       thread stack range can be reduced by sibling thread creation so pthread_attr_getstack
       may cause thread gtid aliasing */
    if ( ! KMP_UBER_GTID(gtid) ) {

        /* Fetch the real thread attributes */
        status = pthread_attr_init( &attr );
        KMP_CHECK_SYSFAIL( "pthread_attr_init", status );
#if KMP_OS_FREEBSD || KMP_OS_NETBSD
        // BSD spelling of "get attributes of a live thread".
        status = pthread_attr_get_np( pthread_self(), &attr );
        KMP_CHECK_SYSFAIL( "pthread_attr_get_np", status );
#else
        status = pthread_getattr_np( pthread_self(), &attr );
        KMP_CHECK_SYSFAIL( "pthread_getattr_np", status );
#endif
        status = pthread_attr_getstack( &attr, &addr, &size );
        KMP_CHECK_SYSFAIL( "pthread_attr_getstack", status );
        KA_TRACE( 60, ( "__kmp_set_stack_info: T#%d pthread_attr_getstack returned size: %lu, "
                        "low addr: %p\n",
                        gtid, size, addr ));

        status = pthread_attr_destroy( &attr );
        KMP_CHECK_SYSFAIL( "pthread_attr_destroy", status );
    }

    if ( size != 0 && addr != 0 ) {     /* was stack parameter determination successful? */
        /* Store the correct base and size */
        // ds_stackbase is the HIGH end: pthread_attr_getstack returns the low
        // address, so base = low addr + size.
        TCW_PTR(th->th.th_info.ds.ds_stackbase, (((char *)addr) + size));
        TCW_PTR(th->th.th_info.ds.ds_stacksize, size);
        TCW_4(th->th.th_info.ds.ds_stackgrow, FALSE);
        return TRUE;
    }
#endif /* KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD */
    /* Use incremental refinement starting from initial conservative estimate */
    // Size 0 + grow flag TRUE tells later code to refine the bounds as the
    // stack is actually used; &stack_data anchors the current stack region.
    TCW_PTR(th->th.th_info.ds.ds_stacksize, 0);
    TCW_PTR(th -> th.th_info.ds.ds_stackbase, &stack_data);
    TCW_4(th->th.th_info.ds.ds_stackgrow, TRUE);
    return FALSE;
}
670
// Entry point for OpenMP worker threads (passed to pthread_create).
// `thr` is the worker's kmp_info_t. Performs per-thread setup (gtid TLS,
// stats, ITT naming, affinity, cancellation state, FP state, signal mask,
// optional stack-offset padding, stack-bounds bookkeeping) and then runs
// the worker main loop __kmp_launch_thread(), returning its result.
static void*
__kmp_launch_worker( void *thr )
{
    int status, old_type, old_state;
#ifdef KMP_BLOCK_SIGNALS
    sigset_t new_set, old_set;
#endif /* KMP_BLOCK_SIGNALS */
    void *exit_val;
#if KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD
    // volatile so the compiler cannot optimize away the alloca below, which
    // exists purely to offset this thread's stack from its siblings'.
    void * volatile padding = 0;
#endif
    int gtid;

    gtid = ((kmp_info_t*)thr) -> th.th_info.ds.ds_gtid;
    __kmp_gtid_set_specific( gtid );
#ifdef KMP_TDATA_GTID
    __kmp_gtid = gtid;
#endif
#if KMP_STATS_ENABLED
    // set __thread local index to point to thread-specific stats
    __kmp_stats_thread_ptr = ((kmp_info_t*)thr)->th.th_stats;
#endif

#if USE_ITT_BUILD
    __kmp_itt_thread_name( gtid );
#endif /* USE_ITT_BUILD */

#if KMP_AFFINITY_SUPPORTED
    __kmp_affinity_set_init_mask( gtid, FALSE );
#endif

#ifdef KMP_CANCEL_THREADS
    status = pthread_setcanceltype( PTHREAD_CANCEL_ASYNCHRONOUS, & old_type );
    KMP_CHECK_SYSFAIL( "pthread_setcanceltype", status );
    /* josh todo: isn't PTHREAD_CANCEL_ENABLE default for newly-created threads? */
    status = pthread_setcancelstate( PTHREAD_CANCEL_ENABLE, & old_state );
    KMP_CHECK_SYSFAIL( "pthread_setcancelstate", status );
#endif

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
    //
    // Set the FP control regs to be a copy of
    // the parallel initialization thread's.
    //
    __kmp_clear_x87_fpu_status_word();
    __kmp_load_x87_fpu_control_word( &__kmp_init_x87_fpu_control_word );
    __kmp_load_mxcsr( &__kmp_init_mxcsr );
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

#ifdef KMP_BLOCK_SIGNALS
    // Workers run with all signals blocked; the old mask is restored on exit.
    status = sigfillset( & new_set );
    KMP_CHECK_SYSFAIL_ERRNO( "sigfillset", status );
    status = pthread_sigmask( SIG_BLOCK, & new_set, & old_set );
    KMP_CHECK_SYSFAIL( "pthread_sigmask", status );
#endif /* KMP_BLOCK_SIGNALS */

#if KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD
    // Stagger thread stacks by gtid * KMP_STACKOFFSET to reduce cache/page
    // conflicts between threads using the same stack offsets.
    if ( __kmp_stkoffset > 0 && gtid > 0 ) {
        padding = KMP_ALLOCA( gtid * __kmp_stkoffset );
    }
#endif

    KMP_MB();
    // Record stack bounds AFTER the padding alloca so they reflect the
    // stack actually available to user code.
    __kmp_set_stack_info( gtid, (kmp_info_t*)thr );

    __kmp_check_stack_overlap( (kmp_info_t*)thr );

    exit_val = __kmp_launch_thread( (kmp_info_t *) thr );

#ifdef KMP_BLOCK_SIGNALS
    status = pthread_sigmask( SIG_SETMASK, & old_set, NULL );
    KMP_CHECK_SYSFAIL( "pthread_sigmask", status );
#endif /* KMP_BLOCK_SIGNALS */

    return exit_val;
}
747
748
749/* The monitor thread controls all of the threads in the complex */
750
// Entry point for the monitor thread (passed to pthread_create).
// The monitor periodically increments the global time stamp
// __kmp_global.g.g_time.dt.t_value at __kmp_monitor_wakeups Hz (used for
// blocktime expiry), optionally cycles the global yielding mode, and on
// abnormal shutdown (g_abort) cancels all workers, cleans up, and re-raises
// the caught signal. Returns its argument (the monitor's kmp_info_t).
static void*
__kmp_launch_monitor( void *thr )
{
    int status, old_type, old_state;
#ifdef KMP_BLOCK_SIGNALS
    sigset_t new_set;
#endif /* KMP_BLOCK_SIGNALS */
    struct timespec interval;       // wakeup period derived from __kmp_monitor_wakeups
    int yield_count;
    int yield_cycles = 0;

    KMP_MB();       /* Flush all pending memory write invalidates.  */

    KA_TRACE( 10, ("__kmp_launch_monitor: #1 launched\n" ) );

    /* register us as the monitor thread */
    __kmp_gtid_set_specific( KMP_GTID_MONITOR );
#ifdef KMP_TDATA_GTID
    __kmp_gtid = KMP_GTID_MONITOR;
#endif

    KMP_MB();

#if USE_ITT_BUILD
    __kmp_itt_thread_ignore(); // Instruct Intel(R) Threading Tools to ignore monitor thread.
#endif /* USE_ITT_BUILD */

    __kmp_set_stack_info( ((kmp_info_t*)thr)->th.th_info.ds.ds_gtid, (kmp_info_t*)thr );

    __kmp_check_stack_overlap( (kmp_info_t*)thr );

#ifdef KMP_CANCEL_THREADS
    status = pthread_setcanceltype( PTHREAD_CANCEL_ASYNCHRONOUS, & old_type );
    KMP_CHECK_SYSFAIL( "pthread_setcanceltype", status );
    /* josh todo: isn't PTHREAD_CANCEL_ENABLE default for newly-created threads? */
    status = pthread_setcancelstate( PTHREAD_CANCEL_ENABLE, & old_state );
    KMP_CHECK_SYSFAIL( "pthread_setcancelstate", status );
#endif

    #if KMP_REAL_TIME_FIX
    // This is a potential fix which allows application with real-time scheduling policy work.
    // However, decision about the fix is not made yet, so it is disabled by default.
    { // Are program started with real-time scheduling policy?
        int sched = sched_getscheduler( 0 );
        if ( sched == SCHED_FIFO || sched == SCHED_RR ) {
            // Yes, we are a part of real-time application. Try to increase the priority of the
            // monitor.
            struct sched_param param;
            int max_priority = sched_get_priority_max( sched );
            int rc;
            KMP_WARNING( RealTimeSchedNotSupported );
            sched_getparam( 0, & param );
            if ( param.sched_priority < max_priority ) {
                param.sched_priority += 1;
                rc = sched_setscheduler( 0, sched, & param );
                if ( rc != 0 ) {
                    int error = errno;
                    __kmp_msg(
                        kmp_ms_warning,
                        KMP_MSG( CantChangeMonitorPriority ),
                        KMP_ERR( error ),
                        KMP_MSG( MonitorWillStarve ),
                        __kmp_msg_null
                    );
                }; // if
            } else {
                // We cannot abort here, because number of CPUs may be enough for all the threads,
                // including the monitor thread, so application could potentially work...
                __kmp_msg(
                    kmp_ms_warning,
                    KMP_MSG( RunningAtMaxPriority ),
                    KMP_MSG( MonitorWillStarve ),
                    KMP_HNT( RunningAtMaxPriority ),
                    __kmp_msg_null
                );
            }; // if
        }; // if
        TCW_4( __kmp_global.g.g_time.dt.t_value, 0 ); // AC: free thread that waits for monitor started
    }
    #endif // KMP_REAL_TIME_FIX

    KMP_MB();       /* Flush all pending memory write invalidates.  */

    // Convert the wakeup frequency (wakeups per second) into a timespec period.
    if ( __kmp_monitor_wakeups == 1 ) {
        interval.tv_sec  = 1;
        interval.tv_nsec = 0;
    } else {
        interval.tv_sec  = 0;
        interval.tv_nsec = (KMP_NSEC_PER_SEC / __kmp_monitor_wakeups);
    }

    KA_TRACE( 10, ("__kmp_launch_monitor: #2 monitor\n" ) );

    if (__kmp_yield_cycle) {
        __kmp_yielding_on = 0;  /* Start out with yielding shut off */
        yield_count = __kmp_yield_off_count;
    } else {
        __kmp_yielding_on = 1;  /* Yielding is on permanently */
    }

    // Main monitor loop: tick once per interval until library shutdown.
    while( ! TCR_4( __kmp_global.g.g_done ) ) {
        struct timespec  now;
        struct timeval   tval;

        /*  This thread monitors the state of the system */

        KA_TRACE( 15, ( "__kmp_launch_monitor: update\n" ) );

        status = gettimeofday( &tval, NULL );
        KMP_CHECK_SYSFAIL_ERRNO( "gettimeofday", status );
        TIMEVAL_TO_TIMESPEC( &tval, &now );

        // Compute the absolute deadline for this tick, normalizing tv_nsec.
        now.tv_sec  += interval.tv_sec;
        now.tv_nsec += interval.tv_nsec;

        if (now.tv_nsec >= KMP_NSEC_PER_SEC) {
            now.tv_sec  += 1;
            now.tv_nsec -= KMP_NSEC_PER_SEC;
        }

        status = pthread_mutex_lock( & __kmp_wait_mx.m_mutex );
        KMP_CHECK_SYSFAIL( "pthread_mutex_lock", status );
        // AC: the monitor should not fall asleep if g_done has been set
        if ( !TCR_4(__kmp_global.g.g_done) ) {  // check once more under mutex
            // Sleep until the deadline, or until someone signals __kmp_wait_cv
            // to wake the monitor early (e.g. at shutdown).
            status = pthread_cond_timedwait( &__kmp_wait_cv.c_cond, &__kmp_wait_mx.m_mutex, &now );
            if ( status != 0 ) {
                if ( status != ETIMEDOUT && status != EINTR ) {
                    KMP_SYSFAIL( "pthread_cond_timedwait", status );
                };
            };
        };
        status = pthread_mutex_unlock( & __kmp_wait_mx.m_mutex );
        KMP_CHECK_SYSFAIL( "pthread_mutex_unlock", status );

        // Optionally alternate the global yielding flag on/off every
        // yield_count ticks (KMP_YIELD_CYCLE behavior).
        if (__kmp_yield_cycle) {
            yield_cycles++;
            if ( (yield_cycles % yield_count) == 0 ) {
                if (__kmp_yielding_on) {
                    __kmp_yielding_on = 0;  /* Turn it off now */
                    yield_count = __kmp_yield_off_count;
                } else {
                    __kmp_yielding_on = 1;  /* Turn it on now */
                    yield_count = __kmp_yield_on_count;
                }
                yield_cycles = 0;
            }
        } else {
            __kmp_yielding_on = 1;
        }

        // Advance the global time stamp consumed by blocktime logic.
        TCW_4( __kmp_global.g.g_time.dt.t_value,
          TCR_4( __kmp_global.g.g_time.dt.t_value ) + 1 );

        KMP_MB();       /* Flush all pending memory write invalidates.  */
    }

    KA_TRACE( 10, ("__kmp_launch_monitor: #3 cleanup\n" ) );

#ifdef KMP_BLOCK_SIGNALS
    status = sigfillset( & new_set );
    KMP_CHECK_SYSFAIL_ERRNO( "sigfillset", status );
    status = pthread_sigmask( SIG_UNBLOCK, & new_set, NULL );
    KMP_CHECK_SYSFAIL( "pthread_sigmask", status );
#endif /* KMP_BLOCK_SIGNALS */

    KA_TRACE( 10, ("__kmp_launch_monitor: #4 finished\n" ) );

    if( __kmp_global.g.g_abort != 0 ) {
        /* now we need to terminate the worker threads  */
        /* the value of t_abort is the signal we caught */

        int gtid;

        KA_TRACE( 10, ("__kmp_launch_monitor: #5 terminate sig=%d\n", __kmp_global.g.g_abort ) );

        /* terminate the OpenMP worker threads */
        /* TODO this is not valid for sibling threads!!
         * the uber master might not be 0 anymore.. */
        for (gtid = 1; gtid < __kmp_threads_capacity; ++gtid)
            __kmp_terminate_thread( gtid );

        __kmp_cleanup();

        KA_TRACE( 10, ("__kmp_launch_monitor: #6 raise sig=%d\n", __kmp_global.g.g_abort ) );

        if (__kmp_global.g.g_abort > 0)
            raise( __kmp_global.g.g_abort );

    }

    KA_TRACE( 10, ("__kmp_launch_monitor: #7 exit\n" ) );

    return thr;
}
945
946void
947__kmp_create_worker( int gtid, kmp_info_t *th, size_t stack_size )
948{
949 pthread_t handle;
950 pthread_attr_t thread_attr;
951 int status;
952
953
954 th->th.th_info.ds.ds_gtid = gtid;
955
Jim Cownie4cc4bb42014-10-07 16:25:50 +0000956#if KMP_STATS_ENABLED
957 // sets up worker thread stats
958 __kmp_acquire_tas_lock(&__kmp_stats_lock, gtid);
959
960 // th->th.th_stats is used to transfer thread specific stats-pointer to __kmp_launch_worker
961 // So when thread is created (goes into __kmp_launch_worker) it will
962 // set it's __thread local pointer to th->th.th_stats
963 th->th.th_stats = __kmp_stats_list.push_back(gtid);
964 if(KMP_UBER_GTID(gtid)) {
965 __kmp_stats_start_time = tsc_tick_count::now();
966 __kmp_stats_thread_ptr = th->th.th_stats;
967 __kmp_stats_init();
968 KMP_START_EXPLICIT_TIMER(OMP_serial);
969 KMP_START_EXPLICIT_TIMER(OMP_start_end);
970 }
971 __kmp_release_tas_lock(&__kmp_stats_lock, gtid);
972
973#endif // KMP_STATS_ENABLED
974
Jim Cownie5e8470a2013-09-27 10:38:44 +0000975 if ( KMP_UBER_GTID(gtid) ) {
976 KA_TRACE( 10, ("__kmp_create_worker: uber thread (%d)\n", gtid ) );
977 th -> th.th_info.ds.ds_thread = pthread_self();
978 __kmp_set_stack_info( gtid, th );
979 __kmp_check_stack_overlap( th );
980 return;
981 }; // if
982
983 KA_TRACE( 10, ("__kmp_create_worker: try to create thread (%d)\n", gtid ) );
984
985 KMP_MB(); /* Flush all pending memory write invalidates. */
986
987#ifdef KMP_THREAD_ATTR
988 {
989 status = pthread_attr_init( &thread_attr );
990 if ( status != 0 ) {
991 __kmp_msg(
992 kmp_ms_fatal,
993 KMP_MSG( CantInitThreadAttrs ),
994 KMP_ERR( status ),
995 __kmp_msg_null
996 );
997 }; // if
998 status = pthread_attr_setdetachstate( & thread_attr, PTHREAD_CREATE_JOINABLE );
999 if ( status != 0 ) {
1000 __kmp_msg(
1001 kmp_ms_fatal,
1002 KMP_MSG( CantSetWorkerState ),
1003 KMP_ERR( status ),
1004 __kmp_msg_null
1005 );
1006 }; // if
1007
Andrey Churbanov368b70e2015-08-05 11:12:45 +00001008 /* Set stack size for this thread now.
1009 * The multiple of 2 is there because on some machines, requesting an unusual stacksize
1010 * causes the thread to have an offset before the dummy alloca() takes place to create the
1011 * offset. Since we want the user to have a sufficient stacksize AND support a stack offset, we
1012 * alloca() twice the offset so that the upcoming alloca() does not eliminate any premade
1013 * offset, and also gives the user the stack space they requested for all threads */
1014 stack_size += gtid * __kmp_stkoffset * 2;
Jim Cownie5e8470a2013-09-27 10:38:44 +00001015
1016 KA_TRACE( 10, ( "__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
1017 "__kmp_stksize = %lu bytes, final stacksize = %lu bytes\n",
1018 gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size ) );
1019
1020# ifdef _POSIX_THREAD_ATTR_STACKSIZE
1021 status = pthread_attr_setstacksize( & thread_attr, stack_size );
1022# ifdef KMP_BACKUP_STKSIZE
1023 if ( status != 0 ) {
1024 if ( ! __kmp_env_stksize ) {
1025 stack_size = KMP_BACKUP_STKSIZE + gtid * __kmp_stkoffset;
1026 __kmp_stksize = KMP_BACKUP_STKSIZE;
1027 KA_TRACE( 10, ("__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
1028 "__kmp_stksize = %lu bytes, (backup) final stacksize = %lu "
1029 "bytes\n",
1030 gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size )
1031 );
1032 status = pthread_attr_setstacksize( &thread_attr, stack_size );
1033 }; // if
1034 }; // if
1035# endif /* KMP_BACKUP_STKSIZE */
1036 if ( status != 0 ) {
1037 __kmp_msg(
1038 kmp_ms_fatal,
1039 KMP_MSG( CantSetWorkerStackSize, stack_size ),
1040 KMP_ERR( status ),
1041 KMP_HNT( ChangeWorkerStackSize ),
1042 __kmp_msg_null
1043 );
1044 }; // if
1045# endif /* _POSIX_THREAD_ATTR_STACKSIZE */
1046 }
1047#endif /* KMP_THREAD_ATTR */
1048
1049 {
1050 status = pthread_create( & handle, & thread_attr, __kmp_launch_worker, (void *) th );
1051 if ( status != 0 || ! handle ) { // ??? Why do we check handle??
1052#ifdef _POSIX_THREAD_ATTR_STACKSIZE
1053 if ( status == EINVAL ) {
1054 __kmp_msg(
1055 kmp_ms_fatal,
1056 KMP_MSG( CantSetWorkerStackSize, stack_size ),
1057 KMP_ERR( status ),
1058 KMP_HNT( IncreaseWorkerStackSize ),
1059 __kmp_msg_null
1060 );
1061 };
1062 if ( status == ENOMEM ) {
1063 __kmp_msg(
1064 kmp_ms_fatal,
1065 KMP_MSG( CantSetWorkerStackSize, stack_size ),
1066 KMP_ERR( status ),
1067 KMP_HNT( DecreaseWorkerStackSize ),
1068 __kmp_msg_null
1069 );
1070 };
1071#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
1072 if ( status == EAGAIN ) {
1073 __kmp_msg(
1074 kmp_ms_fatal,
1075 KMP_MSG( NoResourcesForWorkerThread ),
1076 KMP_ERR( status ),
1077 KMP_HNT( Decrease_NUM_THREADS ),
1078 __kmp_msg_null
1079 );
1080 }; // if
1081 KMP_SYSFAIL( "pthread_create", status );
1082 }; // if
1083
1084 th->th.th_info.ds.ds_thread = handle;
1085 }
1086
1087#ifdef KMP_THREAD_ATTR
1088 {
1089 status = pthread_attr_destroy( & thread_attr );
1090 if ( status ) {
1091 __kmp_msg(
1092 kmp_ms_warning,
1093 KMP_MSG( CantDestroyThreadAttrs ),
1094 KMP_ERR( status ),
1095 __kmp_msg_null
1096 );
1097 }; // if
1098 }
1099#endif /* KMP_THREAD_ATTR */
1100
1101 KMP_MB(); /* Flush all pending memory write invalidates. */
1102
1103 KA_TRACE( 10, ("__kmp_create_worker: done creating thread (%d)\n", gtid ) );
1104
1105} // __kmp_create_worker
1106
1107
/*
 * Create the monitor thread, which periodically bumps the global time stamp
 * used for blocktime expiration (see __kmp_launch_monitor).  The requested
 * stack size is grown (doubled, with a retry) if the OS rejects it and the
 * size was auto-selected rather than user-specified.
 *
 * NOTE(review): if KMP_THREAD_ATTR is NOT defined, `thread_attr` is passed
 * to pthread_create() below uninitialized — undefined behavior.  Confirm
 * that no supported configuration builds without KMP_THREAD_ATTR.
 */
void
__kmp_create_monitor( kmp_info_t *th )
{
    pthread_t           handle;
    pthread_attr_t      thread_attr;
    size_t              size;
    int                 status;
    int                 auto_adj_size = FALSE;  // TRUE => stack size was defaulted, so we may grow it on failure

    KA_TRACE( 10, ("__kmp_create_monitor: try to create monitor\n" ) );

    KMP_MB();       /* Flush all pending memory write invalidates.  */

    // Mark this kmp_info_t as the monitor before the thread starts.
    th->th.th_info.ds.ds_tid  = KMP_GTID_MONITOR;
    th->th.th_info.ds.ds_gtid = KMP_GTID_MONITOR;
    #if KMP_REAL_TIME_FIX
        // Sentinel -1: the monitor overwrites it once running, letting us
        // wait below until the monitor has actually started.
        TCW_4( __kmp_global.g.g_time.dt.t_value, -1 ); // Will use it for synchronization a bit later.
    #else
        TCW_4( __kmp_global.g.g_time.dt.t_value, 0 );
    #endif // KMP_REAL_TIME_FIX

    #ifdef KMP_THREAD_ATTR
        if ( __kmp_monitor_stksize == 0 ) {
            __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE;
            auto_adj_size = TRUE;
        }
        status = pthread_attr_init( &thread_attr );
        if ( status != 0 ) {
            __kmp_msg(
                kmp_ms_fatal,
                KMP_MSG( CantInitThreadAttrs ),
                KMP_ERR( status ),
                __kmp_msg_null
            );
        }; // if
        status = pthread_attr_setdetachstate( & thread_attr, PTHREAD_CREATE_JOINABLE );
        if ( status != 0 ) {
            __kmp_msg(
                kmp_ms_fatal,
                KMP_MSG( CantSetMonitorState ),
                KMP_ERR( status ),
                __kmp_msg_null
            );
        }; // if

        #ifdef _POSIX_THREAD_ATTR_STACKSIZE
            // `size` is only used in the trace message below.
            status = pthread_attr_getstacksize( & thread_attr, & size );
            KMP_CHECK_SYSFAIL( "pthread_attr_getstacksize", status );
        #else
            size = __kmp_sys_min_stksize;
        #endif /* _POSIX_THREAD_ATTR_STACKSIZE */
    #endif /* KMP_THREAD_ATTR */

    // Clamp the monitor stack size to at least the system minimum.
    if ( __kmp_monitor_stksize == 0 ) {
        __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE;
    }
    if ( __kmp_monitor_stksize < __kmp_sys_min_stksize ) {
        __kmp_monitor_stksize = __kmp_sys_min_stksize;
    }

    KA_TRACE( 10, ( "__kmp_create_monitor: default stacksize = %lu bytes,"
                    "requested stacksize = %lu bytes\n",
                    size, __kmp_monitor_stksize ) );

    retry:

    /* Set stack size for this thread now. */

    #ifdef _POSIX_THREAD_ATTR_STACKSIZE
        KA_TRACE( 10, ( "__kmp_create_monitor: setting stacksize = %lu bytes,",
                        __kmp_monitor_stksize ) );
        status = pthread_attr_setstacksize( & thread_attr, __kmp_monitor_stksize );
        if ( status != 0 ) {
            if ( auto_adj_size ) {
                // Auto-chosen size rejected by the OS: double and retry.
                __kmp_monitor_stksize *= 2;
                goto retry;
            }
            __kmp_msg(
                kmp_ms_warning,  // should this be fatal?  BB
                KMP_MSG( CantSetMonitorStackSize, (long int) __kmp_monitor_stksize ),
                KMP_ERR( status ),
                KMP_HNT( ChangeMonitorStackSize ),
                __kmp_msg_null
            );
        }; // if
    #endif /* _POSIX_THREAD_ATTR_STACKSIZE */

    status = pthread_create( &handle, & thread_attr, __kmp_launch_monitor, (void *) th );

    if ( status != 0 ) {
        #ifdef _POSIX_THREAD_ATTR_STACKSIZE
        if ( status == EINVAL ) {
            // EINVAL from pthread_create usually means the stack size was bad;
            // grow-and-retry once more (capped at 1 GiB) if it was auto-chosen.
            if ( auto_adj_size  && ( __kmp_monitor_stksize < (size_t)0x40000000 ) ) {
                __kmp_monitor_stksize *= 2;
                goto retry;
            }
            __kmp_msg(
                kmp_ms_fatal,
                KMP_MSG( CantSetMonitorStackSize, __kmp_monitor_stksize ),
                KMP_ERR( status ),
                KMP_HNT( IncreaseMonitorStackSize ),
                __kmp_msg_null
            );
        }; // if
        if ( status == ENOMEM ) {
            __kmp_msg(
                kmp_ms_fatal,
                KMP_MSG( CantSetMonitorStackSize, __kmp_monitor_stksize ),
                KMP_ERR( status ),
                KMP_HNT( DecreaseMonitorStackSize ),
                __kmp_msg_null
            );
        }; // if
        #endif /* _POSIX_THREAD_ATTR_STACKSIZE */
        if ( status == EAGAIN ) {
            __kmp_msg(
                kmp_ms_fatal,
                KMP_MSG( NoResourcesForMonitorThread ),
                KMP_ERR( status ),
                KMP_HNT( DecreaseNumberOfThreadsInUse ),
                __kmp_msg_null
            );
        }; // if
        KMP_SYSFAIL( "pthread_create", status );
    }; // if

    th->th.th_info.ds.ds_thread = handle;

    #if KMP_REAL_TIME_FIX
        // Wait for the monitor thread is really started and set its *priority*.
        // Spins until the monitor overwrites the -1 sentinel stored above.
        KMP_DEBUG_ASSERT( sizeof( kmp_uint32 ) == sizeof( __kmp_global.g.g_time.dt.t_value ) );
        __kmp_wait_yield_4(
            (kmp_uint32 volatile *) & __kmp_global.g.g_time.dt.t_value, -1, & __kmp_neq_4, NULL
        );
    #endif // KMP_REAL_TIME_FIX

    #ifdef KMP_THREAD_ATTR
        status = pthread_attr_destroy( & thread_attr );
        if ( status != 0 ) {
            // Non-fatal: the monitor is already running; just warn.
            __kmp_msg(    //
                kmp_ms_warning,
                KMP_MSG( CantDestroyThreadAttrs ),
                KMP_ERR( status ),
                __kmp_msg_null
            );
        }; // if
    #endif

    KMP_MB();       /* Flush all pending memory write invalidates.  */

    KA_TRACE( 10, ( "__kmp_create_monitor: monitor created %#.8lx\n", th->th.th_info.ds.ds_thread ) );

} // __kmp_create_monitor
1261
void
__kmp_exit_thread(
    int exit_status
) {
    /* Terminate the calling thread.  The integer status is carried through
       pthread_exit's void* return value via an intptr_t round-trip. */
    void *thread_result = (void *)(intptr_t) exit_status;
    pthread_exit( thread_result );
} // __kmp_exit_thread
1268
Jim Cownie07ea89f2014-09-03 11:10:54 +00001269void __kmp_resume_monitor();
1270
Jim Cownie5e8470a2013-09-27 10:38:44 +00001271void
1272__kmp_reap_monitor( kmp_info_t *th )
1273{
Jonathan Peyton7c4d66d2015-06-08 20:01:14 +00001274 int status;
Jim Cownie5e8470a2013-09-27 10:38:44 +00001275 void *exit_val;
1276
1277 KA_TRACE( 10, ("__kmp_reap_monitor: try to reap monitor thread with handle %#.8lx\n",
1278 th->th.th_info.ds.ds_thread ) );
1279
1280 // If monitor has been created, its tid and gtid should be KMP_GTID_MONITOR.
1281 // If both tid and gtid are 0, it means the monitor did not ever start.
1282 // If both tid and gtid are KMP_GTID_DNE, the monitor has been shut down.
1283 KMP_DEBUG_ASSERT( th->th.th_info.ds.ds_tid == th->th.th_info.ds.ds_gtid );
1284 if ( th->th.th_info.ds.ds_gtid != KMP_GTID_MONITOR ) {
1285 return;
1286 }; // if
1287
1288 KMP_MB(); /* Flush all pending memory write invalidates. */
1289
1290
1291 /* First, check to see whether the monitor thread exists. This could prevent a hang,
1292 but if the monitor dies after the pthread_kill call and before the pthread_join
1293 call, it will still hang. */
1294
1295 status = pthread_kill( th->th.th_info.ds.ds_thread, 0 );
1296 if (status == ESRCH) {
1297
1298 KA_TRACE( 10, ("__kmp_reap_monitor: monitor does not exist, returning\n") );
1299
1300 } else
1301 {
Jim Cownie07ea89f2014-09-03 11:10:54 +00001302 __kmp_resume_monitor(); // Wake up the monitor thread
Jim Cownie5e8470a2013-09-27 10:38:44 +00001303 status = pthread_join( th->th.th_info.ds.ds_thread, & exit_val);
1304 if (exit_val != th) {
1305 __kmp_msg(
1306 kmp_ms_fatal,
1307 KMP_MSG( ReapMonitorError ),
1308 KMP_ERR( status ),
1309 __kmp_msg_null
1310 );
1311 }
1312 }
1313
1314 th->th.th_info.ds.ds_tid = KMP_GTID_DNE;
1315 th->th.th_info.ds.ds_gtid = KMP_GTID_DNE;
1316
1317 KA_TRACE( 10, ("__kmp_reap_monitor: done reaping monitor thread with handle %#.8lx\n",
1318 th->th.th_info.ds.ds_thread ) );
1319
1320 KMP_MB(); /* Flush all pending memory write invalidates. */
1321
1322}
1323
1324void
1325__kmp_reap_worker( kmp_info_t *th )
1326{
1327 int status;
1328 void *exit_val;
1329
1330 KMP_MB(); /* Flush all pending memory write invalidates. */
1331
1332 KA_TRACE( 10, ("__kmp_reap_worker: try to reap T#%d\n", th->th.th_info.ds.ds_gtid ) );
1333
1334 /* First, check to see whether the worker thread exists. This could prevent a hang,
1335 but if the worker dies after the pthread_kill call and before the pthread_join
1336 call, it will still hang. */
1337
1338 {
1339 status = pthread_kill( th->th.th_info.ds.ds_thread, 0 );
1340 if (status == ESRCH) {
1341 KA_TRACE( 10, ("__kmp_reap_worker: worker T#%d does not exist, returning\n",
1342 th->th.th_info.ds.ds_gtid ) );
1343 }
1344 else {
1345 KA_TRACE( 10, ("__kmp_reap_worker: try to join with worker T#%d\n",
1346 th->th.th_info.ds.ds_gtid ) );
1347
1348 status = pthread_join( th->th.th_info.ds.ds_thread, & exit_val);
1349#ifdef KMP_DEBUG
1350 /* Don't expose these to the user until we understand when they trigger */
1351 if ( status != 0 ) {
1352 __kmp_msg(
1353 kmp_ms_fatal,
1354 KMP_MSG( ReapWorkerError ),
1355 KMP_ERR( status ),
1356 __kmp_msg_null
1357 );
1358 }
1359 if ( exit_val != th ) {
1360 KA_TRACE( 10, ( "__kmp_reap_worker: worker T#%d did not reap properly, "
1361 "exit_val = %p\n",
1362 th->th.th_info.ds.ds_gtid, exit_val ) );
1363 }
1364#endif /* KMP_DEBUG */
1365 }
1366 }
1367
1368 KA_TRACE( 10, ("__kmp_reap_worker: done reaping T#%d\n", th->th.th_info.ds.ds_gtid ) );
1369
1370 KMP_MB(); /* Flush all pending memory write invalidates. */
1371}
1372
1373
1374/* ------------------------------------------------------------------------ */
1375/* ------------------------------------------------------------------------ */
1376
1377#if KMP_HANDLE_SIGNALS
1378
1379
static void
__kmp_null_handler( int signo )
{
    /* Intentionally empty: installed where a SIG_IGN-style no-op action
       is required. */
    (void) signo;   // unused
} // __kmp_null_handler
1385
1386
/*
 * Stage-1 signal handler installed by __kmp_install_one_handler().
 * For recognized termination signals it records the signal number in
 * g_abort and raises g_done so all runtime threads (including the
 * monitor) begin shutting down.  Runs in async-signal context, so it
 * only performs simple flag stores and fences.
 */
static void
__kmp_team_handler( int signo )
{
    if ( __kmp_global.g.g_abort == 0 ) {
        /* Stage 1 signal handler, let's shut down all of the threads */
        #ifdef KMP_DEBUG
            __kmp_debug_printf( "__kmp_team_handler: caught signal = %d\n", signo );
        #endif
        switch ( signo ) {
            case SIGHUP  :
            case SIGINT  :
            case SIGQUIT :
            case SIGILL  :
            case SIGABRT :
            case SIGFPE  :
            case SIGBUS  :
            case SIGSEGV :
            #ifdef SIGSYS
                case SIGSYS :
            #endif
            case SIGTERM :
                if ( __kmp_debug_buf ) {
                    __kmp_dump_debug_buffer( );
                }; // if
                // Publish the abort reason before raising the done flag;
                // each store is fenced so observers see them in order.
                KMP_MB();       // Flush all pending memory write invalidates.
                TCW_4( __kmp_global.g.g_abort, signo );
                KMP_MB();       // Flush all pending memory write invalidates.
                TCW_4( __kmp_global.g.g_done, TRUE );
                KMP_MB();       // Flush all pending memory write invalidates.
                break;
            default:
                #ifdef KMP_DEBUG
                    __kmp_debug_printf( "__kmp_team_handler: unknown signal type" );
                #endif
                break;
        }; // switch
    }; // if
} // __kmp_team_handler
1425
1426
1427static
1428void __kmp_sigaction( int signum, const struct sigaction * act, struct sigaction * oldact ) {
1429 int rc = sigaction( signum, act, oldact );
1430 KMP_CHECK_SYSFAIL_ERRNO( "sigaction", rc );
1431}
1432
1433
/*
 * Install (or record) the disposition of one signal.
 *
 * parallel_init == 0: just save the current (initial/system) handler into
 * __kmp_sighldrs[sig] so later calls can tell whether the user installed
 * their own handler.
 * parallel_init != 0: install handler_func, but only keep it if the handler
 * we displaced is the one saved earlier — i.e. the user has not installed a
 * custom handler in the meantime; otherwise restore the user's handler.
 * Signals we actually hook are tracked in __kmp_sigset.
 */
static void
__kmp_install_one_handler( int sig, sig_func_t handler_func, int parallel_init )
{
    KMP_MB(); // Flush all pending memory write invalidates.
    KB_TRACE( 60, ( "__kmp_install_one_handler( %d, ..., %d )\n", sig, parallel_init ) );
    if ( parallel_init ) {
        struct sigaction new_action;
        struct sigaction old_action;
        new_action.sa_handler = handler_func;
        new_action.sa_flags   = 0;
        // Block all signals while the handler runs.
        sigfillset( & new_action.sa_mask );
        __kmp_sigaction( sig, & new_action, & old_action );
        if ( old_action.sa_handler == __kmp_sighldrs[ sig ].sa_handler ) {
            sigaddset( & __kmp_sigset, sig );
        } else {
            // Restore/keep user's handler if one previously installed.
            __kmp_sigaction( sig, & old_action, NULL );
        }; // if
    } else {
        // Save initial/system signal handlers to see if user handlers installed.
        __kmp_sigaction( sig, NULL, & __kmp_sighldrs[ sig ] );
    }; // if
    KMP_MB(); // Flush all pending memory write invalidates.
} // __kmp_install_one_handler
1458
1459
/*
 * Undo __kmp_install_one_handler for one signal: if we hooked it (tracked
 * in __kmp_sigset), reinstall the saved original handler.  If someone else
 * replaced our handler in the meantime, put their handler back instead of
 * clobbering it.
 */
static void
__kmp_remove_one_handler( int sig )
{
    KB_TRACE( 60, ( "__kmp_remove_one_handler( %d )\n", sig ) );
    if ( sigismember( & __kmp_sigset, sig ) ) {
        struct sigaction old;
        KMP_MB(); // Flush all pending memory write invalidates.
        __kmp_sigaction( sig, & __kmp_sighldrs[ sig ], & old );
        if ( ( old.sa_handler != __kmp_team_handler ) && ( old.sa_handler != __kmp_null_handler ) ) {
            // Restore the users signal handler.
            KB_TRACE( 10, ( "__kmp_remove_one_handler: oops, not our handler, restoring: sig=%d\n", sig ) );
            __kmp_sigaction( sig, & old, NULL );
        }; // if
        sigdelset( & __kmp_sigset, sig );
        KMP_MB(); // Flush all pending memory write invalidates.
    }; // if
} // __kmp_remove_one_handler
1477
1478
1479void
1480__kmp_install_signals( int parallel_init )
1481{
1482 KB_TRACE( 10, ( "__kmp_install_signals( %d )\n", parallel_init ) );
1483 if ( __kmp_handle_signals || ! parallel_init ) {
1484 // If ! parallel_init, we do not install handlers, just save original handlers.
1485 // Let us do it even __handle_signals is 0.
1486 sigemptyset( & __kmp_sigset );
1487 __kmp_install_one_handler( SIGHUP, __kmp_team_handler, parallel_init );
1488 __kmp_install_one_handler( SIGINT, __kmp_team_handler, parallel_init );
1489 __kmp_install_one_handler( SIGQUIT, __kmp_team_handler, parallel_init );
1490 __kmp_install_one_handler( SIGILL, __kmp_team_handler, parallel_init );
1491 __kmp_install_one_handler( SIGABRT, __kmp_team_handler, parallel_init );
1492 __kmp_install_one_handler( SIGFPE, __kmp_team_handler, parallel_init );
1493 __kmp_install_one_handler( SIGBUS, __kmp_team_handler, parallel_init );
1494 __kmp_install_one_handler( SIGSEGV, __kmp_team_handler, parallel_init );
1495 #ifdef SIGSYS
1496 __kmp_install_one_handler( SIGSYS, __kmp_team_handler, parallel_init );
1497 #endif // SIGSYS
1498 __kmp_install_one_handler( SIGTERM, __kmp_team_handler, parallel_init );
1499 #ifdef SIGPIPE
1500 __kmp_install_one_handler( SIGPIPE, __kmp_team_handler, parallel_init );
1501 #endif // SIGPIPE
1502 }; // if
1503} // __kmp_install_signals
1504
1505
1506void
1507__kmp_remove_signals( void )
1508{
1509 int sig;
1510 KB_TRACE( 10, ( "__kmp_remove_signals()\n" ) );
1511 for ( sig = 1; sig < NSIG; ++ sig ) {
1512 __kmp_remove_one_handler( sig );
1513 }; // for sig
1514} // __kmp_remove_signals
1515
1516
1517#endif // KMP_HANDLE_SIGNALS
1518
1519/* ------------------------------------------------------------------------ */
1520/* ------------------------------------------------------------------------ */
1521
void
__kmp_enable( int new_state )
{
    /* Restore the thread cancellability state saved by __kmp_disable();
       the previous state is asserted to be PTHREAD_CANCEL_DISABLE. */
    #ifdef KMP_CANCEL_THREADS
    int old_state;
    int status = pthread_setcancelstate( new_state, & old_state );
    KMP_CHECK_SYSFAIL( "pthread_setcancelstate", status );
    KMP_DEBUG_ASSERT( old_state == PTHREAD_CANCEL_DISABLE );
    #endif
}
1532
void
__kmp_disable( int * old_state )
{
    /* Disable thread cancellation, returning the previous state through
       *old_state so it can later be handed back to __kmp_enable(). */
    #ifdef KMP_CANCEL_THREADS
    int status = pthread_setcancelstate( PTHREAD_CANCEL_DISABLE, old_state );
    KMP_CHECK_SYSFAIL( "pthread_setcancelstate", status );
    #endif
}
1542
1543/* ------------------------------------------------------------------------ */
1544/* ------------------------------------------------------------------------ */
1545
/* pthread_atfork "prepare" hook (runs in the parent just before fork()).
   Registered in __kmp_register_atfork(); intentionally does nothing. */
static void
__kmp_atfork_prepare (void)
{
    /*  nothing to do  */
}
1551
/* pthread_atfork "parent" hook (runs in the parent just after fork()).
   Registered in __kmp_register_atfork(); intentionally does nothing. */
static void
__kmp_atfork_parent (void)
{
    /*  nothing to do  */
}
1557
1558/*
1559 Reset the library so execution in the child starts "all over again" with
1560 clean data structures in initial states. Don't worry about freeing memory
1561 allocated by parent, just abandon it to be safe.
1562*/
/* pthread_atfork "child" hook: reset the library so execution in the child
   starts over with clean state.  Memory owned by the parent's structures is
   deliberately abandoned (leaked) rather than freed, since only
   async-signal-safe work should happen here. */
static void
__kmp_atfork_child (void)
{
    /* TODO make sure this is done right for nested/sibling */
    // ATT:  Memory leaks are here? TODO: Check it and fix.
    /* KMP_ASSERT( 0 ); */

    ++__kmp_fork_count;

    // Knock every initialization flag back to "not initialized" so the
    // child re-bootstraps lazily on first use.
    __kmp_init_runtime = FALSE;
    __kmp_init_monitor = 0;
    __kmp_init_parallel = FALSE;
    __kmp_init_middle = FALSE;
    __kmp_init_serial = FALSE;
    TCW_4(__kmp_init_gtid, FALSE);
    __kmp_init_common = FALSE;

    TCW_4(__kmp_init_user_locks, FALSE);
#if ! KMP_USE_DYNAMIC_LOCK
    // Abandon the parent's user-lock table; entry 0 is reserved.
    __kmp_user_lock_table.used = 1;
    __kmp_user_lock_table.allocated = 0;
    __kmp_user_lock_table.table = NULL;
    __kmp_lock_blocks = NULL;
#endif

    __kmp_all_nth = 0;
    TCW_4(__kmp_nth, 0);

    /* Must actually zero all the *cache arguments passed to __kmpc_threadprivate here
       so threadprivate doesn't use stale data */
    KA_TRACE( 10, ( "__kmp_atfork_child: checking cache address list %p\n",
                 __kmp_threadpriv_cache_list ) );

    while ( __kmp_threadpriv_cache_list != NULL ) {

        if ( *__kmp_threadpriv_cache_list -> addr != NULL ) {
            KC_TRACE( 50, ( "__kmp_atfork_child: zeroing cache at address %p\n",
                        &(*__kmp_threadpriv_cache_list -> addr) ) );

            *__kmp_threadpriv_cache_list -> addr = NULL;
        }
        __kmp_threadpriv_cache_list = __kmp_threadpriv_cache_list -> next;
    }

    __kmp_init_runtime = FALSE;

    /* reset statically initialized locks */
    __kmp_init_bootstrap_lock( &__kmp_initz_lock );
    __kmp_init_bootstrap_lock( &__kmp_stdio_lock );
    __kmp_init_bootstrap_lock( &__kmp_console_lock );

    /* This is necessary to make sure no stale data is left around */
    /* AC: customers complain that we use unsafe routines in the atfork
       handler. Mathworks: dlsym() is unsafe. We call dlsym and dlopen
       in dynamic_link when check the presence of shared tbbmalloc library.
       Suggestion is to make the library initialization lazier, similar
       to what done for __kmpc_begin(). */
    // TODO: synchronize all static initializations with regular library
    // startup; look at kmp_global.c and etc.
    //__kmp_internal_begin ();

}
1625
1626void
1627__kmp_register_atfork(void) {
1628 if ( __kmp_need_register_atfork ) {
1629 int status = pthread_atfork( __kmp_atfork_prepare, __kmp_atfork_parent, __kmp_atfork_child );
1630 KMP_CHECK_SYSFAIL( "pthread_atfork", status );
1631 __kmp_need_register_atfork = FALSE;
1632 }
1633}
1634
1635void
1636__kmp_suspend_initialize( void )
1637{
1638 int status;
1639 status = pthread_mutexattr_init( &__kmp_suspend_mutex_attr );
1640 KMP_CHECK_SYSFAIL( "pthread_mutexattr_init", status );
1641 status = pthread_condattr_init( &__kmp_suspend_cond_attr );
1642 KMP_CHECK_SYSFAIL( "pthread_condattr_init", status );
1643}
1644
/*
 * Lazily create the suspend condition variable and mutex for `th`.
 * th_suspend_init_count is compared against __kmp_fork_count so the
 * objects are re-created once per fork()ed process instance.
 */
static void
__kmp_suspend_initialize_thread( kmp_info_t *th )
{
    if ( th->th.th_suspend_init_count <= __kmp_fork_count ) {
        /* this means we haven't initialized the suspension pthread objects for this thread
           in this instance of the process */
        int     status;
        status = pthread_cond_init( &th->th.th_suspend_cv.c_cond, &__kmp_suspend_cond_attr );
        KMP_CHECK_SYSFAIL( "pthread_cond_init", status );
        status = pthread_mutex_init( &th->th.th_suspend_mx.m_mutex, & __kmp_suspend_mutex_attr );
        KMP_CHECK_SYSFAIL( "pthread_mutex_init", status );
        // volatile store: publish "initialized for this process instance".
        *(volatile int*)&th->th.th_suspend_init_count = __kmp_fork_count + 1;
    };
}
1659
/*
 * Destroy the suspend condition variable and mutex for `th` if they were
 * created in this process instance (counterpart of
 * __kmp_suspend_initialize_thread).  EBUSY is tolerated: the objects may
 * still be in use during teardown.
 */
void
__kmp_suspend_uninitialize_thread( kmp_info_t *th )
{
    if(th->th.th_suspend_init_count > __kmp_fork_count) {
        /* this means we have initialize the suspension pthread objects for this thread
           in this instance of the process */
        int status;

        status = pthread_cond_destroy( &th->th.th_suspend_cv.c_cond );
        if ( status != 0 && status != EBUSY ) {
            KMP_SYSFAIL( "pthread_cond_destroy", status );
        };
        status = pthread_mutex_destroy( &th->th.th_suspend_mx.m_mutex );
        if ( status != 0 && status != EBUSY ) {
            KMP_SYSFAIL( "pthread_mutex_destroy", status );
        };
        // Bring the count back down so a later re-init is possible.
        --th->th.th_suspend_init_count;
        KMP_DEBUG_ASSERT(th->th.th_suspend_init_count == __kmp_fork_count);
    }
}
1680
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001681/* This routine puts the calling thread to sleep after setting the
1682 * sleep bit for the indicated flag variable to true.
Jim Cownie5e8470a2013-09-27 10:38:44 +00001683 */
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001684template <class C>
1685static inline void __kmp_suspend_template( int th_gtid, C *flag )
Jim Cownie5e8470a2013-09-27 10:38:44 +00001686{
Jonathan Peyton45be4502015-08-11 21:36:41 +00001687 KMP_TIME_DEVELOPER_BLOCK(USER_suspend);
Jim Cownie5e8470a2013-09-27 10:38:44 +00001688 kmp_info_t *th = __kmp_threads[th_gtid];
1689 int status;
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001690 typename C::flag_t old_spin;
Jim Cownie5e8470a2013-09-27 10:38:44 +00001691
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001692 KF_TRACE( 30, ("__kmp_suspend_template: T#%d enter for flag = %p\n", th_gtid, flag->get() ) );
Jim Cownie5e8470a2013-09-27 10:38:44 +00001693
1694 __kmp_suspend_initialize_thread( th );
1695
1696 status = pthread_mutex_lock( &th->th.th_suspend_mx.m_mutex );
1697 KMP_CHECK_SYSFAIL( "pthread_mutex_lock", status );
1698
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001699 KF_TRACE( 10, ( "__kmp_suspend_template: T#%d setting sleep bit for spin(%p)\n",
1700 th_gtid, flag->get() ) );
Jim Cownie5e8470a2013-09-27 10:38:44 +00001701
1702 /* TODO: shouldn't this use release semantics to ensure that __kmp_suspend_initialize_thread
1703 gets called first?
1704 */
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001705 old_spin = flag->set_sleeping();
Jim Cownie5e8470a2013-09-27 10:38:44 +00001706
Jonathan Peytone03b62f2015-10-08 18:49:40 +00001707 KF_TRACE( 5, ( "__kmp_suspend_template: T#%d set sleep bit for spin(%p)==%x, was %x\n",
1708 th_gtid, flag->get(), *(flag->get()), old_spin ) );
Jim Cownie5e8470a2013-09-27 10:38:44 +00001709
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001710 if ( flag->done_check_val(old_spin) ) {
1711 old_spin = flag->unset_sleeping();
1712 KF_TRACE( 5, ( "__kmp_suspend_template: T#%d false alarm, reset sleep bit for spin(%p)\n",
1713 th_gtid, flag->get()) );
Jim Cownie5e8470a2013-09-27 10:38:44 +00001714 } else {
Jim Cownie5e8470a2013-09-27 10:38:44 +00001715 /* Encapsulate in a loop as the documentation states that this may
1716 * "with low probability" return when the condition variable has
1717 * not been signaled or broadcast
1718 */
1719 int deactivated = FALSE;
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001720 TCW_PTR(th->th.th_sleep_loc, (void *)flag);
1721 while ( flag->is_sleeping() ) {
Jim Cownie5e8470a2013-09-27 10:38:44 +00001722#ifdef DEBUG_SUSPEND
1723 char buffer[128];
1724 __kmp_suspend_count++;
1725 __kmp_print_cond( buffer, &th->th.th_suspend_cv );
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001726 __kmp_printf( "__kmp_suspend_template: suspending T#%d: %s\n", th_gtid, buffer );
Jim Cownie5e8470a2013-09-27 10:38:44 +00001727#endif
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001728 // Mark the thread as no longer active (only in the first iteration of the loop).
Jim Cownie5e8470a2013-09-27 10:38:44 +00001729 if ( ! deactivated ) {
1730 th->th.th_active = FALSE;
1731 if ( th->th.th_active_in_pool ) {
1732 th->th.th_active_in_pool = FALSE;
1733 KMP_TEST_THEN_DEC32(
1734 (kmp_int32 *) &__kmp_thread_pool_active_nth );
1735 KMP_DEBUG_ASSERT( TCR_4(__kmp_thread_pool_active_nth) >= 0 );
1736 }
1737 deactivated = TRUE;
1738
1739
1740 }
1741
1742#if USE_SUSPEND_TIMEOUT
1743 struct timespec now;
1744 struct timeval tval;
1745 int msecs;
1746
1747 status = gettimeofday( &tval, NULL );
1748 KMP_CHECK_SYSFAIL_ERRNO( "gettimeofday", status );
1749 TIMEVAL_TO_TIMESPEC( &tval, &now );
1750
1751 msecs = (4*__kmp_dflt_blocktime) + 200;
1752 now.tv_sec += msecs / 1000;
1753 now.tv_nsec += (msecs % 1000)*1000;
1754
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001755 KF_TRACE( 15, ( "__kmp_suspend_template: T#%d about to perform pthread_cond_timedwait\n",
Jim Cownie5e8470a2013-09-27 10:38:44 +00001756 th_gtid ) );
1757 status = pthread_cond_timedwait( &th->th.th_suspend_cv.c_cond, &th->th.th_suspend_mx.m_mutex, & now );
1758#else
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001759 KF_TRACE( 15, ( "__kmp_suspend_template: T#%d about to perform pthread_cond_wait\n",
Jonathan Peyton1bd61b42015-10-08 19:44:16 +00001760 th_gtid ) );
Jim Cownie5e8470a2013-09-27 10:38:44 +00001761 status = pthread_cond_wait( &th->th.th_suspend_cv.c_cond, &th->th.th_suspend_mx.m_mutex );
1762#endif
1763
1764 if ( (status != 0) && (status != EINTR) && (status != ETIMEDOUT) ) {
1765 KMP_SYSFAIL( "pthread_cond_wait", status );
1766 }
1767#ifdef KMP_DEBUG
1768 if (status == ETIMEDOUT) {
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001769 if ( flag->is_sleeping() ) {
1770 KF_TRACE( 100, ( "__kmp_suspend_template: T#%d timeout wakeup\n", th_gtid ) );
Jim Cownie5e8470a2013-09-27 10:38:44 +00001771 } else {
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001772 KF_TRACE( 2, ( "__kmp_suspend_template: T#%d timeout wakeup, sleep bit not set!\n",
Jim Cownie5e8470a2013-09-27 10:38:44 +00001773 th_gtid ) );
1774 }
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001775 } else if ( flag->is_sleeping() ) {
1776 KF_TRACE( 100, ( "__kmp_suspend_template: T#%d spurious wakeup\n", th_gtid ) );
Jim Cownie5e8470a2013-09-27 10:38:44 +00001777 }
1778#endif
Jim Cownie5e8470a2013-09-27 10:38:44 +00001779 } // while
1780
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001781 // Mark the thread as active again (if it was previous marked as inactive)
Jim Cownie5e8470a2013-09-27 10:38:44 +00001782 if ( deactivated ) {
1783 th->th.th_active = TRUE;
1784 if ( TCR_4(th->th.th_in_pool) ) {
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001785 KMP_TEST_THEN_INC32( (kmp_int32 *) &__kmp_thread_pool_active_nth );
Jim Cownie5e8470a2013-09-27 10:38:44 +00001786 th->th.th_active_in_pool = TRUE;
1787 }
1788 }
1789 }
1790
1791#ifdef DEBUG_SUSPEND
1792 {
1793 char buffer[128];
1794 __kmp_print_cond( buffer, &th->th.th_suspend_cv);
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001795 __kmp_printf( "__kmp_suspend_template: T#%d has awakened: %s\n", th_gtid, buffer );
Jim Cownie5e8470a2013-09-27 10:38:44 +00001796 }
1797#endif
1798
1799
1800 status = pthread_mutex_unlock( &th->th.th_suspend_mx.m_mutex );
1801 KMP_CHECK_SYSFAIL( "pthread_mutex_unlock", status );
1802
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001803 KF_TRACE( 30, ("__kmp_suspend_template: T#%d exit\n", th_gtid ) );
1804}
1805
1806void __kmp_suspend_32(int th_gtid, kmp_flag_32 *flag) {
1807 __kmp_suspend_template(th_gtid, flag);
1808}
1809void __kmp_suspend_64(int th_gtid, kmp_flag_64 *flag) {
1810 __kmp_suspend_template(th_gtid, flag);
1811}
1812void __kmp_suspend_oncore(int th_gtid, kmp_flag_oncore *flag) {
1813 __kmp_suspend_template(th_gtid, flag);
Jim Cownie5e8470a2013-09-27 10:38:44 +00001814}
1815
1816
1817/* This routine signals the thread specified by target_gtid to wake up
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001818 * after setting the sleep bit indicated by the flag argument to FALSE.
1819 * The target thread must already have called __kmp_suspend_template()
Jim Cownie5e8470a2013-09-27 10:38:44 +00001820 */
// Wake the thread target_gtid, which must have parked itself via
// __kmp_suspend_template().  Both sides synchronize through the target's
// th_suspend_mx / th_suspend_cv pair.  'flag' may be NULL (when called from
// __kmp_null_resume_wrapper), in which case the flag the target registered in
// th_sleep_loc is used instead.
template <class C>
static inline void __kmp_resume_template( int target_gtid, C *flag )
{
    KMP_TIME_DEVELOPER_BLOCK(USER_resume);
    kmp_info_t *th = __kmp_threads[target_gtid];
    int status;

#ifdef KMP_DEBUG
    int gtid = TCR_4(__kmp_init_gtid) ? __kmp_get_gtid() : -1; // caller's gtid, traces only
#endif

    KF_TRACE( 30, ( "__kmp_resume_template: T#%d wants to wakeup T#%d enter\n", gtid, target_gtid ) );
    KMP_DEBUG_ASSERT( gtid != target_gtid ); // a thread never resumes itself

    __kmp_suspend_initialize_thread( th );

    // Hold the target's suspend mutex so the flag checks below and the
    // pthread_cond_signal() cannot race with the target entering
    // pthread_cond_wait() in __kmp_suspend_template().
    status = pthread_mutex_lock( &th->th.th_suspend_mx.m_mutex );
    KMP_CHECK_SYSFAIL( "pthread_mutex_lock", status );

    if (!flag) { // coming from __kmp_null_resume_wrapper
        flag = (C *)th->th.th_sleep_loc;
    }

    // First, check if the flag is null or its type has changed. If so, someone else woke it up.
    if (!flag || flag->get_type() != flag->get_ptr_type()) { // get_ptr_type simply shows what flag was cast to
        KF_TRACE( 5, ( "__kmp_resume_template: T#%d exiting, thread T#%d already awake: flag(%p)\n",
                       gtid, target_gtid, NULL ) );
        status = pthread_mutex_unlock( &th->th.th_suspend_mx.m_mutex );
        KMP_CHECK_SYSFAIL( "pthread_mutex_unlock", status );
        return;
    }
    else { // if multiple threads are sleeping, flag should be internally referring to a specific thread here
        // Atomically clear the sleep bit; the previous value tells us whether
        // the target was actually sleeping on this flag.
        typename C::flag_t old_spin = flag->unset_sleeping();
        if ( ! flag->is_sleeping_val(old_spin) ) {
            // Sleep bit was already clear: target is (or will be) awake; nothing to signal.
            KF_TRACE( 5, ( "__kmp_resume_template: T#%d exiting, thread T#%d already awake: flag(%p): "
                           "%u => %u\n",
                           gtid, target_gtid, flag->get(), old_spin, *flag->get() ) );

            status = pthread_mutex_unlock( &th->th.th_suspend_mx.m_mutex );
            KMP_CHECK_SYSFAIL( "pthread_mutex_unlock", status );
            return;
        }
        KF_TRACE( 5, ( "__kmp_resume_template: T#%d about to wakeup T#%d, reset sleep bit for flag's loc(%p): "
                       "%u => %u\n",
                       gtid, target_gtid, flag->get(), old_spin, *flag->get() ) );
    }
    TCW_PTR(th->th.th_sleep_loc, NULL); // target is no longer registered as sleeping on a flag


#ifdef DEBUG_SUSPEND
    {
        char buffer[128];
        __kmp_print_cond( buffer, &th->th.th_suspend_cv );
        __kmp_printf( "__kmp_resume_template: T#%d resuming T#%d: %s\n", gtid, target_gtid, buffer );
    }
#endif


    status = pthread_cond_signal( &th->th.th_suspend_cv.c_cond );
    KMP_CHECK_SYSFAIL( "pthread_cond_signal", status );
    status = pthread_mutex_unlock( &th->th.th_suspend_mx.m_mutex );
    KMP_CHECK_SYSFAIL( "pthread_mutex_unlock", status );
    KF_TRACE( 30, ( "__kmp_resume_template: T#%d exiting after signaling wake up for T#%d\n",
                    gtid, target_gtid ) );
}
1886
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001887void __kmp_resume_32(int target_gtid, kmp_flag_32 *flag) {
1888 __kmp_resume_template(target_gtid, flag);
1889}
1890void __kmp_resume_64(int target_gtid, kmp_flag_64 *flag) {
1891 __kmp_resume_template(target_gtid, flag);
1892}
1893void __kmp_resume_oncore(int target_gtid, kmp_flag_oncore *flag) {
1894 __kmp_resume_template(target_gtid, flag);
1895}
1896
// Wake the monitor thread.  Unlike worker threads, the monitor sleeps on the
// global __kmp_wait_mx / __kmp_wait_cv pair, so no flag bookkeeping is needed:
// take the mutex, signal the condition variable, release the mutex.
void
__kmp_resume_monitor()
{
    int status;
#ifdef KMP_DEBUG
    int gtid = TCR_4(__kmp_init_gtid) ? __kmp_get_gtid() : -1; // caller's gtid, traces only
    KF_TRACE( 30, ( "__kmp_resume_monitor: T#%d wants to wakeup T#%d enter\n",
                    gtid, KMP_GTID_MONITOR ) );
    KMP_DEBUG_ASSERT( gtid != KMP_GTID_MONITOR ); // monitor never resumes itself
#endif
    status = pthread_mutex_lock( &__kmp_wait_mx.m_mutex );
    KMP_CHECK_SYSFAIL( "pthread_mutex_lock", status );
#ifdef DEBUG_SUSPEND
    {
        char buffer[128];
        __kmp_print_cond( buffer, &__kmp_wait_cv.c_cond );
        __kmp_printf( "__kmp_resume_monitor: T#%d resuming T#%d: %s\n", gtid, KMP_GTID_MONITOR, buffer );
    }
#endif
    // Signal while holding the mutex so the wake-up cannot be lost between the
    // monitor's predicate check and its pthread_cond_wait().
    status = pthread_cond_signal( &__kmp_wait_cv.c_cond );
    KMP_CHECK_SYSFAIL( "pthread_cond_signal", status );
    status = pthread_mutex_unlock( &__kmp_wait_mx.m_mutex );
    KMP_CHECK_SYSFAIL( "pthread_mutex_unlock", status );
    KF_TRACE( 30, ( "__kmp_resume_monitor: T#%d exiting after signaling wake up for T#%d\n",
                    gtid, KMP_GTID_MONITOR ) );
}
Jim Cownie5e8470a2013-09-27 10:38:44 +00001923
1924/* ------------------------------------------------------------------------ */
1925/* ------------------------------------------------------------------------ */
1926
1927void
1928__kmp_yield( int cond )
1929{
1930 if (cond && __kmp_yielding_on) {
1931 sched_yield();
1932 }
1933}
1934
1935/* ------------------------------------------------------------------------ */
1936/* ------------------------------------------------------------------------ */
1937
1938void
1939__kmp_gtid_set_specific( int gtid )
1940{
1941 int status;
1942 KMP_ASSERT( __kmp_init_runtime );
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001943 status = pthread_setspecific( __kmp_gtid_threadprivate_key, (void*)(intptr_t)(gtid+1) );
Jim Cownie5e8470a2013-09-27 10:38:44 +00001944 KMP_CHECK_SYSFAIL( "pthread_setspecific", status );
1945}
1946
1947int
1948__kmp_gtid_get_specific()
1949{
1950 int gtid;
1951 if ( !__kmp_init_runtime ) {
1952 KA_TRACE( 50, ("__kmp_get_specific: runtime shutdown, returning KMP_GTID_SHUTDOWN\n" ) );
1953 return KMP_GTID_SHUTDOWN;
1954 }
1955 gtid = (int)(size_t)pthread_getspecific( __kmp_gtid_threadprivate_key );
1956 if ( gtid == 0 ) {
1957 gtid = KMP_GTID_DNE;
1958 }
1959 else {
1960 gtid--;
1961 }
1962 KA_TRACE( 50, ("__kmp_gtid_get_specific: key:%d gtid:%d\n",
1963 __kmp_gtid_threadprivate_key, gtid ));
1964 return gtid;
1965}
1966
1967/* ------------------------------------------------------------------------ */
1968/* ------------------------------------------------------------------------ */
1969
/*
 * Return the CPU time consumed by this process (user time plus user time of
 * waited-for children), in seconds.
 *
 * Bug fix: times() reports values in clock ticks whose rate is
 * sysconf(_SC_CLK_TCK), NOT CLOCKS_PER_SEC.  CLOCKS_PER_SEC scales clock()
 * and on Linux is fixed at 1000000, while _SC_CLK_TCK is typically 100, so
 * the old divisor under-reported CPU time by a factor of ~10000.
 */
double
__kmp_read_cpu_time( void )
{
    struct tms buffer;
    long ticks_per_sec;

    times( & buffer );

    ticks_per_sec = sysconf( _SC_CLK_TCK );
    if ( ticks_per_sec <= 0 ) {
        // sysconf failed; fall back to the historical divisor rather than divide by zero.
        ticks_per_sec = CLOCKS_PER_SEC;
    }
    return (buffer.tms_utime + buffer.tms_cutime) / (double) ticks_per_sec;
}
1980
1981int
1982__kmp_read_system_info( struct kmp_sys_info *info )
1983{
1984 int status;
1985 struct rusage r_usage;
1986
1987 memset( info, 0, sizeof( *info ) );
1988
1989 status = getrusage( RUSAGE_SELF, &r_usage);
1990 KMP_CHECK_SYSFAIL_ERRNO( "getrusage", status );
1991
1992 info->maxrss = r_usage.ru_maxrss; /* the maximum resident set size utilized (in kilobytes) */
1993 info->minflt = r_usage.ru_minflt; /* the number of page faults serviced without any I/O */
1994 info->majflt = r_usage.ru_majflt; /* the number of page faults serviced that required I/O */
1995 info->nswap = r_usage.ru_nswap; /* the number of times a process was "swapped" out of memory */
1996 info->inblock = r_usage.ru_inblock; /* the number of times the file system had to perform input */
1997 info->oublock = r_usage.ru_oublock; /* the number of times the file system had to perform output */
1998 info->nvcsw = r_usage.ru_nvcsw; /* the number of times a context switch was voluntarily */
1999 info->nivcsw = r_usage.ru_nivcsw; /* the number of times a context switch was forced */
2000
2001 return (status != 0);
2002}
2003
2004/* ------------------------------------------------------------------------ */
2005/* ------------------------------------------------------------------------ */
2006
Jim Cownie5e8470a2013-09-27 10:38:44 +00002007void
2008__kmp_read_system_time( double *delta )
2009{
2010 double t_ns;
2011 struct timeval tval;
2012 struct timespec stop;
2013 int status;
2014
2015 status = gettimeofday( &tval, NULL );
2016 KMP_CHECK_SYSFAIL_ERRNO( "gettimeofday", status );
2017 TIMEVAL_TO_TIMESPEC( &tval, &stop );
2018 t_ns = TS2NS(stop) - TS2NS(__kmp_sys_timer_data.start);
2019 *delta = (t_ns * 1e-9);
2020}
2021
2022void
2023__kmp_clear_system_time( void )
2024{
2025 struct timeval tval;
2026 int status;
2027 status = gettimeofday( &tval, NULL );
2028 KMP_CHECK_SYSFAIL_ERRNO( "gettimeofday", status );
2029 TIMEVAL_TO_TIMESPEC( &tval, &__kmp_sys_timer_data.start );
2030}
2031
2032/* ------------------------------------------------------------------------ */
2033/* ------------------------------------------------------------------------ */
2034
2035#ifdef BUILD_TV
2036
// Record a threadprivate mapping (global_addr -> per-thread copy thread_addr)
// for debugger support (BUILD_TV) by prepending a node to the thread's
// tv_data list.  The first node ever added for a thread is also published
// through the __kmp_tv_key TLS key so the debugger can locate the list.
void
__kmp_tv_threadprivate_store( kmp_info_t *th, void *global_addr, void *thread_addr )
{
    struct tv_data *p;

    p = (struct tv_data *) __kmp_allocate( sizeof( *p ) );

    p->u.tp.global_addr = global_addr;
    p->u.tp.thread_addr = thread_addr;

    // NOTE(review): (void *)1 appears to tag the node as a threadprivate
    // entry — confirm against the tv_data declaration in the headers.
    p->type = (void *) 1;

    // Push onto the head of the thread's singly-linked tv_data list.
    p->next = th->th.th_local.tv_data;
    th->th.th_local.tv_data = p;

    if ( p->next == 0 ) {
        // List was empty, i.e. this is the thread's first entry: expose the
        // list head via TLS.
        int rc = pthread_setspecific( __kmp_tv_key, p );
        KMP_CHECK_SYSFAIL( "pthread_setspecific", rc );
    }
}
2057
2058#endif /* BUILD_TV */
2059
2060/* ------------------------------------------------------------------------ */
2061/* ------------------------------------------------------------------------ */
2062
// Return the number of processors available to the system, with a floor of 2
// when the OS reports zero or an error.  Runs before trace support is
// initialized, so it must not use KA_TRACE().
static int
__kmp_get_xproc( void ) {

    int r = 0;

    #if KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD

        // Number of processors currently online.
        r = sysconf( _SC_NPROCESSORS_ONLN );

    #elif KMP_OS_DARWIN

        // Bug C77011 High "OpenMP Threads and number of active cores".

        // Find the number of available CPUs.
        kern_return_t rc;
        host_basic_info_data_t info;
        mach_msg_type_number_t num = HOST_BASIC_INFO_COUNT;
        rc = host_info( mach_host_self(), HOST_BASIC_INFO, (host_info_t) & info, & num );
        if ( rc == 0 && num == HOST_BASIC_INFO_COUNT ) {
            // Cannot use KA_TRACE() here because this code works before trace support is
            // initialized.
            r = info.avail_cpus;
        } else {
            KMP_WARNING( CantGetNumAvailCPU );
            KMP_INFORM( AssumedNumCPU );
        }; // if

    #else

        #error "Unknown or unsupported OS."

    #endif

    return r > 0 ? r : 2; /* guess value of 2 if OS told us 0 */

} // __kmp_get_xproc
2099
/*
 * Open 'path' (binary mode) and scan it with vfscanf() according to 'format'.
 * Returns the number of input items successfully assigned (vfscanf's result),
 * or 0 if the file cannot be opened.
 *
 * Fixes: every va_start() must be paired with va_end() (C standard 7.16.1);
 * the original never called va_end() and additionally left the va_list
 * dangling on the early-return path.  va_start is now issued only after
 * fopen succeeds, and va_end is always executed.
 */
int
__kmp_read_from_file( char const *path, char const *format, ... )
{
    int result;
    va_list args;

    FILE *f = fopen(path, "rb");
    if ( f == NULL )
        return 0;

    va_start(args, format);
    result = vfscanf(f, format, args);
    va_end(args);
    fclose(f);

    return result;
}
Jim Cownie5e8470a2013-09-27 10:38:44 +00002115
// One-time initialization of the Linux* OS layer: query CPU info and
// processor count, probe pthread limits, create the gtid TLS key, and set up
// the global monitor wait mutex/condvar.  Idempotent: returns immediately if
// already initialized.  Not itself thread-safe; presumably guarded by the
// caller (bootstrap lock) — confirm at call sites.
void
__kmp_runtime_initialize( void )
{
    int status;
    pthread_mutexattr_t mutex_attr;
    pthread_condattr_t cond_attr;

    if ( __kmp_init_runtime ) {
        return;
    }; // if

    #if ( KMP_ARCH_X86 || KMP_ARCH_X86_64 )
    if ( ! __kmp_cpuinfo.initialized ) {
        __kmp_query_cpuid( &__kmp_cpuinfo );
    }; // if
    #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

    __kmp_xproc = __kmp_get_xproc();

    if ( sysconf( _SC_THREADS ) ) {

        /* Query the maximum number of threads */
        __kmp_sys_max_nth = sysconf( _SC_THREAD_THREADS_MAX );
        if ( __kmp_sys_max_nth == -1 ) {
            /* Unlimited threads for NPTL */
            __kmp_sys_max_nth = INT_MAX;
        }
        else if ( __kmp_sys_max_nth <= 1 ) {
            /* Can't tell, just use PTHREAD_THREADS_MAX */
            __kmp_sys_max_nth = KMP_MAX_NTH;
        }

        /* Query the minimum stack size */
        __kmp_sys_min_stksize = sysconf( _SC_THREAD_STACK_MIN );
        if ( __kmp_sys_min_stksize <= 1 ) {
            __kmp_sys_min_stksize = KMP_MIN_STKSIZE;
        }
    }

    /* Set up minimum number of threads to switch to TLS gtid */
    __kmp_tls_gtid_min = KMP_TLS_GTID_MIN;

    #ifdef BUILD_TV
    {
        int rc = pthread_key_create( & __kmp_tv_key, 0 );
        KMP_CHECK_SYSFAIL( "pthread_key_create", rc );
    }
    #endif

    // The key destructor tears down a thread's runtime state when the thread exits.
    status = pthread_key_create( &__kmp_gtid_threadprivate_key, __kmp_internal_end_dest );
    KMP_CHECK_SYSFAIL( "pthread_key_create", status );
    // NOTE(review): mutex_attr/cond_attr are initialized with defaults and
    // never _destroy()ed; harmless on common implementations but technically
    // a (one-time, bounded) omission.
    status = pthread_mutexattr_init( & mutex_attr );
    KMP_CHECK_SYSFAIL( "pthread_mutexattr_init", status );
    status = pthread_mutex_init( & __kmp_wait_mx.m_mutex, & mutex_attr );
    KMP_CHECK_SYSFAIL( "pthread_mutex_init", status );
    status = pthread_condattr_init( & cond_attr );
    KMP_CHECK_SYSFAIL( "pthread_condattr_init", status );
    status = pthread_cond_init( & __kmp_wait_cv.c_cond, & cond_attr );
    KMP_CHECK_SYSFAIL( "pthread_cond_init", status );
#if USE_ITT_BUILD
    __kmp_itt_initialize();
#endif /* USE_ITT_BUILD */

    __kmp_init_runtime = TRUE;
}
2181
// Tear down everything __kmp_runtime_initialize() created: TLS keys, the
// monitor wait mutex/condvar, ITT, and affinity state.  EBUSY from the
// destroy calls is tolerated because a thread may still be blocked on the
// monitor primitives during shutdown.
void
__kmp_runtime_destroy( void )
{
    int status;

    if ( ! __kmp_init_runtime ) {
        return; // Nothing to do.
    };

#if USE_ITT_BUILD
    __kmp_itt_destroy();
#endif /* USE_ITT_BUILD */

    status = pthread_key_delete( __kmp_gtid_threadprivate_key );
    KMP_CHECK_SYSFAIL( "pthread_key_delete", status );
    #ifdef BUILD_TV
    status = pthread_key_delete( __kmp_tv_key );
    KMP_CHECK_SYSFAIL( "pthread_key_delete", status );
    #endif

    status = pthread_mutex_destroy( & __kmp_wait_mx.m_mutex );
    if ( status != 0 && status != EBUSY ) {
        KMP_SYSFAIL( "pthread_mutex_destroy", status );
    }
    status = pthread_cond_destroy( & __kmp_wait_cv.c_cond );
    if ( status != 0 && status != EBUSY ) {
        KMP_SYSFAIL( "pthread_cond_destroy", status );
    }
    #if KMP_AFFINITY_SUPPORTED
    __kmp_affinity_uninitialize();
    #endif

    __kmp_init_runtime = FALSE;
}
2216
2217
2218/* Put the thread to sleep for a time period */
2219/* NOTE: not currently used anywhere */
/* Put the calling thread to sleep for approximately 'millis' milliseconds.
   Resolution is whole seconds: the argument is rounded to the nearest second
   before calling sleep(), so values under 500 ms do not sleep at all. */
void
__kmp_thread_sleep( int millis )
{
    int seconds = ( millis + 500 ) / 1000;
    sleep( seconds );
}
2225
2226/* Calculate the elapsed wall clock time for the user */
// Store the current time, in seconds, into *t.  The default build samples
// wall-clock time via gettimeofday(); with FIX_SGI_CLOCK defined it samples
// per-process CPU time via clock_gettime(CLOCK_PROCESS_CPUTIME_ID) instead.
void
__kmp_elapsed( double *t )
{
    int status;
# ifdef FIX_SGI_CLOCK
    struct timespec ts;

    status = clock_gettime( CLOCK_PROCESS_CPUTIME_ID, &ts );
    KMP_CHECK_SYSFAIL_ERRNO( "clock_gettime", status );
    // seconds + nanoseconds scaled to seconds
    *t = (double) ts.tv_nsec * (1.0 / (double) KMP_NSEC_PER_SEC) +
        (double) ts.tv_sec;
# else
    struct timeval tv;

    status = gettimeofday( & tv, NULL );
    KMP_CHECK_SYSFAIL_ERRNO( "gettimeofday", status );
    // seconds + microseconds scaled to seconds
    *t = (double) tv.tv_usec * (1.0 / (double) KMP_USEC_PER_SEC) +
        (double) tv.tv_sec;
# endif
}
2247
2248/* Calculate the elapsed wall clock tick for the user */
/* Report the nominal granularity of the __kmp_elapsed() clock, in seconds. */
void
__kmp_elapsed_tick( double *t )
{
    *t = 1.0 / (double) CLOCKS_PER_SEC;
}
2254
2255/*
2256 Determine whether the given address is mapped into the current address space.
2257*/
2258
// Return nonzero iff 'addr' lies in a region of the current address space
// that is mapped readable+writable.  Implementation is per-OS: /proc maps
// parsing on Linux, vm_read_overwrite() probing on Darwin, and a permissive
// stub elsewhere.
int
__kmp_is_address_mapped( void * addr ) {

    int found = 0;
    int rc;

    #if KMP_OS_LINUX || KMP_OS_FREEBSD

        /*
            On Linux* OS, read the /proc/<pid>/maps pseudo-file to get all the address ranges mapped
            into the address space.
        */

        char * name = __kmp_str_format( "/proc/%d/maps", getpid() );
        FILE * file = NULL;

        file = fopen( name, "r" );
        KMP_ASSERT( file != NULL );

        // Each maps line looks like "begin-end perms ..."; scan them in turn.
        for ( ; ; ) {

            void * beginning = NULL;
            void * ending = NULL;
            char perms[ 5 ];

            rc = fscanf( file, "%p-%p %4s %*[^\n]\n", & beginning, & ending, perms );
            if ( rc == EOF ) {
                break;
            }; // if
            KMP_ASSERT( rc == 3 && KMP_STRLEN( perms ) == 4 ); // Make sure all fields are read.

            // Ending address is not included in the region, but beginning is.
            if ( ( addr >= beginning ) && ( addr < ending ) ) {
                perms[ 2 ] = 0; // 3rd and 4th characters do not matter.
                if ( strcmp( perms, "rw" ) == 0 ) {
                    // Memory we are looking for should be readable and writable.
                    found = 1;
                }; // if
                break; // addr can be in at most one region; stop scanning.
            }; // if

        }; // forever

        // Free resources.
        fclose( file );
        KMP_INTERNAL_FREE( name );

    #elif KMP_OS_DARWIN

        /*
            On OS X*, /proc pseudo filesystem is not available. Try to read memory using vm
            interface.
        */

        int buffer;
        vm_size_t count;
        rc =
            vm_read_overwrite(
                mach_task_self(), // Task to read memory of.
                (vm_address_t)( addr ), // Address to read from.
                1, // Number of bytes to be read.
                (vm_address_t)( & buffer ), // Address of buffer to save read bytes in.
                & count // Address of var to save number of read bytes in.
            );
        if ( rc == 0 ) {
            // Memory successfully read.
            found = 1;
        }; // if

    #elif KMP_OS_FREEBSD || KMP_OS_NETBSD

        // FIXME(FreeBSD, NetBSD): Implement this
        found = 1;

    #else

        #error "Unknown or unsupported OS"

    #endif

    return found;

} // __kmp_is_address_mapped
2342
2343#ifdef USE_LOAD_BALANCE
2344
2345
2346# if KMP_OS_DARWIN
2347
2348// The function returns the rounded value of the system load average
2349// during given time interval which depends on the value of
2350// __kmp_load_balance_interval variable (default is 60 sec, other values
2351// may be 300 sec or 900 sec).
2352// It returns -1 in case of error.
2353int
2354__kmp_get_load_balance( int max )
2355{
2356 double averages[3];
2357 int ret_avg = 0;
2358
2359 int res = getloadavg( averages, 3 );
2360
2361 //Check __kmp_load_balance_interval to determine which of averages to use.
2362 // getloadavg() may return the number of samples less than requested that is
2363 // less than 3.
2364 if ( __kmp_load_balance_interval < 180 && ( res >= 1 ) ) {
2365 ret_avg = averages[0];// 1 min
2366 } else if ( ( __kmp_load_balance_interval >= 180
2367 && __kmp_load_balance_interval < 600 ) && ( res >= 2 ) ) {
2368 ret_avg = averages[1];// 5 min
2369 } else if ( ( __kmp_load_balance_interval >= 600 ) && ( res == 3 ) ) {
2370 ret_avg = averages[2];// 15 min
Alp Toker8f2d3f02014-02-24 10:40:15 +00002371 } else {// Error occurred
Jim Cownie5e8470a2013-09-27 10:38:44 +00002372 return -1;
2373 }
2374
2375 return ret_avg;
2376}
2377
2378# else // Linux* OS
2379
2380// The fuction returns number of running (not sleeping) threads, or -1 in case of error.
2381// Error could be reported if Linux* OS kernel too old (without "/proc" support).
2382// Counting running threads stops if max running threads encountered.
// Count the running (state 'R') threads system-wide by scanning
// /proc/<pid>/task/<tid>/stat for every process and thread.  Stops early once
// 'max' running threads are seen (max <= 0 means no limit).  Results are
// cached for __kmp_load_balance_interval seconds in function-static state, so
// the function is not thread-safe with respect to concurrent callers.
// Returns the count, or -1 if /proc is unusable (the failure is then
// remembered and reported on every subsequent call).
int
__kmp_get_load_balance( int max )
{
    static int permanent_error = 0;

    static int glb_running_threads = 0; /* Saved count of the running threads for the thread balance algorithm */
    static double glb_call_time = 0; /* Thread balance algorithm call time */

    int running_threads = 0; // Number of running threads in the system.

    DIR * proc_dir = NULL; // Handle of "/proc/" directory.
    struct dirent * proc_entry = NULL;

    kmp_str_buf_t task_path; // "/proc/<pid>/task/<tid>/" path.
    DIR * task_dir = NULL; // Handle of "/proc/<pid>/task/<tid>/" directory.
    struct dirent * task_entry = NULL;
    int task_path_fixed_len;

    kmp_str_buf_t stat_path; // "/proc/<pid>/task/<tid>/stat" path.
    int stat_file = -1;
    int stat_path_fixed_len;

    int total_processes = 0; // Total number of processes in system.
    int total_threads = 0; // Total number of threads in system.

    double call_time = 0.0;

    __kmp_str_buf_init( & task_path );
    __kmp_str_buf_init( & stat_path );

    __kmp_elapsed( & call_time );

    // Serve the cached count while it is still fresh.
    if ( glb_call_time &&
            ( call_time - glb_call_time < __kmp_load_balance_interval ) ) {
        running_threads = glb_running_threads;
        goto finish;
    }

    glb_call_time = call_time;

    // Do not spend time on scanning "/proc/" if we have a permanent error.
    if ( permanent_error ) {
        running_threads = -1;
        goto finish;
    }; // if

    if ( max <= 0 ) {
        max = INT_MAX;
    }; // if

    // Open "/proc/" directory.
    proc_dir = opendir( "/proc" );
    if ( proc_dir == NULL ) {
        // Cannot open "/proc/". Probably the kernel does not support it. Return an error now and
        // in subsequent calls.
        running_threads = -1;
        permanent_error = 1;
        goto finish;
    }; // if

    // Initialize fixed part of task_path. This part will not change.
    __kmp_str_buf_cat( & task_path, "/proc/", 6 );
    task_path_fixed_len = task_path.used; // Remember number of used characters.

    proc_entry = readdir( proc_dir );
    while ( proc_entry != NULL ) {
        // Proc entry is a directory and name starts with a digit. Assume it is a process'
        // directory.
        if ( proc_entry->d_type == DT_DIR && isdigit( proc_entry->d_name[ 0 ] ) ) {

            ++ total_processes;
            // Make sure init process is the very first in "/proc", so we can replace
            // strcmp( proc_entry->d_name, "1" ) == 0 with simpler total_processes == 1.
            // We are going to check that total_processes == 1 => d_name == "1" is true (where
            // "=>" is implication). Since C++ does not have => operator, let us replace it with its
            // equivalent: a => b == ! a || b.
            KMP_DEBUG_ASSERT( total_processes != 1 || strcmp( proc_entry->d_name, "1" ) == 0 );

            // Construct task_path.
            task_path.used = task_path_fixed_len; // Reset task_path to "/proc/".
            __kmp_str_buf_cat( & task_path, proc_entry->d_name, KMP_STRLEN( proc_entry->d_name ) );
            __kmp_str_buf_cat( & task_path, "/task", 5 );

            task_dir = opendir( task_path.str );
            if ( task_dir == NULL ) {
                // Process can finish between reading "/proc/" directory entry and opening process'
                // "task/" directory. So, in general case we should not complain, but have to skip
                // this process and read the next one.
                // But on systems with no "task/" support we will spend lot of time to scan "/proc/"
                // tree again and again without any benefit. "init" process (its pid is 1) should
                // exist always, so, if we cannot open "/proc/1/task/" directory, it means "task/"
                // is not supported by kernel. Report an error now and in the future.
                if ( strcmp( proc_entry->d_name, "1" ) == 0 ) {
                    running_threads = -1;
                    permanent_error = 1;
                    goto finish;
                }; // if
            } else {
                // Construct fixed part of stat file path.
                __kmp_str_buf_clear( & stat_path );
                __kmp_str_buf_cat( & stat_path, task_path.str, task_path.used );
                __kmp_str_buf_cat( & stat_path, "/", 1 );
                stat_path_fixed_len = stat_path.used;

                task_entry = readdir( task_dir );
                while ( task_entry != NULL ) {
                    // It is a directory and name starts with a digit.
                    // NOTE(review): this tests proc_entry->d_type (always DT_DIR
                    // inside this branch), presumably task_entry->d_type was
                    // meant; behavior is unaffected since the outer check
                    // already passed.
                    if ( proc_entry->d_type == DT_DIR && isdigit( task_entry->d_name[ 0 ] ) ) {

                        ++ total_threads;

                        // Construct complete stat file path. Easiest way would be:
                        // __kmp_str_buf_print( & stat_path, "%s/%s/stat", task_path.str, task_entry->d_name );
                        // but a series of __kmp_str_buf_cat works a bit faster.
                        stat_path.used = stat_path_fixed_len; // Reset stat path to its fixed part.
                        __kmp_str_buf_cat( & stat_path, task_entry->d_name, KMP_STRLEN( task_entry->d_name ) );
                        __kmp_str_buf_cat( & stat_path, "/stat", 5 );

                        // Note: Low-level API (open/read/close) is used. High-level API
                        // (fopen/fclose) works ~ 30 % slower.
                        stat_file = open( stat_path.str, O_RDONLY );
                        if ( stat_file == -1 ) {
                            // We cannot report an error because task (thread) can terminate just
                            // before reading this file.
                        } else {
                            /*
                                Content of "stat" file looks like:

                                    24285 (program) S ...

                                It is a single line (if program name does not include funny
                                symbols). First number is a thread id, then name of executable file
                                name in parentheses, then state of the thread. We need just thread
                                state.

                                Good news: Length of program name is 15 characters max. Longer
                                names are truncated.

                                Thus, we need rather short buffer: 15 chars for program name +
                                2 parenthesis, + 3 spaces + ~7 digits of pid = 37.

                                Bad news: Program name may contain special symbols like space,
                                closing parenthesis, or even new line. This makes parsing "stat"
                                file not 100 % reliable. In case of funny program names parsing
                                may fail (report incorrect thread state).

                                Parsing "status" file looks more promising (due to different
                                file structure and escaping special symbols) but reading and
                                parsing of "status" file works slower.

                                -- ln
                            */
                            char buffer[ 65 ];
                            int len;
                            len = read( stat_file, buffer, sizeof( buffer ) - 1 );
                            if ( len >= 0 ) {
                                buffer[ len ] = 0;
                                // Using scanf:
                                //     sscanf( buffer, "%*d (%*s) %c ", & state );
                                // looks very nice, but searching for a closing parenthesis works a
                                // bit faster.
                                char * close_parent = strstr( buffer, ") " );
                                if ( close_parent != NULL ) {
                                    char state = * ( close_parent + 2 );
                                    if ( state == 'R' ) {
                                        ++ running_threads;
                                        if ( running_threads >= max ) {
                                            goto finish;
                                        }; // if
                                    }; // if
                                }; // if
                            }; // if
                            close( stat_file );
                            stat_file = -1;
                        }; // if
                    }; // if
                    task_entry = readdir( task_dir );
                }; // while
                closedir( task_dir );
                task_dir = NULL;
            }; // if
        }; // if
        proc_entry = readdir( proc_dir );
    }; // while

    //
    // There _might_ be a timing hole where the thread executing this
    // code get skipped in the load balance, and running_threads is 0.
    // Assert in the debug builds only!!!
    //
    KMP_DEBUG_ASSERT( running_threads > 0 );
    if ( running_threads <= 0 ) {
        running_threads = 1;
    }

    finish: // Clean up and exit.
    if ( proc_dir != NULL ) {
        closedir( proc_dir );
    }; // if
    __kmp_str_buf_free( & task_path );
    if ( task_dir != NULL ) {
        closedir( task_dir );
    }; // if
    __kmp_str_buf_free( & stat_path );
    if ( stat_file != -1 ) {
        close( stat_file );
    }; // if

    glb_running_threads = running_threads;

    return running_threads;

} // __kmp_get_load_balance
2596
2597# endif // KMP_OS_DARWIN
2598
2599#endif // USE_LOAD_BALANCE
2600
Andrey Churbanovedc370e2015-08-05 11:23:10 +00002601#if !(KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_MIC)
Jim Cownie3051f972014-08-07 10:12:54 +00002602
2603// we really only need the case with 1 argument, because CLANG always build
2604// a struct of pointers to shared variables referenced in the outlined function
// Invoke the outlined microtask 'pkfn' with gtid/tid plus 'argc' argument
// pointers, dispatching on argc because C has no portable way to forward a
// variable-length argument array to a fixed-prototype callee.  Supports up to
// 15 arguments; more is a fatal error (clang packs shared variables into a
// single struct pointer, so 1 argument is the common case).  With
// OMPT_SUPPORT, *exit_frame_ptr is set to this frame on entry (for tool
// stack unwinding) and cleared on exit.  Always returns 1.
int
__kmp_invoke_microtask( microtask_t pkfn,
                        int gtid, int tid,
                        int argc, void *p_argv[]
#if OMPT_SUPPORT
                        , void **exit_frame_ptr
#endif
)
{
#if OMPT_SUPPORT
    *exit_frame_ptr = __builtin_frame_address(0);
#endif

    switch (argc) {
    default:
        fprintf(stderr, "Too many args to microtask: %d!\n", argc);
        fflush(stderr);
        exit(-1);
    case 0:
        (*pkfn)(&gtid, &tid);
        break;
    case 1:
        (*pkfn)(&gtid, &tid, p_argv[0]);
        break;
    case 2:
        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1]);
        break;
    case 3:
        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2]);
        break;
    case 4:
        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3]);
        break;
    case 5:
        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4]);
        break;
    case 6:
        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
                p_argv[5]);
        break;
    case 7:
        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
                p_argv[5], p_argv[6]);
        break;
    case 8:
        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
                p_argv[5], p_argv[6], p_argv[7]);
        break;
    case 9:
        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
                p_argv[5], p_argv[6], p_argv[7], p_argv[8]);
        break;
    case 10:
        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
                p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9]);
        break;
    case 11:
        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
                p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10]);
        break;
    case 12:
        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
                p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
                p_argv[11]);
        break;
    case 13:
        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
                p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
                p_argv[11], p_argv[12]);
        break;
    case 14:
        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
                p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
                p_argv[11], p_argv[12], p_argv[13]);
        break;
    case 15:
        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
                p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
                p_argv[11], p_argv[12], p_argv[13], p_argv[14]);
        break;
    }

#if OMPT_SUPPORT
    *exit_frame_ptr = 0;
#endif

    return 1;
}
2693
2694#endif
Jim Cownie181b4bb2013-12-23 17:28:57 +00002695
Jim Cownie5e8470a2013-09-27 10:38:44 +00002696// end of file //
2697