/*
 * z_Linux_util.c -- platform specific routines.
 */


//===----------------------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//

#include "kmp.h"
#include "kmp_wrapper_getpid.h"
#include "kmp_itt.h"
#include "kmp_str.h"
#include "kmp_i18n.h"
#include "kmp_io.h"
#include "kmp_stats.h"
#include "kmp_wait_release.h"

#if !KMP_OS_FREEBSD && !KMP_OS_NETBSD
# include <alloca.h>
#endif
#include <unistd.h>
#include <math.h>               // HUGE_VAL.
#include <sys/time.h>
#include <sys/times.h>
#include <sys/resource.h>
#include <sys/syscall.h>

#if KMP_OS_LINUX && !KMP_OS_CNK
# include <sys/sysinfo.h>
# if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
// We should really include <futex.h>, but that causes compatibility problems on different
// Linux* OS distributions that either require that you include (or break when you try to include)
// <pci/types.h>.
// Since all we need is the two macros below (which are part of the kernel ABI, so can't change),
// we just define the constants here and don't include <futex.h>.
#  ifndef FUTEX_WAIT
#   define FUTEX_WAIT    0
#  endif
#  ifndef FUTEX_WAKE
#   define FUTEX_WAKE    1
#  endif
# endif
#elif KMP_OS_DARWIN
# include <sys/sysctl.h>
# include <mach/mach.h>
#elif KMP_OS_FREEBSD
# include <pthread_np.h>
#endif


#include <dirent.h>
#include <ctype.h>
#include <fcntl.h>

/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */

struct kmp_sys_timer {
    struct timespec     start;
};

// Convert timespec to nanoseconds.
#define TS2NS(timespec) (((timespec).tv_sec * 1e9) + (timespec).tv_nsec)
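// Worked example: TS2NS applied to a timespec of { tv_sec = 2, tv_nsec = 500000000 }
// yields (2 * 1e9) + 500000000 = 2.5e9 ns; note the 1e9 literal promotes the
// arithmetic to double.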

static struct kmp_sys_timer __kmp_sys_timer_data;

#if KMP_HANDLE_SIGNALS
    typedef void                            (* sig_func_t )( int );
    STATIC_EFI2_WORKAROUND struct sigaction __kmp_sighldrs[ NSIG ];
    static sigset_t                         __kmp_sigset;
#endif

static int __kmp_init_runtime   = FALSE;

static int __kmp_fork_count = 0;

static pthread_condattr_t  __kmp_suspend_cond_attr;
static pthread_mutexattr_t __kmp_suspend_mutex_attr;

static kmp_cond_align_t    __kmp_wait_cv;
static kmp_mutex_align_t   __kmp_wait_mx;

/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */

#ifdef DEBUG_SUSPEND
static void
__kmp_print_cond( char *buffer, kmp_cond_align_t *cond )
{
    KMP_SNPRINTF( buffer, 128, "(cond (lock (%ld, %d)), (descr (%p)))",
                  cond->c_cond.__c_lock.__status, cond->c_cond.__c_lock.__spinlock,
                  cond->c_cond.__c_waiting );
}
#endif

/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */

#if ( KMP_OS_LINUX && KMP_AFFINITY_SUPPORTED)

/*
 * Affinity support
 */

/*
 * On some of the older OS's that we build on, these constants aren't present
 * in <asm/unistd.h> #included from <sys/syscall.h>. They must be the same on
 * all systems of the same arch where they are defined, and they cannot
 * change; they are set in stone forever.
 */

# if KMP_ARCH_X86 || KMP_ARCH_ARM
#  ifndef __NR_sched_setaffinity
#   define __NR_sched_setaffinity  241
#  elif __NR_sched_setaffinity != 241
#   error Wrong code for setaffinity system call.
#  endif /* __NR_sched_setaffinity */
#  ifndef __NR_sched_getaffinity
#   define __NR_sched_getaffinity  242
#  elif __NR_sched_getaffinity != 242
#   error Wrong code for getaffinity system call.
#  endif /* __NR_sched_getaffinity */

# elif KMP_ARCH_AARCH64
#  ifndef __NR_sched_setaffinity
#   define __NR_sched_setaffinity  122
#  elif __NR_sched_setaffinity != 122
#   error Wrong code for setaffinity system call.
#  endif /* __NR_sched_setaffinity */
#  ifndef __NR_sched_getaffinity
#   define __NR_sched_getaffinity  123
#  elif __NR_sched_getaffinity != 123
#   error Wrong code for getaffinity system call.
#  endif /* __NR_sched_getaffinity */

# elif KMP_ARCH_X86_64
#  ifndef __NR_sched_setaffinity
#   define __NR_sched_setaffinity  203
#  elif __NR_sched_setaffinity != 203
#   error Wrong code for setaffinity system call.
#  endif /* __NR_sched_setaffinity */
#  ifndef __NR_sched_getaffinity
#   define __NR_sched_getaffinity  204
#  elif __NR_sched_getaffinity != 204
#   error Wrong code for getaffinity system call.
#  endif /* __NR_sched_getaffinity */

# elif KMP_ARCH_PPC64
#  ifndef __NR_sched_setaffinity
#   define __NR_sched_setaffinity  222
#  elif __NR_sched_setaffinity != 222
#   error Wrong code for setaffinity system call.
#  endif /* __NR_sched_setaffinity */
#  ifndef __NR_sched_getaffinity
#   define __NR_sched_getaffinity  223
#  elif __NR_sched_getaffinity != 223
#   error Wrong code for getaffinity system call.
#  endif /* __NR_sched_getaffinity */


# else
#  error Unknown or unsupported architecture

# endif /* KMP_ARCH_* */

int
__kmp_set_system_affinity( kmp_affin_mask_t const *mask, int abort_on_error )
{
    KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
      "Illegal set affinity operation when not capable");

    int retval = syscall( __NR_sched_setaffinity, 0, __kmp_affin_mask_size, mask );
    if (retval >= 0) {
        return 0;
    }
    int error = errno;
    if (abort_on_error) {
        __kmp_msg(
            kmp_ms_fatal,
            KMP_MSG( FatalSysError ),
            KMP_ERR( error ),
            __kmp_msg_null
        );
    }
    return error;
}

int
__kmp_get_system_affinity( kmp_affin_mask_t *mask, int abort_on_error )
{
    KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
      "Illegal get affinity operation when not capable");

    int retval = syscall( __NR_sched_getaffinity, 0, __kmp_affin_mask_size, mask );
    if (retval >= 0) {
        return 0;
    }
    int error = errno;
    if (abort_on_error) {
        __kmp_msg(
            kmp_ms_fatal,
            KMP_MSG( FatalSysError ),
            KMP_ERR( error ),
            __kmp_msg_null
        );
    }
    return error;
}

void
__kmp_affinity_bind_thread( int which )
{
    KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
      "Illegal set affinity operation when not capable");

    kmp_affin_mask_t *mask = (kmp_affin_mask_t *)KMP_ALLOCA(__kmp_affin_mask_size);
    KMP_CPU_ZERO(mask);
    KMP_CPU_SET(which, mask);
    __kmp_set_system_affinity(mask, TRUE);
}
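
// Usage sketch (illustrative, not a call site from this file): once
// __kmp_affinity_determine_capable() below has enabled affinity,
//
//     __kmp_affinity_bind_thread( 3 );
//
// pins the calling thread to logical CPU 3. The mask is KMP_ALLOCA'ed on the
// stack, so no explicit cleanup is required.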

/*
 * Determine if we can access affinity functionality on this version of
 * Linux* OS by checking __NR_sched_{get,set}affinity system calls, and set
 * __kmp_affin_mask_size to the appropriate value (0 means not capable).
 */
void
__kmp_affinity_determine_capable(const char *env_var)
{
    //
    // Check and see if the OS supports thread affinity.
    //

# define KMP_CPU_SET_SIZE_LIMIT          (1024*1024)

    int gCode;
    int sCode;
    kmp_affin_mask_t *buf;
    buf = ( kmp_affin_mask_t * ) KMP_INTERNAL_MALLOC( KMP_CPU_SET_SIZE_LIMIT );

    // If Linux* OS:
    // If the syscall fails or returns a suggestion for the size,
    // then we don't have to search for an appropriate size.
    gCode = syscall( __NR_sched_getaffinity, 0, KMP_CPU_SET_SIZE_LIMIT, buf );
    KA_TRACE(30, ( "__kmp_affinity_determine_capable: "
      "initial getaffinity call returned %d errno = %d\n",
      gCode, errno));

    //if ((gCode < 0) && (errno == ENOSYS))
    if (gCode < 0) {
        //
        // System call not supported
        //
        if (__kmp_affinity_verbose || (__kmp_affinity_warnings
          && (__kmp_affinity_type != affinity_none)
          && (__kmp_affinity_type != affinity_default)
          && (__kmp_affinity_type != affinity_disabled))) {
            int error = errno;
            __kmp_msg(
                kmp_ms_warning,
                KMP_MSG( GetAffSysCallNotSupported, env_var ),
                KMP_ERR( error ),
                __kmp_msg_null
            );
        }
        KMP_AFFINITY_DISABLE();
        KMP_INTERNAL_FREE(buf);
        return;
    }
    if (gCode > 0) { // Linux* OS only
        // The optimal situation: the OS returns the size of the buffer
        // it expects.
        //
        // A verification of correct behavior is that setaffinity on a NULL
        // buffer with the same size fails with errno set to EFAULT.
        sCode = syscall( __NR_sched_setaffinity, 0, gCode, NULL );
        KA_TRACE(30, ( "__kmp_affinity_determine_capable: "
          "setaffinity for mask size %d returned %d errno = %d\n",
          gCode, sCode, errno));
        if (sCode < 0) {
            if (errno == ENOSYS) {
                if (__kmp_affinity_verbose || (__kmp_affinity_warnings
                  && (__kmp_affinity_type != affinity_none)
                  && (__kmp_affinity_type != affinity_default)
                  && (__kmp_affinity_type != affinity_disabled))) {
                    int error = errno;
                    __kmp_msg(
                        kmp_ms_warning,
                        KMP_MSG( SetAffSysCallNotSupported, env_var ),
                        KMP_ERR( error ),
                        __kmp_msg_null
                    );
                }
                KMP_AFFINITY_DISABLE();
                KMP_INTERNAL_FREE(buf);
            }
            if (errno == EFAULT) {
                KMP_AFFINITY_ENABLE(gCode);
                KA_TRACE(10, ( "__kmp_affinity_determine_capable: "
                  "affinity supported (mask size %d)\n",
                  (int)__kmp_affin_mask_size));
                KMP_INTERNAL_FREE(buf);
                return;
            }
        }
    }

    //
    // Call the getaffinity system call repeatedly with increasing set sizes
    // until we succeed, or reach an upper bound on the search.
    //
    KA_TRACE(30, ( "__kmp_affinity_determine_capable: "
      "searching for proper set size\n"));
    int size;
    for (size = 1; size <= KMP_CPU_SET_SIZE_LIMIT; size *= 2) {
        gCode = syscall( __NR_sched_getaffinity, 0, size, buf );
        KA_TRACE(30, ( "__kmp_affinity_determine_capable: "
          "getaffinity for mask size %d returned %d errno = %d\n", size,
          gCode, errno));

        if (gCode < 0) {
            if ( errno == ENOSYS )
            {
                //
                // We shouldn't get here
                //
                KA_TRACE(30, ( "__kmp_affinity_determine_capable: "
                  "inconsistent OS call behavior: errno == ENOSYS for mask size %d\n",
                  size));
                if (__kmp_affinity_verbose || (__kmp_affinity_warnings
                  && (__kmp_affinity_type != affinity_none)
                  && (__kmp_affinity_type != affinity_default)
                  && (__kmp_affinity_type != affinity_disabled))) {
                    int error = errno;
                    __kmp_msg(
                        kmp_ms_warning,
                        KMP_MSG( GetAffSysCallNotSupported, env_var ),
                        KMP_ERR( error ),
                        __kmp_msg_null
                    );
                }
                KMP_AFFINITY_DISABLE();
                KMP_INTERNAL_FREE(buf);
                return;
            }
            continue;
        }

        sCode = syscall( __NR_sched_setaffinity, 0, gCode, NULL );
        KA_TRACE(30, ( "__kmp_affinity_determine_capable: "
          "setaffinity for mask size %d returned %d errno = %d\n",
          gCode, sCode, errno));
        if (sCode < 0) {
            if (errno == ENOSYS) { // Linux* OS only
                //
                // We shouldn't get here
                //
                KA_TRACE(30, ( "__kmp_affinity_determine_capable: "
                  "inconsistent OS call behavior: errno == ENOSYS for mask size %d\n",
                  size));
                if (__kmp_affinity_verbose || (__kmp_affinity_warnings
                  && (__kmp_affinity_type != affinity_none)
                  && (__kmp_affinity_type != affinity_default)
                  && (__kmp_affinity_type != affinity_disabled))) {
                    int error = errno;
                    __kmp_msg(
                        kmp_ms_warning,
                        KMP_MSG( SetAffSysCallNotSupported, env_var ),
                        KMP_ERR( error ),
                        __kmp_msg_null
                    );
                }
                KMP_AFFINITY_DISABLE();
                KMP_INTERNAL_FREE(buf);
                return;
            }
            if (errno == EFAULT) {
                KMP_AFFINITY_ENABLE(gCode);
                KA_TRACE(10, ( "__kmp_affinity_determine_capable: "
                  "affinity supported (mask size %d)\n",
                  (int)__kmp_affin_mask_size));
                KMP_INTERNAL_FREE(buf);
                return;
            }
        }
    }
    //int error = errno;  // save uncaught error code
    KMP_INTERNAL_FREE(buf);
    // errno = error;  // restore uncaught error code, will be printed at the next KMP_WARNING below

    //
    // Affinity is not supported
    //
    KMP_AFFINITY_DISABLE();
    KA_TRACE(10, ( "__kmp_affinity_determine_capable: "
      "cannot determine mask size - affinity not supported\n"));
    if (__kmp_affinity_verbose || (__kmp_affinity_warnings
      && (__kmp_affinity_type != affinity_none)
      && (__kmp_affinity_type != affinity_default)
      && (__kmp_affinity_type != affinity_disabled))) {
        KMP_WARNING( AffCantGetMaskSize, env_var );
    }
}

#endif // KMP_OS_LINUX && KMP_AFFINITY_SUPPORTED

/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */

#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && !KMP_OS_CNK

int
__kmp_futex_determine_capable()
{
    int loc = 0;
    int rc = syscall( __NR_futex, &loc, FUTEX_WAKE, 1, NULL, NULL, 0 );
    int retval = ( rc == 0 ) || ( errno != ENOSYS );

    KA_TRACE(10, ( "__kmp_futex_determine_capable: rc = %d errno = %d\n", rc,
      errno ) );
    KA_TRACE(10, ( "__kmp_futex_determine_capable: futex syscall%s supported\n",
      retval ? "" : " not" ) );

    return retval;
}
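
// A minimal sketch of how these two constants pair up in futex-based waiting
// (an illustration, not code from this file), with `loc` a shared, initially
// zero int:
//
//     syscall( __NR_futex, &loc, FUTEX_WAIT, 0, NULL, NULL, 0 ); // sleep while loc == 0
//     syscall( __NR_futex, &loc, FUTEX_WAKE, 1, NULL, NULL, 0 ); // wake one sleeping waiter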

#endif // KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && !KMP_OS_CNK

/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */

#if (KMP_ARCH_X86 || KMP_ARCH_X86_64) && (! KMP_ASM_INTRINS)
/*
 * The IA-32 architecture only provides a 32-bit "add-exchange" instruction,
 * so we implement these routines with compare_and_store loops instead.
 */
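
/* Each routine below follows the same lock-free retry pattern: read the old
   value, compute the new value, and attempt a compare-and-store; if another
   thread raced in and changed *p, pause briefly and retry with a freshly read
   value. The value observed before the successful store is returned, giving
   the usual fetch-and-op semantics. */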

kmp_int8
__kmp_test_then_or8( volatile kmp_int8 *p, kmp_int8 d )
{
    kmp_int8 old_value, new_value;

    old_value = TCR_1( *p );
    new_value = old_value | d;

    while ( ! KMP_COMPARE_AND_STORE_REL8 ( p, old_value, new_value ) )
    {
        KMP_CPU_PAUSE();
        old_value = TCR_1( *p );
        new_value = old_value | d;
    }
    return old_value;
}

kmp_int8
__kmp_test_then_and8( volatile kmp_int8 *p, kmp_int8 d )
{
    kmp_int8 old_value, new_value;

    old_value = TCR_1( *p );
    new_value = old_value & d;

    while ( ! KMP_COMPARE_AND_STORE_REL8 ( p, old_value, new_value ) )
    {
        KMP_CPU_PAUSE();
        old_value = TCR_1( *p );
        new_value = old_value & d;
    }
    return old_value;
}

kmp_int32
__kmp_test_then_or32( volatile kmp_int32 *p, kmp_int32 d )
{
    kmp_int32 old_value, new_value;

    old_value = TCR_4( *p );
    new_value = old_value | d;

    while ( ! KMP_COMPARE_AND_STORE_REL32 ( p, old_value, new_value ) )
    {
        KMP_CPU_PAUSE();
        old_value = TCR_4( *p );
        new_value = old_value | d;
    }
    return old_value;
}

kmp_int32
__kmp_test_then_and32( volatile kmp_int32 *p, kmp_int32 d )
{
    kmp_int32 old_value, new_value;

    old_value = TCR_4( *p );
    new_value = old_value & d;

    while ( ! KMP_COMPARE_AND_STORE_REL32 ( p, old_value, new_value ) )
    {
        KMP_CPU_PAUSE();
        old_value = TCR_4( *p );
        new_value = old_value & d;
    }
    return old_value;
}

# if KMP_ARCH_X86 || KMP_ARCH_PPC64 || KMP_ARCH_AARCH64
kmp_int8
__kmp_test_then_add8( volatile kmp_int8 *p, kmp_int8 d )
{
    kmp_int8 old_value, new_value;

    old_value = TCR_1( *p );
    new_value = old_value + d;

    while ( ! KMP_COMPARE_AND_STORE_REL8 ( p, old_value, new_value ) )
    {
        KMP_CPU_PAUSE();
        old_value = TCR_1( *p );
        new_value = old_value + d;
    }
    return old_value;
}

kmp_int64
__kmp_test_then_add64( volatile kmp_int64 *p, kmp_int64 d )
{
    kmp_int64 old_value, new_value;

    old_value = TCR_8( *p );
    new_value = old_value + d;

    while ( ! KMP_COMPARE_AND_STORE_REL64 ( p, old_value, new_value ) )
    {
        KMP_CPU_PAUSE();
        old_value = TCR_8( *p );
        new_value = old_value + d;
    }
    return old_value;
}
# endif /* KMP_ARCH_X86 || KMP_ARCH_PPC64 || KMP_ARCH_AARCH64 */

kmp_int64
__kmp_test_then_or64( volatile kmp_int64 *p, kmp_int64 d )
{
    kmp_int64 old_value, new_value;

    old_value = TCR_8( *p );
    new_value = old_value | d;
    while ( ! KMP_COMPARE_AND_STORE_REL64 ( p, old_value, new_value ) )
    {
        KMP_CPU_PAUSE();
        old_value = TCR_8( *p );
        new_value = old_value | d;
    }
    return old_value;
}

kmp_int64
__kmp_test_then_and64( volatile kmp_int64 *p, kmp_int64 d )
{
    kmp_int64 old_value, new_value;

    old_value = TCR_8( *p );
    new_value = old_value & d;
    while ( ! KMP_COMPARE_AND_STORE_REL64 ( p, old_value, new_value ) )
    {
        KMP_CPU_PAUSE();
        old_value = TCR_8( *p );
        new_value = old_value & d;
    }
    return old_value;
}

#endif /* (KMP_ARCH_X86 || KMP_ARCH_X86_64) && (! KMP_ASM_INTRINS) */

void
__kmp_terminate_thread( int gtid )
{
    int status;
    kmp_info_t  *th = __kmp_threads[ gtid ];

    if ( !th ) return;

    #ifdef KMP_CANCEL_THREADS
        KA_TRACE( 10, ("__kmp_terminate_thread: kill (%d)\n", gtid ) );
        status = pthread_cancel( th->th.th_info.ds.ds_thread );
        if ( status != 0 && status != ESRCH ) {
            __kmp_msg(
                kmp_ms_fatal,
                KMP_MSG( CantTerminateWorkerThread ),
                KMP_ERR( status ),
                __kmp_msg_null
            );
        }; // if
    #endif
    __kmp_yield( TRUE );
} // __kmp_terminate_thread

/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */

/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */

/*
 * Set thread stack info according to values returned by
 * pthread_getattr_np().
 * If values are unreasonable, assume call failed and use
 * incremental stack refinement method instead.
 * Returns TRUE if the stack parameters could be determined exactly,
 * FALSE if incremental refinement is necessary.
 */
static kmp_int32
__kmp_set_stack_info( int gtid, kmp_info_t *th )
{
    int            stack_data;
#if KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD
    /* Linux* OS only -- no pthread_getattr_np support on OS X* */
    pthread_attr_t attr;
    int            status;
    size_t         size = 0;
    void *         addr = 0;

    /* Always do incremental stack refinement for ubermaster threads since the initial
       thread stack range can be reduced by sibling thread creation so pthread_attr_getstack
       may cause thread gtid aliasing */
    if ( ! KMP_UBER_GTID(gtid) ) {

        /* Fetch the real thread attributes */
        status = pthread_attr_init( &attr );
        KMP_CHECK_SYSFAIL( "pthread_attr_init", status );
#if KMP_OS_FREEBSD || KMP_OS_NETBSD
        status = pthread_attr_get_np( pthread_self(), &attr );
        KMP_CHECK_SYSFAIL( "pthread_attr_get_np", status );
#else
        status = pthread_getattr_np( pthread_self(), &attr );
        KMP_CHECK_SYSFAIL( "pthread_getattr_np", status );
#endif
        status = pthread_attr_getstack( &attr, &addr, &size );
        KMP_CHECK_SYSFAIL( "pthread_attr_getstack", status );
        KA_TRACE( 60, ( "__kmp_set_stack_info: T#%d pthread_attr_getstack returned size: %lu, "
                        "low addr: %p\n",
                        gtid, size, addr ));

        status = pthread_attr_destroy( &attr );
        KMP_CHECK_SYSFAIL( "pthread_attr_destroy", status );
    }

    if ( size != 0 && addr != 0 ) {     /* was stack parameter determination successful? */
        /* Store the correct base and size */
        TCW_PTR(th->th.th_info.ds.ds_stackbase, (((char *)addr) + size));
        TCW_PTR(th->th.th_info.ds.ds_stacksize, size);
        TCW_4(th->th.th_info.ds.ds_stackgrow, FALSE);
        return TRUE;
    }
#endif /* KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD */
    /* Use incremental refinement starting from initial conservative estimate */
    TCW_PTR(th->th.th_info.ds.ds_stacksize, 0);
    TCW_PTR(th -> th.th_info.ds.ds_stackbase, &stack_data);
    TCW_4(th->th.th_info.ds.ds_stackgrow, TRUE);
    return FALSE;
}

static void*
__kmp_launch_worker( void *thr )
{
    int status, old_type, old_state;
#ifdef KMP_BLOCK_SIGNALS
    sigset_t    new_set, old_set;
#endif /* KMP_BLOCK_SIGNALS */
    void *exit_val;
#if KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD
    void * volatile padding = 0;
#endif
    int gtid;

    gtid = ((kmp_info_t*)thr) -> th.th_info.ds.ds_gtid;
    __kmp_gtid_set_specific( gtid );
#ifdef KMP_TDATA_GTID
    __kmp_gtid = gtid;
#endif
#if KMP_STATS_ENABLED
    // set __thread local index to point to thread-specific stats
    __kmp_stats_thread_ptr = ((kmp_info_t*)thr)->th.th_stats;
#endif

#if USE_ITT_BUILD
    __kmp_itt_thread_name( gtid );
#endif /* USE_ITT_BUILD */

#if KMP_AFFINITY_SUPPORTED
    __kmp_affinity_set_init_mask( gtid, FALSE );
#endif

#ifdef KMP_CANCEL_THREADS
    status = pthread_setcanceltype( PTHREAD_CANCEL_ASYNCHRONOUS, & old_type );
    KMP_CHECK_SYSFAIL( "pthread_setcanceltype", status );
    /* josh todo: isn't PTHREAD_CANCEL_ENABLE default for newly-created threads? */
    status = pthread_setcancelstate( PTHREAD_CANCEL_ENABLE, & old_state );
    KMP_CHECK_SYSFAIL( "pthread_setcancelstate", status );
#endif

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
    //
    // Set the FP control regs to be a copy of
    // the parallel initialization thread's.
    //
    __kmp_clear_x87_fpu_status_word();
    __kmp_load_x87_fpu_control_word( &__kmp_init_x87_fpu_control_word );
    __kmp_load_mxcsr( &__kmp_init_mxcsr );
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

#ifdef KMP_BLOCK_SIGNALS
    status = sigfillset( & new_set );
    KMP_CHECK_SYSFAIL_ERRNO( "sigfillset", status );
    status = pthread_sigmask( SIG_BLOCK, & new_set, & old_set );
    KMP_CHECK_SYSFAIL( "pthread_sigmask", status );
#endif /* KMP_BLOCK_SIGNALS */

#if KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD
    if ( __kmp_stkoffset > 0 && gtid > 0 ) {
        padding = KMP_ALLOCA( gtid * __kmp_stkoffset );
    }
#endif

    KMP_MB();
    __kmp_set_stack_info( gtid, (kmp_info_t*)thr );

    __kmp_check_stack_overlap( (kmp_info_t*)thr );

    exit_val = __kmp_launch_thread( (kmp_info_t *) thr );

#ifdef KMP_BLOCK_SIGNALS
    status = pthread_sigmask( SIG_SETMASK, & old_set, NULL );
    KMP_CHECK_SYSFAIL( "pthread_sigmask", status );
#endif /* KMP_BLOCK_SIGNALS */

    return exit_val;
}


/* The monitor thread controls all of the threads in the complex */

static void*
__kmp_launch_monitor( void *thr )
{
    int         status, old_type, old_state;
#ifdef KMP_BLOCK_SIGNALS
    sigset_t    new_set;
#endif /* KMP_BLOCK_SIGNALS */
    struct timespec interval;
    int yield_count;
    int yield_cycles = 0;

    KMP_MB();       /* Flush all pending memory write invalidates. */

    KA_TRACE( 10, ("__kmp_launch_monitor: #1 launched\n" ) );

    /* register us as the monitor thread */
    __kmp_gtid_set_specific( KMP_GTID_MONITOR );
#ifdef KMP_TDATA_GTID
    __kmp_gtid = KMP_GTID_MONITOR;
#endif

    KMP_MB();

#if USE_ITT_BUILD
    __kmp_itt_thread_ignore();    // Instruct Intel(R) Threading Tools to ignore monitor thread.
#endif /* USE_ITT_BUILD */

    __kmp_set_stack_info( ((kmp_info_t*)thr)->th.th_info.ds.ds_gtid, (kmp_info_t*)thr );

    __kmp_check_stack_overlap( (kmp_info_t*)thr );

#ifdef KMP_CANCEL_THREADS
    status = pthread_setcanceltype( PTHREAD_CANCEL_ASYNCHRONOUS, & old_type );
    KMP_CHECK_SYSFAIL( "pthread_setcanceltype", status );
    /* josh todo: isn't PTHREAD_CANCEL_ENABLE default for newly-created threads? */
    status = pthread_setcancelstate( PTHREAD_CANCEL_ENABLE, & old_state );
    KMP_CHECK_SYSFAIL( "pthread_setcancelstate", status );
#endif

    #if KMP_REAL_TIME_FIX
        // This is a potential fix which allows applications with a real-time scheduling
        // policy to work. However, a decision about the fix has not been made yet,
        // so it is disabled by default.
        { // Was the program started with a real-time scheduling policy?
            int sched = sched_getscheduler( 0 );
            if ( sched == SCHED_FIFO || sched == SCHED_RR ) {
                // Yes, we are a part of a real-time application. Try to increase the
                // priority of the monitor.
                struct sched_param param;
                int    max_priority = sched_get_priority_max( sched );
                int    rc;
                KMP_WARNING( RealTimeSchedNotSupported );
                sched_getparam( 0, & param );
                if ( param.sched_priority < max_priority ) {
                    param.sched_priority += 1;
                    rc = sched_setscheduler( 0, sched, & param );
                    if ( rc != 0 ) {
                        int error = errno;
                        __kmp_msg(
                            kmp_ms_warning,
                            KMP_MSG( CantChangeMonitorPriority ),
                            KMP_ERR( error ),
                            KMP_MSG( MonitorWillStarve ),
                            __kmp_msg_null
                        );
                    }; // if
                } else {
                    // We cannot abort here, because the number of CPUs may be enough for
                    // all the threads, including the monitor thread, so the application
                    // could potentially work...
                    __kmp_msg(
                        kmp_ms_warning,
                        KMP_MSG( RunningAtMaxPriority ),
                        KMP_MSG( MonitorWillStarve ),
                        KMP_HNT( RunningAtMaxPriority ),
                        __kmp_msg_null
                    );
                }; // if
            }; // if
            TCW_4( __kmp_global.g.g_time.dt.t_value, 0 );  // AC: free thread that waits for monitor started
        }
    #endif // KMP_REAL_TIME_FIX

    KMP_MB();       /* Flush all pending memory write invalidates. */

    if ( __kmp_monitor_wakeups == 1 ) {
        interval.tv_sec  = 1;
        interval.tv_nsec = 0;
    } else {
        interval.tv_sec  = 0;
        interval.tv_nsec = (KMP_NSEC_PER_SEC / __kmp_monitor_wakeups);
    }
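
    // Worked example: with __kmp_monitor_wakeups == 4 the interval above is
    // KMP_NSEC_PER_SEC / 4, i.e. the monitor wakes roughly every 250 ms to
    // advance the global time stamp incremented at the bottom of the loop below.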

    KA_TRACE( 10, ("__kmp_launch_monitor: #2 monitor\n" ) );

    if (__kmp_yield_cycle) {
        __kmp_yielding_on = 0;  /* Start out with yielding shut off */
        yield_count = __kmp_yield_off_count;
    } else {
        __kmp_yielding_on = 1;  /* Yielding is on permanently */
    }

    while( ! TCR_4( __kmp_global.g.g_done ) ) {
        struct timespec  now;
        struct timeval   tval;

        /* This thread monitors the state of the system */

        KA_TRACE( 15, ( "__kmp_launch_monitor: update\n" ) );

        status = gettimeofday( &tval, NULL );
        KMP_CHECK_SYSFAIL_ERRNO( "gettimeofday", status );
        TIMEVAL_TO_TIMESPEC( &tval, &now );

        now.tv_sec  += interval.tv_sec;
        now.tv_nsec += interval.tv_nsec;

        if (now.tv_nsec >= KMP_NSEC_PER_SEC) {
            now.tv_sec  += 1;
            now.tv_nsec -= KMP_NSEC_PER_SEC;
        }

        status = pthread_mutex_lock( & __kmp_wait_mx.m_mutex );
        KMP_CHECK_SYSFAIL( "pthread_mutex_lock", status );
        // AC: the monitor should not fall asleep if g_done has been set
        if ( !TCR_4(__kmp_global.g.g_done) ) { // check once more under mutex
            status = pthread_cond_timedwait( &__kmp_wait_cv.c_cond, &__kmp_wait_mx.m_mutex, &now );
            if ( status != 0 ) {
                if ( status != ETIMEDOUT && status != EINTR ) {
                    KMP_SYSFAIL( "pthread_cond_timedwait", status );
                };
            };
        };
        status = pthread_mutex_unlock( & __kmp_wait_mx.m_mutex );
        KMP_CHECK_SYSFAIL( "pthread_mutex_unlock", status );

        if (__kmp_yield_cycle) {
            yield_cycles++;
            if ( (yield_cycles % yield_count) == 0 ) {
                if (__kmp_yielding_on) {
                    __kmp_yielding_on = 0;  /* Turn it off now */
                    yield_count = __kmp_yield_off_count;
                } else {
                    __kmp_yielding_on = 1;  /* Turn it on now */
                    yield_count = __kmp_yield_on_count;
                }
                yield_cycles = 0;
            }
        } else {
            __kmp_yielding_on = 1;
        }

        TCW_4( __kmp_global.g.g_time.dt.t_value,
          TCR_4( __kmp_global.g.g_time.dt.t_value ) + 1 );

        KMP_MB();       /* Flush all pending memory write invalidates. */
    }

    KA_TRACE( 10, ("__kmp_launch_monitor: #3 cleanup\n" ) );

#ifdef KMP_BLOCK_SIGNALS
    status = sigfillset( & new_set );
    KMP_CHECK_SYSFAIL_ERRNO( "sigfillset", status );
    status = pthread_sigmask( SIG_UNBLOCK, & new_set, NULL );
    KMP_CHECK_SYSFAIL( "pthread_sigmask", status );
#endif /* KMP_BLOCK_SIGNALS */

    KA_TRACE( 10, ("__kmp_launch_monitor: #4 finished\n" ) );

    if( __kmp_global.g.g_abort != 0 ) {
        /* now we need to terminate the worker threads  */
        /* the value of t_abort is the signal we caught */

        int gtid;

        KA_TRACE( 10, ("__kmp_launch_monitor: #5 terminate sig=%d\n", __kmp_global.g.g_abort ) );

        /* terminate the OpenMP worker threads */
        /* TODO this is not valid for sibling threads!!
         * the uber master might not be 0 anymore.. */
        for (gtid = 1; gtid < __kmp_threads_capacity; ++gtid)
            __kmp_terminate_thread( gtid );

        __kmp_cleanup();

        KA_TRACE( 10, ("__kmp_launch_monitor: #6 raise sig=%d\n", __kmp_global.g.g_abort ) );

        if (__kmp_global.g.g_abort > 0)
            raise( __kmp_global.g.g_abort );

    }

    KA_TRACE( 10, ("__kmp_launch_monitor: #7 exit\n" ) );

    return thr;
}

void
__kmp_create_worker( int gtid, kmp_info_t *th, size_t stack_size )
{
    pthread_t      handle;
    pthread_attr_t thread_attr;
    int            status;


    th->th.th_info.ds.ds_gtid = gtid;

#if KMP_STATS_ENABLED
    // sets up worker thread stats
    __kmp_acquire_tas_lock(&__kmp_stats_lock, gtid);

    // th->th.th_stats is used to transfer the thread-specific stats-pointer to
    // __kmp_launch_worker. So when the thread is created (goes into
    // __kmp_launch_worker) it will set its __thread local pointer to th->th.th_stats
    th->th.th_stats = __kmp_stats_list.push_back(gtid);
    if(KMP_UBER_GTID(gtid)) {
        __kmp_stats_start_time = tsc_tick_count::now();
        __kmp_stats_thread_ptr = th->th.th_stats;
        __kmp_stats_init();
        KMP_START_EXPLICIT_TIMER(OMP_serial);
        KMP_START_EXPLICIT_TIMER(OMP_start_end);
    }
    __kmp_release_tas_lock(&__kmp_stats_lock, gtid);

#endif // KMP_STATS_ENABLED

    if ( KMP_UBER_GTID(gtid) ) {
        KA_TRACE( 10, ("__kmp_create_worker: uber thread (%d)\n", gtid ) );
        th -> th.th_info.ds.ds_thread = pthread_self();
        __kmp_set_stack_info( gtid, th );
        __kmp_check_stack_overlap( th );
        return;
    }; // if

    KA_TRACE( 10, ("__kmp_create_worker: try to create thread (%d)\n", gtid ) );

    KMP_MB();       /* Flush all pending memory write invalidates. */

#ifdef KMP_THREAD_ATTR
    {
        status = pthread_attr_init( &thread_attr );
        if ( status != 0 ) {
            __kmp_msg(
                kmp_ms_fatal,
                KMP_MSG( CantInitThreadAttrs ),
                KMP_ERR( status ),
                __kmp_msg_null
            );
        }; // if
        status = pthread_attr_setdetachstate( & thread_attr, PTHREAD_CREATE_JOINABLE );
        if ( status != 0 ) {
            __kmp_msg(
                kmp_ms_fatal,
                KMP_MSG( CantSetWorkerState ),
                KMP_ERR( status ),
                __kmp_msg_null
            );
        }; // if

        /* Set stack size for this thread now.
         * The multiple of 2 is there because on some machines, requesting an unusual stacksize
         * causes the thread to have an offset before the dummy alloca() takes place to create the
         * offset.  Since we want the user to have a sufficient stacksize AND support a stack offset, we
         * alloca() twice the offset so that the upcoming alloca() does not eliminate any premade
         * offset, and also gives the user the stack space they requested for all threads */
        stack_size += gtid * __kmp_stkoffset * 2;
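
        /* Worked example (illustrative numbers, not defaults): with
           __kmp_stkoffset == 8 KiB and gtid == 3, 48 KiB is added here, while
           __kmp_launch_worker alloca()s only 3 * 8 KiB = 24 KiB of padding,
           leaving the full user-requested stack_size available. */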

        KA_TRACE( 10, ( "__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
                        "__kmp_stksize = %lu bytes, final stacksize = %lu bytes\n",
                        gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size ) );

# ifdef _POSIX_THREAD_ATTR_STACKSIZE
        status = pthread_attr_setstacksize( & thread_attr, stack_size );
#  ifdef KMP_BACKUP_STKSIZE
        if ( status != 0 ) {
            if ( ! __kmp_env_stksize ) {
                stack_size = KMP_BACKUP_STKSIZE + gtid * __kmp_stkoffset;
                __kmp_stksize = KMP_BACKUP_STKSIZE;
                KA_TRACE( 10, ("__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
                               "__kmp_stksize = %lu bytes, (backup) final stacksize = %lu "
                               "bytes\n",
                               gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size )
                          );
                status = pthread_attr_setstacksize( &thread_attr, stack_size );
            }; // if
        }; // if
#  endif /* KMP_BACKUP_STKSIZE */
        if ( status != 0 ) {
            __kmp_msg(
                kmp_ms_fatal,
                KMP_MSG( CantSetWorkerStackSize, stack_size ),
                KMP_ERR( status ),
                KMP_HNT( ChangeWorkerStackSize ),
                __kmp_msg_null
            );
        }; // if
# endif /* _POSIX_THREAD_ATTR_STACKSIZE */
    }
#endif /* KMP_THREAD_ATTR */

    {
        status = pthread_create( & handle, & thread_attr, __kmp_launch_worker, (void *) th );
        if ( status != 0 || ! handle ) { // ??? Why do we check handle??
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
            if ( status == EINVAL ) {
                __kmp_msg(
                    kmp_ms_fatal,
                    KMP_MSG( CantSetWorkerStackSize, stack_size ),
                    KMP_ERR( status ),
                    KMP_HNT( IncreaseWorkerStackSize ),
                    __kmp_msg_null
                );
            };
            if ( status == ENOMEM ) {
                __kmp_msg(
                    kmp_ms_fatal,
                    KMP_MSG( CantSetWorkerStackSize, stack_size ),
                    KMP_ERR( status ),
                    KMP_HNT( DecreaseWorkerStackSize ),
                    __kmp_msg_null
                );
            };
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
            if ( status == EAGAIN ) {
                __kmp_msg(
                    kmp_ms_fatal,
                    KMP_MSG( NoResourcesForWorkerThread ),
                    KMP_ERR( status ),
                    KMP_HNT( Decrease_NUM_THREADS ),
                    __kmp_msg_null
                );
            }; // if
            KMP_SYSFAIL( "pthread_create", status );
        }; // if

        th->th.th_info.ds.ds_thread = handle;
    }

#ifdef KMP_THREAD_ATTR
    {
        status = pthread_attr_destroy( & thread_attr );
        if ( status ) {
            __kmp_msg(
                kmp_ms_warning,
                KMP_MSG( CantDestroyThreadAttrs ),
                KMP_ERR( status ),
                __kmp_msg_null
            );
        }; // if
    }
#endif /* KMP_THREAD_ATTR */

    KMP_MB();       /* Flush all pending memory write invalidates. */

    KA_TRACE( 10, ("__kmp_create_worker: done creating thread (%d)\n", gtid ) );

} // __kmp_create_worker


void
__kmp_create_monitor( kmp_info_t *th )
{
    pthread_t           handle;
    pthread_attr_t      thread_attr;
    size_t              size;
    int                 status;
    int                 auto_adj_size = FALSE;

    KA_TRACE( 10, ("__kmp_create_monitor: try to create monitor\n" ) );

    KMP_MB();       /* Flush all pending memory write invalidates. */

    th->th.th_info.ds.ds_tid  = KMP_GTID_MONITOR;
    th->th.th_info.ds.ds_gtid = KMP_GTID_MONITOR;
    #if KMP_REAL_TIME_FIX
        TCW_4( __kmp_global.g.g_time.dt.t_value, -1 ); // Will use it for synchronization a bit later.
    #else
        TCW_4( __kmp_global.g.g_time.dt.t_value, 0 );
    #endif // KMP_REAL_TIME_FIX

    #ifdef KMP_THREAD_ATTR
        if ( __kmp_monitor_stksize == 0 ) {
            __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE;
            auto_adj_size = TRUE;
        }
        status = pthread_attr_init( &thread_attr );
        if ( status != 0 ) {
            __kmp_msg(
                kmp_ms_fatal,
                KMP_MSG( CantInitThreadAttrs ),
                KMP_ERR( status ),
                __kmp_msg_null
            );
        }; // if
        status = pthread_attr_setdetachstate( & thread_attr, PTHREAD_CREATE_JOINABLE );
        if ( status != 0 ) {
            __kmp_msg(
                kmp_ms_fatal,
                KMP_MSG( CantSetMonitorState ),
                KMP_ERR( status ),
                __kmp_msg_null
            );
        }; // if

        #ifdef _POSIX_THREAD_ATTR_STACKSIZE
            status = pthread_attr_getstacksize( & thread_attr, & size );
            KMP_CHECK_SYSFAIL( "pthread_attr_getstacksize", status );
        #else
            size = __kmp_sys_min_stksize;
        #endif /* _POSIX_THREAD_ATTR_STACKSIZE */
    #endif /* KMP_THREAD_ATTR */

    if ( __kmp_monitor_stksize == 0 ) {
        __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE;
    }
    if ( __kmp_monitor_stksize < __kmp_sys_min_stksize ) {
        __kmp_monitor_stksize = __kmp_sys_min_stksize;
    }

    KA_TRACE( 10, ( "__kmp_create_monitor: default stacksize = %lu bytes,"
                    "requested stacksize = %lu bytes\n",
                    size, __kmp_monitor_stksize ) );

    retry:

    /* Set stack size for this thread now. */

    #ifdef _POSIX_THREAD_ATTR_STACKSIZE
        KA_TRACE( 10, ( "__kmp_create_monitor: setting stacksize = %lu bytes,",
                        __kmp_monitor_stksize ) );
        status = pthread_attr_setstacksize( & thread_attr, __kmp_monitor_stksize );
        if ( status != 0 ) {
            if ( auto_adj_size ) {
                __kmp_monitor_stksize *= 2;
                goto retry;
            }
            __kmp_msg(
                kmp_ms_warning,  // should this be fatal?  BB
                KMP_MSG( CantSetMonitorStackSize, (long int) __kmp_monitor_stksize ),
                KMP_ERR( status ),
                KMP_HNT( ChangeMonitorStackSize ),
                __kmp_msg_null
            );
        }; // if
    #endif /* _POSIX_THREAD_ATTR_STACKSIZE */

    status = pthread_create( &handle, & thread_attr, __kmp_launch_monitor, (void *) th );

    if ( status != 0 ) {
        #ifdef _POSIX_THREAD_ATTR_STACKSIZE
            if ( status == EINVAL ) {
                if ( auto_adj_size && ( __kmp_monitor_stksize < (size_t)0x40000000 ) ) {
                    __kmp_monitor_stksize *= 2;
                    goto retry;
                }
                __kmp_msg(
                    kmp_ms_fatal,
                    KMP_MSG( CantSetMonitorStackSize, __kmp_monitor_stksize ),
                    KMP_ERR( status ),
                    KMP_HNT( IncreaseMonitorStackSize ),
                    __kmp_msg_null
                );
            }; // if
            if ( status == ENOMEM ) {
                __kmp_msg(
                    kmp_ms_fatal,
                    KMP_MSG( CantSetMonitorStackSize, __kmp_monitor_stksize ),
                    KMP_ERR( status ),
                    KMP_HNT( DecreaseMonitorStackSize ),
                    __kmp_msg_null
                );
            }; // if
        #endif /* _POSIX_THREAD_ATTR_STACKSIZE */
        if ( status == EAGAIN ) {
            __kmp_msg(
                kmp_ms_fatal,
                KMP_MSG( NoResourcesForMonitorThread ),
                KMP_ERR( status ),
                KMP_HNT( DecreaseNumberOfThreadsInUse ),
                __kmp_msg_null
            );
        }; // if
        KMP_SYSFAIL( "pthread_create", status );
    }; // if

    th->th.th_info.ds.ds_thread = handle;

    #if KMP_REAL_TIME_FIX
        // Wait until the monitor thread has really started and has set its *priority*.
        KMP_DEBUG_ASSERT( sizeof( kmp_uint32 ) == sizeof( __kmp_global.g.g_time.dt.t_value ) );
        __kmp_wait_yield_4(
            (kmp_uint32 volatile *) & __kmp_global.g.g_time.dt.t_value, -1, & __kmp_neq_4, NULL
        );
    #endif // KMP_REAL_TIME_FIX

    #ifdef KMP_THREAD_ATTR
        status = pthread_attr_destroy( & thread_attr );
        if ( status != 0 ) {
            __kmp_msg(
                kmp_ms_warning,
                KMP_MSG( CantDestroyThreadAttrs ),
                KMP_ERR( status ),
                __kmp_msg_null
            );
        }; // if
    #endif

    KMP_MB();       /* Flush all pending memory write invalidates. */

    KA_TRACE( 10, ( "__kmp_create_monitor: monitor created %#.8lx\n", th->th.th_info.ds.ds_thread ) );

} // __kmp_create_monitor

void
__kmp_exit_thread(
    int exit_status
) {
    pthread_exit( (void *)(intptr_t) exit_status );
} // __kmp_exit_thread

void __kmp_resume_monitor();

void
__kmp_reap_monitor( kmp_info_t *th )
{
    int          status;
    void        *exit_val;

    KA_TRACE( 10, ("__kmp_reap_monitor: try to reap monitor thread with handle %#.8lx\n",
                   th->th.th_info.ds.ds_thread ) );

    // If monitor has been created, its tid and gtid should be KMP_GTID_MONITOR.
    // If both tid and gtid are 0, it means the monitor did not ever start.
    // If both tid and gtid are KMP_GTID_DNE, the monitor has been shut down.
    KMP_DEBUG_ASSERT( th->th.th_info.ds.ds_tid == th->th.th_info.ds.ds_gtid );
    if ( th->th.th_info.ds.ds_gtid != KMP_GTID_MONITOR ) {
        return;
    }; // if

    KMP_MB();       /* Flush all pending memory write invalidates. */


    /* First, check to see whether the monitor thread exists.  This could prevent a hang,
       but if the monitor dies after the pthread_kill call and before the pthread_join
       call, it will still hang. */

    status = pthread_kill( th->th.th_info.ds.ds_thread, 0 );
    if (status == ESRCH) {

        KA_TRACE( 10, ("__kmp_reap_monitor: monitor does not exist, returning\n") );

    } else
    {
        __kmp_resume_monitor();   // Wake up the monitor thread
        status = pthread_join( th->th.th_info.ds.ds_thread, & exit_val);
        if (exit_val != th) {
            __kmp_msg(
                kmp_ms_fatal,
                KMP_MSG( ReapMonitorError ),
                KMP_ERR( status ),
                __kmp_msg_null
            );
        }
    }

    th->th.th_info.ds.ds_tid  = KMP_GTID_DNE;
    th->th.th_info.ds.ds_gtid = KMP_GTID_DNE;

    KA_TRACE( 10, ("__kmp_reap_monitor: done reaping monitor thread with handle %#.8lx\n",
                   th->th.th_info.ds.ds_thread ) );

    KMP_MB();       /* Flush all pending memory write invalidates. */

}

void
__kmp_reap_worker( kmp_info_t *th )
{
    int          status;
    void        *exit_val;

    KMP_MB();       /* Flush all pending memory write invalidates. */

    KA_TRACE( 10, ("__kmp_reap_worker: try to reap T#%d\n", th->th.th_info.ds.ds_gtid ) );

    /* First, check to see whether the worker thread exists.  This could prevent a hang,
       but if the worker dies after the pthread_kill call and before the pthread_join
       call, it will still hang. */

    {
        status = pthread_kill( th->th.th_info.ds.ds_thread, 0 );
        if (status == ESRCH) {
            KA_TRACE( 10, ("__kmp_reap_worker: worker T#%d does not exist, returning\n",
                           th->th.th_info.ds.ds_gtid ) );
        }
        else {
            KA_TRACE( 10, ("__kmp_reap_worker: try to join with worker T#%d\n",
                           th->th.th_info.ds.ds_gtid ) );

            status = pthread_join( th->th.th_info.ds.ds_thread, & exit_val);
#ifdef KMP_DEBUG
            /* Don't expose these to the user until we understand when they trigger */
            if ( status != 0 ) {
                __kmp_msg(
                    kmp_ms_fatal,
                    KMP_MSG( ReapWorkerError ),
                    KMP_ERR( status ),
                    __kmp_msg_null
                );
            }
            if ( exit_val != th ) {
                KA_TRACE( 10, ( "__kmp_reap_worker: worker T#%d did not reap properly, "
                                "exit_val = %p\n",
                                th->th.th_info.ds.ds_gtid, exit_val ) );
            }
#endif /* KMP_DEBUG */
        }
    }

    KA_TRACE( 10, ("__kmp_reap_worker: done reaping T#%d\n", th->th.th_info.ds.ds_gtid ) );

    KMP_MB();       /* Flush all pending memory write invalidates. */
}


/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */

#if KMP_HANDLE_SIGNALS


static void
__kmp_null_handler( int signo )
{
    //  Do nothing, for doing SIG_IGN-type actions.
} // __kmp_null_handler


static void
__kmp_team_handler( int signo )
{
    if ( __kmp_global.g.g_abort == 0 ) {
        /* Stage 1 signal handler, let's shut down all of the threads */
        #ifdef KMP_DEBUG
            __kmp_debug_printf( "__kmp_team_handler: caught signal = %d\n", signo );
        #endif
        switch ( signo ) {
            case SIGHUP  :
            case SIGINT  :
            case SIGQUIT :
            case SIGILL  :
            case SIGABRT :
            case SIGFPE  :
            case SIGBUS  :
            case SIGSEGV :
            #ifdef SIGSYS
                case SIGSYS :
            #endif
            case SIGTERM :
                if ( __kmp_debug_buf ) {
                    __kmp_dump_debug_buffer( );
                }; // if
                KMP_MB();       // Flush all pending memory write invalidates.
                TCW_4( __kmp_global.g.g_abort, signo );
                KMP_MB();       // Flush all pending memory write invalidates.
                TCW_4( __kmp_global.g.g_done, TRUE );
                KMP_MB();       // Flush all pending memory write invalidates.
                break;
            default:
                #ifdef KMP_DEBUG
                    __kmp_debug_printf( "__kmp_team_handler: unknown signal type" );
                #endif
                break;
        }; // switch
    }; // if
} // __kmp_team_handler


static
void __kmp_sigaction( int signum, const struct sigaction * act, struct sigaction * oldact ) {
    int rc = sigaction( signum, act, oldact );
    KMP_CHECK_SYSFAIL_ERRNO( "sigaction", rc );
}


static void
__kmp_install_one_handler( int sig, sig_func_t handler_func, int parallel_init )
{
    KMP_MB();       // Flush all pending memory write invalidates.
    KB_TRACE( 60, ( "__kmp_install_one_handler( %d, ..., %d )\n", sig, parallel_init ) );
    if ( parallel_init ) {
        struct sigaction new_action;
        struct sigaction old_action;
        new_action.sa_handler = handler_func;
        new_action.sa_flags   = 0;
        sigfillset( & new_action.sa_mask );
        __kmp_sigaction( sig, & new_action, & old_action );
        if ( old_action.sa_handler == __kmp_sighldrs[ sig ].sa_handler ) {
            sigaddset( & __kmp_sigset, sig );
        } else {
            // Restore/keep user's handler if one previously installed.
            __kmp_sigaction( sig, & old_action, NULL );
        }; // if
    } else {
        // Save initial/system signal handlers to see if user handlers installed.
        __kmp_sigaction( sig, NULL, & __kmp_sighldrs[ sig ] );
    }; // if
    KMP_MB();       // Flush all pending memory write invalidates.
} // __kmp_install_one_handler


static void
__kmp_remove_one_handler( int sig )
{
    KB_TRACE( 60, ( "__kmp_remove_one_handler( %d )\n", sig ) );
    if ( sigismember( & __kmp_sigset, sig ) ) {
        struct sigaction old;
        KMP_MB();       // Flush all pending memory write invalidates.
        __kmp_sigaction( sig, & __kmp_sighldrs[ sig ], & old );
        if ( ( old.sa_handler != __kmp_team_handler ) && ( old.sa_handler != __kmp_null_handler ) ) {
            // Restore the user's signal handler.
            KB_TRACE( 10, ( "__kmp_remove_one_handler: oops, not our handler, restoring: sig=%d\n", sig ) );
            __kmp_sigaction( sig, & old, NULL );
        }; // if
        sigdelset( & __kmp_sigset, sig );
        KMP_MB();       // Flush all pending memory write invalidates.
    }; // if
} // __kmp_remove_one_handler


void
__kmp_install_signals( int parallel_init )
{
    KB_TRACE( 10, ( "__kmp_install_signals( %d )\n", parallel_init ) );
    if ( __kmp_handle_signals || ! parallel_init ) {
        // If ! parallel_init, we do not install handlers, just save original handlers.
        // Let us do it even if __kmp_handle_signals is 0.
        sigemptyset( & __kmp_sigset );
        __kmp_install_one_handler( SIGHUP,  __kmp_team_handler, parallel_init );
        __kmp_install_one_handler( SIGINT,  __kmp_team_handler, parallel_init );
        __kmp_install_one_handler( SIGQUIT, __kmp_team_handler, parallel_init );
        __kmp_install_one_handler( SIGILL,  __kmp_team_handler, parallel_init );
        __kmp_install_one_handler( SIGABRT, __kmp_team_handler, parallel_init );
        __kmp_install_one_handler( SIGFPE,  __kmp_team_handler, parallel_init );
        __kmp_install_one_handler( SIGBUS,  __kmp_team_handler, parallel_init );
        __kmp_install_one_handler( SIGSEGV, __kmp_team_handler, parallel_init );
        #ifdef SIGSYS
            __kmp_install_one_handler( SIGSYS,  __kmp_team_handler, parallel_init );
        #endif // SIGSYS
        __kmp_install_one_handler( SIGTERM, __kmp_team_handler, parallel_init );
        #ifdef SIGPIPE
            __kmp_install_one_handler( SIGPIPE, __kmp_team_handler, parallel_init );
        #endif // SIGPIPE
    }; // if
} // __kmp_install_signals


void
__kmp_remove_signals( void )
{
    int    sig;
    KB_TRACE( 10, ( "__kmp_remove_signals()\n" ) );
    for ( sig = 1; sig < NSIG; ++ sig ) {
        __kmp_remove_one_handler( sig );
    }; // for sig
} // __kmp_remove_signals


#endif // KMP_HANDLE_SIGNALS

/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */

void
__kmp_enable( int new_state )
{
    #ifdef KMP_CANCEL_THREADS
        int status, old_state;
        status = pthread_setcancelstate( new_state, & old_state );
        KMP_CHECK_SYSFAIL( "pthread_setcancelstate", status );
        KMP_DEBUG_ASSERT( old_state == PTHREAD_CANCEL_DISABLE );
    #endif
}

void
__kmp_disable( int * old_state )
{
    #ifdef KMP_CANCEL_THREADS
        int status;
        status = pthread_setcancelstate( PTHREAD_CANCEL_DISABLE, old_state );
        KMP_CHECK_SYSFAIL( "pthread_setcancelstate", status );
    #endif
}

/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */

static void
__kmp_atfork_prepare (void)
{
    /*  nothing to do  */
}

static void
__kmp_atfork_parent (void)
{
    /*  nothing to do  */
}

/*
    Reset the library so execution in the child starts "all over again" with
    clean data structures in initial states.  Don't worry about freeing memory
    allocated by parent, just abandon it to be safe.
*/
static void
__kmp_atfork_child (void)
{
    /* TODO make sure this is done right for nested/sibling */
    // ATT:  Memory leaks are here? TODO: Check it and fix.
    /* KMP_ASSERT( 0 ); */

    ++__kmp_fork_count;

    __kmp_init_runtime = FALSE;
    __kmp_init_monitor = 0;
    __kmp_init_parallel = FALSE;
    __kmp_init_middle = FALSE;
    __kmp_init_serial = FALSE;
    TCW_4(__kmp_init_gtid, FALSE);
    __kmp_init_common = FALSE;

    TCW_4(__kmp_init_user_locks, FALSE);
#if ! KMP_USE_DYNAMIC_LOCK
    __kmp_user_lock_table.used = 1;
    __kmp_user_lock_table.allocated = 0;
    __kmp_user_lock_table.table = NULL;
    __kmp_lock_blocks = NULL;
#endif

    __kmp_all_nth = 0;
    TCW_4(__kmp_nth, 0);

    /* Must actually zero all the *cache arguments passed to __kmpc_threadprivate here
       so threadprivate doesn't use stale data */
    KA_TRACE( 10, ( "__kmp_atfork_child: checking cache address list %p\n",
                    __kmp_threadpriv_cache_list ) );

    while ( __kmp_threadpriv_cache_list != NULL ) {

        if ( *__kmp_threadpriv_cache_list -> addr != NULL ) {
            KC_TRACE( 50, ( "__kmp_atfork_child: zeroing cache at address %p\n",
                            &(*__kmp_threadpriv_cache_list -> addr) ) );

            *__kmp_threadpriv_cache_list -> addr = NULL;
        }
        __kmp_threadpriv_cache_list = __kmp_threadpriv_cache_list -> next;
    }

    __kmp_init_runtime = FALSE;

    /* reset statically initialized locks */
    __kmp_init_bootstrap_lock( &__kmp_initz_lock );
    __kmp_init_bootstrap_lock( &__kmp_stdio_lock );
    __kmp_init_bootstrap_lock( &__kmp_console_lock );

    /* This is necessary to make sure no stale data is left around */
    /* AC: customers complain that we use unsafe routines in the atfork
       handler. Mathworks: dlsym() is unsafe. We call dlsym and dlopen
       in dynamic_link when checking the presence of the shared tbbmalloc library.
       The suggestion is to make the library initialization lazier, similar
       to what is done for __kmpc_begin(). */
    // TODO: synchronize all static initializations with regular library
    //       startup; look at kmp_global.c and etc.
    //__kmp_internal_begin ();

}

void
__kmp_register_atfork(void) {
    if ( __kmp_need_register_atfork ) {
        int status = pthread_atfork( __kmp_atfork_prepare, __kmp_atfork_parent, __kmp_atfork_child );
        KMP_CHECK_SYSFAIL( "pthread_atfork", status );
        __kmp_need_register_atfork = FALSE;
    }
}

void
__kmp_suspend_initialize( void )
{
    int status;
    status = pthread_mutexattr_init( &__kmp_suspend_mutex_attr );
    KMP_CHECK_SYSFAIL( "pthread_mutexattr_init", status );
    status = pthread_condattr_init( &__kmp_suspend_cond_attr );
    KMP_CHECK_SYSFAIL( "pthread_condattr_init", status );
}

static void
__kmp_suspend_initialize_thread( kmp_info_t *th )
{
    if ( th->th.th_suspend_init_count <= __kmp_fork_count ) {
        /* this means we haven't initialized the suspension pthread objects for this thread
           in this instance of the process */
        int     status;
        status = pthread_cond_init( &th->th.th_suspend_cv.c_cond, &__kmp_suspend_cond_attr );
        KMP_CHECK_SYSFAIL( "pthread_cond_init", status );
        status = pthread_mutex_init( &th->th.th_suspend_mx.m_mutex, & __kmp_suspend_mutex_attr );
        KMP_CHECK_SYSFAIL( "pthread_mutex_init", status );
        *(volatile int*)&th->th.th_suspend_init_count = __kmp_fork_count + 1;
    };
}

1660void
1661__kmp_suspend_uninitialize_thread( kmp_info_t *th )
1662{
1663 if(th->th.th_suspend_init_count > __kmp_fork_count) {
        /* this means we have initialized the suspension pthread objects for this thread
           in this instance of the process */
1666 int status;
1667
1668 status = pthread_cond_destroy( &th->th.th_suspend_cv.c_cond );
1669 if ( status != 0 && status != EBUSY ) {
1670 KMP_SYSFAIL( "pthread_cond_destroy", status );
1671 };
1672 status = pthread_mutex_destroy( &th->th.th_suspend_mx.m_mutex );
1673 if ( status != 0 && status != EBUSY ) {
1674 KMP_SYSFAIL( "pthread_mutex_destroy", status );
1675 };
1676 --th->th.th_suspend_init_count;
1677 KMP_DEBUG_ASSERT(th->th.th_suspend_init_count == __kmp_fork_count);
1678 }
1679}
1680
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001681/* This routine puts the calling thread to sleep after setting the
1682 * sleep bit for the indicated flag variable to true.
Jim Cownie5e8470a2013-09-27 10:38:44 +00001683 */
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001684template <class C>
1685static inline void __kmp_suspend_template( int th_gtid, C *flag )
Jim Cownie5e8470a2013-09-27 10:38:44 +00001686{
Jonathan Peyton45be4502015-08-11 21:36:41 +00001687 KMP_TIME_DEVELOPER_BLOCK(USER_suspend);
Jim Cownie5e8470a2013-09-27 10:38:44 +00001688 kmp_info_t *th = __kmp_threads[th_gtid];
1689 int status;
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001690 typename C::flag_t old_spin;
Jim Cownie5e8470a2013-09-27 10:38:44 +00001691
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001692 KF_TRACE( 30, ("__kmp_suspend_template: T#%d enter for flag = %p\n", th_gtid, flag->get() ) );
Jim Cownie5e8470a2013-09-27 10:38:44 +00001693
1694 __kmp_suspend_initialize_thread( th );
1695
1696 status = pthread_mutex_lock( &th->th.th_suspend_mx.m_mutex );
1697 KMP_CHECK_SYSFAIL( "pthread_mutex_lock", status );
1698
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001699 KF_TRACE( 10, ( "__kmp_suspend_template: T#%d setting sleep bit for spin(%p)\n",
1700 th_gtid, flag->get() ) );
Jim Cownie5e8470a2013-09-27 10:38:44 +00001701
1702 /* TODO: shouldn't this use release semantics to ensure that __kmp_suspend_initialize_thread
1703 gets called first?
1704 */
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001705 old_spin = flag->set_sleeping();
Jim Cownie5e8470a2013-09-27 10:38:44 +00001706
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001707 KF_TRACE( 5, ( "__kmp_suspend_template: T#%d set sleep bit for spin(%p)==%d\n",
1708 th_gtid, flag->get(), *(flag->get()) ) );
Jim Cownie5e8470a2013-09-27 10:38:44 +00001709
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001710 if ( flag->done_check_val(old_spin) ) {
1711 old_spin = flag->unset_sleeping();
1712 KF_TRACE( 5, ( "__kmp_suspend_template: T#%d false alarm, reset sleep bit for spin(%p)\n",
1713 th_gtid, flag->get()) );
Jim Cownie5e8470a2013-09-27 10:38:44 +00001714 } else {
Jim Cownie5e8470a2013-09-27 10:38:44 +00001715 /* Encapsulate in a loop as the documentation states that this may
1716 * "with low probability" return when the condition variable has
1717 * not been signaled or broadcast
1718 */
1719 int deactivated = FALSE;
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001720 TCW_PTR(th->th.th_sleep_loc, (void *)flag);
1721 while ( flag->is_sleeping() ) {
Jim Cownie5e8470a2013-09-27 10:38:44 +00001722#ifdef DEBUG_SUSPEND
1723 char buffer[128];
1724 __kmp_suspend_count++;
1725 __kmp_print_cond( buffer, &th->th.th_suspend_cv );
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001726 __kmp_printf( "__kmp_suspend_template: suspending T#%d: %s\n", th_gtid, buffer );
Jim Cownie5e8470a2013-09-27 10:38:44 +00001727#endif
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001728 // Mark the thread as no longer active (only in the first iteration of the loop).
Jim Cownie5e8470a2013-09-27 10:38:44 +00001729 if ( ! deactivated ) {
1730 th->th.th_active = FALSE;
1731 if ( th->th.th_active_in_pool ) {
1732 th->th.th_active_in_pool = FALSE;
1733 KMP_TEST_THEN_DEC32(
1734 (kmp_int32 *) &__kmp_thread_pool_active_nth );
1735 KMP_DEBUG_ASSERT( TCR_4(__kmp_thread_pool_active_nth) >= 0 );
1736 }
1737 deactivated = TRUE;
1738
1739
1740 }
1741
1742#if USE_SUSPEND_TIMEOUT
1743 struct timespec now;
1744 struct timeval tval;
1745 int msecs;
1746
1747 status = gettimeofday( &tval, NULL );
1748 KMP_CHECK_SYSFAIL_ERRNO( "gettimeofday", status );
1749 TIMEVAL_TO_TIMESPEC( &tval, &now );
1750
1751 msecs = (4*__kmp_dflt_blocktime) + 200;
1752 now.tv_sec += msecs / 1000;
1753 now.tv_nsec += (msecs % 1000)*1000;
1754
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001755 KF_TRACE( 15, ( "__kmp_suspend_template: T#%d about to perform pthread_cond_timedwait\n",
Jim Cownie5e8470a2013-09-27 10:38:44 +00001756 th_gtid ) );
1757 status = pthread_cond_timedwait( &th->th.th_suspend_cv.c_cond, &th->th.th_suspend_mx.m_mutex, & now );
1758#else
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001759 KF_TRACE( 15, ( "__kmp_suspend_template: T#%d about to perform pthread_cond_wait\n",
Jim Cownie5e8470a2013-09-27 10:38:44 +00001760 th_gtid ) );
1761
1762 status = pthread_cond_wait( &th->th.th_suspend_cv.c_cond, &th->th.th_suspend_mx.m_mutex );
1763#endif
1764
1765 if ( (status != 0) && (status != EINTR) && (status != ETIMEDOUT) ) {
1766 KMP_SYSFAIL( "pthread_cond_wait", status );
1767 }
1768#ifdef KMP_DEBUG
1769 if (status == ETIMEDOUT) {
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001770 if ( flag->is_sleeping() ) {
1771 KF_TRACE( 100, ( "__kmp_suspend_template: T#%d timeout wakeup\n", th_gtid ) );
Jim Cownie5e8470a2013-09-27 10:38:44 +00001772 } else {
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001773 KF_TRACE( 2, ( "__kmp_suspend_template: T#%d timeout wakeup, sleep bit not set!\n",
Jim Cownie5e8470a2013-09-27 10:38:44 +00001774 th_gtid ) );
1775 }
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001776 } else if ( flag->is_sleeping() ) {
1777 KF_TRACE( 100, ( "__kmp_suspend_template: T#%d spurious wakeup\n", th_gtid ) );
Jim Cownie5e8470a2013-09-27 10:38:44 +00001778 }
1779#endif
Jim Cownie5e8470a2013-09-27 10:38:44 +00001780 } // while
1781
        // Mark the thread as active again (if it was previously marked as inactive).
Jim Cownie5e8470a2013-09-27 10:38:44 +00001783 if ( deactivated ) {
1784 th->th.th_active = TRUE;
1785 if ( TCR_4(th->th.th_in_pool) ) {
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001786 KMP_TEST_THEN_INC32( (kmp_int32 *) &__kmp_thread_pool_active_nth );
Jim Cownie5e8470a2013-09-27 10:38:44 +00001787 th->th.th_active_in_pool = TRUE;
1788 }
1789 }
1790 }
1791
1792#ifdef DEBUG_SUSPEND
1793 {
1794 char buffer[128];
1795 __kmp_print_cond( buffer, &th->th.th_suspend_cv);
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001796 __kmp_printf( "__kmp_suspend_template: T#%d has awakened: %s\n", th_gtid, buffer );
Jim Cownie5e8470a2013-09-27 10:38:44 +00001797 }
1798#endif
1799
1800
1801 status = pthread_mutex_unlock( &th->th.th_suspend_mx.m_mutex );
1802 KMP_CHECK_SYSFAIL( "pthread_mutex_unlock", status );
1803
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001804 KF_TRACE( 30, ("__kmp_suspend_template: T#%d exit\n", th_gtid ) );
1805}
1806
1807void __kmp_suspend_32(int th_gtid, kmp_flag_32 *flag) {
1808 __kmp_suspend_template(th_gtid, flag);
1809}
1810void __kmp_suspend_64(int th_gtid, kmp_flag_64 *flag) {
1811 __kmp_suspend_template(th_gtid, flag);
1812}
1813void __kmp_suspend_oncore(int th_gtid, kmp_flag_oncore *flag) {
1814 __kmp_suspend_template(th_gtid, flag);
Jim Cownie5e8470a2013-09-27 10:38:44 +00001815}
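/*
   Illustration only (not part of the runtime): the generic condition-variable
   pattern that __kmp_suspend_template() above follows. The flag machinery is
   reduced to a plain int named "sleeping" for the sketch.

       pthread_mutex_lock( &mx );
       sleeping = 1;                        // set the sleep bit
       while ( sleeping ) {                 // re-check: wakeups may be spurious
           pthread_cond_wait( &cv, &mx );   // atomically releases mx while waiting
       }
       pthread_mutex_unlock( &mx );

   The waker clears "sleeping" and signals while holding the same mutex, so a
   resume that lands between setting the bit and calling pthread_cond_wait()
   cannot be lost.
*/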
1816
1817
/* This routine signals the thread specified by target_gtid to wake up
 * after setting the sleep bit indicated by the flag argument to FALSE.
 * The target thread must already have called __kmp_suspend_template().
 */
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001822template <class C>
1823static inline void __kmp_resume_template( int target_gtid, C *flag )
Jim Cownie5e8470a2013-09-27 10:38:44 +00001824{
Jonathan Peyton45be4502015-08-11 21:36:41 +00001825 KMP_TIME_DEVELOPER_BLOCK(USER_resume);
Jim Cownie5e8470a2013-09-27 10:38:44 +00001826 kmp_info_t *th = __kmp_threads[target_gtid];
1827 int status;
Jim Cownie5e8470a2013-09-27 10:38:44 +00001828
1829#ifdef KMP_DEBUG
1830 int gtid = TCR_4(__kmp_init_gtid) ? __kmp_get_gtid() : -1;
1831#endif
1832
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001833 KF_TRACE( 30, ( "__kmp_resume_template: T#%d wants to wakeup T#%d enter\n", gtid, target_gtid ) );
Jim Cownie5e8470a2013-09-27 10:38:44 +00001834 KMP_DEBUG_ASSERT( gtid != target_gtid );
1835
1836 __kmp_suspend_initialize_thread( th );
1837
1838 status = pthread_mutex_lock( &th->th.th_suspend_mx.m_mutex );
1839 KMP_CHECK_SYSFAIL( "pthread_mutex_lock", status );
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001840
1841 if (!flag) {
1842 flag = (C *)th->th.th_sleep_loc;
1843 }
1844
1845 if (!flag) {
1846 KF_TRACE( 5, ( "__kmp_resume_template: T#%d exiting, thread T#%d already awake: flag(%p)\n",
1847 gtid, target_gtid, NULL ) );
1848 status = pthread_mutex_unlock( &th->th.th_suspend_mx.m_mutex );
1849 KMP_CHECK_SYSFAIL( "pthread_mutex_unlock", status );
1850 return;
1851 }
1852 else {
1853 typename C::flag_t old_spin = flag->unset_sleeping();
1854 if ( ! flag->is_sleeping_val(old_spin) ) {
1855 KF_TRACE( 5, ( "__kmp_resume_template: T#%d exiting, thread T#%d already awake: flag(%p): "
1856 "%u => %u\n",
1857 gtid, target_gtid, flag->get(), old_spin, *flag->get() ) );
Jim Cownie5e8470a2013-09-27 10:38:44 +00001858
1859 status = pthread_mutex_unlock( &th->th.th_suspend_mx.m_mutex );
1860 KMP_CHECK_SYSFAIL( "pthread_mutex_unlock", status );
1861 return;
1862 }
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001863 KF_TRACE( 5, ( "__kmp_resume_template: T#%d about to wakeup T#%d, reset sleep bit for flag's loc(%p): "
1864 "%u => %u\n",
1865 gtid, target_gtid, flag->get(), old_spin, *flag->get() ) );
Jim Cownie5e8470a2013-09-27 10:38:44 +00001866 }
1867 TCW_PTR(th->th.th_sleep_loc, NULL);
1868
Jim Cownie5e8470a2013-09-27 10:38:44 +00001869
1870#ifdef DEBUG_SUSPEND
1871 {
1872 char buffer[128];
1873 __kmp_print_cond( buffer, &th->th.th_suspend_cv );
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001874 __kmp_printf( "__kmp_resume_template: T#%d resuming T#%d: %s\n", gtid, target_gtid, buffer );
Jim Cownie5e8470a2013-09-27 10:38:44 +00001875 }
1876#endif
1877
1878
1879 status = pthread_cond_signal( &th->th.th_suspend_cv.c_cond );
1880 KMP_CHECK_SYSFAIL( "pthread_cond_signal", status );
1881 status = pthread_mutex_unlock( &th->th.th_suspend_mx.m_mutex );
1882 KMP_CHECK_SYSFAIL( "pthread_mutex_unlock", status );
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001883 KF_TRACE( 30, ( "__kmp_resume_template: T#%d exiting after signaling wake up for T#%d\n",
Jim Cownie5e8470a2013-09-27 10:38:44 +00001884 gtid, target_gtid ) );
1885}
1886
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001887void __kmp_resume_32(int target_gtid, kmp_flag_32 *flag) {
1888 __kmp_resume_template(target_gtid, flag);
1889}
1890void __kmp_resume_64(int target_gtid, kmp_flag_64 *flag) {
1891 __kmp_resume_template(target_gtid, flag);
1892}
1893void __kmp_resume_oncore(int target_gtid, kmp_flag_oncore *flag) {
1894 __kmp_resume_template(target_gtid, flag);
1895}
1896
Jim Cownie07ea89f2014-09-03 11:10:54 +00001897void
1898__kmp_resume_monitor()
1899{
1900 int status;
1901#ifdef KMP_DEBUG
1902 int gtid = TCR_4(__kmp_init_gtid) ? __kmp_get_gtid() : -1;
1903 KF_TRACE( 30, ( "__kmp_resume_monitor: T#%d wants to wakeup T#%d enter\n",
1904 gtid, KMP_GTID_MONITOR ) );
1905 KMP_DEBUG_ASSERT( gtid != KMP_GTID_MONITOR );
1906#endif
1907 status = pthread_mutex_lock( &__kmp_wait_mx.m_mutex );
1908 KMP_CHECK_SYSFAIL( "pthread_mutex_lock", status );
1909#ifdef DEBUG_SUSPEND
1910 {
1911 char buffer[128];
1912 __kmp_print_cond( buffer, &__kmp_wait_cv.c_cond );
1913 __kmp_printf( "__kmp_resume_monitor: T#%d resuming T#%d: %s\n", gtid, KMP_GTID_MONITOR, buffer );
1914 }
1915#endif
1916 status = pthread_cond_signal( &__kmp_wait_cv.c_cond );
1917 KMP_CHECK_SYSFAIL( "pthread_cond_signal", status );
1918 status = pthread_mutex_unlock( &__kmp_wait_mx.m_mutex );
1919 KMP_CHECK_SYSFAIL( "pthread_mutex_unlock", status );
1920 KF_TRACE( 30, ( "__kmp_resume_monitor: T#%d exiting after signaling wake up for T#%d\n",
1921 gtid, KMP_GTID_MONITOR ) );
1922}
Jim Cownie5e8470a2013-09-27 10:38:44 +00001923
1924/* ------------------------------------------------------------------------ */
1925/* ------------------------------------------------------------------------ */
1926
1927void
1928__kmp_yield( int cond )
1929{
1930 if (cond && __kmp_yielding_on) {
1931 sched_yield();
1932 }
1933}
1934
1935/* ------------------------------------------------------------------------ */
1936/* ------------------------------------------------------------------------ */
1937
1938void
1939__kmp_gtid_set_specific( int gtid )
1940{
1941 int status;
1942 KMP_ASSERT( __kmp_init_runtime );
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001943 status = pthread_setspecific( __kmp_gtid_threadprivate_key, (void*)(intptr_t)(gtid+1) );
Jim Cownie5e8470a2013-09-27 10:38:44 +00001944 KMP_CHECK_SYSFAIL( "pthread_setspecific", status );
1945}
1946
1947int
1948__kmp_gtid_get_specific()
1949{
1950 int gtid;
1951 if ( !__kmp_init_runtime ) {
        KA_TRACE( 50, ("__kmp_gtid_get_specific: runtime shutdown, returning KMP_GTID_SHUTDOWN\n" ) );
1953 return KMP_GTID_SHUTDOWN;
1954 }
1955 gtid = (int)(size_t)pthread_getspecific( __kmp_gtid_threadprivate_key );
1956 if ( gtid == 0 ) {
1957 gtid = KMP_GTID_DNE;
1958 }
1959 else {
1960 gtid--;
1961 }
1962 KA_TRACE( 50, ("__kmp_gtid_get_specific: key:%d gtid:%d\n",
1963 __kmp_gtid_threadprivate_key, gtid ));
1964 return gtid;
1965}
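/*
   Note on the gtid+1 encoding above: pthread_getspecific() returns NULL (0)
   both when the key was never set in this thread and when the stored value is
   0, so the runtime stores gtid+1 and subtracts 1 on the way out. A hedged
   sketch of the round trip:

       __kmp_gtid_set_specific( 0 );          // stores (void*)1 under the key
       int g = __kmp_gtid_get_specific();     // reads 1, returns 1 - 1 == 0
       // A thread that never called set_specific reads 0 -> KMP_GTID_DNE.
*/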
1966
1967/* ------------------------------------------------------------------------ */
1968/* ------------------------------------------------------------------------ */
1969
1970double
1971__kmp_read_cpu_time( void )
1972{
1973 /*clock_t t;*/
1974 struct tms buffer;
1975
1976 /*t =*/ times( & buffer );
1977
1978 return (buffer.tms_utime + buffer.tms_cutime) / (double) CLOCKS_PER_SEC;
1979}
1980
1981int
1982__kmp_read_system_info( struct kmp_sys_info *info )
1983{
1984 int status;
1985 struct rusage r_usage;
1986
1987 memset( info, 0, sizeof( *info ) );
1988
1989 status = getrusage( RUSAGE_SELF, &r_usage);
1990 KMP_CHECK_SYSFAIL_ERRNO( "getrusage", status );
1991
1992 info->maxrss = r_usage.ru_maxrss; /* the maximum resident set size utilized (in kilobytes) */
1993 info->minflt = r_usage.ru_minflt; /* the number of page faults serviced without any I/O */
1994 info->majflt = r_usage.ru_majflt; /* the number of page faults serviced that required I/O */
1995 info->nswap = r_usage.ru_nswap; /* the number of times a process was "swapped" out of memory */
1996 info->inblock = r_usage.ru_inblock; /* the number of times the file system had to perform input */
1997 info->oublock = r_usage.ru_oublock; /* the number of times the file system had to perform output */
    info->nvcsw = r_usage.ru_nvcsw; /* the number of times a context switch was performed voluntarily */
1999 info->nivcsw = r_usage.ru_nivcsw; /* the number of times a context switch was forced */
2000
2001 return (status != 0);
2002}
2003
2004/* ------------------------------------------------------------------------ */
2005/* ------------------------------------------------------------------------ */
2006
2007
2008void
2009__kmp_read_system_time( double *delta )
2010{
2011 double t_ns;
2012 struct timeval tval;
2013 struct timespec stop;
2014 int status;
2015
2016 status = gettimeofday( &tval, NULL );
2017 KMP_CHECK_SYSFAIL_ERRNO( "gettimeofday", status );
2018 TIMEVAL_TO_TIMESPEC( &tval, &stop );
2019 t_ns = TS2NS(stop) - TS2NS(__kmp_sys_timer_data.start);
2020 *delta = (t_ns * 1e-9);
2021}
2022
2023void
2024__kmp_clear_system_time( void )
2025{
2026 struct timeval tval;
2027 int status;
2028 status = gettimeofday( &tval, NULL );
2029 KMP_CHECK_SYSFAIL_ERRNO( "gettimeofday", status );
2030 TIMEVAL_TO_TIMESPEC( &tval, &__kmp_sys_timer_data.start );
2031}
2032
2033/* ------------------------------------------------------------------------ */
2034/* ------------------------------------------------------------------------ */
2035
2036#ifdef BUILD_TV
2037
2038void
2039__kmp_tv_threadprivate_store( kmp_info_t *th, void *global_addr, void *thread_addr )
2040{
2041 struct tv_data *p;
2042
2043 p = (struct tv_data *) __kmp_allocate( sizeof( *p ) );
2044
2045 p->u.tp.global_addr = global_addr;
2046 p->u.tp.thread_addr = thread_addr;
2047
2048 p->type = (void *) 1;
2049
2050 p->next = th->th.th_local.tv_data;
2051 th->th.th_local.tv_data = p;
2052
2053 if ( p->next == 0 ) {
2054 int rc = pthread_setspecific( __kmp_tv_key, p );
2055 KMP_CHECK_SYSFAIL( "pthread_setspecific", rc );
2056 }
2057}
2058
2059#endif /* BUILD_TV */
2060
2061/* ------------------------------------------------------------------------ */
2062/* ------------------------------------------------------------------------ */
2063
2064static int
2065__kmp_get_xproc( void ) {
2066
2067 int r = 0;
2068
Joerg Sonnenberger7649cd42015-09-21 20:29:12 +00002069 #if KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD
Jim Cownie5e8470a2013-09-27 10:38:44 +00002070
2071 r = sysconf( _SC_NPROCESSORS_ONLN );
2072
2073 #elif KMP_OS_DARWIN
2074
2075 // Bug C77011 High "OpenMP Threads and number of active cores".
2076
2077 // Find the number of available CPUs.
2078 kern_return_t rc;
2079 host_basic_info_data_t info;
2080 mach_msg_type_number_t num = HOST_BASIC_INFO_COUNT;
2081 rc = host_info( mach_host_self(), HOST_BASIC_INFO, (host_info_t) & info, & num );
2082 if ( rc == 0 && num == HOST_BASIC_INFO_COUNT ) {
2083 // Cannot use KA_TRACE() here because this code works before trace support is
2084 // initialized.
2085 r = info.avail_cpus;
2086 } else {
2087 KMP_WARNING( CantGetNumAvailCPU );
2088 KMP_INFORM( AssumedNumCPU );
2089 }; // if
2090
2091 #else
2092
2093 #error "Unknown or unsupported OS."
2094
2095 #endif
2096
2097 return r > 0 ? r : 2; /* guess value of 2 if OS told us 0 */
2098
2099} // __kmp_get_xproc
2100
Jim Cownie181b4bb2013-12-23 17:28:57 +00002101int
2102__kmp_read_from_file( char const *path, char const *format, ... )
2103{
    int result;
    va_list args;

    va_start(args, format);
    FILE *f = fopen(path, "rb");
    if ( f == NULL ) {
        va_end(args);
        return 0;
    }
    result = vfscanf(f, format, args);
    va_end(args);
    fclose(f);

    return result;
Jim Cownie181b4bb2013-12-23 17:28:57 +00002115}
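/*
   Illustration only (not part of the runtime): how a scanf-style helper like
   __kmp_read_from_file() can be used. The procfs path below is a real Linux
   file, but treat the snippet as a sketch, not code the runtime ships.

       long threads_max = 0;
       if ( __kmp_read_from_file( "/proc/sys/kernel/threads-max", "%ld", &threads_max ) == 1 ) {
           // threads_max now holds the system-wide thread limit.
       }

   The return value is the vfscanf() result, i.e. the number of items matched,
   or 0 if the file could not be opened.
*/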
Jim Cownie5e8470a2013-09-27 10:38:44 +00002116
2117void
2118__kmp_runtime_initialize( void )
2119{
2120 int status;
2121 pthread_mutexattr_t mutex_attr;
2122 pthread_condattr_t cond_attr;
2123
2124 if ( __kmp_init_runtime ) {
2125 return;
2126 }; // if
2127
2128 #if ( KMP_ARCH_X86 || KMP_ARCH_X86_64 )
2129 if ( ! __kmp_cpuinfo.initialized ) {
2130 __kmp_query_cpuid( &__kmp_cpuinfo );
2131 }; // if
2132 #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
2133
Jim Cownie5e8470a2013-09-27 10:38:44 +00002134 __kmp_xproc = __kmp_get_xproc();
2135
2136 if ( sysconf( _SC_THREADS ) ) {
2137
2138 /* Query the maximum number of threads */
2139 __kmp_sys_max_nth = sysconf( _SC_THREAD_THREADS_MAX );
2140 if ( __kmp_sys_max_nth == -1 ) {
2141 /* Unlimited threads for NPTL */
2142 __kmp_sys_max_nth = INT_MAX;
2143 }
2144 else if ( __kmp_sys_max_nth <= 1 ) {
2145 /* Can't tell, just use PTHREAD_THREADS_MAX */
2146 __kmp_sys_max_nth = KMP_MAX_NTH;
2147 }
2148
2149 /* Query the minimum stack size */
2150 __kmp_sys_min_stksize = sysconf( _SC_THREAD_STACK_MIN );
2151 if ( __kmp_sys_min_stksize <= 1 ) {
2152 __kmp_sys_min_stksize = KMP_MIN_STKSIZE;
2153 }
2154 }
2155
2156 /* Set up minimum number of threads to switch to TLS gtid */
2157 __kmp_tls_gtid_min = KMP_TLS_GTID_MIN;
2158
2159
2160 #ifdef BUILD_TV
2161 {
2162 int rc = pthread_key_create( & __kmp_tv_key, 0 );
2163 KMP_CHECK_SYSFAIL( "pthread_key_create", rc );
2164 }
2165 #endif
2166
2167 status = pthread_key_create( &__kmp_gtid_threadprivate_key, __kmp_internal_end_dest );
2168 KMP_CHECK_SYSFAIL( "pthread_key_create", status );
2169 status = pthread_mutexattr_init( & mutex_attr );
2170 KMP_CHECK_SYSFAIL( "pthread_mutexattr_init", status );
2171 status = pthread_mutex_init( & __kmp_wait_mx.m_mutex, & mutex_attr );
2172 KMP_CHECK_SYSFAIL( "pthread_mutex_init", status );
2173 status = pthread_condattr_init( & cond_attr );
2174 KMP_CHECK_SYSFAIL( "pthread_condattr_init", status );
2175 status = pthread_cond_init( & __kmp_wait_cv.c_cond, & cond_attr );
2176 KMP_CHECK_SYSFAIL( "pthread_cond_init", status );
2177#if USE_ITT_BUILD
2178 __kmp_itt_initialize();
2179#endif /* USE_ITT_BUILD */
2180
2181 __kmp_init_runtime = TRUE;
2182}
2183
2184void
2185__kmp_runtime_destroy( void )
2186{
2187 int status;
2188
2189 if ( ! __kmp_init_runtime ) {
2190 return; // Nothing to do.
2191 };
2192
2193#if USE_ITT_BUILD
2194 __kmp_itt_destroy();
2195#endif /* USE_ITT_BUILD */
2196
2197 status = pthread_key_delete( __kmp_gtid_threadprivate_key );
2198 KMP_CHECK_SYSFAIL( "pthread_key_delete", status );
2199 #ifdef BUILD_TV
2200 status = pthread_key_delete( __kmp_tv_key );
2201 KMP_CHECK_SYSFAIL( "pthread_key_delete", status );
2202 #endif
2203
2204 status = pthread_mutex_destroy( & __kmp_wait_mx.m_mutex );
2205 if ( status != 0 && status != EBUSY ) {
2206 KMP_SYSFAIL( "pthread_mutex_destroy", status );
2207 }
2208 status = pthread_cond_destroy( & __kmp_wait_cv.c_cond );
2209 if ( status != 0 && status != EBUSY ) {
2210 KMP_SYSFAIL( "pthread_cond_destroy", status );
2211 }
Alp Toker763b9392014-02-28 09:42:41 +00002212 #if KMP_AFFINITY_SUPPORTED
Jim Cownie5e8470a2013-09-27 10:38:44 +00002213 __kmp_affinity_uninitialize();
Jim Cownie5e8470a2013-09-27 10:38:44 +00002214 #endif
2215
2216 __kmp_init_runtime = FALSE;
2217}
2218
2219
2220/* Put the thread to sleep for a time period */
2221/* NOTE: not currently used anywhere */
2222void
2223__kmp_thread_sleep( int millis )
2224{
2225 sleep( ( millis + 500 ) / 1000 );
2226}
2227
2228/* Calculate the elapsed wall clock time for the user */
2229void
2230__kmp_elapsed( double *t )
2231{
2232 int status;
2233# ifdef FIX_SGI_CLOCK
2234 struct timespec ts;
2235
2236 status = clock_gettime( CLOCK_PROCESS_CPUTIME_ID, &ts );
2237 KMP_CHECK_SYSFAIL_ERRNO( "clock_gettime", status );
Jonathan Peyton1e7a1dd2015-06-04 17:29:13 +00002238 *t = (double) ts.tv_nsec * (1.0 / (double) KMP_NSEC_PER_SEC) +
Jim Cownie5e8470a2013-09-27 10:38:44 +00002239 (double) ts.tv_sec;
2240# else
2241 struct timeval tv;
2242
2243 status = gettimeofday( & tv, NULL );
2244 KMP_CHECK_SYSFAIL_ERRNO( "gettimeofday", status );
Jonathan Peyton1e7a1dd2015-06-04 17:29:13 +00002245 *t = (double) tv.tv_usec * (1.0 / (double) KMP_USEC_PER_SEC) +
Jim Cownie5e8470a2013-09-27 10:38:44 +00002246 (double) tv.tv_sec;
2247# endif
2248}
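/*
   Illustration only (not part of the runtime): the intended stopwatch usage of
   __kmp_elapsed(). The variable names are invented for the sketch.

       double t_begin, t_end;
       __kmp_elapsed( &t_begin );
       // ... work to be timed ...
       __kmp_elapsed( &t_end );
       double seconds = t_end - t_begin;

   Only the difference of two readings is meaningful; the absolute value is
   just seconds since the gettimeofday() epoch.
*/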
2249
2250/* Calculate the elapsed wall clock tick for the user */
2251void
2252__kmp_elapsed_tick( double *t )
2253{
2254 *t = 1 / (double) CLOCKS_PER_SEC;
2255}
2256
2257/*
2258 Determine whether the given address is mapped into the current address space.
2259*/
2260
2261int
2262__kmp_is_address_mapped( void * addr ) {
2263
2264 int found = 0;
2265 int rc;
2266
Joerg Sonnenberger7649cd42015-09-21 20:29:12 +00002267 #if KMP_OS_LINUX || KMP_OS_FREEBSD
Jim Cownie5e8470a2013-09-27 10:38:44 +00002268
2269 /*
2270 On Linux* OS, read the /proc/<pid>/maps pseudo-file to get all the address ranges mapped
2271 into the address space.
2272 */
2273
2274 char * name = __kmp_str_format( "/proc/%d/maps", getpid() );
2275 FILE * file = NULL;
2276
2277 file = fopen( name, "r" );
2278 KMP_ASSERT( file != NULL );
2279
2280 for ( ; ; ) {
2281
2282 void * beginning = NULL;
2283 void * ending = NULL;
2284 char perms[ 5 ];
2285
2286 rc = fscanf( file, "%p-%p %4s %*[^\n]\n", & beginning, & ending, perms );
2287 if ( rc == EOF ) {
2288 break;
2289 }; // if
Andrey Churbanov74bf17b2015-04-02 13:27:08 +00002290 KMP_ASSERT( rc == 3 && KMP_STRLEN( perms ) == 4 ); // Make sure all fields are read.
Jim Cownie5e8470a2013-09-27 10:38:44 +00002291
2292 // Ending address is not included in the region, but beginning is.
2293 if ( ( addr >= beginning ) && ( addr < ending ) ) {
            perms[ 2 ] = 0; // The 3rd and 4th characters do not matter.
2295 if ( strcmp( perms, "rw" ) == 0 ) {
2296 // Memory we are looking for should be readable and writable.
2297 found = 1;
2298 }; // if
2299 break;
2300 }; // if
2301
2302 }; // forever
2303
2304 // Free resources.
2305 fclose( file );
2306 KMP_INTERNAL_FREE( name );
2307
2308 #elif KMP_OS_DARWIN
2309
2310 /*
        On OS X*, the /proc pseudo-filesystem is not available. Try to read memory using the vm
        interface.
2313 */
2314
2315 int buffer;
2316 vm_size_t count;
2317 rc =
2318 vm_read_overwrite(
2319 mach_task_self(), // Task to read memory of.
2320 (vm_address_t)( addr ), // Address to read from.
2321 1, // Number of bytes to be read.
2322 (vm_address_t)( & buffer ), // Address of buffer to save read bytes in.
2323 & count // Address of var to save number of read bytes in.
2324 );
2325 if ( rc == 0 ) {
2326 // Memory successfully read.
2327 found = 1;
2328 }; // if
2329
Joerg Sonnenberger1564f3c2015-09-21 20:02:45 +00002330 #elif KMP_OS_FREEBSD || KMP_OS_NETBSD
Alp Toker763b9392014-02-28 09:42:41 +00002331
Joerg Sonnenberger1564f3c2015-09-21 20:02:45 +00002332 // FIXME(FreeBSD, NetBSD): Implement this
Alp Toker763b9392014-02-28 09:42:41 +00002333 found = 1;
2334
Jim Cownie5e8470a2013-09-27 10:38:44 +00002335 #else
2336
2337 #error "Unknown or unsupported OS"
2338
2339 #endif
2340
2341 return found;
2342
2343} // __kmp_is_address_mapped
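/*
   Illustration only (not part of the runtime): what the Linux branch above
   consumes. A typical /proc/<pid>/maps line looks like

       7ffd8b5c6000-7ffd8b5e7000 rw-p 00000000 00:00 0          [stack]

   and the format "%p-%p %4s %*[^\n]\n" peels off the begin address, the end
   address, and the 4-character permission field, discarding the rest of the
   line. A hedged restatement of the check performed above:

       void * beginning = NULL;
       void * ending = NULL;
       char perms[ 5 ];
       if ( fscanf( file, "%p-%p %4s %*[^\n]\n", &beginning, &ending, perms ) == 3 ) {
           // addr is mapped read-write iff beginning <= addr < ending
           // and perms starts with "rw".
       }
*/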
2344
2345#ifdef USE_LOAD_BALANCE
2346
2347
2348# if KMP_OS_DARWIN
2349
2350// The function returns the rounded value of the system load average
2351// during given time interval which depends on the value of
2352// __kmp_load_balance_interval variable (default is 60 sec, other values
2353// may be 300 sec or 900 sec).
2354// It returns -1 in case of error.
2355int
2356__kmp_get_load_balance( int max )
2357{
2358 double averages[3];
2359 int ret_avg = 0;
2360
2361 int res = getloadavg( averages, 3 );
2362
    // Check __kmp_load_balance_interval to determine which of the averages to use.
    // getloadavg() may return fewer samples than requested, i.e. fewer than 3.
2366 if ( __kmp_load_balance_interval < 180 && ( res >= 1 ) ) {
2367 ret_avg = averages[0];// 1 min
2368 } else if ( ( __kmp_load_balance_interval >= 180
2369 && __kmp_load_balance_interval < 600 ) && ( res >= 2 ) ) {
2370 ret_avg = averages[1];// 5 min
2371 } else if ( ( __kmp_load_balance_interval >= 600 ) && ( res == 3 ) ) {
2372 ret_avg = averages[2];// 15 min
Alp Toker8f2d3f02014-02-24 10:40:15 +00002373 } else {// Error occurred
Jim Cownie5e8470a2013-09-27 10:38:44 +00002374 return -1;
2375 }
2376
2377 return ret_avg;
2378}
2379
2380# else // Linux* OS
2381
// The function returns the number of running (not sleeping) threads, or -1 in case of error.
// An error can be reported if the Linux* OS kernel is too old (without "/proc" support).
// Counting of running threads stops once max running threads are encountered.
2385int
2386__kmp_get_load_balance( int max )
2387{
2388 static int permanent_error = 0;
2389
    static int glb_running_threads = 0; /* Saved count of the running threads for the thread balance algorithm */
2391 static double glb_call_time = 0; /* Thread balance algorithm call time */
2392
2393 int running_threads = 0; // Number of running threads in the system.
2394
2395 DIR * proc_dir = NULL; // Handle of "/proc/" directory.
2396 struct dirent * proc_entry = NULL;
2397
2398 kmp_str_buf_t task_path; // "/proc/<pid>/task/<tid>/" path.
2399 DIR * task_dir = NULL; // Handle of "/proc/<pid>/task/<tid>/" directory.
2400 struct dirent * task_entry = NULL;
2401 int task_path_fixed_len;
2402
2403 kmp_str_buf_t stat_path; // "/proc/<pid>/task/<tid>/stat" path.
2404 int stat_file = -1;
2405 int stat_path_fixed_len;
2406
2407 int total_processes = 0; // Total number of processes in system.
2408 int total_threads = 0; // Total number of threads in system.
2409
2410 double call_time = 0.0;
2411
2412 __kmp_str_buf_init( & task_path );
2413 __kmp_str_buf_init( & stat_path );
2414
2415 __kmp_elapsed( & call_time );
2416
2417 if ( glb_call_time &&
2418 ( call_time - glb_call_time < __kmp_load_balance_interval ) ) {
2419 running_threads = glb_running_threads;
2420 goto finish;
2421 }
2422
2423 glb_call_time = call_time;
2424
2425 // Do not spend time on scanning "/proc/" if we have a permanent error.
2426 if ( permanent_error ) {
2427 running_threads = -1;
2428 goto finish;
2429 }; // if
2430
2431 if ( max <= 0 ) {
2432 max = INT_MAX;
2433 }; // if
2434
2435 // Open "/proc/" directory.
2436 proc_dir = opendir( "/proc" );
2437 if ( proc_dir == NULL ) {
        // Cannot open "/proc/". Probably the kernel does not support it. Return an error now and
2439 // in subsequent calls.
2440 running_threads = -1;
2441 permanent_error = 1;
2442 goto finish;
2443 }; // if
2444
2445 // Initialize fixed part of task_path. This part will not change.
2446 __kmp_str_buf_cat( & task_path, "/proc/", 6 );
2447 task_path_fixed_len = task_path.used; // Remember number of used characters.
2448
2449 proc_entry = readdir( proc_dir );
2450 while ( proc_entry != NULL ) {
2451 // Proc entry is a directory and name starts with a digit. Assume it is a process'
2452 // directory.
2453 if ( proc_entry->d_type == DT_DIR && isdigit( proc_entry->d_name[ 0 ] ) ) {
2454
2455 ++ total_processes;
2456 // Make sure init process is the very first in "/proc", so we can replace
2457 // strcmp( proc_entry->d_name, "1" ) == 0 with simpler total_processes == 1.
2458 // We are going to check that total_processes == 1 => d_name == "1" is true (where
2459 // "=>" is implication). Since C++ does not have => operator, let us replace it with its
2460 // equivalent: a => b == ! a || b.
2461 KMP_DEBUG_ASSERT( total_processes != 1 || strcmp( proc_entry->d_name, "1" ) == 0 );
2462
2463 // Construct task_path.
2464 task_path.used = task_path_fixed_len; // Reset task_path to "/proc/".
Andrey Churbanov74bf17b2015-04-02 13:27:08 +00002465 __kmp_str_buf_cat( & task_path, proc_entry->d_name, KMP_STRLEN( proc_entry->d_name ) );
Jim Cownie5e8470a2013-09-27 10:38:44 +00002466 __kmp_str_buf_cat( & task_path, "/task", 5 );
2467
2468 task_dir = opendir( task_path.str );
2469 if ( task_dir == NULL ) {
2470 // Process can finish between reading "/proc/" directory entry and opening process'
2471 // "task/" directory. So, in general case we should not complain, but have to skip
2472 // this process and read the next one.
2473 // But on systems with no "task/" support we will spend lot of time to scan "/proc/"
2474 // tree again and again without any benefit. "init" process (its pid is 1) should
2475 // exist always, so, if we cannot open "/proc/1/task/" directory, it means "task/"
2476 // is not supported by kernel. Report an error now and in the future.
2477 if ( strcmp( proc_entry->d_name, "1" ) == 0 ) {
2478 running_threads = -1;
2479 permanent_error = 1;
2480 goto finish;
2481 }; // if
2482 } else {
2483 // Construct fixed part of stat file path.
2484 __kmp_str_buf_clear( & stat_path );
2485 __kmp_str_buf_cat( & stat_path, task_path.str, task_path.used );
2486 __kmp_str_buf_cat( & stat_path, "/", 1 );
2487 stat_path_fixed_len = stat_path.used;
2488
2489 task_entry = readdir( task_dir );
2490 while ( task_entry != NULL ) {
2491 // It is a directory and name starts with a digit.
                    if ( task_entry->d_type == DT_DIR && isdigit( task_entry->d_name[ 0 ] ) ) {
2493
2494 ++ total_threads;
2495
                        // Construct the complete stat file path. The easiest way would be:
                        //     __kmp_str_buf_print( & stat_path, "%s/%s/stat", task_path.str, task_entry->d_name );
                        // but a series of __kmp_str_buf_cat() calls works a bit faster.
2499 stat_path.used = stat_path_fixed_len; // Reset stat path to its fixed part.
Andrey Churbanov74bf17b2015-04-02 13:27:08 +00002500 __kmp_str_buf_cat( & stat_path, task_entry->d_name, KMP_STRLEN( task_entry->d_name ) );
Jim Cownie5e8470a2013-09-27 10:38:44 +00002501 __kmp_str_buf_cat( & stat_path, "/stat", 5 );
2502
2503 // Note: Low-level API (open/read/close) is used. High-level API
2504 // (fopen/fclose) works ~ 30 % slower.
2505 stat_file = open( stat_path.str, O_RDONLY );
2506 if ( stat_file == -1 ) {
2507 // We cannot report an error because task (thread) can terminate just
2508 // before reading this file.
2509 } else {
                        /*
                            The content of the "stat" file looks like:

                                24285 (program) S ...

                            It is a single line (if the program name does not include funny
                            symbols). The first number is the thread id, then the executable
                            file name in parentheses, then the state of the thread. We need
                            just the thread state.

                            Good news: The length of the program name is 15 characters max.
                            Longer names are truncated.

                            Thus, we need a rather short buffer: 15 chars for the program name +
                            2 parentheses + 3 spaces + ~7 digits of pid = 37.

                            Bad news: The program name may contain special symbols like a space,
                            a closing parenthesis, or even a newline. This makes parsing the
                            "stat" file not 100 % reliable. In case of funny program names,
                            parsing may fail (and report an incorrect thread state).

                            Parsing the "status" file looks more promising (due to its different
                            file structure and escaping of special symbols), but reading and
                            parsing the "status" file is slower.

                            -- ln
                        */
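                        /*
                            Illustration only (not part of the runtime): the extraction step
                            below in isolation. For a healthy line such as

                                24285 (program) S ...

                            strstr( buffer, ") " ) points at the parenthesis preceding " S",
                            so *(close_parent + 2) yields the state character 'S'; 'R' means
                            running. A process named, say, "a) R b" can defeat this, which is
                            exactly the unreliability the comment above warns about.
                        */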
2537 char buffer[ 65 ];
2538 int len;
2539 len = read( stat_file, buffer, sizeof( buffer ) - 1 );
2540 if ( len >= 0 ) {
2541 buffer[ len ] = 0;
2542 // Using scanf:
2543 // sscanf( buffer, "%*d (%*s) %c ", & state );
2544 // looks very nice, but searching for a closing parenthesis works a
2545 // bit faster.
2546 char * close_parent = strstr( buffer, ") " );
2547 if ( close_parent != NULL ) {
2548 char state = * ( close_parent + 2 );
2549 if ( state == 'R' ) {
2550 ++ running_threads;
2551 if ( running_threads >= max ) {
2552 goto finish;
2553 }; // if
2554 }; // if
2555 }; // if
2556 }; // if
2557 close( stat_file );
2558 stat_file = -1;
2559 }; // if
2560 }; // if
2561 task_entry = readdir( task_dir );
2562 }; // while
2563 closedir( task_dir );
2564 task_dir = NULL;
2565 }; // if
2566 }; // if
2567 proc_entry = readdir( proc_dir );
2568 }; // while
2569
2570 //
    // There _might_ be a timing hole where the thread executing this
    // code gets skipped in the load balance, and running_threads is 0.
2573 // Assert in the debug builds only!!!
2574 //
2575 KMP_DEBUG_ASSERT( running_threads > 0 );
2576 if ( running_threads <= 0 ) {
2577 running_threads = 1;
2578 }
2579
2580 finish: // Clean up and exit.
2581 if ( proc_dir != NULL ) {
2582 closedir( proc_dir );
2583 }; // if
2584 __kmp_str_buf_free( & task_path );
2585 if ( task_dir != NULL ) {
2586 closedir( task_dir );
2587 }; // if
2588 __kmp_str_buf_free( & stat_path );
2589 if ( stat_file != -1 ) {
2590 close( stat_file );
2591 }; // if
2592
2593 glb_running_threads = running_threads;
2594
2595 return running_threads;
2596
2597} // __kmp_get_load_balance
2598
2599# endif // KMP_OS_DARWIN
2600
2601#endif // USE_LOAD_BALANCE
2602
Andrey Churbanovedc370e2015-08-05 11:23:10 +00002603#if !(KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_MIC)
Jim Cownie3051f972014-08-07 10:12:54 +00002604
// We really only need the case with 1 argument, because CLANG always builds
// a struct of pointers to the shared variables referenced in the outlined function.
2607int
2608__kmp_invoke_microtask( microtask_t pkfn,
2609 int gtid, int tid,
Jonathan Peyton122dd762015-07-13 18:55:45 +00002610 int argc, void *p_argv[]
2611#if OMPT_SUPPORT
2612 , void **exit_frame_ptr
2613#endif
2614)
2615{
2616#if OMPT_SUPPORT
2617 *exit_frame_ptr = __builtin_frame_address(0);
2618#endif
2619
Jim Cownie3051f972014-08-07 10:12:54 +00002620 switch (argc) {
2621 default:
2622 fprintf(stderr, "Too many args to microtask: %d!\n", argc);
2623 fflush(stderr);
2624 exit(-1);
2625 case 0:
2626 (*pkfn)(&gtid, &tid);
2627 break;
2628 case 1:
2629 (*pkfn)(&gtid, &tid, p_argv[0]);
2630 break;
2631 case 2:
2632 (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1]);
2633 break;
2634 case 3:
2635 (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2]);
2636 break;
2637 case 4:
2638 (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3]);
2639 break;
2640 case 5:
2641 (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4]);
2642 break;
2643 case 6:
2644 (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2645 p_argv[5]);
2646 break;
2647 case 7:
2648 (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2649 p_argv[5], p_argv[6]);
2650 break;
2651 case 8:
2652 (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2653 p_argv[5], p_argv[6], p_argv[7]);
2654 break;
2655 case 9:
2656 (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2657 p_argv[5], p_argv[6], p_argv[7], p_argv[8]);
2658 break;
2659 case 10:
2660 (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2661 p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9]);
2662 break;
2663 case 11:
2664 (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2665 p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10]);
2666 break;
2667 case 12:
2668 (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2669 p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
2670 p_argv[11]);
2671 break;
2672 case 13:
2673 (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2674 p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
2675 p_argv[11], p_argv[12]);
2676 break;
2677 case 14:
2678 (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2679 p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
2680 p_argv[11], p_argv[12], p_argv[13]);
2681 break;
2682 case 15:
2683 (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2684 p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
2685 p_argv[11], p_argv[12], p_argv[13], p_argv[14]);
2686 break;
2687 }
2688
Jonathan Peyton122dd762015-07-13 18:55:45 +00002689#if OMPT_SUPPORT
2690 *exit_frame_ptr = 0;
2691#endif
2692
Jim Cownie3051f972014-08-07 10:12:54 +00002693 return 1;
2694}
2695
2696#endif
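/*
   Illustration only (not part of the runtime): the shape of the outlined
   function the switch above dispatches to. The microtask name and body are
   hypothetical; the leading gtid/tid pointer parameters followed by one
   pointer per shared variable are the actual convention.

       void my_microtask( int *gtid, int *tid, void *shared_a, void *shared_b ) {
           // body of the parallel region, using *gtid / *tid and the shareds
       }

       // For argc == 2 the runtime reaches it via:
       //     (*pkfn)( &gtid, &tid, p_argv[0], p_argv[1] );

   Each case of the switch spells out one arity of this calling convention,
   since C cannot portably forward a runtime-sized argument list.
*/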
Jim Cownie181b4bb2013-12-23 17:28:57 +00002697
Jim Cownie5e8470a2013-09-27 10:38:44 +00002698// end of file //
2699