blob: 41e6273796bf751e92f805cf711e2b7963005264 [file] [log] [blame]
Jim Cownie5e8470a2013-09-27 10:38:44 +00001/*
2 * z_Linux_util.c -- platform specific routines.
Jim Cownie5e8470a2013-09-27 10:38:44 +00003 */
4
5
6//===----------------------------------------------------------------------===//
7//
8// The LLVM Compiler Infrastructure
9//
10// This file is dual licensed under the MIT and the University of Illinois Open
11// Source Licenses. See LICENSE.txt for details.
12//
13//===----------------------------------------------------------------------===//
14
15
16#include "kmp.h"
17#include "kmp_wrapper_getpid.h"
18#include "kmp_itt.h"
19#include "kmp_str.h"
20#include "kmp_i18n.h"
21#include "kmp_io.h"
Jim Cownie4cc4bb42014-10-07 16:25:50 +000022#include "kmp_stats.h"
23#include "kmp_wait_release.h"
Jim Cownie5e8470a2013-09-27 10:38:44 +000024
Alp Toker763b9392014-02-28 09:42:41 +000025#if !KMP_OS_FREEBSD
26# include <alloca.h>
27#endif
Jim Cownie5e8470a2013-09-27 10:38:44 +000028#include <unistd.h>
29#include <math.h> // HUGE_VAL.
30#include <sys/time.h>
31#include <sys/times.h>
32#include <sys/resource.h>
33#include <sys/syscall.h>
34
Jim Cownie3051f972014-08-07 10:12:54 +000035#if KMP_OS_LINUX && !KMP_OS_CNK
Jim Cownie5e8470a2013-09-27 10:38:44 +000036# include <sys/sysinfo.h>
Andrey Churbanovcbda8682015-01-13 14:43:35 +000037# if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
Jim Cownie5e8470a2013-09-27 10:38:44 +000038// We should really include <futex.h>, but that causes compatibility problems on different
39// Linux* OS distributions that either require that you include (or break when you try to include)
40// <pci/types.h>.
41// Since all we need is the two macros below (which are part of the kernel ABI, so can't change)
42// we just define the constants here and don't include <futex.h>
43# ifndef FUTEX_WAIT
44# define FUTEX_WAIT 0
45# endif
46# ifndef FUTEX_WAKE
47# define FUTEX_WAKE 1
48# endif
49# endif
50#elif KMP_OS_DARWIN
51# include <sys/sysctl.h>
52# include <mach/mach.h>
Alp Toker763b9392014-02-28 09:42:41 +000053#elif KMP_OS_FREEBSD
54# include <sys/sysctl.h>
55# include <pthread_np.h>
Jim Cownie5e8470a2013-09-27 10:38:44 +000056#endif
57
58
59#include <dirent.h>
60#include <ctype.h>
61#include <fcntl.h>
62
63/* ------------------------------------------------------------------------ */
64/* ------------------------------------------------------------------------ */
65
66struct kmp_sys_timer {
67 struct timespec start;
68};
69
70// Convert timespec to nanoseconds.
71#define TS2NS(timespec) (((timespec).tv_sec * 1e9) + (timespec).tv_nsec)
72
73static struct kmp_sys_timer __kmp_sys_timer_data;
74
75#if KMP_HANDLE_SIGNALS
76 typedef void (* sig_func_t )( int );
77 STATIC_EFI2_WORKAROUND struct sigaction __kmp_sighldrs[ NSIG ];
78 static sigset_t __kmp_sigset;
79#endif
80
81static int __kmp_init_runtime = FALSE;
82
83static int __kmp_fork_count = 0;
84
85static pthread_condattr_t __kmp_suspend_cond_attr;
86static pthread_mutexattr_t __kmp_suspend_mutex_attr;
87
88static kmp_cond_align_t __kmp_wait_cv;
89static kmp_mutex_align_t __kmp_wait_mx;
90
91/* ------------------------------------------------------------------------ */
92/* ------------------------------------------------------------------------ */
93
94#ifdef DEBUG_SUSPEND
95static void
96__kmp_print_cond( char *buffer, kmp_cond_align_t *cond )
97{
Andrey Churbanov74bf17b2015-04-02 13:27:08 +000098 KMP_SNPRINTF( buffer, 128, "(cond (lock (%ld, %d)), (descr (%p)))",
Jim Cownie5e8470a2013-09-27 10:38:44 +000099 cond->c_cond.__c_lock.__status, cond->c_cond.__c_lock.__spinlock,
100 cond->c_cond.__c_waiting );
101}
102#endif
103
104/* ------------------------------------------------------------------------ */
105/* ------------------------------------------------------------------------ */
106
Jim Cownie3051f972014-08-07 10:12:54 +0000107#if ( KMP_OS_LINUX && KMP_AFFINITY_SUPPORTED)
Jim Cownie5e8470a2013-09-27 10:38:44 +0000108
109/*
110 * Affinity support
111 */
112
/*
 * On some of the older OS's that we build on, these constants aren't present
 * in <asm/unistd.h> #included from <sys/syscall.h>. They must be the same on
 * all systems of the same arch where they are defined; they are part of the
 * kernel ABI and are therefore set in stone forever.
 */
119
Jim Cownie181b4bb2013-12-23 17:28:57 +0000120# if KMP_ARCH_X86 || KMP_ARCH_ARM
Jim Cownie5e8470a2013-09-27 10:38:44 +0000121# ifndef __NR_sched_setaffinity
122# define __NR_sched_setaffinity 241
123# elif __NR_sched_setaffinity != 241
124# error Wrong code for setaffinity system call.
125# endif /* __NR_sched_setaffinity */
126# ifndef __NR_sched_getaffinity
127# define __NR_sched_getaffinity 242
128# elif __NR_sched_getaffinity != 242
129# error Wrong code for getaffinity system call.
130# endif /* __NR_sched_getaffinity */
131
Andrey Churbanovcbda8682015-01-13 14:43:35 +0000132# elif KMP_ARCH_AARCH64
133# ifndef __NR_sched_setaffinity
134# define __NR_sched_setaffinity 122
135# elif __NR_sched_setaffinity != 122
136# error Wrong code for setaffinity system call.
137# endif /* __NR_sched_setaffinity */
138# ifndef __NR_sched_getaffinity
139# define __NR_sched_getaffinity 123
140# elif __NR_sched_getaffinity != 123
141# error Wrong code for getaffinity system call.
142# endif /* __NR_sched_getaffinity */
143
Jim Cownie5e8470a2013-09-27 10:38:44 +0000144# elif KMP_ARCH_X86_64
145# ifndef __NR_sched_setaffinity
146# define __NR_sched_setaffinity 203
147# elif __NR_sched_setaffinity != 203
148# error Wrong code for setaffinity system call.
149# endif /* __NR_sched_setaffinity */
150# ifndef __NR_sched_getaffinity
151# define __NR_sched_getaffinity 204
152# elif __NR_sched_getaffinity != 204
153# error Wrong code for getaffinity system call.
154# endif /* __NR_sched_getaffinity */
155
Jim Cownie3051f972014-08-07 10:12:54 +0000156# elif KMP_ARCH_PPC64
157# ifndef __NR_sched_setaffinity
158# define __NR_sched_setaffinity 222
159# elif __NR_sched_setaffinity != 222
160# error Wrong code for setaffinity system call.
161# endif /* __NR_sched_setaffinity */
162# ifndef __NR_sched_getaffinity
163# define __NR_sched_getaffinity 223
164# elif __NR_sched_getaffinity != 223
165# error Wrong code for getaffinity system call.
166# endif /* __NR_sched_getaffinity */
167
168
Jim Cownie5e8470a2013-09-27 10:38:44 +0000169# else
170# error Unknown or unsupported architecture
171
172# endif /* KMP_ARCH_* */
173
174int
175__kmp_set_system_affinity( kmp_affin_mask_t const *mask, int abort_on_error )
176{
177 KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
178 "Illegal set affinity operation when not capable");
179
180 int retval = syscall( __NR_sched_setaffinity, 0, __kmp_affin_mask_size, mask );
181 if (retval >= 0) {
182 return 0;
183 }
184 int error = errno;
185 if (abort_on_error) {
186 __kmp_msg(
187 kmp_ms_fatal,
188 KMP_MSG( FatalSysError ),
189 KMP_ERR( error ),
190 __kmp_msg_null
191 );
192 }
193 return error;
194}
195
196int
197__kmp_get_system_affinity( kmp_affin_mask_t *mask, int abort_on_error )
198{
199 KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
200 "Illegal get affinity operation when not capable");
201
202 int retval = syscall( __NR_sched_getaffinity, 0, __kmp_affin_mask_size, mask );
203 if (retval >= 0) {
204 return 0;
205 }
206 int error = errno;
207 if (abort_on_error) {
208 __kmp_msg(
209 kmp_ms_fatal,
210 KMP_MSG( FatalSysError ),
211 KMP_ERR( error ),
212 __kmp_msg_null
213 );
214 }
215 return error;
216}
217
218void
219__kmp_affinity_bind_thread( int which )
220{
221 KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
222 "Illegal set affinity operation when not capable");
223
Andrey Churbanov74bf17b2015-04-02 13:27:08 +0000224 kmp_affin_mask_t *mask = (kmp_affin_mask_t *)KMP_ALLOCA(__kmp_affin_mask_size);
Jim Cownie5e8470a2013-09-27 10:38:44 +0000225 KMP_CPU_ZERO(mask);
226 KMP_CPU_SET(which, mask);
227 __kmp_set_system_affinity(mask, TRUE);
228}
229
230/*
231 * Determine if we can access affinity functionality on this version of
232 * Linux* OS by checking __NR_sched_{get,set}affinity system calls, and set
233 * __kmp_affin_mask_size to the appropriate value (0 means not capable).
234 */
235void
236__kmp_affinity_determine_capable(const char *env_var)
237{
238 //
239 // Check and see if the OS supports thread affinity.
240 //
241
242# define KMP_CPU_SET_SIZE_LIMIT (1024*1024)
243
244 int gCode;
245 int sCode;
246 kmp_affin_mask_t *buf;
247 buf = ( kmp_affin_mask_t * ) KMP_INTERNAL_MALLOC( KMP_CPU_SET_SIZE_LIMIT );
248
249 // If Linux* OS:
250 // If the syscall fails or returns a suggestion for the size,
251 // then we don't have to search for an appropriate size.
252 gCode = syscall( __NR_sched_getaffinity, 0, KMP_CPU_SET_SIZE_LIMIT, buf );
253 KA_TRACE(30, ( "__kmp_affinity_determine_capable: "
Alp Toker8f2d3f02014-02-24 10:40:15 +0000254 "initial getaffinity call returned %d errno = %d\n",
Jim Cownie5e8470a2013-09-27 10:38:44 +0000255 gCode, errno));
256
257 //if ((gCode < 0) && (errno == ENOSYS))
258 if (gCode < 0) {
259 //
260 // System call not supported
261 //
262 if (__kmp_affinity_verbose || (__kmp_affinity_warnings
263 && (__kmp_affinity_type != affinity_none)
264 && (__kmp_affinity_type != affinity_default)
265 && (__kmp_affinity_type != affinity_disabled))) {
266 int error = errno;
267 __kmp_msg(
268 kmp_ms_warning,
269 KMP_MSG( GetAffSysCallNotSupported, env_var ),
270 KMP_ERR( error ),
271 __kmp_msg_null
272 );
273 }
Andrey Churbanov1f037e42015-03-10 09:15:26 +0000274 KMP_AFFINITY_DISABLE();
Jim Cownie5e8470a2013-09-27 10:38:44 +0000275 KMP_INTERNAL_FREE(buf);
276 return;
277 }
278 if (gCode > 0) { // Linux* OS only
279 // The optimal situation: the OS returns the size of the buffer
280 // it expects.
281 //
282 // A verification of correct behavior is that Isetaffinity on a NULL
283 // buffer with the same size fails with errno set to EFAULT.
284 sCode = syscall( __NR_sched_setaffinity, 0, gCode, NULL );
285 KA_TRACE(30, ( "__kmp_affinity_determine_capable: "
286 "setaffinity for mask size %d returned %d errno = %d\n",
287 gCode, sCode, errno));
288 if (sCode < 0) {
289 if (errno == ENOSYS) {
290 if (__kmp_affinity_verbose || (__kmp_affinity_warnings
291 && (__kmp_affinity_type != affinity_none)
292 && (__kmp_affinity_type != affinity_default)
293 && (__kmp_affinity_type != affinity_disabled))) {
294 int error = errno;
295 __kmp_msg(
296 kmp_ms_warning,
297 KMP_MSG( SetAffSysCallNotSupported, env_var ),
298 KMP_ERR( error ),
299 __kmp_msg_null
300 );
301 }
Andrey Churbanov1f037e42015-03-10 09:15:26 +0000302 KMP_AFFINITY_DISABLE();
Jim Cownie5e8470a2013-09-27 10:38:44 +0000303 KMP_INTERNAL_FREE(buf);
304 }
305 if (errno == EFAULT) {
Andrey Churbanov1f037e42015-03-10 09:15:26 +0000306 KMP_AFFINITY_ENABLE(gCode);
Jim Cownie5e8470a2013-09-27 10:38:44 +0000307 KA_TRACE(10, ( "__kmp_affinity_determine_capable: "
308 "affinity supported (mask size %d)\n",
309 (int)__kmp_affin_mask_size));
310 KMP_INTERNAL_FREE(buf);
311 return;
312 }
313 }
314 }
315
316 //
317 // Call the getaffinity system call repeatedly with increasing set sizes
318 // until we succeed, or reach an upper bound on the search.
319 //
320 KA_TRACE(30, ( "__kmp_affinity_determine_capable: "
321 "searching for proper set size\n"));
322 int size;
323 for (size = 1; size <= KMP_CPU_SET_SIZE_LIMIT; size *= 2) {
324 gCode = syscall( __NR_sched_getaffinity, 0, size, buf );
325 KA_TRACE(30, ( "__kmp_affinity_determine_capable: "
326 "getaffinity for mask size %d returned %d errno = %d\n", size,
327 gCode, errno));
328
329 if (gCode < 0) {
330 if ( errno == ENOSYS )
331 {
332 //
333 // We shouldn't get here
334 //
335 KA_TRACE(30, ( "__kmp_affinity_determine_capable: "
336 "inconsistent OS call behavior: errno == ENOSYS for mask size %d\n",
337 size));
338 if (__kmp_affinity_verbose || (__kmp_affinity_warnings
339 && (__kmp_affinity_type != affinity_none)
340 && (__kmp_affinity_type != affinity_default)
341 && (__kmp_affinity_type != affinity_disabled))) {
342 int error = errno;
343 __kmp_msg(
344 kmp_ms_warning,
345 KMP_MSG( GetAffSysCallNotSupported, env_var ),
346 KMP_ERR( error ),
347 __kmp_msg_null
348 );
349 }
Andrey Churbanov1f037e42015-03-10 09:15:26 +0000350 KMP_AFFINITY_DISABLE();
Jim Cownie5e8470a2013-09-27 10:38:44 +0000351 KMP_INTERNAL_FREE(buf);
352 return;
353 }
354 continue;
355 }
356
357 sCode = syscall( __NR_sched_setaffinity, 0, gCode, NULL );
358 KA_TRACE(30, ( "__kmp_affinity_determine_capable: "
359 "setaffinity for mask size %d returned %d errno = %d\n",
360 gCode, sCode, errno));
361 if (sCode < 0) {
362 if (errno == ENOSYS) { // Linux* OS only
363 //
364 // We shouldn't get here
365 //
366 KA_TRACE(30, ( "__kmp_affinity_determine_capable: "
367 "inconsistent OS call behavior: errno == ENOSYS for mask size %d\n",
368 size));
369 if (__kmp_affinity_verbose || (__kmp_affinity_warnings
370 && (__kmp_affinity_type != affinity_none)
371 && (__kmp_affinity_type != affinity_default)
372 && (__kmp_affinity_type != affinity_disabled))) {
373 int error = errno;
374 __kmp_msg(
375 kmp_ms_warning,
376 KMP_MSG( SetAffSysCallNotSupported, env_var ),
377 KMP_ERR( error ),
378 __kmp_msg_null
379 );
380 }
Andrey Churbanov1f037e42015-03-10 09:15:26 +0000381 KMP_AFFINITY_DISABLE();
Jim Cownie5e8470a2013-09-27 10:38:44 +0000382 KMP_INTERNAL_FREE(buf);
383 return;
384 }
385 if (errno == EFAULT) {
Andrey Churbanov1f037e42015-03-10 09:15:26 +0000386 KMP_AFFINITY_ENABLE(gCode);
Jim Cownie5e8470a2013-09-27 10:38:44 +0000387 KA_TRACE(10, ( "__kmp_affinity_determine_capable: "
388 "affinity supported (mask size %d)\n",
389 (int)__kmp_affin_mask_size));
390 KMP_INTERNAL_FREE(buf);
391 return;
392 }
393 }
394 }
395 //int error = errno; // save uncaught error code
396 KMP_INTERNAL_FREE(buf);
397 // errno = error; // restore uncaught error code, will be printed at the next KMP_WARNING below
398
399 //
400 // Affinity is not supported
401 //
Andrey Churbanov1f037e42015-03-10 09:15:26 +0000402 KMP_AFFINITY_DISABLE();
Jim Cownie5e8470a2013-09-27 10:38:44 +0000403 KA_TRACE(10, ( "__kmp_affinity_determine_capable: "
404 "cannot determine mask size - affinity not supported\n"));
405 if (__kmp_affinity_verbose || (__kmp_affinity_warnings
406 && (__kmp_affinity_type != affinity_none)
407 && (__kmp_affinity_type != affinity_default)
408 && (__kmp_affinity_type != affinity_disabled))) {
409 KMP_WARNING( AffCantGetMaskSize, env_var );
410 }
411}
412
Andrey Churbanovd39f11c2015-03-10 10:14:57 +0000413#endif // KMP_OS_LINUX && KMP_AFFINITY_SUPPORTED
Jim Cownie5e8470a2013-09-27 10:38:44 +0000414
Andrey Churbanovd39f11c2015-03-10 10:14:57 +0000415/* ------------------------------------------------------------------------ */
416/* ------------------------------------------------------------------------ */
417
418#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && !KMP_OS_CNK
419
420int
421__kmp_futex_determine_capable()
422{
423 int loc = 0;
424 int rc = syscall( __NR_futex, &loc, FUTEX_WAKE, 1, NULL, NULL, 0 );
425 int retval = ( rc == 0 ) || ( errno != ENOSYS );
426
427 KA_TRACE(10, ( "__kmp_futex_determine_capable: rc = %d errno = %d\n", rc,
428 errno ) );
429 KA_TRACE(10, ( "__kmp_futex_determine_capable: futex syscall%s supported\n",
430 retval ? "" : " not" ) );
431
432 return retval;
433}
434
435#endif // KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM) && !KMP_OS_CNK
436
437/* ------------------------------------------------------------------------ */
438/* ------------------------------------------------------------------------ */
439
440#if (KMP_ARCH_X86 || KMP_ARCH_X86_64) && (! KMP_ASM_INTRINS)
Jim Cownie5e8470a2013-09-27 10:38:44 +0000441/*
Andrey Churbanovd39f11c2015-03-10 10:14:57 +0000442 * Only 32-bit "add-exchange" instruction on IA-32 architecture causes us to
443 * use compare_and_store for these routines
Jim Cownie5e8470a2013-09-27 10:38:44 +0000444 */
445
Andrey Churbanov7b2ab712015-03-10 09:03:42 +0000446kmp_int8
447__kmp_test_then_or8( volatile kmp_int8 *p, kmp_int8 d )
448{
449 kmp_int8 old_value, new_value;
450
451 old_value = TCR_1( *p );
452 new_value = old_value | d;
453
454 while ( ! KMP_COMPARE_AND_STORE_REL8 ( p, old_value, new_value ) )
455 {
456 KMP_CPU_PAUSE();
457 old_value = TCR_1( *p );
458 new_value = old_value | d;
459 }
460 return old_value;
461}
462
463kmp_int8
464__kmp_test_then_and8( volatile kmp_int8 *p, kmp_int8 d )
465{
466 kmp_int8 old_value, new_value;
467
468 old_value = TCR_1( *p );
469 new_value = old_value & d;
470
471 while ( ! KMP_COMPARE_AND_STORE_REL8 ( p, old_value, new_value ) )
472 {
473 KMP_CPU_PAUSE();
474 old_value = TCR_1( *p );
475 new_value = old_value & d;
476 }
477 return old_value;
478}
479
Jim Cownie5e8470a2013-09-27 10:38:44 +0000480kmp_int32
481__kmp_test_then_or32( volatile kmp_int32 *p, kmp_int32 d )
482{
483 kmp_int32 old_value, new_value;
484
485 old_value = TCR_4( *p );
486 new_value = old_value | d;
487
Jim Cownie3051f972014-08-07 10:12:54 +0000488 while ( ! KMP_COMPARE_AND_STORE_REL32 ( p, old_value, new_value ) )
Jim Cownie5e8470a2013-09-27 10:38:44 +0000489 {
490 KMP_CPU_PAUSE();
491 old_value = TCR_4( *p );
492 new_value = old_value | d;
493 }
494 return old_value;
495}
496
497kmp_int32
498__kmp_test_then_and32( volatile kmp_int32 *p, kmp_int32 d )
499{
500 kmp_int32 old_value, new_value;
501
502 old_value = TCR_4( *p );
503 new_value = old_value & d;
504
Jim Cownie3051f972014-08-07 10:12:54 +0000505 while ( ! KMP_COMPARE_AND_STORE_REL32 ( p, old_value, new_value ) )
Jim Cownie5e8470a2013-09-27 10:38:44 +0000506 {
507 KMP_CPU_PAUSE();
508 old_value = TCR_4( *p );
509 new_value = old_value & d;
510 }
511 return old_value;
512}
513
Andrey Churbanovcbda8682015-01-13 14:43:35 +0000514# if KMP_ARCH_X86 || KMP_ARCH_PPC64 || KMP_ARCH_AARCH64
Andrey Churbanovd39f11c2015-03-10 10:14:57 +0000515kmp_int8
516__kmp_test_then_add8( volatile kmp_int8 *p, kmp_int8 d )
517{
518 kmp_int8 old_value, new_value;
519
520 old_value = TCR_1( *p );
521 new_value = old_value + d;
522
523 while ( ! KMP_COMPARE_AND_STORE_REL8 ( p, old_value, new_value ) )
524 {
525 KMP_CPU_PAUSE();
526 old_value = TCR_1( *p );
527 new_value = old_value + d;
528 }
529 return old_value;
530}
531
Jim Cownie5e8470a2013-09-27 10:38:44 +0000532kmp_int64
533__kmp_test_then_add64( volatile kmp_int64 *p, kmp_int64 d )
534{
535 kmp_int64 old_value, new_value;
536
537 old_value = TCR_8( *p );
538 new_value = old_value + d;
539
Jim Cownie3051f972014-08-07 10:12:54 +0000540 while ( ! KMP_COMPARE_AND_STORE_REL64 ( p, old_value, new_value ) )
Jim Cownie5e8470a2013-09-27 10:38:44 +0000541 {
542 KMP_CPU_PAUSE();
543 old_value = TCR_8( *p );
544 new_value = old_value + d;
545 }
546 return old_value;
547}
548# endif /* KMP_ARCH_X86 */
549
550kmp_int64
551__kmp_test_then_or64( volatile kmp_int64 *p, kmp_int64 d )
552{
553 kmp_int64 old_value, new_value;
554
555 old_value = TCR_8( *p );
556 new_value = old_value | d;
Jim Cownie3051f972014-08-07 10:12:54 +0000557 while ( ! KMP_COMPARE_AND_STORE_REL64 ( p, old_value, new_value ) )
Jim Cownie5e8470a2013-09-27 10:38:44 +0000558 {
559 KMP_CPU_PAUSE();
560 old_value = TCR_8( *p );
561 new_value = old_value | d;
562 }
563 return old_value;
564}
565
566kmp_int64
567__kmp_test_then_and64( volatile kmp_int64 *p, kmp_int64 d )
568{
569 kmp_int64 old_value, new_value;
570
571 old_value = TCR_8( *p );
572 new_value = old_value & d;
Jim Cownie3051f972014-08-07 10:12:54 +0000573 while ( ! KMP_COMPARE_AND_STORE_REL64 ( p, old_value, new_value ) )
Jim Cownie5e8470a2013-09-27 10:38:44 +0000574 {
575 KMP_CPU_PAUSE();
576 old_value = TCR_8( *p );
577 new_value = old_value & d;
578 }
579 return old_value;
580}
581
582#endif /* (KMP_ARCH_X86 || KMP_ARCH_X86_64) && (! KMP_ASM_INTRINS) */
583
584void
585__kmp_terminate_thread( int gtid )
586{
587 int status;
588 kmp_info_t *th = __kmp_threads[ gtid ];
589
590 if ( !th ) return;
591
592 #ifdef KMP_CANCEL_THREADS
593 KA_TRACE( 10, ("__kmp_terminate_thread: kill (%d)\n", gtid ) );
594 status = pthread_cancel( th->th.th_info.ds.ds_thread );
595 if ( status != 0 && status != ESRCH ) {
596 __kmp_msg(
597 kmp_ms_fatal,
598 KMP_MSG( CantTerminateWorkerThread ),
599 KMP_ERR( status ),
600 __kmp_msg_null
601 );
602 }; // if
603 #endif
604 __kmp_yield( TRUE );
605} //
606
607/* ------------------------------------------------------------------------ */
608/* ------------------------------------------------------------------------ */
609
610/* ------------------------------------------------------------------------ */
611/* ------------------------------------------------------------------------ */
612
613/*
614 * Set thread stack info according to values returned by
615 * pthread_getattr_np().
616 * If values are unreasonable, assume call failed and use
617 * incremental stack refinement method instead.
618 * Returns TRUE if the stack parameters could be determined exactly,
619 * FALSE if incremental refinement is necessary.
620 */
621static kmp_int32
622__kmp_set_stack_info( int gtid, kmp_info_t *th )
623{
624 int stack_data;
Alp Toker763b9392014-02-28 09:42:41 +0000625#if KMP_OS_LINUX || KMP_OS_FREEBSD
Jim Cownie5e8470a2013-09-27 10:38:44 +0000626 /* Linux* OS only -- no pthread_getattr_np support on OS X* */
627 pthread_attr_t attr;
628 int status;
629 size_t size = 0;
630 void * addr = 0;
631
632 /* Always do incremental stack refinement for ubermaster threads since the initial
633 thread stack range can be reduced by sibling thread creation so pthread_attr_getstack
634 may cause thread gtid aliasing */
635 if ( ! KMP_UBER_GTID(gtid) ) {
636
637 /* Fetch the real thread attributes */
638 status = pthread_attr_init( &attr );
639 KMP_CHECK_SYSFAIL( "pthread_attr_init", status );
Alp Toker763b9392014-02-28 09:42:41 +0000640#if KMP_OS_FREEBSD
641 status = pthread_attr_get_np( pthread_self(), &attr );
642 KMP_CHECK_SYSFAIL( "pthread_attr_get_np", status );
643#else
Jim Cownie5e8470a2013-09-27 10:38:44 +0000644 status = pthread_getattr_np( pthread_self(), &attr );
645 KMP_CHECK_SYSFAIL( "pthread_getattr_np", status );
Alp Toker763b9392014-02-28 09:42:41 +0000646#endif
Jim Cownie5e8470a2013-09-27 10:38:44 +0000647 status = pthread_attr_getstack( &attr, &addr, &size );
648 KMP_CHECK_SYSFAIL( "pthread_attr_getstack", status );
649 KA_TRACE( 60, ( "__kmp_set_stack_info: T#%d pthread_attr_getstack returned size: %lu, "
650 "low addr: %p\n",
651 gtid, size, addr ));
652
653 status = pthread_attr_destroy( &attr );
654 KMP_CHECK_SYSFAIL( "pthread_attr_destroy", status );
655 }
656
657 if ( size != 0 && addr != 0 ) { /* was stack parameter determination successful? */
658 /* Store the correct base and size */
659 TCW_PTR(th->th.th_info.ds.ds_stackbase, (((char *)addr) + size));
660 TCW_PTR(th->th.th_info.ds.ds_stacksize, size);
661 TCW_4(th->th.th_info.ds.ds_stackgrow, FALSE);
662 return TRUE;
Jim Cownie5e8470a2013-09-27 10:38:44 +0000663 }
Alp Toker763b9392014-02-28 09:42:41 +0000664#endif /* KMP_OS_LINUX || KMP_OS_FREEBSD */
Alp Toker763b9392014-02-28 09:42:41 +0000665 /* Use incremental refinement starting from initial conservative estimate */
666 TCW_PTR(th->th.th_info.ds.ds_stacksize, 0);
667 TCW_PTR(th -> th.th_info.ds.ds_stackbase, &stack_data);
668 TCW_4(th->th.th_info.ds.ds_stackgrow, TRUE);
669 return FALSE;
Jim Cownie5e8470a2013-09-27 10:38:44 +0000670}
671
672static void*
673__kmp_launch_worker( void *thr )
674{
675 int status, old_type, old_state;
676#ifdef KMP_BLOCK_SIGNALS
677 sigset_t new_set, old_set;
678#endif /* KMP_BLOCK_SIGNALS */
679 void *exit_val;
Jonathan Peyton2321d572015-06-08 19:25:25 +0000680#if KMP_OS_LINUX || KMP_OS_FREEBSD
Andrey Churbanov368b70e2015-08-05 11:12:45 +0000681 void * volatile padding = 0;
Jonathan Peyton2321d572015-06-08 19:25:25 +0000682#endif
Jim Cownie5e8470a2013-09-27 10:38:44 +0000683 int gtid;
Jim Cownie5e8470a2013-09-27 10:38:44 +0000684
685 gtid = ((kmp_info_t*)thr) -> th.th_info.ds.ds_gtid;
686 __kmp_gtid_set_specific( gtid );
687#ifdef KMP_TDATA_GTID
688 __kmp_gtid = gtid;
689#endif
Jim Cownie4cc4bb42014-10-07 16:25:50 +0000690#if KMP_STATS_ENABLED
691 // set __thread local index to point to thread-specific stats
692 __kmp_stats_thread_ptr = ((kmp_info_t*)thr)->th.th_stats;
693#endif
Jim Cownie5e8470a2013-09-27 10:38:44 +0000694
695#if USE_ITT_BUILD
696 __kmp_itt_thread_name( gtid );
697#endif /* USE_ITT_BUILD */
698
Alp Toker763b9392014-02-28 09:42:41 +0000699#if KMP_AFFINITY_SUPPORTED
Jim Cownie5e8470a2013-09-27 10:38:44 +0000700 __kmp_affinity_set_init_mask( gtid, FALSE );
Jim Cownie5e8470a2013-09-27 10:38:44 +0000701#endif
702
703#ifdef KMP_CANCEL_THREADS
704 status = pthread_setcanceltype( PTHREAD_CANCEL_ASYNCHRONOUS, & old_type );
705 KMP_CHECK_SYSFAIL( "pthread_setcanceltype", status );
706 /* josh todo: isn't PTHREAD_CANCEL_ENABLE default for newly-created threads? */
707 status = pthread_setcancelstate( PTHREAD_CANCEL_ENABLE, & old_state );
708 KMP_CHECK_SYSFAIL( "pthread_setcancelstate", status );
709#endif
710
711#if KMP_ARCH_X86 || KMP_ARCH_X86_64
712 //
713 // Set the FP control regs to be a copy of
714 // the parallel initialization thread's.
715 //
716 __kmp_clear_x87_fpu_status_word();
717 __kmp_load_x87_fpu_control_word( &__kmp_init_x87_fpu_control_word );
718 __kmp_load_mxcsr( &__kmp_init_mxcsr );
719#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
720
721#ifdef KMP_BLOCK_SIGNALS
722 status = sigfillset( & new_set );
723 KMP_CHECK_SYSFAIL_ERRNO( "sigfillset", status );
724 status = pthread_sigmask( SIG_BLOCK, & new_set, & old_set );
725 KMP_CHECK_SYSFAIL( "pthread_sigmask", status );
726#endif /* KMP_BLOCK_SIGNALS */
727
Alp Toker763b9392014-02-28 09:42:41 +0000728#if KMP_OS_LINUX || KMP_OS_FREEBSD
Jim Cownie5e8470a2013-09-27 10:38:44 +0000729 if ( __kmp_stkoffset > 0 && gtid > 0 ) {
Andrey Churbanov74bf17b2015-04-02 13:27:08 +0000730 padding = KMP_ALLOCA( gtid * __kmp_stkoffset );
Jim Cownie5e8470a2013-09-27 10:38:44 +0000731 }
732#endif
733
734 KMP_MB();
735 __kmp_set_stack_info( gtid, (kmp_info_t*)thr );
736
737 __kmp_check_stack_overlap( (kmp_info_t*)thr );
738
739 exit_val = __kmp_launch_thread( (kmp_info_t *) thr );
740
741#ifdef KMP_BLOCK_SIGNALS
742 status = pthread_sigmask( SIG_SETMASK, & old_set, NULL );
743 KMP_CHECK_SYSFAIL( "pthread_sigmask", status );
744#endif /* KMP_BLOCK_SIGNALS */
745
746 return exit_val;
747}
748
749
750/* The monitor thread controls all of the threads in the complex */
751
752static void*
753__kmp_launch_monitor( void *thr )
754{
755 int status, old_type, old_state;
756#ifdef KMP_BLOCK_SIGNALS
757 sigset_t new_set;
758#endif /* KMP_BLOCK_SIGNALS */
759 struct timespec interval;
760 int yield_count;
761 int yield_cycles = 0;
Jim Cownie5e8470a2013-09-27 10:38:44 +0000762
763 KMP_MB(); /* Flush all pending memory write invalidates. */
764
765 KA_TRACE( 10, ("__kmp_launch_monitor: #1 launched\n" ) );
766
767 /* register us as the monitor thread */
768 __kmp_gtid_set_specific( KMP_GTID_MONITOR );
769#ifdef KMP_TDATA_GTID
770 __kmp_gtid = KMP_GTID_MONITOR;
771#endif
772
773 KMP_MB();
774
775#if USE_ITT_BUILD
776 __kmp_itt_thread_ignore(); // Instruct Intel(R) Threading Tools to ignore monitor thread.
777#endif /* USE_ITT_BUILD */
778
779 __kmp_set_stack_info( ((kmp_info_t*)thr)->th.th_info.ds.ds_gtid, (kmp_info_t*)thr );
780
781 __kmp_check_stack_overlap( (kmp_info_t*)thr );
782
783#ifdef KMP_CANCEL_THREADS
784 status = pthread_setcanceltype( PTHREAD_CANCEL_ASYNCHRONOUS, & old_type );
785 KMP_CHECK_SYSFAIL( "pthread_setcanceltype", status );
786 /* josh todo: isn't PTHREAD_CANCEL_ENABLE default for newly-created threads? */
787 status = pthread_setcancelstate( PTHREAD_CANCEL_ENABLE, & old_state );
788 KMP_CHECK_SYSFAIL( "pthread_setcancelstate", status );
789#endif
790
791 #if KMP_REAL_TIME_FIX
792 // This is a potential fix which allows application with real-time scheduling policy work.
793 // However, decision about the fix is not made yet, so it is disabled by default.
794 { // Are program started with real-time scheduling policy?
795 int sched = sched_getscheduler( 0 );
796 if ( sched == SCHED_FIFO || sched == SCHED_RR ) {
797 // Yes, we are a part of real-time application. Try to increase the priority of the
798 // monitor.
799 struct sched_param param;
800 int max_priority = sched_get_priority_max( sched );
801 int rc;
802 KMP_WARNING( RealTimeSchedNotSupported );
803 sched_getparam( 0, & param );
804 if ( param.sched_priority < max_priority ) {
805 param.sched_priority += 1;
806 rc = sched_setscheduler( 0, sched, & param );
807 if ( rc != 0 ) {
808 int error = errno;
809 __kmp_msg(
810 kmp_ms_warning,
811 KMP_MSG( CantChangeMonitorPriority ),
812 KMP_ERR( error ),
813 KMP_MSG( MonitorWillStarve ),
814 __kmp_msg_null
815 );
816 }; // if
817 } else {
818 // We cannot abort here, because number of CPUs may be enough for all the threads,
819 // including the monitor thread, so application could potentially work...
820 __kmp_msg(
821 kmp_ms_warning,
822 KMP_MSG( RunningAtMaxPriority ),
823 KMP_MSG( MonitorWillStarve ),
824 KMP_HNT( RunningAtMaxPriority ),
825 __kmp_msg_null
826 );
827 }; // if
828 }; // if
Jim Cownie4cc4bb42014-10-07 16:25:50 +0000829 TCW_4( __kmp_global.g.g_time.dt.t_value, 0 ); // AC: free thread that waits for monitor started
Jim Cownie5e8470a2013-09-27 10:38:44 +0000830 }
831 #endif // KMP_REAL_TIME_FIX
832
833 KMP_MB(); /* Flush all pending memory write invalidates. */
834
835 if ( __kmp_monitor_wakeups == 1 ) {
836 interval.tv_sec = 1;
837 interval.tv_nsec = 0;
838 } else {
839 interval.tv_sec = 0;
Jonathan Peyton1e7a1dd2015-06-04 17:29:13 +0000840 interval.tv_nsec = (KMP_NSEC_PER_SEC / __kmp_monitor_wakeups);
Jim Cownie5e8470a2013-09-27 10:38:44 +0000841 }
842
843 KA_TRACE( 10, ("__kmp_launch_monitor: #2 monitor\n" ) );
844
845 if (__kmp_yield_cycle) {
846 __kmp_yielding_on = 0; /* Start out with yielding shut off */
847 yield_count = __kmp_yield_off_count;
848 } else {
849 __kmp_yielding_on = 1; /* Yielding is on permanently */
850 }
851
852 while( ! TCR_4( __kmp_global.g.g_done ) ) {
853 struct timespec now;
854 struct timeval tval;
855
856 /* This thread monitors the state of the system */
857
858 KA_TRACE( 15, ( "__kmp_launch_monitor: update\n" ) );
859
860 status = gettimeofday( &tval, NULL );
861 KMP_CHECK_SYSFAIL_ERRNO( "gettimeofday", status );
862 TIMEVAL_TO_TIMESPEC( &tval, &now );
863
864 now.tv_sec += interval.tv_sec;
865 now.tv_nsec += interval.tv_nsec;
866
Jonathan Peyton1e7a1dd2015-06-04 17:29:13 +0000867 if (now.tv_nsec >= KMP_NSEC_PER_SEC) {
Jim Cownie5e8470a2013-09-27 10:38:44 +0000868 now.tv_sec += 1;
Jonathan Peyton1e7a1dd2015-06-04 17:29:13 +0000869 now.tv_nsec -= KMP_NSEC_PER_SEC;
Jim Cownie5e8470a2013-09-27 10:38:44 +0000870 }
871
872 status = pthread_mutex_lock( & __kmp_wait_mx.m_mutex );
873 KMP_CHECK_SYSFAIL( "pthread_mutex_lock", status );
Jim Cownie07ea89f2014-09-03 11:10:54 +0000874 // AC: the monitor should not fall asleep if g_done has been set
875 if ( !TCR_4(__kmp_global.g.g_done) ) { // check once more under mutex
876 status = pthread_cond_timedwait( &__kmp_wait_cv.c_cond, &__kmp_wait_mx.m_mutex, &now );
877 if ( status != 0 ) {
878 if ( status != ETIMEDOUT && status != EINTR ) {
879 KMP_SYSFAIL( "pthread_cond_timedwait", status );
880 };
Jim Cownie5e8470a2013-09-27 10:38:44 +0000881 };
882 };
Jim Cownie5e8470a2013-09-27 10:38:44 +0000883 status = pthread_mutex_unlock( & __kmp_wait_mx.m_mutex );
884 KMP_CHECK_SYSFAIL( "pthread_mutex_unlock", status );
885
886 if (__kmp_yield_cycle) {
887 yield_cycles++;
888 if ( (yield_cycles % yield_count) == 0 ) {
889 if (__kmp_yielding_on) {
890 __kmp_yielding_on = 0; /* Turn it off now */
891 yield_count = __kmp_yield_off_count;
892 } else {
893 __kmp_yielding_on = 1; /* Turn it on now */
894 yield_count = __kmp_yield_on_count;
895 }
896 yield_cycles = 0;
897 }
898 } else {
899 __kmp_yielding_on = 1;
900 }
901
902 TCW_4( __kmp_global.g.g_time.dt.t_value,
903 TCR_4( __kmp_global.g.g_time.dt.t_value ) + 1 );
904
905 KMP_MB(); /* Flush all pending memory write invalidates. */
906 }
907
908 KA_TRACE( 10, ("__kmp_launch_monitor: #3 cleanup\n" ) );
909
910#ifdef KMP_BLOCK_SIGNALS
911 status = sigfillset( & new_set );
912 KMP_CHECK_SYSFAIL_ERRNO( "sigfillset", status );
913 status = pthread_sigmask( SIG_UNBLOCK, & new_set, NULL );
914 KMP_CHECK_SYSFAIL( "pthread_sigmask", status );
915#endif /* KMP_BLOCK_SIGNALS */
916
917 KA_TRACE( 10, ("__kmp_launch_monitor: #4 finished\n" ) );
918
919 if( __kmp_global.g.g_abort != 0 ) {
920 /* now we need to terminate the worker threads */
921 /* the value of t_abort is the signal we caught */
922
923 int gtid;
924
925 KA_TRACE( 10, ("__kmp_launch_monitor: #5 terminate sig=%d\n", __kmp_global.g.g_abort ) );
926
927 /* terminate the OpenMP worker threads */
928 /* TODO this is not valid for sibling threads!!
929 * the uber master might not be 0 anymore.. */
930 for (gtid = 1; gtid < __kmp_threads_capacity; ++gtid)
931 __kmp_terminate_thread( gtid );
932
933 __kmp_cleanup();
934
935 KA_TRACE( 10, ("__kmp_launch_monitor: #6 raise sig=%d\n", __kmp_global.g.g_abort ) );
936
937 if (__kmp_global.g.g_abort > 0)
938 raise( __kmp_global.g.g_abort );
939
940 }
941
942 KA_TRACE( 10, ("__kmp_launch_monitor: #7 exit\n" ) );
943
944 return thr;
945}
946
/* Create (or adopt) the OS thread backing OpenMP worker `gtid`.
 *
 * th         - thread descriptor to fill in (ds_gtid / ds_thread).
 * stack_size - requested worker stack size in bytes; a per-gtid offset is
 *              added below so worker stacks do not alias each other.
 *
 * For an uber (root) gtid the current pthread is adopted: no thread is
 * created, only stack bookkeeping runs.  Otherwise a joinable pthread
 * running __kmp_launch_worker is created and its handle is stored in
 * th->th.th_info.ds.ds_thread.  All failures are reported via
 * __kmp_msg()/KMP_SYSFAIL (fatal). */
void
__kmp_create_worker( int gtid, kmp_info_t *th, size_t stack_size )
{
    pthread_t handle;
    pthread_attr_t thread_attr;
    int status;


    th->th.th_info.ds.ds_gtid = gtid;

#if KMP_STATS_ENABLED
    // sets up worker thread stats
    __kmp_acquire_tas_lock(&__kmp_stats_lock, gtid);

    // th->th.th_stats is used to transfer thread specific stats-pointer to __kmp_launch_worker
    // So when thread is created (goes into __kmp_launch_worker) it will
    // set it's __thread local pointer to th->th.th_stats
    th->th.th_stats = __kmp_stats_list.push_back(gtid);
    if(KMP_UBER_GTID(gtid)) {
        // Uber thread never passes through __kmp_launch_worker, so start its
        // stats machinery right here.
        __kmp_stats_start_time = tsc_tick_count::now();
        __kmp_stats_thread_ptr = th->th.th_stats;
        __kmp_stats_init();
        KMP_START_EXPLICIT_TIMER(OMP_serial);
        KMP_START_EXPLICIT_TIMER(OMP_start_end);
    }
    __kmp_release_tas_lock(&__kmp_stats_lock, gtid);

#endif // KMP_STATS_ENABLED

    if ( KMP_UBER_GTID(gtid) ) {
        // Root thread: adopt the calling pthread instead of creating one.
        KA_TRACE( 10, ("__kmp_create_worker: uber thread (%d)\n", gtid ) );
        th -> th.th_info.ds.ds_thread = pthread_self();
        __kmp_set_stack_info( gtid, th );
        __kmp_check_stack_overlap( th );
        return;
    }; // if

    KA_TRACE( 10, ("__kmp_create_worker: try to create thread (%d)\n", gtid ) );

    KMP_MB();       /* Flush all pending memory write invalidates.  */

#ifdef KMP_THREAD_ATTR
    {
        status = pthread_attr_init( &thread_attr );
        if ( status != 0 ) {
            __kmp_msg(
                kmp_ms_fatal,
                KMP_MSG( CantInitThreadAttrs ),
                KMP_ERR( status ),
                __kmp_msg_null
            );
        }; // if
        status = pthread_attr_setdetachstate( & thread_attr, PTHREAD_CREATE_JOINABLE );
        if ( status != 0 ) {
            __kmp_msg(
                kmp_ms_fatal,
                KMP_MSG( CantSetWorkerState ),
                KMP_ERR( status ),
                __kmp_msg_null
            );
        }; // if

        /* Set stack size for this thread now.
         * The multiple of 2 is there because on some machines, requesting an unusual stacksize
         * causes the thread to have an offset before the dummy alloca() takes place to create the
         * offset.  Since we want the user to have a sufficient stacksize AND support a stack offset, we
         * alloca() twice the offset so that the upcoming alloca() does not eliminate any premade
         * offset, and also gives the user the stack space they requested for all threads */
        stack_size += gtid * __kmp_stkoffset * 2;

        KA_TRACE( 10, ( "__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
                        "__kmp_stksize = %lu bytes, final stacksize = %lu bytes\n",
                        gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size ) );

# ifdef _POSIX_THREAD_ATTR_STACKSIZE
        status = pthread_attr_setstacksize( & thread_attr, stack_size );
# ifdef KMP_BACKUP_STKSIZE
        if ( status != 0 ) {
            // Requested size was rejected; if the user did not force a size
            // via the environment, retry once with the backup default.
            if ( ! __kmp_env_stksize ) {
                stack_size = KMP_BACKUP_STKSIZE + gtid * __kmp_stkoffset;
                __kmp_stksize = KMP_BACKUP_STKSIZE;
                KA_TRACE( 10, ("__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
                               "__kmp_stksize = %lu bytes, (backup) final stacksize = %lu "
                               "bytes\n",
                               gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size )
                          );
                status = pthread_attr_setstacksize( &thread_attr, stack_size );
            }; // if
        }; // if
# endif /* KMP_BACKUP_STKSIZE */
        if ( status != 0 ) {
            __kmp_msg(
                kmp_ms_fatal,
                KMP_MSG( CantSetWorkerStackSize, stack_size ),
                KMP_ERR( status ),
                KMP_HNT( ChangeWorkerStackSize ),
                __kmp_msg_null
            );
        }; // if
# endif /* _POSIX_THREAD_ATTR_STACKSIZE */
    }
#endif /* KMP_THREAD_ATTR */

    {
        status = pthread_create( & handle, & thread_attr, __kmp_launch_worker, (void *) th );
        if ( status != 0 || ! handle ) { // ??? Why do we check handle??
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
            // Map the common errno values to targeted hints for the user.
            if ( status == EINVAL ) {
                __kmp_msg(
                    kmp_ms_fatal,
                    KMP_MSG( CantSetWorkerStackSize, stack_size ),
                    KMP_ERR( status ),
                    KMP_HNT( IncreaseWorkerStackSize ),
                    __kmp_msg_null
                );
            };
            if ( status == ENOMEM ) {
                __kmp_msg(
                    kmp_ms_fatal,
                    KMP_MSG( CantSetWorkerStackSize, stack_size ),
                    KMP_ERR( status ),
                    KMP_HNT( DecreaseWorkerStackSize ),
                    __kmp_msg_null
                );
            };
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
            if ( status == EAGAIN ) {
                __kmp_msg(
                    kmp_ms_fatal,
                    KMP_MSG( NoResourcesForWorkerThread ),
                    KMP_ERR( status ),
                    KMP_HNT( Decrease_NUM_THREADS ),
                    __kmp_msg_null
                );
            }; // if
            KMP_SYSFAIL( "pthread_create", status );
        }; // if

        th->th.th_info.ds.ds_thread = handle;
    }

#ifdef KMP_THREAD_ATTR
    {
        // Attribute object is no longer needed once the thread exists.
        status = pthread_attr_destroy( & thread_attr );
        if ( status ) {
            __kmp_msg(
                kmp_ms_warning,
                KMP_MSG( CantDestroyThreadAttrs ),
                KMP_ERR( status ),
                __kmp_msg_null
            );
        }; // if
    }
#endif /* KMP_THREAD_ATTR */

    KMP_MB();       /* Flush all pending memory write invalidates.  */

    KA_TRACE( 10, ("__kmp_create_worker: done creating thread (%d)\n", gtid ) );

} // __kmp_create_worker
1107
1108
1109void
1110__kmp_create_monitor( kmp_info_t *th )
1111{
1112 pthread_t handle;
1113 pthread_attr_t thread_attr;
1114 size_t size;
1115 int status;
Jonathan Peytone8104ad2015-06-08 18:56:33 +00001116 int caller_gtid;
Jim Cownie5e8470a2013-09-27 10:38:44 +00001117 int auto_adj_size = FALSE;
1118
Jonathan Peytone8104ad2015-06-08 18:56:33 +00001119 caller_gtid = __kmp_get_gtid();
1120
Jim Cownie5e8470a2013-09-27 10:38:44 +00001121 KA_TRACE( 10, ("__kmp_create_monitor: try to create monitor\n" ) );
1122
1123 KMP_MB(); /* Flush all pending memory write invalidates. */
1124
1125 th->th.th_info.ds.ds_tid = KMP_GTID_MONITOR;
1126 th->th.th_info.ds.ds_gtid = KMP_GTID_MONITOR;
1127 #if KMP_REAL_TIME_FIX
1128 TCW_4( __kmp_global.g.g_time.dt.t_value, -1 ); // Will use it for synchronization a bit later.
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001129 #else
1130 TCW_4( __kmp_global.g.g_time.dt.t_value, 0 );
Jim Cownie5e8470a2013-09-27 10:38:44 +00001131 #endif // KMP_REAL_TIME_FIX
1132
1133 #ifdef KMP_THREAD_ATTR
1134 if ( __kmp_monitor_stksize == 0 ) {
1135 __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE;
1136 auto_adj_size = TRUE;
1137 }
1138 status = pthread_attr_init( &thread_attr );
1139 if ( status != 0 ) {
1140 __kmp_msg(
1141 kmp_ms_fatal,
1142 KMP_MSG( CantInitThreadAttrs ),
1143 KMP_ERR( status ),
1144 __kmp_msg_null
1145 );
1146 }; // if
1147 status = pthread_attr_setdetachstate( & thread_attr, PTHREAD_CREATE_JOINABLE );
1148 if ( status != 0 ) {
1149 __kmp_msg(
1150 kmp_ms_fatal,
1151 KMP_MSG( CantSetMonitorState ),
1152 KMP_ERR( status ),
1153 __kmp_msg_null
1154 );
1155 }; // if
1156
1157 #ifdef _POSIX_THREAD_ATTR_STACKSIZE
1158 status = pthread_attr_getstacksize( & thread_attr, & size );
1159 KMP_CHECK_SYSFAIL( "pthread_attr_getstacksize", status );
1160 #else
1161 size = __kmp_sys_min_stksize;
1162 #endif /* _POSIX_THREAD_ATTR_STACKSIZE */
1163 #endif /* KMP_THREAD_ATTR */
1164
1165 if ( __kmp_monitor_stksize == 0 ) {
1166 __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE;
1167 }
1168 if ( __kmp_monitor_stksize < __kmp_sys_min_stksize ) {
1169 __kmp_monitor_stksize = __kmp_sys_min_stksize;
1170 }
1171
1172 KA_TRACE( 10, ( "__kmp_create_monitor: default stacksize = %lu bytes,"
1173 "requested stacksize = %lu bytes\n",
1174 size, __kmp_monitor_stksize ) );
1175
1176 retry:
1177
1178 /* Set stack size for this thread now. */
1179
1180 #ifdef _POSIX_THREAD_ATTR_STACKSIZE
1181 KA_TRACE( 10, ( "__kmp_create_monitor: setting stacksize = %lu bytes,",
1182 __kmp_monitor_stksize ) );
1183 status = pthread_attr_setstacksize( & thread_attr, __kmp_monitor_stksize );
1184 if ( status != 0 ) {
1185 if ( auto_adj_size ) {
1186 __kmp_monitor_stksize *= 2;
1187 goto retry;
1188 }
1189 __kmp_msg(
1190 kmp_ms_warning, // should this be fatal? BB
1191 KMP_MSG( CantSetMonitorStackSize, (long int) __kmp_monitor_stksize ),
1192 KMP_ERR( status ),
1193 KMP_HNT( ChangeMonitorStackSize ),
1194 __kmp_msg_null
1195 );
1196 }; // if
1197 #endif /* _POSIX_THREAD_ATTR_STACKSIZE */
1198
Jim Cownie5e8470a2013-09-27 10:38:44 +00001199 status = pthread_create( &handle, & thread_attr, __kmp_launch_monitor, (void *) th );
1200
1201 if ( status != 0 ) {
1202 #ifdef _POSIX_THREAD_ATTR_STACKSIZE
1203 if ( status == EINVAL ) {
1204 if ( auto_adj_size && ( __kmp_monitor_stksize < (size_t)0x40000000 ) ) {
1205 __kmp_monitor_stksize *= 2;
1206 goto retry;
1207 }
1208 __kmp_msg(
1209 kmp_ms_fatal,
1210 KMP_MSG( CantSetMonitorStackSize, __kmp_monitor_stksize ),
1211 KMP_ERR( status ),
1212 KMP_HNT( IncreaseMonitorStackSize ),
1213 __kmp_msg_null
1214 );
1215 }; // if
1216 if ( status == ENOMEM ) {
1217 __kmp_msg(
1218 kmp_ms_fatal,
1219 KMP_MSG( CantSetMonitorStackSize, __kmp_monitor_stksize ),
1220 KMP_ERR( status ),
1221 KMP_HNT( DecreaseMonitorStackSize ),
1222 __kmp_msg_null
1223 );
1224 }; // if
1225 #endif /* _POSIX_THREAD_ATTR_STACKSIZE */
1226 if ( status == EAGAIN ) {
1227 __kmp_msg(
1228 kmp_ms_fatal,
1229 KMP_MSG( NoResourcesForMonitorThread ),
1230 KMP_ERR( status ),
1231 KMP_HNT( DecreaseNumberOfThreadsInUse ),
1232 __kmp_msg_null
1233 );
1234 }; // if
1235 KMP_SYSFAIL( "pthread_create", status );
1236 }; // if
1237
1238 th->th.th_info.ds.ds_thread = handle;
1239
1240 #if KMP_REAL_TIME_FIX
1241 // Wait for the monitor thread is really started and set its *priority*.
1242 KMP_DEBUG_ASSERT( sizeof( kmp_uint32 ) == sizeof( __kmp_global.g.g_time.dt.t_value ) );
1243 __kmp_wait_yield_4(
1244 (kmp_uint32 volatile *) & __kmp_global.g.g_time.dt.t_value, -1, & __kmp_neq_4, NULL
1245 );
1246 #endif // KMP_REAL_TIME_FIX
1247
1248 #ifdef KMP_THREAD_ATTR
1249 status = pthread_attr_destroy( & thread_attr );
1250 if ( status != 0 ) {
1251 __kmp_msg( //
1252 kmp_ms_warning,
1253 KMP_MSG( CantDestroyThreadAttrs ),
1254 KMP_ERR( status ),
1255 __kmp_msg_null
1256 );
1257 }; // if
1258 #endif
1259
1260 KMP_MB(); /* Flush all pending memory write invalidates. */
1261
1262 KA_TRACE( 10, ( "__kmp_create_monitor: monitor created %#.8lx\n", th->th.th_info.ds.ds_thread ) );
1263
1264} // __kmp_create_monitor
1265
/* Terminate the calling thread, reporting exit_status through the
 * pthread_exit() value (widened via intptr_t for the void* payload). */
void
__kmp_exit_thread(
    int exit_status
) {
    void *retval = (void *)(intptr_t) exit_status;
    pthread_exit( retval );
} // __kmp_exit_thread
1272
Jim Cownie07ea89f2014-09-03 11:10:54 +00001273void __kmp_resume_monitor();
1274
Jim Cownie5e8470a2013-09-27 10:38:44 +00001275void
1276__kmp_reap_monitor( kmp_info_t *th )
1277{
Jonathan Peyton7c4d66d2015-06-08 20:01:14 +00001278 int status;
Jim Cownie5e8470a2013-09-27 10:38:44 +00001279 void *exit_val;
1280
1281 KA_TRACE( 10, ("__kmp_reap_monitor: try to reap monitor thread with handle %#.8lx\n",
1282 th->th.th_info.ds.ds_thread ) );
1283
1284 // If monitor has been created, its tid and gtid should be KMP_GTID_MONITOR.
1285 // If both tid and gtid are 0, it means the monitor did not ever start.
1286 // If both tid and gtid are KMP_GTID_DNE, the monitor has been shut down.
1287 KMP_DEBUG_ASSERT( th->th.th_info.ds.ds_tid == th->th.th_info.ds.ds_gtid );
1288 if ( th->th.th_info.ds.ds_gtid != KMP_GTID_MONITOR ) {
1289 return;
1290 }; // if
1291
1292 KMP_MB(); /* Flush all pending memory write invalidates. */
1293
1294
1295 /* First, check to see whether the monitor thread exists. This could prevent a hang,
1296 but if the monitor dies after the pthread_kill call and before the pthread_join
1297 call, it will still hang. */
1298
1299 status = pthread_kill( th->th.th_info.ds.ds_thread, 0 );
1300 if (status == ESRCH) {
1301
1302 KA_TRACE( 10, ("__kmp_reap_monitor: monitor does not exist, returning\n") );
1303
1304 } else
1305 {
Jim Cownie07ea89f2014-09-03 11:10:54 +00001306 __kmp_resume_monitor(); // Wake up the monitor thread
Jim Cownie5e8470a2013-09-27 10:38:44 +00001307 status = pthread_join( th->th.th_info.ds.ds_thread, & exit_val);
1308 if (exit_val != th) {
1309 __kmp_msg(
1310 kmp_ms_fatal,
1311 KMP_MSG( ReapMonitorError ),
1312 KMP_ERR( status ),
1313 __kmp_msg_null
1314 );
1315 }
1316 }
1317
1318 th->th.th_info.ds.ds_tid = KMP_GTID_DNE;
1319 th->th.th_info.ds.ds_gtid = KMP_GTID_DNE;
1320
1321 KA_TRACE( 10, ("__kmp_reap_monitor: done reaping monitor thread with handle %#.8lx\n",
1322 th->th.th_info.ds.ds_thread ) );
1323
1324 KMP_MB(); /* Flush all pending memory write invalidates. */
1325
1326}
1327
/* Join (reap) a worker thread during shutdown.
 *
 * Probes the thread with pthread_kill(sig 0) first; if it is already gone
 * (ESRCH) nothing is joined.  Join errors and unexpected exit values are
 * only reported in KMP_DEBUG builds. */
void
__kmp_reap_worker( kmp_info_t *th )
{
    int status;
    void *exit_val;

    KMP_MB();       /* Flush all pending memory write invalidates. */

    KA_TRACE( 10, ("__kmp_reap_worker: try to reap T#%d\n", th->th.th_info.ds.ds_gtid ) );

    /* First, check to see whether the worker thread exists.  This could prevent a hang,
       but if the worker dies after the pthread_kill call and before the pthread_join
       call, it will still hang. */

    {
        status = pthread_kill( th->th.th_info.ds.ds_thread, 0 ); // signal 0: existence probe only
        if (status == ESRCH) {
            KA_TRACE( 10, ("__kmp_reap_worker: worker T#%d does not exist, returning\n",
                           th->th.th_info.ds.ds_gtid ) );
        }
        else {
            KA_TRACE( 10, ("__kmp_reap_worker: try to join with worker T#%d\n",
                           th->th.th_info.ds.ds_gtid ) );

            status = pthread_join( th->th.th_info.ds.ds_thread, & exit_val);
#ifdef KMP_DEBUG
            /* Don't expose these to the user until we understand when they trigger */
            if ( status != 0 ) {
                __kmp_msg(
                    kmp_ms_fatal,
                    KMP_MSG( ReapWorkerError ),
                    KMP_ERR( status ),
                    __kmp_msg_null
                );
            }
            // Workers are expected to return their own `th` from the start routine.
            if ( exit_val != th ) {
                KA_TRACE( 10, ( "__kmp_reap_worker: worker T#%d did not reap properly, "
                                "exit_val = %p\n",
                                th->th.th_info.ds.ds_gtid, exit_val ) );
            }
#endif /* KMP_DEBUG */
        }
    }

    KA_TRACE( 10, ("__kmp_reap_worker: done reaping T#%d\n", th->th.th_info.ds.ds_gtid ) );

    KMP_MB();       /* Flush all pending memory write invalidates. */
}
1376
1377
1378/* ------------------------------------------------------------------------ */
1379/* ------------------------------------------------------------------------ */
1380
1381#if KMP_HANDLE_SIGNALS
1382
1383
/* No-op signal handler: installed where a SIG_IGN-style action is wanted
 * but a real function pointer is required for handler comparisons. */
static void
__kmp_null_handler( int signo )
{
    (void) signo;   // intentionally ignored
} // __kmp_null_handler
1389
1390
/* Stage-1 team signal handler (runs in signal context).
 *
 * For the known terminating signals it records the signal number in
 * __kmp_global.g.g_abort and sets g_done; the monitor thread observes
 * these flags and performs the actual shutdown (see __kmp_launch_monitor).
 * Does nothing if an abort is already in progress (g_abort != 0). */
static void
__kmp_team_handler( int signo )
{
    if ( __kmp_global.g.g_abort == 0 ) {
        /* Stage 1 signal handler, let's shut down all of the threads */
        #ifdef KMP_DEBUG
            __kmp_debug_printf( "__kmp_team_handler: caught signal = %d\n", signo );
        #endif
        switch ( signo ) {
            case SIGHUP  :
            case SIGINT  :
            case SIGQUIT :
            case SIGILL  :
            case SIGABRT :
            case SIGFPE  :
            case SIGBUS  :
            case SIGSEGV :
            #ifdef SIGSYS
                case SIGSYS :
            #endif
            case SIGTERM :
                if ( __kmp_debug_buf ) {
                    __kmp_dump_debug_buffer( );
                }; // if
                // Publish g_abort before g_done, with fences between, so a
                // reader that sees g_done also sees the signal number.
                KMP_MB();       // Flush all pending memory write invalidates.
                TCW_4( __kmp_global.g.g_abort, signo );
                KMP_MB();       // Flush all pending memory write invalidates.
                TCW_4( __kmp_global.g.g_done, TRUE );
                KMP_MB();       // Flush all pending memory write invalidates.
                break;
            default:
                #ifdef KMP_DEBUG
                    __kmp_debug_printf( "__kmp_team_handler: unknown signal type" );
                #endif
                break;
        }; // switch
    }; // if
} // __kmp_team_handler
1429
1430
1431static
1432void __kmp_sigaction( int signum, const struct sigaction * act, struct sigaction * oldact ) {
1433 int rc = sigaction( signum, act, oldact );
1434 KMP_CHECK_SYSFAIL_ERRNO( "sigaction", rc );
1435}
1436
1437
/* Install (or merely record) the handler for one signal.
 *
 * parallel_init != 0: install handler_func, but only keep it if the handler
 *   in effect still matched the one saved in __kmp_sighldrs[sig] — i.e. the
 *   user has not installed their own; otherwise the user's handler is put
 *   back.  Signals we own are tracked in __kmp_sigset.
 * parallel_init == 0: just snapshot the current handler into
 *   __kmp_sighldrs[sig] for the comparison above. */
static void
__kmp_install_one_handler( int sig, sig_func_t handler_func, int parallel_init )
{
    KMP_MB(); // Flush all pending memory write invalidates.
    KB_TRACE( 60, ( "__kmp_install_one_handler( %d, ..., %d )\n", sig, parallel_init ) );
    if ( parallel_init ) {
        struct sigaction new_action;
        struct sigaction old_action;
        new_action.sa_handler = handler_func;
        new_action.sa_flags   = 0;
        sigfillset( & new_action.sa_mask ); // block everything while our handler runs
        __kmp_sigaction( sig, & new_action, & old_action );
        if ( old_action.sa_handler == __kmp_sighldrs[ sig ].sa_handler ) {
            sigaddset( & __kmp_sigset, sig ); // we own this signal now
        } else {
            // Restore/keep user's handler if one previously installed.
            __kmp_sigaction( sig, & old_action, NULL );
        }; // if
    } else {
        // Save initial/system signal handlers to see if user handlers installed.
        __kmp_sigaction( sig, NULL, & __kmp_sighldrs[ sig ] );
    }; // if
    KMP_MB(); // Flush all pending memory write invalidates.
} // __kmp_install_one_handler
1462
1463
1464static void
1465__kmp_remove_one_handler( int sig )
1466{
1467 KB_TRACE( 60, ( "__kmp_remove_one_handler( %d )\n", sig ) );
1468 if ( sigismember( & __kmp_sigset, sig ) ) {
1469 struct sigaction old;
1470 KMP_MB(); // Flush all pending memory write invalidates.
1471 __kmp_sigaction( sig, & __kmp_sighldrs[ sig ], & old );
1472 if ( ( old.sa_handler != __kmp_team_handler ) && ( old.sa_handler != __kmp_null_handler ) ) {
1473 // Restore the users signal handler.
1474 KB_TRACE( 10, ( "__kmp_remove_one_handler: oops, not our handler, restoring: sig=%d\n", sig ) );
1475 __kmp_sigaction( sig, & old, NULL );
1476 }; // if
1477 sigdelset( & __kmp_sigset, sig );
1478 KMP_MB(); // Flush all pending memory write invalidates.
1479 }; // if
1480} // __kmp_remove_one_handler
1481
1482
1483void
1484__kmp_install_signals( int parallel_init )
1485{
1486 KB_TRACE( 10, ( "__kmp_install_signals( %d )\n", parallel_init ) );
1487 if ( __kmp_handle_signals || ! parallel_init ) {
1488 // If ! parallel_init, we do not install handlers, just save original handlers.
1489 // Let us do it even __handle_signals is 0.
1490 sigemptyset( & __kmp_sigset );
1491 __kmp_install_one_handler( SIGHUP, __kmp_team_handler, parallel_init );
1492 __kmp_install_one_handler( SIGINT, __kmp_team_handler, parallel_init );
1493 __kmp_install_one_handler( SIGQUIT, __kmp_team_handler, parallel_init );
1494 __kmp_install_one_handler( SIGILL, __kmp_team_handler, parallel_init );
1495 __kmp_install_one_handler( SIGABRT, __kmp_team_handler, parallel_init );
1496 __kmp_install_one_handler( SIGFPE, __kmp_team_handler, parallel_init );
1497 __kmp_install_one_handler( SIGBUS, __kmp_team_handler, parallel_init );
1498 __kmp_install_one_handler( SIGSEGV, __kmp_team_handler, parallel_init );
1499 #ifdef SIGSYS
1500 __kmp_install_one_handler( SIGSYS, __kmp_team_handler, parallel_init );
1501 #endif // SIGSYS
1502 __kmp_install_one_handler( SIGTERM, __kmp_team_handler, parallel_init );
1503 #ifdef SIGPIPE
1504 __kmp_install_one_handler( SIGPIPE, __kmp_team_handler, parallel_init );
1505 #endif // SIGPIPE
1506 }; // if
1507} // __kmp_install_signals
1508
1509
1510void
1511__kmp_remove_signals( void )
1512{
1513 int sig;
1514 KB_TRACE( 10, ( "__kmp_remove_signals()\n" ) );
1515 for ( sig = 1; sig < NSIG; ++ sig ) {
1516 __kmp_remove_one_handler( sig );
1517 }; // for sig
1518} // __kmp_remove_signals
1519
1520
1521#endif // KMP_HANDLE_SIGNALS
1522
1523/* ------------------------------------------------------------------------ */
1524/* ------------------------------------------------------------------------ */
1525
/* Restore the pthread cancel state to new_state for the calling thread.
 * No-op unless built with KMP_CANCEL_THREADS.  Asserts that cancellation
 * was previously disabled (the runtime's steady state). */
void
__kmp_enable( int new_state )
{
    #ifdef KMP_CANCEL_THREADS
        int old_state;
        int status = pthread_setcancelstate( new_state, & old_state );
        KMP_CHECK_SYSFAIL( "pthread_setcancelstate", status );
        KMP_DEBUG_ASSERT( old_state == PTHREAD_CANCEL_DISABLE );
    #endif
}
1536
/* Disable pthread cancellation for the calling thread, returning the prior
 * state through old_state (for a later __kmp_enable).  No-op unless built
 * with KMP_CANCEL_THREADS. */
void
__kmp_disable( int * old_state )
{
    #ifdef KMP_CANCEL_THREADS
        int status = pthread_setcancelstate( PTHREAD_CANCEL_DISABLE, old_state );
        KMP_CHECK_SYSFAIL( "pthread_setcancelstate", status );
    #endif
}
1546
1547/* ------------------------------------------------------------------------ */
1548/* ------------------------------------------------------------------------ */
1549
/* pthread_atfork() pre-fork hook registered in __kmp_register_atfork.
 * Intentionally empty: no runtime state is quiesced before fork(). */
static void
__kmp_atfork_prepare (void)
{
    /* nothing to do */
}
1555
/* pthread_atfork() post-fork hook for the parent process.
 * Intentionally empty: the parent's runtime state is unaffected by fork(). */
static void
__kmp_atfork_parent (void)
{
    /* nothing to do */
}
1561
1562/*
1563 Reset the library so execution in the child starts "all over again" with
1564 clean data structures in initial states. Don't worry about freeing memory
1565 allocated by parent, just abandon it to be safe.
1566*/
/* pthread_atfork() post-fork hook for the child process.
 * Resets every library init flag, lock table, thread count, threadprivate
 * cache, and statically initialized bootstrap lock so the child behaves as
 * if the library were never initialized.  Memory from the parent is
 * abandoned, not freed (see comment above the function). */
static void
__kmp_atfork_child (void)
{
    /* TODO make sure this is done right for nested/sibling */
    // ATT: Memory leaks are here? TODO: Check it and fix.
    /* KMP_ASSERT( 0 ); */

    ++__kmp_fork_count;  // per-process-instance counter; also invalidates per-thread suspend objects

    // Drop every initialization flag back to the "never started" state.
    __kmp_init_runtime = FALSE;
    __kmp_init_monitor = 0;
    __kmp_init_parallel = FALSE;
    __kmp_init_middle = FALSE;
    __kmp_init_serial = FALSE;
    TCW_4(__kmp_init_gtid, FALSE);
    __kmp_init_common = FALSE;

    TCW_4(__kmp_init_user_locks, FALSE);
#if ! KMP_USE_DYNAMIC_LOCK
    // Abandon the parent's lock table (entry 0 stays reserved).
    __kmp_user_lock_table.used = 1;
    __kmp_user_lock_table.allocated = 0;
    __kmp_user_lock_table.table = NULL;
    __kmp_lock_blocks = NULL;
#endif

    __kmp_all_nth = 0;
    TCW_4(__kmp_nth, 0);

    /* Must actually zero all the *cache arguments passed to __kmpc_threadprivate here
       so threadprivate doesn't use stale data */
    KA_TRACE( 10, ( "__kmp_atfork_child: checking cache address list %p\n",
                    __kmp_threadpriv_cache_list ) );

    while ( __kmp_threadpriv_cache_list != NULL ) {

        if ( *__kmp_threadpriv_cache_list -> addr != NULL ) {
            KC_TRACE( 50, ( "__kmp_atfork_child: zeroing cache at address %p\n",
                            &(*__kmp_threadpriv_cache_list -> addr) ) );

            *__kmp_threadpriv_cache_list -> addr = NULL;
        }
        __kmp_threadpriv_cache_list = __kmp_threadpriv_cache_list -> next;
    }

    __kmp_init_runtime = FALSE;

    /* reset statically initialized locks */
    __kmp_init_bootstrap_lock( &__kmp_initz_lock );
    __kmp_init_bootstrap_lock( &__kmp_stdio_lock );
    __kmp_init_bootstrap_lock( &__kmp_console_lock );

    /* This is necessary to make sure no stale data is left around */
    /* AC: customers complain that we use unsafe routines in the atfork
       handler. Mathworks: dlsym() is unsafe. We call dlsym and dlopen
       in dynamic_link when check the presence of shared tbbmalloc library.
       Suggestion is to make the library initialization lazier, similar
       to what done for __kmpc_begin(). */
    // TODO: synchronize all static initializations with regular library
    //       startup; look at kmp_global.c and etc.
    //__kmp_internal_begin ();

}
1629
1630void
1631__kmp_register_atfork(void) {
1632 if ( __kmp_need_register_atfork ) {
1633 int status = pthread_atfork( __kmp_atfork_prepare, __kmp_atfork_parent, __kmp_atfork_child );
1634 KMP_CHECK_SYSFAIL( "pthread_atfork", status );
1635 __kmp_need_register_atfork = FALSE;
1636 }
1637}
1638
1639void
1640__kmp_suspend_initialize( void )
1641{
1642 int status;
1643 status = pthread_mutexattr_init( &__kmp_suspend_mutex_attr );
1644 KMP_CHECK_SYSFAIL( "pthread_mutexattr_init", status );
1645 status = pthread_condattr_init( &__kmp_suspend_cond_attr );
1646 KMP_CHECK_SYSFAIL( "pthread_condattr_init", status );
1647}
1648
1649static void
1650__kmp_suspend_initialize_thread( kmp_info_t *th )
1651{
1652 if ( th->th.th_suspend_init_count <= __kmp_fork_count ) {
1653 /* this means we haven't initialized the suspension pthread objects for this thread
1654 in this instance of the process */
1655 int status;
1656 status = pthread_cond_init( &th->th.th_suspend_cv.c_cond, &__kmp_suspend_cond_attr );
1657 KMP_CHECK_SYSFAIL( "pthread_cond_init", status );
1658 status = pthread_mutex_init( &th->th.th_suspend_mx.m_mutex, & __kmp_suspend_mutex_attr );
1659 KMP_CHECK_SYSFAIL( "pthread_mutex_init", status );
1660 *(volatile int*)&th->th.th_suspend_init_count = __kmp_fork_count + 1;
1661 };
1662}
1663
1664void
1665__kmp_suspend_uninitialize_thread( kmp_info_t *th )
1666{
1667 if(th->th.th_suspend_init_count > __kmp_fork_count) {
1668 /* this means we have initialize the suspension pthread objects for this thread
1669 in this instance of the process */
1670 int status;
1671
1672 status = pthread_cond_destroy( &th->th.th_suspend_cv.c_cond );
1673 if ( status != 0 && status != EBUSY ) {
1674 KMP_SYSFAIL( "pthread_cond_destroy", status );
1675 };
1676 status = pthread_mutex_destroy( &th->th.th_suspend_mx.m_mutex );
1677 if ( status != 0 && status != EBUSY ) {
1678 KMP_SYSFAIL( "pthread_mutex_destroy", status );
1679 };
1680 --th->th.th_suspend_init_count;
1681 KMP_DEBUG_ASSERT(th->th.th_suspend_init_count == __kmp_fork_count);
1682 }
1683}
1684
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001685/* This routine puts the calling thread to sleep after setting the
1686 * sleep bit for the indicated flag variable to true.
Jim Cownie5e8470a2013-09-27 10:38:44 +00001687 */
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001688template <class C>
1689static inline void __kmp_suspend_template( int th_gtid, C *flag )
Jim Cownie5e8470a2013-09-27 10:38:44 +00001690{
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001691 KMP_TIME_BLOCK(USER_suspend);
Jim Cownie5e8470a2013-09-27 10:38:44 +00001692 kmp_info_t *th = __kmp_threads[th_gtid];
1693 int status;
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001694 typename C::flag_t old_spin;
Jim Cownie5e8470a2013-09-27 10:38:44 +00001695
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001696 KF_TRACE( 30, ("__kmp_suspend_template: T#%d enter for flag = %p\n", th_gtid, flag->get() ) );
Jim Cownie5e8470a2013-09-27 10:38:44 +00001697
1698 __kmp_suspend_initialize_thread( th );
1699
1700 status = pthread_mutex_lock( &th->th.th_suspend_mx.m_mutex );
1701 KMP_CHECK_SYSFAIL( "pthread_mutex_lock", status );
1702
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001703 KF_TRACE( 10, ( "__kmp_suspend_template: T#%d setting sleep bit for spin(%p)\n",
1704 th_gtid, flag->get() ) );
Jim Cownie5e8470a2013-09-27 10:38:44 +00001705
1706 /* TODO: shouldn't this use release semantics to ensure that __kmp_suspend_initialize_thread
1707 gets called first?
1708 */
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001709 old_spin = flag->set_sleeping();
Jim Cownie5e8470a2013-09-27 10:38:44 +00001710
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001711 KF_TRACE( 5, ( "__kmp_suspend_template: T#%d set sleep bit for spin(%p)==%d\n",
1712 th_gtid, flag->get(), *(flag->get()) ) );
Jim Cownie5e8470a2013-09-27 10:38:44 +00001713
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001714 if ( flag->done_check_val(old_spin) ) {
1715 old_spin = flag->unset_sleeping();
1716 KF_TRACE( 5, ( "__kmp_suspend_template: T#%d false alarm, reset sleep bit for spin(%p)\n",
1717 th_gtid, flag->get()) );
Jim Cownie5e8470a2013-09-27 10:38:44 +00001718 } else {
Jim Cownie5e8470a2013-09-27 10:38:44 +00001719 /* Encapsulate in a loop as the documentation states that this may
1720 * "with low probability" return when the condition variable has
1721 * not been signaled or broadcast
1722 */
1723 int deactivated = FALSE;
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001724 TCW_PTR(th->th.th_sleep_loc, (void *)flag);
1725 while ( flag->is_sleeping() ) {
Jim Cownie5e8470a2013-09-27 10:38:44 +00001726#ifdef DEBUG_SUSPEND
1727 char buffer[128];
1728 __kmp_suspend_count++;
1729 __kmp_print_cond( buffer, &th->th.th_suspend_cv );
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001730 __kmp_printf( "__kmp_suspend_template: suspending T#%d: %s\n", th_gtid, buffer );
Jim Cownie5e8470a2013-09-27 10:38:44 +00001731#endif
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001732 // Mark the thread as no longer active (only in the first iteration of the loop).
Jim Cownie5e8470a2013-09-27 10:38:44 +00001733 if ( ! deactivated ) {
1734 th->th.th_active = FALSE;
1735 if ( th->th.th_active_in_pool ) {
1736 th->th.th_active_in_pool = FALSE;
1737 KMP_TEST_THEN_DEC32(
1738 (kmp_int32 *) &__kmp_thread_pool_active_nth );
1739 KMP_DEBUG_ASSERT( TCR_4(__kmp_thread_pool_active_nth) >= 0 );
1740 }
1741 deactivated = TRUE;
1742
1743
1744 }
1745
1746#if USE_SUSPEND_TIMEOUT
1747 struct timespec now;
1748 struct timeval tval;
1749 int msecs;
1750
1751 status = gettimeofday( &tval, NULL );
1752 KMP_CHECK_SYSFAIL_ERRNO( "gettimeofday", status );
1753 TIMEVAL_TO_TIMESPEC( &tval, &now );
1754
1755 msecs = (4*__kmp_dflt_blocktime) + 200;
1756 now.tv_sec += msecs / 1000;
1757 now.tv_nsec += (msecs % 1000)*1000;
1758
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001759 KF_TRACE( 15, ( "__kmp_suspend_template: T#%d about to perform pthread_cond_timedwait\n",
Jim Cownie5e8470a2013-09-27 10:38:44 +00001760 th_gtid ) );
1761 status = pthread_cond_timedwait( &th->th.th_suspend_cv.c_cond, &th->th.th_suspend_mx.m_mutex, & now );
1762#else
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001763 KF_TRACE( 15, ( "__kmp_suspend_template: T#%d about to perform pthread_cond_wait\n",
Jim Cownie5e8470a2013-09-27 10:38:44 +00001764 th_gtid ) );
1765
1766 status = pthread_cond_wait( &th->th.th_suspend_cv.c_cond, &th->th.th_suspend_mx.m_mutex );
1767#endif
1768
1769 if ( (status != 0) && (status != EINTR) && (status != ETIMEDOUT) ) {
1770 KMP_SYSFAIL( "pthread_cond_wait", status );
1771 }
1772#ifdef KMP_DEBUG
1773 if (status == ETIMEDOUT) {
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001774 if ( flag->is_sleeping() ) {
1775 KF_TRACE( 100, ( "__kmp_suspend_template: T#%d timeout wakeup\n", th_gtid ) );
Jim Cownie5e8470a2013-09-27 10:38:44 +00001776 } else {
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001777 KF_TRACE( 2, ( "__kmp_suspend_template: T#%d timeout wakeup, sleep bit not set!\n",
Jim Cownie5e8470a2013-09-27 10:38:44 +00001778 th_gtid ) );
1779 }
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001780 } else if ( flag->is_sleeping() ) {
1781 KF_TRACE( 100, ( "__kmp_suspend_template: T#%d spurious wakeup\n", th_gtid ) );
Jim Cownie5e8470a2013-09-27 10:38:44 +00001782 }
1783#endif
Jim Cownie5e8470a2013-09-27 10:38:44 +00001784 } // while
1785
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001786 // Mark the thread as active again (if it was previous marked as inactive)
Jim Cownie5e8470a2013-09-27 10:38:44 +00001787 if ( deactivated ) {
1788 th->th.th_active = TRUE;
1789 if ( TCR_4(th->th.th_in_pool) ) {
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001790 KMP_TEST_THEN_INC32( (kmp_int32 *) &__kmp_thread_pool_active_nth );
Jim Cownie5e8470a2013-09-27 10:38:44 +00001791 th->th.th_active_in_pool = TRUE;
1792 }
1793 }
1794 }
1795
1796#ifdef DEBUG_SUSPEND
1797 {
1798 char buffer[128];
1799 __kmp_print_cond( buffer, &th->th.th_suspend_cv);
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001800 __kmp_printf( "__kmp_suspend_template: T#%d has awakened: %s\n", th_gtid, buffer );
Jim Cownie5e8470a2013-09-27 10:38:44 +00001801 }
1802#endif
1803
1804
1805 status = pthread_mutex_unlock( &th->th.th_suspend_mx.m_mutex );
1806 KMP_CHECK_SYSFAIL( "pthread_mutex_unlock", status );
1807
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001808 KF_TRACE( 30, ("__kmp_suspend_template: T#%d exit\n", th_gtid ) );
1809}
1810
/* Type-specific entry points over __kmp_suspend_template for the three flag
   widths used by the wait/release machinery (32-bit, 64-bit, oncore). */
void __kmp_suspend_32(int th_gtid, kmp_flag_32 *flag) {
    __kmp_suspend_template(th_gtid, flag);
}
void __kmp_suspend_64(int th_gtid, kmp_flag_64 *flag) {
    __kmp_suspend_template(th_gtid, flag);
}
void __kmp_suspend_oncore(int th_gtid, kmp_flag_oncore *flag) {
    __kmp_suspend_template(th_gtid, flag);
}
1820
1821
/* This routine signals the thread specified by target_gtid to wake up
 * after setting the sleep bit indicated by the flag argument to FALSE.
 * The target thread must already have called __kmp_suspend_template()
 */
template <class C>
static inline void __kmp_resume_template( int target_gtid, C *flag )
{
    kmp_info_t *th = __kmp_threads[target_gtid];
    int status;

#ifdef KMP_DEBUG
    // gtid of the resuming thread; used only by trace output and the assert.
    int gtid = TCR_4(__kmp_init_gtid) ? __kmp_get_gtid() : -1;
#endif

    KF_TRACE( 30, ( "__kmp_resume_template: T#%d wants to wakeup T#%d enter\n", gtid, target_gtid ) );
    KMP_DEBUG_ASSERT( gtid != target_gtid );

    __kmp_suspend_initialize_thread( th );

    // Hold the target's suspend mutex for the whole check-and-signal sequence
    // so it cannot race with the target inside __kmp_suspend_template().
    status = pthread_mutex_lock( &th->th.th_suspend_mx.m_mutex );
    KMP_CHECK_SYSFAIL( "pthread_mutex_lock", status );

    // A NULL flag argument means "use whatever sleep location the target
    // registered when it went to sleep".
    if (!flag) {
        flag = (C *)th->th.th_sleep_loc;
    }

    // Still NULL: target never went to sleep (or already woke up) -- no-op.
    if (!flag) {
        KF_TRACE( 5, ( "__kmp_resume_template: T#%d exiting, thread T#%d already awake: flag(%p)\n",
                       gtid, target_gtid, NULL ) );
        status = pthread_mutex_unlock( &th->th.th_suspend_mx.m_mutex );
        KMP_CHECK_SYSFAIL( "pthread_mutex_unlock", status );
        return;
    }
    else {
        // Atomically clear the sleep bit; if it was not actually set, the
        // target is already awake and no signal is needed.
        typename C::flag_t old_spin = flag->unset_sleeping();
        if ( ! flag->is_sleeping_val(old_spin) ) {
            KF_TRACE( 5, ( "__kmp_resume_template: T#%d exiting, thread T#%d already awake: flag(%p): "
                           "%u => %u\n",
                           gtid, target_gtid, flag->get(), old_spin, *flag->get() ) );

            status = pthread_mutex_unlock( &th->th.th_suspend_mx.m_mutex );
            KMP_CHECK_SYSFAIL( "pthread_mutex_unlock", status );
            return;
        }
        KF_TRACE( 5, ( "__kmp_resume_template: T#%d about to wakeup T#%d, reset sleep bit for flag's loc(%p): "
                       "%u => %u\n",
                       gtid, target_gtid, flag->get(), old_spin, *flag->get() ) );
    }
    // Sleep bit cleared above, so the registered sleep location is stale.
    TCW_PTR(th->th.th_sleep_loc, NULL);


#ifdef DEBUG_SUSPEND
    {
        char buffer[128];
        __kmp_print_cond( buffer, &th->th.th_suspend_cv );
        __kmp_printf( "__kmp_resume_template: T#%d resuming T#%d: %s\n", gtid, target_gtid, buffer );
    }
#endif


    // Signal while still holding the mutex; the target re-checks its state
    // under the same mutex when pthread_cond_(timed)wait returns.
    status = pthread_cond_signal( &th->th.th_suspend_cv.c_cond );
    KMP_CHECK_SYSFAIL( "pthread_cond_signal", status );
    status = pthread_mutex_unlock( &th->th.th_suspend_mx.m_mutex );
    KMP_CHECK_SYSFAIL( "pthread_mutex_unlock", status );
    KF_TRACE( 30, ( "__kmp_resume_template: T#%d exiting after signaling wake up for T#%d\n",
                    gtid, target_gtid ) );
}
1889
/* Type-specific entry points over __kmp_resume_template, mirroring the
   __kmp_suspend_* wrappers above. */
void __kmp_resume_32(int target_gtid, kmp_flag_32 *flag) {
    __kmp_resume_template(target_gtid, flag);
}
void __kmp_resume_64(int target_gtid, kmp_flag_64 *flag) {
    __kmp_resume_template(target_gtid, flag);
}
void __kmp_resume_oncore(int target_gtid, kmp_flag_oncore *flag) {
    __kmp_resume_template(target_gtid, flag);
}
1899
// Wake the monitor thread.  Unlike worker threads, the monitor sleeps on the
// global __kmp_wait_cv / __kmp_wait_mx pair, so this does not go through
// __kmp_resume_template().
void
__kmp_resume_monitor()
{
    KMP_TIME_BLOCK(USER_resume);
    int status;
#ifdef KMP_DEBUG
    int gtid = TCR_4(__kmp_init_gtid) ? __kmp_get_gtid() : -1;
    KF_TRACE( 30, ( "__kmp_resume_monitor: T#%d wants to wakeup T#%d enter\n",
                    gtid, KMP_GTID_MONITOR ) );
    KMP_DEBUG_ASSERT( gtid != KMP_GTID_MONITOR );
#endif
    // Signal under the mutex so the wakeup cannot be lost between the
    // monitor's state check and its wait.
    status = pthread_mutex_lock( &__kmp_wait_mx.m_mutex );
    KMP_CHECK_SYSFAIL( "pthread_mutex_lock", status );
#ifdef DEBUG_SUSPEND
    {
        char buffer[128];
        __kmp_print_cond( buffer, &__kmp_wait_cv.c_cond );
        __kmp_printf( "__kmp_resume_monitor: T#%d resuming T#%d: %s\n", gtid, KMP_GTID_MONITOR, buffer );
    }
#endif
    status = pthread_cond_signal( &__kmp_wait_cv.c_cond );
    KMP_CHECK_SYSFAIL( "pthread_cond_signal", status );
    status = pthread_mutex_unlock( &__kmp_wait_mx.m_mutex );
    KMP_CHECK_SYSFAIL( "pthread_mutex_unlock", status );
    KF_TRACE( 30, ( "__kmp_resume_monitor: T#%d exiting after signaling wake up for T#%d\n",
                    gtid, KMP_GTID_MONITOR ) );
}
Jim Cownie5e8470a2013-09-27 10:38:44 +00001927
1928/* ------------------------------------------------------------------------ */
1929/* ------------------------------------------------------------------------ */
1930
1931void
1932__kmp_yield( int cond )
1933{
1934 if (cond && __kmp_yielding_on) {
1935 sched_yield();
1936 }
1937}
1938
1939/* ------------------------------------------------------------------------ */
1940/* ------------------------------------------------------------------------ */
1941
// Store this thread's global thread id in thread-specific storage.
void
__kmp_gtid_set_specific( int gtid )
{
    int status;
    KMP_ASSERT( __kmp_init_runtime );
    // Store gtid+1 so that a stored gtid of 0 is distinguishable from "key
    // never set" (pthread_getspecific returns NULL for unset keys).
    status = pthread_setspecific( __kmp_gtid_threadprivate_key, (void*)(intptr_t)(gtid+1) );
    KMP_CHECK_SYSFAIL( "pthread_setspecific", status );
}
1950
1951int
1952__kmp_gtid_get_specific()
1953{
1954 int gtid;
1955 if ( !__kmp_init_runtime ) {
1956 KA_TRACE( 50, ("__kmp_get_specific: runtime shutdown, returning KMP_GTID_SHUTDOWN\n" ) );
1957 return KMP_GTID_SHUTDOWN;
1958 }
1959 gtid = (int)(size_t)pthread_getspecific( __kmp_gtid_threadprivate_key );
1960 if ( gtid == 0 ) {
1961 gtid = KMP_GTID_DNE;
1962 }
1963 else {
1964 gtid--;
1965 }
1966 KA_TRACE( 50, ("__kmp_gtid_get_specific: key:%d gtid:%d\n",
1967 __kmp_gtid_threadprivate_key, gtid ));
1968 return gtid;
1969}
1970
1971/* ------------------------------------------------------------------------ */
1972/* ------------------------------------------------------------------------ */
1973
// Return the CPU time consumed by this process (user time plus user time of
// waited-for children), in seconds.
double
__kmp_read_cpu_time( void )
{
    struct tms buffer;

    times( & buffer );

    // POSIX times() reports values in clock ticks of sysconf(_SC_CLK_TCK),
    // NOT in CLOCKS_PER_SEC units (that constant applies to clock() only).
    // Dividing by CLOCKS_PER_SEC under-reported the time by a factor of
    // CLOCKS_PER_SEC / CLK_TCK (typically 10000 on Linux).
    long ticks_per_sec = sysconf( _SC_CLK_TCK );
    if ( ticks_per_sec <= 0 ) {
        ticks_per_sec = CLOCKS_PER_SEC; // defensive fallback; should not happen
    }

    return (buffer.tms_utime + buffer.tms_cutime) / (double) ticks_per_sec;
}
1984
// Fill *info with per-process resource-usage counters from getrusage().
// Returns 0 on success, non-zero if getrusage() failed.
int
__kmp_read_system_info( struct kmp_sys_info *info )
{
    int status;
    struct rusage r_usage;

    // Zero the whole struct first so any field not copied below reads as 0.
    memset( info, 0, sizeof( *info ) );

    status = getrusage( RUSAGE_SELF, &r_usage);
    KMP_CHECK_SYSFAIL_ERRNO( "getrusage", status );

    info->maxrss = r_usage.ru_maxrss; /* the maximum resident set size utilized (in kilobytes) */
    info->minflt = r_usage.ru_minflt; /* the number of page faults serviced without any I/O */
    info->majflt = r_usage.ru_majflt; /* the number of page faults serviced that required I/O */
    info->nswap = r_usage.ru_nswap; /* the number of times a process was "swapped" out of memory */
    info->inblock = r_usage.ru_inblock; /* the number of times the file system had to perform input */
    info->oublock = r_usage.ru_oublock; /* the number of times the file system had to perform output */
    info->nvcsw = r_usage.ru_nvcsw; /* the number of times a context switch was voluntarily */
    info->nivcsw = r_usage.ru_nivcsw; /* the number of times a context switch was forced */

    return (status != 0);
}
2007
2008/* ------------------------------------------------------------------------ */
2009/* ------------------------------------------------------------------------ */
2010
2011
// Report in *delta the wall-clock seconds elapsed since the reference point
// recorded in __kmp_sys_timer_data.start (set by __kmp_clear_system_time).
void
__kmp_read_system_time( double *delta )
{
    double t_ns;
    struct timeval tval;
    struct timespec stop;
    int status;

    status = gettimeofday( &tval, NULL );
    KMP_CHECK_SYSFAIL_ERRNO( "gettimeofday", status );
    TIMEVAL_TO_TIMESPEC( &tval, &stop );
    t_ns = TS2NS(stop) - TS2NS(__kmp_sys_timer_data.start);
    *delta = (t_ns * 1e-9); // nanoseconds -> seconds
}
2026
// Reset the reference point used by __kmp_read_system_time() to "now".
void
__kmp_clear_system_time( void )
{
    struct timeval tval;
    int status;
    status = gettimeofday( &tval, NULL );
    KMP_CHECK_SYSFAIL_ERRNO( "gettimeofday", status );
    TIMEVAL_TO_TIMESPEC( &tval, &__kmp_sys_timer_data.start );
}
2036
2037/* ------------------------------------------------------------------------ */
2038/* ------------------------------------------------------------------------ */
2039
2040#ifdef BUILD_TV
2041
// Record a (global address -> per-thread address) threadprivate mapping by
// prepending a node to this thread's tv_data list.  NOTE(review): "TV"
// presumably refers to the TotalView debugger interface (BUILD_TV) -- confirm.
void
__kmp_tv_threadprivate_store( kmp_info_t *th, void *global_addr, void *thread_addr )
{
    struct tv_data *p;

    p = (struct tv_data *) __kmp_allocate( sizeof( *p ) );

    p->u.tp.global_addr = global_addr;
    p->u.tp.thread_addr = thread_addr;

    // Tag the node as a threadprivate mapping (type code 1).
    p->type = (void *) 1;

    // Push onto the head of the per-thread list.
    p->next = th->th.th_local.tv_data;
    th->th.th_local.tv_data = p;

    // First entry for this thread: publish the list head via the TLS key so
    // an external reader can locate it.
    if ( p->next == 0 ) {
        int rc = pthread_setspecific( __kmp_tv_key, p );
        KMP_CHECK_SYSFAIL( "pthread_setspecific", rc );
    }
}
2062
2063#endif /* BUILD_TV */
2064
2065/* ------------------------------------------------------------------------ */
2066/* ------------------------------------------------------------------------ */
2067
// Return the number of processors available to the process, using the
// appropriate platform mechanism.  Falls back to a guess of 2 if the OS
// reports 0 or fewer.  Runs before trace support is initialized, so it must
// not use KA_TRACE().
static int
__kmp_get_xproc( void ) {

    int r = 0;

    #if KMP_OS_LINUX

        // Number of processors currently online.
        r = sysconf( _SC_NPROCESSORS_ONLN );

    #elif KMP_OS_DARWIN

        // Bug C77011 High "OpenMP Threads and number of active cores".

        // Find the number of available CPUs.
        kern_return_t rc;
        host_basic_info_data_t info;
        mach_msg_type_number_t num = HOST_BASIC_INFO_COUNT;
        rc = host_info( mach_host_self(), HOST_BASIC_INFO, (host_info_t) & info, & num );
        if ( rc == 0 && num == HOST_BASIC_INFO_COUNT ) {
            // Cannot use KA_TRACE() here because this code works before trace support is
            // initialized.
            r = info.avail_cpus;
        } else {
            KMP_WARNING( CantGetNumAvailCPU );
            KMP_INFORM( AssumedNumCPU );
        }; // if

    #elif KMP_OS_FREEBSD

        // hw.ncpu sysctl: number of CPUs.
        int mib[] = { CTL_HW, HW_NCPU };
        size_t len = sizeof( r );
        if ( sysctl( mib, 2, &r, &len, NULL, 0 ) < 0 ) {
            r = 0; // force the fallback guess below
            KMP_WARNING( CantGetNumAvailCPU );
            KMP_INFORM( AssumedNumCPU );
        }

    #else

        #error "Unknown or unsupported OS."

    #endif

    return r > 0 ? r : 2; /* guess value of 2 if OS told us 0 */

} // __kmp_get_xproc
2114
// Open the file at `path` and scan it with vfscanf using `format`.
// Returns the number of items assigned (vfscanf's result), or 0 if the file
// could not be opened.
int
__kmp_read_from_file( char const *path, char const *format, ... )
{
    int result;
    va_list args;

    FILE *f = fopen(path, "rb");
    if ( f == NULL )
        return 0;
    // va_start is deferred until after the early return, and every va_start
    // is now paired with va_end (required by C11 7.16.1; previously missing).
    va_start(args, format);
    result = vfscanf(f, format, args);
    va_end(args);
    fclose(f);

    return result;
}
Jim Cownie5e8470a2013-09-27 10:38:44 +00002130
// One-time initialization of OS-level runtime state: CPU info, processor and
// thread limits, TLS keys, and the global monitor wait mutex/condvar.
// Idempotent: returns immediately if already initialized.
void
__kmp_runtime_initialize( void )
{
    int status;
    pthread_mutexattr_t mutex_attr;
    pthread_condattr_t cond_attr;

    if ( __kmp_init_runtime ) {
        return;
    }; // if

    #if ( KMP_ARCH_X86 || KMP_ARCH_X86_64 )
        if ( ! __kmp_cpuinfo.initialized ) {
            __kmp_query_cpuid( &__kmp_cpuinfo );
        }; // if
    #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

    __kmp_xproc = __kmp_get_xproc();

    if ( sysconf( _SC_THREADS ) ) {

        /* Query the maximum number of threads */
        __kmp_sys_max_nth = sysconf( _SC_THREAD_THREADS_MAX );
        if ( __kmp_sys_max_nth == -1 ) {
            /* Unlimited threads for NPTL */
            __kmp_sys_max_nth = INT_MAX;
        }
        else if ( __kmp_sys_max_nth <= 1 ) {
            /* Can't tell, just use PTHREAD_THREADS_MAX */
            __kmp_sys_max_nth = KMP_MAX_NTH;
        }

        /* Query the minimum stack size */
        __kmp_sys_min_stksize = sysconf( _SC_THREAD_STACK_MIN );
        if ( __kmp_sys_min_stksize <= 1 ) {
            __kmp_sys_min_stksize = KMP_MIN_STKSIZE;
        }
    }

    /* Set up minimum number of threads to switch to TLS gtid */
    __kmp_tls_gtid_min = KMP_TLS_GTID_MIN;


    #ifdef BUILD_TV
        {
            int rc = pthread_key_create( & __kmp_tv_key, 0 );
            KMP_CHECK_SYSFAIL( "pthread_key_create", rc );
        }
    #endif

    // The key destructor (__kmp_internal_end_dest) runs when a thread with a
    // registered gtid exits.
    status = pthread_key_create( &__kmp_gtid_threadprivate_key, __kmp_internal_end_dest );
    KMP_CHECK_SYSFAIL( "pthread_key_create", status );
    status = pthread_mutexattr_init( & mutex_attr );
    KMP_CHECK_SYSFAIL( "pthread_mutexattr_init", status );
    status = pthread_mutex_init( & __kmp_wait_mx.m_mutex, & mutex_attr );
    KMP_CHECK_SYSFAIL( "pthread_mutex_init", status );
    status = pthread_condattr_init( & cond_attr );
    KMP_CHECK_SYSFAIL( "pthread_condattr_init", status );
    status = pthread_cond_init( & __kmp_wait_cv.c_cond, & cond_attr );
    KMP_CHECK_SYSFAIL( "pthread_cond_init", status );
#if USE_ITT_BUILD
    __kmp_itt_initialize();
#endif /* USE_ITT_BUILD */

    __kmp_init_runtime = TRUE;
}
2197
// Tear down OS-level runtime state created by __kmp_runtime_initialize():
// TLS keys, the monitor wait mutex/condvar, ITT, and affinity.  Safe to call
// when not initialized (no-op).
void
__kmp_runtime_destroy( void )
{
    int status;

    if ( ! __kmp_init_runtime ) {
        return; // Nothing to do.
    };

#if USE_ITT_BUILD
    __kmp_itt_destroy();
#endif /* USE_ITT_BUILD */

    status = pthread_key_delete( __kmp_gtid_threadprivate_key );
    KMP_CHECK_SYSFAIL( "pthread_key_delete", status );
    #ifdef BUILD_TV
        status = pthread_key_delete( __kmp_tv_key );
        KMP_CHECK_SYSFAIL( "pthread_key_delete", status );
    #endif

    // EBUSY is tolerated: the monitor may still hold the mutex/condvar while
    // the process is shutting down.
    status = pthread_mutex_destroy( & __kmp_wait_mx.m_mutex );
    if ( status != 0 && status != EBUSY ) {
        KMP_SYSFAIL( "pthread_mutex_destroy", status );
    }
    status = pthread_cond_destroy( & __kmp_wait_cv.c_cond );
    if ( status != 0 && status != EBUSY ) {
        KMP_SYSFAIL( "pthread_cond_destroy", status );
    }
    #if KMP_AFFINITY_SUPPORTED
        __kmp_affinity_uninitialize();
    #endif

    __kmp_init_runtime = FALSE;
}
2232
2233
2234/* Put the thread to sleep for a time period */
2235/* NOTE: not currently used anywhere */
2236void
2237__kmp_thread_sleep( int millis )
2238{
2239 sleep( ( millis + 500 ) / 1000 );
2240}
2241
/* Calculate the elapsed wall clock time for the user */
// Store in *t the current time in seconds as a double.  This is an absolute
// reading; callers compute elapsed time by subtracting two readings.
void
__kmp_elapsed( double *t )
{
    int status;
# ifdef FIX_SGI_CLOCK
    struct timespec ts;

    status = clock_gettime( CLOCK_PROCESS_CPUTIME_ID, &ts );
    KMP_CHECK_SYSFAIL_ERRNO( "clock_gettime", status );
    *t = (double) ts.tv_nsec * (1.0 / (double) KMP_NSEC_PER_SEC) +
        (double) ts.tv_sec;
# else
    struct timeval tv;

    status = gettimeofday( & tv, NULL );
    KMP_CHECK_SYSFAIL_ERRNO( "gettimeofday", status );
    *t = (double) tv.tv_usec * (1.0 / (double) KMP_USEC_PER_SEC) +
        (double) tv.tv_sec;
# endif
}
2263
/* Calculate the elapsed wall clock tick for the user */
// Report the timer granularity (seconds per tick) as the reciprocal of the
// clock rate.
void
__kmp_elapsed_tick( double *t )
{
    const double ticks_per_second = (double) CLOCKS_PER_SEC;
    *t = 1.0 / ticks_per_second;
}
2270
/*
    Determine whether the given address is mapped into the current address space.
    Returns 1 if `addr` lies in a readable+writable mapping, 0 otherwise
    (except on FreeBSD, where detection is unimplemented and 1 is returned).
*/

int
__kmp_is_address_mapped( void * addr ) {

    int found = 0;
    int rc;

    #if KMP_OS_LINUX

        /*
            On Linux* OS, read the /proc/<pid>/maps pseudo-file to get all the address ranges mapped
            into the address space.
        */

        char * name = __kmp_str_format( "/proc/%d/maps", getpid() );
        FILE * file = NULL;

        file = fopen( name, "r" );
        KMP_ASSERT( file != NULL );

        for ( ; ; ) {

            void * beginning = NULL;
            void * ending    = NULL;
            char   perms[ 5 ];   // 4 permission chars + NUL

            // Each maps line starts "begin-end perms ..."; %4s stops at 4 chars.
            rc = fscanf( file, "%p-%p %4s %*[^\n]\n", & beginning, & ending, perms );
            if ( rc == EOF ) {
                break;
            }; // if
            KMP_ASSERT( rc == 3 && KMP_STRLEN( perms ) == 4 ); // Make sure all fields are read.

            // Ending address is not included in the region, but beginning is.
            if ( ( addr >= beginning ) && ( addr < ending ) ) {
                perms[ 2 ] = 0;   // 3th and 4th character does not matter.
                if ( strcmp( perms, "rw" ) == 0 ) {
                    // Memory we are looking for should be readable and writable.
                    found = 1;
                }; // if
                break;
            }; // if

        }; // forever

        // Free resources.
        fclose( file );
        KMP_INTERNAL_FREE( name );

    #elif KMP_OS_DARWIN

        /*
            On OS X*, /proc pseudo filesystem is not available. Try to read memory using vm
            interface.
        */

        int buffer;
        vm_size_t count;
        rc =
            vm_read_overwrite(
                mach_task_self(),           // Task to read memory of.
                (vm_address_t)( addr ),     // Address to read from.
                1,                          // Number of bytes to be read.
                (vm_address_t)( & buffer ), // Address of buffer to save read bytes in.
                & count                     // Address of var to save number of read bytes in.
            );
        if ( rc == 0 ) {
            // Memory successfully read.
            found = 1;
        }; // if

    #elif KMP_OS_FREEBSD

        // FIXME(FreeBSD*): Implement this
        found = 1;

    #else

        #error "Unknown or unsupported OS"

    #endif

    return found;

} // __kmp_is_address_mapped
2358
2359#ifdef USE_LOAD_BALANCE
2360
2361
2362# if KMP_OS_DARWIN
2363
2364// The function returns the rounded value of the system load average
2365// during given time interval which depends on the value of
2366// __kmp_load_balance_interval variable (default is 60 sec, other values
2367// may be 300 sec or 900 sec).
2368// It returns -1 in case of error.
2369int
2370__kmp_get_load_balance( int max )
2371{
2372 double averages[3];
2373 int ret_avg = 0;
2374
2375 int res = getloadavg( averages, 3 );
2376
2377 //Check __kmp_load_balance_interval to determine which of averages to use.
2378 // getloadavg() may return the number of samples less than requested that is
2379 // less than 3.
2380 if ( __kmp_load_balance_interval < 180 && ( res >= 1 ) ) {
2381 ret_avg = averages[0];// 1 min
2382 } else if ( ( __kmp_load_balance_interval >= 180
2383 && __kmp_load_balance_interval < 600 ) && ( res >= 2 ) ) {
2384 ret_avg = averages[1];// 5 min
2385 } else if ( ( __kmp_load_balance_interval >= 600 ) && ( res == 3 ) ) {
2386 ret_avg = averages[2];// 15 min
Alp Toker8f2d3f02014-02-24 10:40:15 +00002387 } else {// Error occurred
Jim Cownie5e8470a2013-09-27 10:38:44 +00002388 return -1;
2389 }
2390
2391 return ret_avg;
2392}
2393
2394# else // Linux* OS
2395
2396// The fuction returns number of running (not sleeping) threads, or -1 in case of error.
2397// Error could be reported if Linux* OS kernel too old (without "/proc" support).
2398// Counting running threads stops if max running threads encountered.
2399int
2400__kmp_get_load_balance( int max )
2401{
2402 static int permanent_error = 0;
2403
2404 static int glb_running_threads = 0; /* Saved count of the running threads for the thread balance algortihm */
2405 static double glb_call_time = 0; /* Thread balance algorithm call time */
2406
2407 int running_threads = 0; // Number of running threads in the system.
2408
2409 DIR * proc_dir = NULL; // Handle of "/proc/" directory.
2410 struct dirent * proc_entry = NULL;
2411
2412 kmp_str_buf_t task_path; // "/proc/<pid>/task/<tid>/" path.
2413 DIR * task_dir = NULL; // Handle of "/proc/<pid>/task/<tid>/" directory.
2414 struct dirent * task_entry = NULL;
2415 int task_path_fixed_len;
2416
2417 kmp_str_buf_t stat_path; // "/proc/<pid>/task/<tid>/stat" path.
2418 int stat_file = -1;
2419 int stat_path_fixed_len;
2420
2421 int total_processes = 0; // Total number of processes in system.
2422 int total_threads = 0; // Total number of threads in system.
2423
2424 double call_time = 0.0;
2425
2426 __kmp_str_buf_init( & task_path );
2427 __kmp_str_buf_init( & stat_path );
2428
2429 __kmp_elapsed( & call_time );
2430
2431 if ( glb_call_time &&
2432 ( call_time - glb_call_time < __kmp_load_balance_interval ) ) {
2433 running_threads = glb_running_threads;
2434 goto finish;
2435 }
2436
2437 glb_call_time = call_time;
2438
2439 // Do not spend time on scanning "/proc/" if we have a permanent error.
2440 if ( permanent_error ) {
2441 running_threads = -1;
2442 goto finish;
2443 }; // if
2444
2445 if ( max <= 0 ) {
2446 max = INT_MAX;
2447 }; // if
2448
2449 // Open "/proc/" directory.
2450 proc_dir = opendir( "/proc" );
2451 if ( proc_dir == NULL ) {
2452 // Cannot open "/prroc/". Probably the kernel does not support it. Return an error now and
2453 // in subsequent calls.
2454 running_threads = -1;
2455 permanent_error = 1;
2456 goto finish;
2457 }; // if
2458
2459 // Initialize fixed part of task_path. This part will not change.
2460 __kmp_str_buf_cat( & task_path, "/proc/", 6 );
2461 task_path_fixed_len = task_path.used; // Remember number of used characters.
2462
2463 proc_entry = readdir( proc_dir );
2464 while ( proc_entry != NULL ) {
2465 // Proc entry is a directory and name starts with a digit. Assume it is a process'
2466 // directory.
2467 if ( proc_entry->d_type == DT_DIR && isdigit( proc_entry->d_name[ 0 ] ) ) {
2468
2469 ++ total_processes;
2470 // Make sure init process is the very first in "/proc", so we can replace
2471 // strcmp( proc_entry->d_name, "1" ) == 0 with simpler total_processes == 1.
2472 // We are going to check that total_processes == 1 => d_name == "1" is true (where
2473 // "=>" is implication). Since C++ does not have => operator, let us replace it with its
2474 // equivalent: a => b == ! a || b.
2475 KMP_DEBUG_ASSERT( total_processes != 1 || strcmp( proc_entry->d_name, "1" ) == 0 );
2476
2477 // Construct task_path.
2478 task_path.used = task_path_fixed_len; // Reset task_path to "/proc/".
Andrey Churbanov74bf17b2015-04-02 13:27:08 +00002479 __kmp_str_buf_cat( & task_path, proc_entry->d_name, KMP_STRLEN( proc_entry->d_name ) );
Jim Cownie5e8470a2013-09-27 10:38:44 +00002480 __kmp_str_buf_cat( & task_path, "/task", 5 );
2481
2482 task_dir = opendir( task_path.str );
2483 if ( task_dir == NULL ) {
2484 // Process can finish between reading "/proc/" directory entry and opening process'
2485 // "task/" directory. So, in general case we should not complain, but have to skip
2486 // this process and read the next one.
2487 // But on systems with no "task/" support we will spend lot of time to scan "/proc/"
2488 // tree again and again without any benefit. "init" process (its pid is 1) should
2489 // exist always, so, if we cannot open "/proc/1/task/" directory, it means "task/"
2490 // is not supported by kernel. Report an error now and in the future.
2491 if ( strcmp( proc_entry->d_name, "1" ) == 0 ) {
2492 running_threads = -1;
2493 permanent_error = 1;
2494 goto finish;
2495 }; // if
2496 } else {
2497 // Construct fixed part of stat file path.
2498 __kmp_str_buf_clear( & stat_path );
2499 __kmp_str_buf_cat( & stat_path, task_path.str, task_path.used );
2500 __kmp_str_buf_cat( & stat_path, "/", 1 );
2501 stat_path_fixed_len = stat_path.used;
2502
2503 task_entry = readdir( task_dir );
2504 while ( task_entry != NULL ) {
2505 // It is a directory and name starts with a digit.
2506 if ( proc_entry->d_type == DT_DIR && isdigit( task_entry->d_name[ 0 ] ) ) {
2507
2508 ++ total_threads;
2509
2510 // Consruct complete stat file path. Easiest way would be:
2511 // __kmp_str_buf_print( & stat_path, "%s/%s/stat", task_path.str, task_entry->d_name );
2512 // but seriae of __kmp_str_buf_cat works a bit faster.
2513 stat_path.used = stat_path_fixed_len; // Reset stat path to its fixed part.
Andrey Churbanov74bf17b2015-04-02 13:27:08 +00002514 __kmp_str_buf_cat( & stat_path, task_entry->d_name, KMP_STRLEN( task_entry->d_name ) );
Jim Cownie5e8470a2013-09-27 10:38:44 +00002515 __kmp_str_buf_cat( & stat_path, "/stat", 5 );
2516
2517 // Note: Low-level API (open/read/close) is used. High-level API
2518 // (fopen/fclose) works ~ 30 % slower.
2519 stat_file = open( stat_path.str, O_RDONLY );
2520 if ( stat_file == -1 ) {
2521 // We cannot report an error because task (thread) can terminate just
2522 // before reading this file.
2523 } else {
2524 /*
2525 Content of "stat" file looks like:
2526
2527 24285 (program) S ...
2528
2529 It is a single line (if program name does not include fanny
2530 symbols). First number is a thread id, then name of executable file
2531 name in paretheses, then state of the thread. We need just thread
2532 state.
2533
2534 Good news: Length of program name is 15 characters max. Longer
2535 names are truncated.
2536
2537 Thus, we need rather short buffer: 15 chars for program name +
2538 2 parenthesis, + 3 spaces + ~7 digits of pid = 37.
2539
2540 Bad news: Program name may contain special symbols like space,
2541 closing parenthesis, or even new line. This makes parsing "stat"
2542 file not 100 % reliable. In case of fanny program names parsing
2543 may fail (report incorrect thread state).
2544
2545 Parsing "status" file looks more promissing (due to different
2546 file structure and escaping special symbols) but reading and
2547 parsing of "status" file works slower.
2548
2549 -- ln
2550 */
2551 char buffer[ 65 ];
2552 int len;
2553 len = read( stat_file, buffer, sizeof( buffer ) - 1 );
2554 if ( len >= 0 ) {
2555 buffer[ len ] = 0;
2556 // Using scanf:
2557 // sscanf( buffer, "%*d (%*s) %c ", & state );
2558 // looks very nice, but searching for a closing parenthesis works a
2559 // bit faster.
2560 char * close_parent = strstr( buffer, ") " );
2561 if ( close_parent != NULL ) {
2562 char state = * ( close_parent + 2 );
2563 if ( state == 'R' ) {
2564 ++ running_threads;
2565 if ( running_threads >= max ) {
2566 goto finish;
2567 }; // if
2568 }; // if
2569 }; // if
2570 }; // if
2571 close( stat_file );
2572 stat_file = -1;
2573 }; // if
2574 }; // if
2575 task_entry = readdir( task_dir );
2576 }; // while
2577 closedir( task_dir );
2578 task_dir = NULL;
2579 }; // if
2580 }; // if
2581 proc_entry = readdir( proc_dir );
2582 }; // while
2583
2584 //
2585 // There _might_ be a timing hole where the thread executing this
2586 // code get skipped in the load balance, and running_threads is 0.
2587 // Assert in the debug builds only!!!
2588 //
2589 KMP_DEBUG_ASSERT( running_threads > 0 );
2590 if ( running_threads <= 0 ) {
2591 running_threads = 1;
2592 }
2593
2594 finish: // Clean up and exit.
2595 if ( proc_dir != NULL ) {
2596 closedir( proc_dir );
2597 }; // if
2598 __kmp_str_buf_free( & task_path );
2599 if ( task_dir != NULL ) {
2600 closedir( task_dir );
2601 }; // if
2602 __kmp_str_buf_free( & stat_path );
2603 if ( stat_file != -1 ) {
2604 close( stat_file );
2605 }; // if
2606
2607 glb_running_threads = running_threads;
2608
2609 return running_threads;
2610
2611} // __kmp_get_load_balance
2612
2613# endif // KMP_OS_DARWIN
2614
2615#endif // USE_LOAD_BALANCE
2616
Andrey Churbanovedc370e2015-08-05 11:23:10 +00002617#if !(KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_MIC)
Jim Cownie3051f972014-08-07 10:12:54 +00002618
// we really only need the case with 1 argument, because CLANG always build
// a struct of pointers to shared variables referenced in the outlined function
//
// Invoke the outlined microtask `pkfn` with gtid/tid plus up to 15 forwarded
// void* arguments.  Each argc value is expanded explicitly because C varargs
// cannot forward an unknown argument count to a fixed-signature callee.
// Returns 1 on success; aborts the process if argc exceeds 15.
int
__kmp_invoke_microtask( microtask_t pkfn,
                        int gtid, int tid,
                        int argc, void *p_argv[]
#if OMPT_SUPPORT
                        , void **exit_frame_ptr
#endif
)
{
#if OMPT_SUPPORT
    // Publish this frame's address so the OMPT tool can delimit runtime frames.
    *exit_frame_ptr = __builtin_frame_address(0);
#endif

    switch (argc) {
    default:
        fprintf(stderr, "Too many args to microtask: %d!\n", argc);
        fflush(stderr);
        exit(-1);
    case 0:
        (*pkfn)(&gtid, &tid);
        break;
    case 1:
        (*pkfn)(&gtid, &tid, p_argv[0]);
        break;
    case 2:
        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1]);
        break;
    case 3:
        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2]);
        break;
    case 4:
        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3]);
        break;
    case 5:
        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4]);
        break;
    case 6:
        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
                p_argv[5]);
        break;
    case 7:
        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
                p_argv[5], p_argv[6]);
        break;
    case 8:
        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
                p_argv[5], p_argv[6], p_argv[7]);
        break;
    case 9:
        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
                p_argv[5], p_argv[6], p_argv[7], p_argv[8]);
        break;
    case 10:
        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
                p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9]);
        break;
    case 11:
        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
                p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10]);
        break;
    case 12:
        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
                p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
                p_argv[11]);
        break;
    case 13:
        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
                p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
                p_argv[11], p_argv[12]);
        break;
    case 14:
        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
                p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
                p_argv[11], p_argv[12], p_argv[13]);
        break;
    case 15:
        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
                p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
                p_argv[11], p_argv[12], p_argv[13], p_argv[14]);
        break;
    }

#if OMPT_SUPPORT
    // Clear the frame marker once the microtask has returned.
    *exit_frame_ptr = 0;
#endif

    return 1;
}
2709
2710#endif
Jim Cownie181b4bb2013-12-23 17:28:57 +00002711
Jim Cownie5e8470a2013-09-27 10:38:44 +00002712// end of file //
2713