/*
 * z_Linux_util.c -- platform specific routines.
 */


//===----------------------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//


#include "kmp.h"
#include "kmp_wrapper_getpid.h"
#include "kmp_itt.h"
#include "kmp_str.h"
#include "kmp_i18n.h"
#include "kmp_io.h"
#include "kmp_stats.h"
#include "kmp_wait_release.h"

#if !KMP_OS_FREEBSD
# include <alloca.h>
#endif
#include <unistd.h>
#include <math.h>               // HUGE_VAL.
#include <sys/time.h>
#include <sys/times.h>
#include <sys/resource.h>
#include <sys/syscall.h>

#if KMP_OS_LINUX && !KMP_OS_CNK
# include <sys/sysinfo.h>
# if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
// We should really include <futex.h>, but that causes compatibility problems on different
// Linux* OS distributions that either require that you include (or break when you try to
// include) <pci/types.h>.
// Since all we need are the two macros below (which are part of the kernel ABI, so they
// cannot change), we just define the constants here and don't include <futex.h>.
#  ifndef FUTEX_WAIT
#   define FUTEX_WAIT    0
#  endif
#  ifndef FUTEX_WAKE
#   define FUTEX_WAKE    1
#  endif
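// Illustrative sketch (not part of the library): how FUTEX_WAIT/FUTEX_WAKE are
// typically driven through the raw syscall interface; the helper names `addr`
// and `val` are hypothetical.
//
//     #include <sys/syscall.h>
//     #include <unistd.h>
//
//     static long futex_wait( int *addr, int val ) {
//         // The kernel sleeps only if *addr still equals val, which closes
//         // the classic lost-wakeup race.
//         return syscall( __NR_futex, addr, FUTEX_WAIT, val, NULL, NULL, 0 );
//     }
//
//     static long futex_wake_one( int *addr ) {
//         return syscall( __NR_futex, addr, FUTEX_WAKE, 1, NULL, NULL, 0 );
//     }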
# endif
#elif KMP_OS_DARWIN
# include <sys/sysctl.h>
# include <mach/mach.h>
#elif KMP_OS_FREEBSD
# include <sys/sysctl.h>
# include <pthread_np.h>
#endif


#include <dirent.h>
#include <ctype.h>
#include <fcntl.h>

// For non-x86 architecture
#if KMP_COMPILER_GCC && !(KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_PPC64 || KMP_ARCH_AARCH64)
# include <stdbool.h>
# include <ffi.h>
#endif

/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */

struct kmp_sys_timer {
    struct timespec     start;
};

// Convert timespec to nanoseconds.
#define TS2NS(timespec) (((timespec).tv_sec * 1e9) + (timespec).tv_nsec)
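// For example, a timespec of { tv_sec = 2, tv_nsec = 500000000 } converts to
// 2.5e9 ns. Note that multiplying by 1e9 promotes the result to double, so
// very large tv_sec values keep only ~53 bits of precision.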

static struct kmp_sys_timer __kmp_sys_timer_data;

#if KMP_HANDLE_SIGNALS
    typedef void                            (* sig_func_t )( int );
    STATIC_EFI2_WORKAROUND struct sigaction __kmp_sighldrs[ NSIG ];
    static sigset_t                         __kmp_sigset;
#endif

static int __kmp_init_runtime   = FALSE;

static int __kmp_fork_count = 0;

static pthread_condattr_t  __kmp_suspend_cond_attr;
static pthread_mutexattr_t __kmp_suspend_mutex_attr;

static kmp_cond_align_t    __kmp_wait_cv;
static kmp_mutex_align_t   __kmp_wait_mx;

/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */

#ifdef DEBUG_SUSPEND
static void
__kmp_print_cond( char *buffer, kmp_cond_align_t *cond )
{
    sprintf( buffer, "(cond (lock (%ld, %d)), (descr (%p)))",
                      cond->c_cond.__c_lock.__status, cond->c_cond.__c_lock.__spinlock,
                      cond->c_cond.__c_waiting );
}
#endif

/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */

#if ( KMP_OS_LINUX && KMP_AFFINITY_SUPPORTED)

/*
 * Affinity support
 */

/*
 * On some of the older OS's that we build on, these constants aren't present
 * in <asm/unistd.h> #included from <sys/syscall.h>. They must be the same on
 * all systems of the same arch where they are defined, and since they are part
 * of the kernel ABI they cannot change -- they are set in stone forever.
 */
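// Illustrative sketch (not part of the library): once affinity is known to
// work, the same query can be made through the glibc wrapper instead of the
// raw syscall numbers below.
//
//     #define _GNU_SOURCE
//     #include <sched.h>
//     #include <stdio.h>
//
//     void print_affinity_example( void ) {
//         cpu_set_t set;
//         if ( sched_getaffinity( 0, sizeof( set ), &set ) == 0 ) {
//             printf( "bound to %d cpus\n", CPU_COUNT( &set ) );
//         }
//     }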

# if KMP_ARCH_X86 || KMP_ARCH_ARM
#  ifndef __NR_sched_setaffinity
#   define __NR_sched_setaffinity  241
#  elif __NR_sched_setaffinity != 241
#   error Wrong code for setaffinity system call.
#  endif /* __NR_sched_setaffinity */
#  ifndef __NR_sched_getaffinity
#   define __NR_sched_getaffinity  242
#  elif __NR_sched_getaffinity != 242
#   error Wrong code for getaffinity system call.
#  endif /* __NR_sched_getaffinity */

# elif KMP_ARCH_AARCH64
#  ifndef __NR_sched_setaffinity
#   define __NR_sched_setaffinity  122
#  elif __NR_sched_setaffinity != 122
#   error Wrong code for setaffinity system call.
#  endif /* __NR_sched_setaffinity */
#  ifndef __NR_sched_getaffinity
#   define __NR_sched_getaffinity  123
#  elif __NR_sched_getaffinity != 123
#   error Wrong code for getaffinity system call.
#  endif /* __NR_sched_getaffinity */

# elif KMP_ARCH_X86_64
#  ifndef __NR_sched_setaffinity
#   define __NR_sched_setaffinity  203
#  elif __NR_sched_setaffinity != 203
#   error Wrong code for setaffinity system call.
#  endif /* __NR_sched_setaffinity */
#  ifndef __NR_sched_getaffinity
#   define __NR_sched_getaffinity  204
#  elif __NR_sched_getaffinity != 204
#   error Wrong code for getaffinity system call.
#  endif /* __NR_sched_getaffinity */

# elif KMP_ARCH_PPC64
#  ifndef __NR_sched_setaffinity
#   define __NR_sched_setaffinity  222
#  elif __NR_sched_setaffinity != 222
#   error Wrong code for setaffinity system call.
#  endif /* __NR_sched_setaffinity */
#  ifndef __NR_sched_getaffinity
#   define __NR_sched_getaffinity  223
#  elif __NR_sched_getaffinity != 223
#   error Wrong code for getaffinity system call.
#  endif /* __NR_sched_getaffinity */

# else
#  error Unknown or unsupported architecture

# endif /* KMP_ARCH_* */

int
__kmp_set_system_affinity( kmp_affin_mask_t const *mask, int abort_on_error )
{
    KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
      "Illegal set affinity operation when not capable");

    int retval = syscall( __NR_sched_setaffinity, 0, __kmp_affin_mask_size, mask );
    if (retval >= 0) {
        return 0;
    }
    int error = errno;
    if (abort_on_error) {
        __kmp_msg(
            kmp_ms_fatal,
            KMP_MSG( FatalSysError ),
            KMP_ERR( error ),
            __kmp_msg_null
        );
    }
    return error;
}

int
__kmp_get_system_affinity( kmp_affin_mask_t *mask, int abort_on_error )
{
    KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
      "Illegal get affinity operation when not capable");

    int retval = syscall( __NR_sched_getaffinity, 0, __kmp_affin_mask_size, mask );
    if (retval >= 0) {
        return 0;
    }
    int error = errno;
    if (abort_on_error) {
        __kmp_msg(
            kmp_ms_fatal,
            KMP_MSG( FatalSysError ),
            KMP_ERR( error ),
            __kmp_msg_null
        );
    }
    return error;
}

void
__kmp_affinity_bind_thread( int which )
{
    KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
      "Illegal set affinity operation when not capable");

    kmp_affin_mask_t *mask = (kmp_affin_mask_t *)alloca(__kmp_affin_mask_size);
    KMP_CPU_ZERO(mask);
    KMP_CPU_SET(which, mask);
    __kmp_set_system_affinity(mask, TRUE);
}
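// For example, a thread that should run only on logical CPU 3 can be pinned
// with __kmp_affinity_bind_thread(3); the mask built on the stack above has a
// single bit set, so any previous binding is replaced rather than extended.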

/*
 * Determine if we can access affinity functionality on this version of
 * Linux* OS by checking __NR_sched_{get,set}affinity system calls, and set
 * __kmp_affin_mask_size to the appropriate value (0 means not capable).
 */
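// The probe below has three interesting outcomes:
//   gCode > 0 : the kernel returned the mask size it expects -- use it.
//   gCode < 0 : the syscall failed outright -- affinity gets disabled.
//   otherwise : search for a working size by doubling from 1 byte up to
//               KMP_CPU_SET_SIZE_LIMIT, validating each candidate with a
//               setaffinity call on a NULL buffer (EFAULT means the size
//               itself was accepted).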
void
__kmp_affinity_determine_capable(const char *env_var)
{
    //
    // Check and see if the OS supports thread affinity.
    //

# define KMP_CPU_SET_SIZE_LIMIT          (1024*1024)

    int gCode;
    int sCode;
    kmp_affin_mask_t *buf;
    buf = ( kmp_affin_mask_t * ) KMP_INTERNAL_MALLOC( KMP_CPU_SET_SIZE_LIMIT );

    // If Linux* OS:
    // If the syscall fails or returns a suggestion for the size,
    // then we don't have to search for an appropriate size.
    gCode = syscall( __NR_sched_getaffinity, 0, KMP_CPU_SET_SIZE_LIMIT, buf );
    KA_TRACE(30, ( "__kmp_affinity_determine_capable: "
      "initial getaffinity call returned %d errno = %d\n",
      gCode, errno));

    //if ((gCode < 0) && (errno == ENOSYS))
    if (gCode < 0) {
        //
        // System call not supported
        //
        if (__kmp_affinity_verbose || (__kmp_affinity_warnings
          && (__kmp_affinity_type != affinity_none)
          && (__kmp_affinity_type != affinity_default)
          && (__kmp_affinity_type != affinity_disabled))) {
            int error = errno;
            __kmp_msg(
                kmp_ms_warning,
                KMP_MSG( GetAffSysCallNotSupported, env_var ),
                KMP_ERR( error ),
                __kmp_msg_null
            );
        }
        KMP_AFFINITY_DISABLE();
        KMP_INTERNAL_FREE(buf);
        return;
    }
    if (gCode > 0) { // Linux* OS only
        // The optimal situation: the OS returns the size of the buffer
        // it expects.
        //
        // A verification of correct behavior is that setaffinity on a NULL
        // buffer with the same size fails with errno set to EFAULT.
        sCode = syscall( __NR_sched_setaffinity, 0, gCode, NULL );
        KA_TRACE(30, ( "__kmp_affinity_determine_capable: "
          "setaffinity for mask size %d returned %d errno = %d\n",
          gCode, sCode, errno));
        if (sCode < 0) {
            if (errno == ENOSYS) {
                if (__kmp_affinity_verbose || (__kmp_affinity_warnings
                  && (__kmp_affinity_type != affinity_none)
                  && (__kmp_affinity_type != affinity_default)
                  && (__kmp_affinity_type != affinity_disabled))) {
                    int error = errno;
                    __kmp_msg(
                        kmp_ms_warning,
                        KMP_MSG( SetAffSysCallNotSupported, env_var ),
                        KMP_ERR( error ),
                        __kmp_msg_null
                    );
                }
                KMP_AFFINITY_DISABLE();
                KMP_INTERNAL_FREE(buf);
            }
            if (errno == EFAULT) {
                KMP_AFFINITY_ENABLE(gCode);
                KA_TRACE(10, ( "__kmp_affinity_determine_capable: "
                  "affinity supported (mask size %d)\n",
                  (int)__kmp_affin_mask_size));
                KMP_INTERNAL_FREE(buf);
                return;
            }
        }
    }

    //
    // Call the getaffinity system call repeatedly with increasing set sizes
    // until we succeed, or reach an upper bound on the search.
    //
    KA_TRACE(30, ( "__kmp_affinity_determine_capable: "
      "searching for proper set size\n"));
    int size;
    for (size = 1; size <= KMP_CPU_SET_SIZE_LIMIT; size *= 2) {
        gCode = syscall( __NR_sched_getaffinity, 0, size, buf );
        KA_TRACE(30, ( "__kmp_affinity_determine_capable: "
          "getaffinity for mask size %d returned %d errno = %d\n", size,
          gCode, errno));

        if (gCode < 0) {
            if ( errno == ENOSYS )
            {
                //
                // We shouldn't get here
                //
                KA_TRACE(30, ( "__kmp_affinity_determine_capable: "
                  "inconsistent OS call behavior: errno == ENOSYS for mask size %d\n",
                  size));
                if (__kmp_affinity_verbose || (__kmp_affinity_warnings
                  && (__kmp_affinity_type != affinity_none)
                  && (__kmp_affinity_type != affinity_default)
                  && (__kmp_affinity_type != affinity_disabled))) {
                    int error = errno;
                    __kmp_msg(
                        kmp_ms_warning,
                        KMP_MSG( GetAffSysCallNotSupported, env_var ),
                        KMP_ERR( error ),
                        __kmp_msg_null
                    );
                }
                KMP_AFFINITY_DISABLE();
                KMP_INTERNAL_FREE(buf);
                return;
            }
            continue;
        }

        sCode = syscall( __NR_sched_setaffinity, 0, gCode, NULL );
        KA_TRACE(30, ( "__kmp_affinity_determine_capable: "
          "setaffinity for mask size %d returned %d errno = %d\n",
          gCode, sCode, errno));
        if (sCode < 0) {
            if (errno == ENOSYS) { // Linux* OS only
                //
                // We shouldn't get here
                //
                KA_TRACE(30, ( "__kmp_affinity_determine_capable: "
                  "inconsistent OS call behavior: errno == ENOSYS for mask size %d\n",
                  size));
                if (__kmp_affinity_verbose || (__kmp_affinity_warnings
                  && (__kmp_affinity_type != affinity_none)
                  && (__kmp_affinity_type != affinity_default)
                  && (__kmp_affinity_type != affinity_disabled))) {
                    int error = errno;
                    __kmp_msg(
                        kmp_ms_warning,
                        KMP_MSG( SetAffSysCallNotSupported, env_var ),
                        KMP_ERR( error ),
                        __kmp_msg_null
                    );
                }
                KMP_AFFINITY_DISABLE();
                KMP_INTERNAL_FREE(buf);
                return;
            }
            if (errno == EFAULT) {
                KMP_AFFINITY_ENABLE(gCode);
                KA_TRACE(10, ( "__kmp_affinity_determine_capable: "
                  "affinity supported (mask size %d)\n",
                  (int)__kmp_affin_mask_size));
                KMP_INTERNAL_FREE(buf);
                return;
            }
        }
    }
    //int error = errno;  // save uncaught error code
    KMP_INTERNAL_FREE(buf);
    // errno = error;  // restore uncaught error code, will be printed at the next KMP_WARNING below

    //
    // Affinity is not supported
    //
    KMP_AFFINITY_DISABLE();
    KA_TRACE(10, ( "__kmp_affinity_determine_capable: "
      "cannot determine mask size - affinity not supported\n"));
    if (__kmp_affinity_verbose || (__kmp_affinity_warnings
      && (__kmp_affinity_type != affinity_none)
      && (__kmp_affinity_type != affinity_default)
      && (__kmp_affinity_type != affinity_disabled))) {
        KMP_WARNING( AffCantGetMaskSize, env_var );
    }
}


/*
 * Change thread to the affinity mask pointed to by affin_mask argument
 * and return a pointer to the old value in the old_mask argument, if argument
 * is non-NULL.
 */

void
__kmp_change_thread_affinity_mask( int gtid, kmp_affin_mask_t *new_mask,
                                   kmp_affin_mask_t *old_mask )
{
    KMP_DEBUG_ASSERT( gtid == __kmp_get_gtid() );
    if ( KMP_AFFINITY_CAPABLE() ) {
        int status;
        kmp_info_t *th = __kmp_threads[ gtid ];

        KMP_DEBUG_ASSERT( new_mask != NULL );

        if ( old_mask != NULL ) {
            status = __kmp_get_system_affinity( old_mask, TRUE );
            int error = errno;
            if ( status != 0 ) {
                __kmp_msg(
                    kmp_ms_fatal,
                    KMP_MSG( ChangeThreadAffMaskError ),
                    KMP_ERR( error ),
                    __kmp_msg_null
                );
            }
        }

        __kmp_set_system_affinity( new_mask, TRUE );

        if (__kmp_affinity_verbose) {
            char old_buf[KMP_AFFIN_MASK_PRINT_LEN];
            char new_buf[KMP_AFFIN_MASK_PRINT_LEN];
            __kmp_affinity_print_mask(old_buf, KMP_AFFIN_MASK_PRINT_LEN, old_mask);
            __kmp_affinity_print_mask(new_buf, KMP_AFFIN_MASK_PRINT_LEN, new_mask);
            KMP_INFORM( ChangeAffMask, "KMP_AFFINITY (Bind)", gtid, old_buf, new_buf );
        }

        /* Make sure old value is correct in thread data structures */
        KMP_DEBUG_ASSERT( old_mask != NULL && (memcmp(old_mask,
          th->th.th_affin_mask, __kmp_affin_mask_size) == 0) );
        KMP_CPU_COPY( th->th.th_affin_mask, new_mask );
    }
}

#endif // KMP_OS_LINUX && KMP_AFFINITY_SUPPORTED

/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */

#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && !KMP_OS_CNK

kmp_int8
__kmp_test_then_add8( volatile kmp_int8 *p, kmp_int8 d )
{
    kmp_int8 old_value, new_value;

    old_value = TCR_1( *p );
    new_value = old_value + d;

    while ( ! KMP_COMPARE_AND_STORE_REL8 ( p, old_value, new_value ) )
    {
        KMP_CPU_PAUSE();
        old_value = TCR_1( *p );
        new_value = old_value + d;
    }
    return old_value;
}

int
__kmp_futex_determine_capable()
{
    int loc = 0;
    int rc = syscall( __NR_futex, &loc, FUTEX_WAKE, 1, NULL, NULL, 0 );
    int retval = ( rc == 0 ) || ( errno != ENOSYS );

    KA_TRACE(10, ( "__kmp_futex_determine_capable: rc = %d errno = %d\n", rc,
      errno ) );
    KA_TRACE(10, ( "__kmp_futex_determine_capable: futex syscall%s supported\n",
        retval ? "" : " not" ) );

    return retval;
}

#endif // KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && !KMP_OS_CNK

/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */

#if (KMP_ARCH_X86 || KMP_ARCH_X86_64) && (! KMP_ASM_INTRINS)
/*
 * The IA-32 architecture only provides an "add-exchange" instruction for
 * 32-bit operands, so we implement the routines below with
 * compare_and_store loops.
 */

kmp_int8
__kmp_test_then_or8( volatile kmp_int8 *p, kmp_int8 d )
{
    kmp_int8 old_value, new_value;

    old_value = TCR_1( *p );
    new_value = old_value | d;

    while ( ! KMP_COMPARE_AND_STORE_REL8 ( p, old_value, new_value ) )
    {
        KMP_CPU_PAUSE();
        old_value = TCR_1( *p );
        new_value = old_value | d;
    }
    return old_value;
}

kmp_int8
__kmp_test_then_and8( volatile kmp_int8 *p, kmp_int8 d )
{
    kmp_int8 old_value, new_value;

    old_value = TCR_1( *p );
    new_value = old_value & d;

    while ( ! KMP_COMPARE_AND_STORE_REL8 ( p, old_value, new_value ) )
    {
        KMP_CPU_PAUSE();
        old_value = TCR_1( *p );
        new_value = old_value & d;
    }
    return old_value;
}

kmp_int32
__kmp_test_then_or32( volatile kmp_int32 *p, kmp_int32 d )
{
    kmp_int32 old_value, new_value;

    old_value = TCR_4( *p );
    new_value = old_value | d;

    while ( ! KMP_COMPARE_AND_STORE_REL32 ( p, old_value, new_value ) )
    {
        KMP_CPU_PAUSE();
        old_value = TCR_4( *p );
        new_value = old_value | d;
    }
    return old_value;
}

kmp_int32
__kmp_test_then_and32( volatile kmp_int32 *p, kmp_int32 d )
{
    kmp_int32 old_value, new_value;

    old_value = TCR_4( *p );
    new_value = old_value & d;

    while ( ! KMP_COMPARE_AND_STORE_REL32 ( p, old_value, new_value ) )
    {
        KMP_CPU_PAUSE();
        old_value = TCR_4( *p );
        new_value = old_value & d;
    }
    return old_value;
}

# if KMP_ARCH_X86 || KMP_ARCH_PPC64 || KMP_ARCH_AARCH64
kmp_int64
__kmp_test_then_add64( volatile kmp_int64 *p, kmp_int64 d )
{
    kmp_int64 old_value, new_value;

    old_value = TCR_8( *p );
    new_value = old_value + d;

    while ( ! KMP_COMPARE_AND_STORE_REL64 ( p, old_value, new_value ) )
    {
        KMP_CPU_PAUSE();
        old_value = TCR_8( *p );
        new_value = old_value + d;
    }
    return old_value;
}
# endif /* KMP_ARCH_X86 || KMP_ARCH_PPC64 || KMP_ARCH_AARCH64 */

kmp_int64
__kmp_test_then_or64( volatile kmp_int64 *p, kmp_int64 d )
{
    kmp_int64 old_value, new_value;

    old_value = TCR_8( *p );
    new_value = old_value | d;
    while ( ! KMP_COMPARE_AND_STORE_REL64 ( p, old_value, new_value ) )
    {
        KMP_CPU_PAUSE();
        old_value = TCR_8( *p );
        new_value = old_value | d;
    }
    return old_value;
}

kmp_int64
__kmp_test_then_and64( volatile kmp_int64 *p, kmp_int64 d )
{
    kmp_int64 old_value, new_value;

    old_value = TCR_8( *p );
    new_value = old_value & d;
    while ( ! KMP_COMPARE_AND_STORE_REL64 ( p, old_value, new_value ) )
    {
        KMP_CPU_PAUSE();
        old_value = TCR_8( *p );
        new_value = old_value & d;
    }
    return old_value;
}

#endif /* (KMP_ARCH_X86 || KMP_ARCH_X86_64) && (! KMP_ASM_INTRINS) */
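// Illustrative sketch (not part of the library): on compilers with C11
// atomics, the same fetch-or pattern collapses to a single builtin, e.g.
//
//     #include <stdatomic.h>
//
//     long fetch_or_example( _Atomic long *p, long d ) {
//         // Equivalent to the CAS loops above, including the release ordering.
//         return atomic_fetch_or_explicit( p, d, memory_order_release );
//     }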

void
__kmp_terminate_thread( int gtid )
{
    int status;
    kmp_info_t *th = __kmp_threads[ gtid ];

    if ( !th ) return;

    #ifdef KMP_CANCEL_THREADS
        KA_TRACE( 10, ("__kmp_terminate_thread: kill (%d)\n", gtid ) );
        status = pthread_cancel( th->th.th_info.ds.ds_thread );
        if ( status != 0 && status != ESRCH ) {
            __kmp_msg(
                kmp_ms_fatal,
                KMP_MSG( CantTerminateWorkerThread ),
                KMP_ERR( status ),
                __kmp_msg_null
            );
        }; // if
    #endif
    __kmp_yield( TRUE );
} // __kmp_terminate_thread

/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */

/*
 * Set thread stack info according to values returned by
 * pthread_getattr_np().
 * If values are unreasonable, assume call failed and use
 * incremental stack refinement method instead.
 * Returns TRUE if the stack parameters could be determined exactly,
 * FALSE if incremental refinement is necessary.
 */
static kmp_int32
__kmp_set_stack_info( int gtid, kmp_info_t *th )
{
    int            stack_data;
#if KMP_OS_LINUX || KMP_OS_FREEBSD
    /* Linux* OS only -- there is no pthread_getattr_np support on OS X*;
       FreeBSD spells it pthread_attr_get_np. */
    pthread_attr_t attr;
    int            status;
    size_t         size = 0;
    void *         addr = 0;

    /* Always do incremental stack refinement for ubermaster threads since the initial
       thread stack range can be reduced by sibling thread creation so pthread_attr_getstack
       may cause thread gtid aliasing */
    if ( ! KMP_UBER_GTID(gtid) ) {

        /* Fetch the real thread attributes */
        status = pthread_attr_init( &attr );
        KMP_CHECK_SYSFAIL( "pthread_attr_init", status );
#if KMP_OS_FREEBSD
        status = pthread_attr_get_np( pthread_self(), &attr );
        KMP_CHECK_SYSFAIL( "pthread_attr_get_np", status );
#else
        status = pthread_getattr_np( pthread_self(), &attr );
        KMP_CHECK_SYSFAIL( "pthread_getattr_np", status );
#endif
        status = pthread_attr_getstack( &attr, &addr, &size );
        KMP_CHECK_SYSFAIL( "pthread_attr_getstack", status );
        KA_TRACE( 60, ( "__kmp_set_stack_info: T#%d pthread_attr_getstack returned size: %lu, "
                        "low addr: %p\n",
                        gtid, size, addr ));

        status = pthread_attr_destroy( &attr );
        KMP_CHECK_SYSFAIL( "pthread_attr_destroy", status );
    }

    if ( size != 0 && addr != 0 ) {     /* was stack parameter determination successful? */
        /* Store the correct base and size */
        TCW_PTR(th->th.th_info.ds.ds_stackbase, (((char *)addr) + size));
        TCW_PTR(th->th.th_info.ds.ds_stacksize, size);
        TCW_4(th->th.th_info.ds.ds_stackgrow, FALSE);
        return TRUE;
    }
#endif /* KMP_OS_LINUX || KMP_OS_FREEBSD */
    /* Use incremental refinement starting from initial conservative estimate */
    TCW_PTR(th->th.th_info.ds.ds_stacksize, 0);
    TCW_PTR(th -> th.th_info.ds.ds_stackbase, &stack_data);
    TCW_4(th->th.th_info.ds.ds_stackgrow, TRUE);
    return FALSE;
}
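// Illustrative sketch (not part of the library): querying the current
// thread's stack outside the runtime uses the same calls.
//
//     #define _GNU_SOURCE
//     #include <pthread.h>
//     #include <stdio.h>
//
//     void print_my_stack( void ) {
//         pthread_attr_t attr;
//         void *addr; size_t size;
//         if ( pthread_getattr_np( pthread_self(), &attr ) == 0 ) {
//             pthread_attr_getstack( &attr, &addr, &size );
//             printf( "stack: low=%p size=%zu\n", addr, size );
//             pthread_attr_destroy( &attr );
//         }
//     }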

static void*
__kmp_launch_worker( void *thr )
{
    int status, old_type, old_state;
#ifdef KMP_BLOCK_SIGNALS
    sigset_t new_set, old_set;
#endif /* KMP_BLOCK_SIGNALS */
    void *exit_val;
    void *padding = 0;
    int gtid;
    int error;

    gtid = ((kmp_info_t*)thr) -> th.th_info.ds.ds_gtid;
    __kmp_gtid_set_specific( gtid );
#ifdef KMP_TDATA_GTID
    __kmp_gtid = gtid;
#endif
#if KMP_STATS_ENABLED
    // set __thread local index to point to thread-specific stats
    __kmp_stats_thread_ptr = ((kmp_info_t*)thr)->th.th_stats;
#endif

#if USE_ITT_BUILD
    __kmp_itt_thread_name( gtid );
#endif /* USE_ITT_BUILD */

#if KMP_AFFINITY_SUPPORTED
    __kmp_affinity_set_init_mask( gtid, FALSE );
#endif

#ifdef KMP_CANCEL_THREADS
    status = pthread_setcanceltype( PTHREAD_CANCEL_ASYNCHRONOUS, & old_type );
    KMP_CHECK_SYSFAIL( "pthread_setcanceltype", status );
    /* josh todo: isn't PTHREAD_CANCEL_ENABLE default for newly-created threads? */
    status = pthread_setcancelstate( PTHREAD_CANCEL_ENABLE, & old_state );
    KMP_CHECK_SYSFAIL( "pthread_setcancelstate", status );
#endif

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
    //
    // Set the FP control regs to be a copy of
    // the parallel initialization thread's.
    //
    __kmp_clear_x87_fpu_status_word();
    __kmp_load_x87_fpu_control_word( &__kmp_init_x87_fpu_control_word );
    __kmp_load_mxcsr( &__kmp_init_mxcsr );
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

#ifdef KMP_BLOCK_SIGNALS
    status = sigfillset( & new_set );
    KMP_CHECK_SYSFAIL_ERRNO( "sigfillset", status );
    status = pthread_sigmask( SIG_BLOCK, & new_set, & old_set );
    KMP_CHECK_SYSFAIL( "pthread_sigmask", status );
#endif /* KMP_BLOCK_SIGNALS */

#if KMP_OS_LINUX || KMP_OS_FREEBSD
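    // Stagger each worker's stack base by gtid * __kmp_stkoffset bytes so that
    // hot data near the top of different workers' stacks does not alias in
    // set-associative caches; the alloca'd padding is never referenced again.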
    if ( __kmp_stkoffset > 0 && gtid > 0 ) {
        padding = alloca( gtid * __kmp_stkoffset );
    }
#endif

    KMP_MB();
    __kmp_set_stack_info( gtid, (kmp_info_t*)thr );

    __kmp_check_stack_overlap( (kmp_info_t*)thr );

    exit_val = __kmp_launch_thread( (kmp_info_t *) thr );

#ifdef KMP_BLOCK_SIGNALS
    status = pthread_sigmask( SIG_SETMASK, & old_set, NULL );
    KMP_CHECK_SYSFAIL( "pthread_sigmask", status );
#endif /* KMP_BLOCK_SIGNALS */

    return exit_val;
}


/* The monitor thread controls all of the threads in the complex */

static void*
__kmp_launch_monitor( void *thr )
{
    int         status, old_type, old_state;
#ifdef KMP_BLOCK_SIGNALS
    sigset_t    new_set;
#endif /* KMP_BLOCK_SIGNALS */
    struct timespec interval;
    int yield_count;
    int yield_cycles = 0;
    int error;

    KMP_MB();       /* Flush all pending memory write invalidates.  */

    KA_TRACE( 10, ("__kmp_launch_monitor: #1 launched\n" ) );

    /* register us as the monitor thread */
    __kmp_gtid_set_specific( KMP_GTID_MONITOR );
#ifdef KMP_TDATA_GTID
    __kmp_gtid = KMP_GTID_MONITOR;
#endif

    KMP_MB();

#if USE_ITT_BUILD
    __kmp_itt_thread_ignore();    // Instruct Intel(R) Threading Tools to ignore monitor thread.
#endif /* USE_ITT_BUILD */

    __kmp_set_stack_info( ((kmp_info_t*)thr)->th.th_info.ds.ds_gtid, (kmp_info_t*)thr );

    __kmp_check_stack_overlap( (kmp_info_t*)thr );

#ifdef KMP_CANCEL_THREADS
    status = pthread_setcanceltype( PTHREAD_CANCEL_ASYNCHRONOUS, & old_type );
    KMP_CHECK_SYSFAIL( "pthread_setcanceltype", status );
    /* josh todo: isn't PTHREAD_CANCEL_ENABLE default for newly-created threads? */
    status = pthread_setcancelstate( PTHREAD_CANCEL_ENABLE, & old_state );
    KMP_CHECK_SYSFAIL( "pthread_setcancelstate", status );
#endif

    #if KMP_REAL_TIME_FIX
    // This is a potential fix which allows applications with a real-time scheduling
    // policy to work. However, a decision about the fix has not been made yet, so it
    // is disabled by default.
    { // Was the program started with a real-time scheduling policy?
        int sched = sched_getscheduler( 0 );
        if ( sched == SCHED_FIFO || sched == SCHED_RR ) {
            // Yes, we are a part of a real-time application. Try to increase the
            // priority of the monitor.
            struct sched_param param;
            int max_priority = sched_get_priority_max( sched );
            int rc;
            KMP_WARNING( RealTimeSchedNotSupported );
            sched_getparam( 0, & param );
            if ( param.sched_priority < max_priority ) {
                param.sched_priority += 1;
                rc = sched_setscheduler( 0, sched, & param );
                if ( rc != 0 ) {
                    int error = errno;
                    __kmp_msg(
                        kmp_ms_warning,
                        KMP_MSG( CantChangeMonitorPriority ),
                        KMP_ERR( error ),
                        KMP_MSG( MonitorWillStarve ),
                        __kmp_msg_null
                    );
                }; // if
            } else {
                // We cannot abort here, because the number of CPUs may be enough for
                // all the threads, including the monitor thread, so the application
                // could potentially work...
                __kmp_msg(
                    kmp_ms_warning,
                    KMP_MSG( RunningAtMaxPriority ),
                    KMP_MSG( MonitorWillStarve ),
                    KMP_HNT( RunningAtMaxPriority ),
                    __kmp_msg_null
                );
            }; // if
        }; // if
        TCW_4( __kmp_global.g.g_time.dt.t_value, 0 );  // AC: free thread that waits for monitor started
    }
    #endif // KMP_REAL_TIME_FIX

    KMP_MB();       /* Flush all pending memory write invalidates.  */

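    // The monitor wakes up __kmp_monitor_wakeups times per second; e.g. a
    // value of 10 yields a 100 ms interval below.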
    if ( __kmp_monitor_wakeups == 1 ) {
        interval.tv_sec  = 1;
        interval.tv_nsec = 0;
    } else {
        interval.tv_sec  = 0;
        interval.tv_nsec = (NSEC_PER_SEC / __kmp_monitor_wakeups);
    }

    KA_TRACE( 10, ("__kmp_launch_monitor: #2 monitor\n" ) );

    if (__kmp_yield_cycle) {
        __kmp_yielding_on = 0;  /* Start out with yielding shut off */
        yield_count = __kmp_yield_off_count;
    } else {
        __kmp_yielding_on = 1;  /* Yielding is on permanently */
    }

    while( ! TCR_4( __kmp_global.g.g_done ) ) {
        struct timespec  now;
        struct timeval   tval;

        /*  This thread monitors the state of the system */

        KA_TRACE( 15, ( "__kmp_launch_monitor: update\n" ) );

        status = gettimeofday( &tval, NULL );
        KMP_CHECK_SYSFAIL_ERRNO( "gettimeofday", status );
        TIMEVAL_TO_TIMESPEC( &tval, &now );

        now.tv_sec  += interval.tv_sec;
        now.tv_nsec += interval.tv_nsec;

        if (now.tv_nsec >= NSEC_PER_SEC) {
            now.tv_sec  += 1;
            now.tv_nsec -= NSEC_PER_SEC;
        }

        status = pthread_mutex_lock( & __kmp_wait_mx.m_mutex );
        KMP_CHECK_SYSFAIL( "pthread_mutex_lock", status );
        // AC: the monitor should not fall asleep if g_done has been set
        if ( !TCR_4(__kmp_global.g.g_done) ) {  // check once more under mutex
            status = pthread_cond_timedwait( &__kmp_wait_cv.c_cond, &__kmp_wait_mx.m_mutex, &now );
            if ( status != 0 ) {
                if ( status != ETIMEDOUT && status != EINTR ) {
                    KMP_SYSFAIL( "pthread_cond_timedwait", status );
                };
            };
        };
        status = pthread_mutex_unlock( & __kmp_wait_mx.m_mutex );
        KMP_CHECK_SYSFAIL( "pthread_mutex_unlock", status );

        if (__kmp_yield_cycle) {
            yield_cycles++;
            if ( (yield_cycles % yield_count) == 0 ) {
                if (__kmp_yielding_on) {
                    __kmp_yielding_on = 0;  /* Turn it off now */
                    yield_count = __kmp_yield_off_count;
                } else {
                    __kmp_yielding_on = 1;  /* Turn it on now */
                    yield_count = __kmp_yield_on_count;
                }
                yield_cycles = 0;
            }
        } else {
            __kmp_yielding_on = 1;
        }

        TCW_4( __kmp_global.g.g_time.dt.t_value,
          TCR_4( __kmp_global.g.g_time.dt.t_value ) + 1 );

        KMP_MB();       /* Flush all pending memory write invalidates.  */
    }

    KA_TRACE( 10, ("__kmp_launch_monitor: #3 cleanup\n" ) );

#ifdef KMP_BLOCK_SIGNALS
    status = sigfillset( & new_set );
    KMP_CHECK_SYSFAIL_ERRNO( "sigfillset", status );
    status = pthread_sigmask( SIG_UNBLOCK, & new_set, NULL );
    KMP_CHECK_SYSFAIL( "pthread_sigmask", status );
#endif /* KMP_BLOCK_SIGNALS */

    KA_TRACE( 10, ("__kmp_launch_monitor: #4 finished\n" ) );

    if( __kmp_global.g.g_abort != 0 ) {
        /* now we need to terminate the worker threads  */
        /* the value of t_abort is the signal we caught */

        int gtid;

        KA_TRACE( 10, ("__kmp_launch_monitor: #5 terminate sig=%d\n", __kmp_global.g.g_abort ) );

        /* terminate the OpenMP worker threads */
        /* TODO this is not valid for sibling threads!!
         * the uber master might not be 0 anymore.. */
        for (gtid = 1; gtid < __kmp_threads_capacity; ++gtid)
            __kmp_terminate_thread( gtid );

        __kmp_cleanup();

        KA_TRACE( 10, ("__kmp_launch_monitor: #6 raise sig=%d\n", __kmp_global.g.g_abort ) );

        if (__kmp_global.g.g_abort > 0)
            raise( __kmp_global.g.g_abort );

    }

    KA_TRACE( 10, ("__kmp_launch_monitor: #7 exit\n" ) );

    return thr;
}

void
__kmp_create_worker( int gtid, kmp_info_t *th, size_t stack_size )
{
    pthread_t      handle;
    pthread_attr_t thread_attr;
    int            status;


    th->th.th_info.ds.ds_gtid = gtid;

#if KMP_STATS_ENABLED
    // sets up worker thread stats
    __kmp_acquire_tas_lock(&__kmp_stats_lock, gtid);

    // th->th.th_stats is used to transfer thread-specific stats-pointer to __kmp_launch_worker.
    // So when the thread is created (goes into __kmp_launch_worker) it will
    // set its __thread local pointer to th->th.th_stats
    th->th.th_stats = __kmp_stats_list.push_back(gtid);
    if(KMP_UBER_GTID(gtid)) {
        __kmp_stats_start_time = tsc_tick_count::now();
        __kmp_stats_thread_ptr = th->th.th_stats;
        __kmp_stats_init();
        KMP_START_EXPLICIT_TIMER(OMP_serial);
        KMP_START_EXPLICIT_TIMER(OMP_start_end);
    }
    __kmp_release_tas_lock(&__kmp_stats_lock, gtid);

#endif // KMP_STATS_ENABLED

    if ( KMP_UBER_GTID(gtid) ) {
        KA_TRACE( 10, ("__kmp_create_worker: uber thread (%d)\n", gtid ) );
        th -> th.th_info.ds.ds_thread = pthread_self();
        __kmp_set_stack_info( gtid, th );
        __kmp_check_stack_overlap( th );
        return;
    }; // if

    KA_TRACE( 10, ("__kmp_create_worker: try to create thread (%d)\n", gtid ) );

    KMP_MB();       /* Flush all pending memory write invalidates.  */

#ifdef KMP_THREAD_ATTR
    {
        status = pthread_attr_init( &thread_attr );
        if ( status != 0 ) {
            __kmp_msg(
                kmp_ms_fatal,
                KMP_MSG( CantInitThreadAttrs ),
                KMP_ERR( status ),
                __kmp_msg_null
            );
        }; // if
        status = pthread_attr_setdetachstate( & thread_attr, PTHREAD_CREATE_JOINABLE );
        if ( status != 0 ) {
            __kmp_msg(
                kmp_ms_fatal,
                KMP_MSG( CantSetWorkerState ),
                KMP_ERR( status ),
                __kmp_msg_null
            );
        }; // if

        /* Set stack size for this thread now. */
        stack_size += gtid * __kmp_stkoffset;

        KA_TRACE( 10, ( "__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
                        "__kmp_stksize = %lu bytes, final stacksize = %lu bytes\n",
                        gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size ) );

# ifdef _POSIX_THREAD_ATTR_STACKSIZE
        status = pthread_attr_setstacksize( & thread_attr, stack_size );
#  ifdef KMP_BACKUP_STKSIZE
        if ( status != 0 ) {
            if ( ! __kmp_env_stksize ) {
                stack_size = KMP_BACKUP_STKSIZE + gtid * __kmp_stkoffset;
                __kmp_stksize = KMP_BACKUP_STKSIZE;
                KA_TRACE( 10, ("__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
                               "__kmp_stksize = %lu bytes, (backup) final stacksize = %lu "
                               "bytes\n",
                               gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size )
                          );
                status = pthread_attr_setstacksize( &thread_attr, stack_size );
            }; // if
        }; // if
#  endif /* KMP_BACKUP_STKSIZE */
        if ( status != 0 ) {
            __kmp_msg(
                kmp_ms_fatal,
                KMP_MSG( CantSetWorkerStackSize, stack_size ),
                KMP_ERR( status ),
                KMP_HNT( ChangeWorkerStackSize ),
                __kmp_msg_null
            );
        }; // if
# endif /* _POSIX_THREAD_ATTR_STACKSIZE */
    }
#endif /* KMP_THREAD_ATTR */

    {
        status = pthread_create( & handle, & thread_attr, __kmp_launch_worker, (void *) th );
        if ( status != 0 || ! handle ) { // ??? Why do we check handle??
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
            if ( status == EINVAL ) {
                __kmp_msg(
                    kmp_ms_fatal,
                    KMP_MSG( CantSetWorkerStackSize, stack_size ),
                    KMP_ERR( status ),
                    KMP_HNT( IncreaseWorkerStackSize ),
                    __kmp_msg_null
                );
            };
            if ( status == ENOMEM ) {
                __kmp_msg(
                    kmp_ms_fatal,
                    KMP_MSG( CantSetWorkerStackSize, stack_size ),
                    KMP_ERR( status ),
                    KMP_HNT( DecreaseWorkerStackSize ),
                    __kmp_msg_null
                );
            };
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
            if ( status == EAGAIN ) {
                __kmp_msg(
                    kmp_ms_fatal,
                    KMP_MSG( NoResourcesForWorkerThread ),
                    KMP_ERR( status ),
                    KMP_HNT( Decrease_NUM_THREADS ),
                    __kmp_msg_null
                );
            }; // if
            KMP_SYSFAIL( "pthread_create", status );
        }; // if

        th->th.th_info.ds.ds_thread = handle;
    }

#ifdef KMP_THREAD_ATTR
    {
        status = pthread_attr_destroy( & thread_attr );
        if ( status ) {
            __kmp_msg(
                kmp_ms_warning,
                KMP_MSG( CantDestroyThreadAttrs ),
                KMP_ERR( status ),
                __kmp_msg_null
            );
        }; // if
    }
#endif /* KMP_THREAD_ATTR */

    KMP_MB();       /* Flush all pending memory write invalidates.  */

    KA_TRACE( 10, ("__kmp_create_worker: done creating thread (%d)\n", gtid ) );

} // __kmp_create_worker


void
__kmp_create_monitor( kmp_info_t *th )
{
    pthread_t           handle;
    pthread_attr_t      thread_attr;
    size_t              size;
    int                 status;
    int                 caller_gtid = __kmp_get_gtid();
    int                 auto_adj_size = FALSE;

    KA_TRACE( 10, ("__kmp_create_monitor: try to create monitor\n" ) );

    KMP_MB();       /* Flush all pending memory write invalidates.  */

    th->th.th_info.ds.ds_tid  = KMP_GTID_MONITOR;
    th->th.th_info.ds.ds_gtid = KMP_GTID_MONITOR;
    #if KMP_REAL_TIME_FIX
        TCW_4( __kmp_global.g.g_time.dt.t_value, -1 );  // Will use it for synchronization a bit later.
    #else
        TCW_4( __kmp_global.g.g_time.dt.t_value, 0 );
    #endif // KMP_REAL_TIME_FIX

    #ifdef KMP_THREAD_ATTR
        if ( __kmp_monitor_stksize == 0 ) {
            __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE;
            auto_adj_size = TRUE;
        }
        status = pthread_attr_init( &thread_attr );
        if ( status != 0 ) {
            __kmp_msg(
                kmp_ms_fatal,
                KMP_MSG( CantInitThreadAttrs ),
                KMP_ERR( status ),
                __kmp_msg_null
            );
        }; // if
        status = pthread_attr_setdetachstate( & thread_attr, PTHREAD_CREATE_JOINABLE );
        if ( status != 0 ) {
            __kmp_msg(
                kmp_ms_fatal,
                KMP_MSG( CantSetMonitorState ),
                KMP_ERR( status ),
                __kmp_msg_null
            );
        }; // if

        #ifdef _POSIX_THREAD_ATTR_STACKSIZE
            status = pthread_attr_getstacksize( & thread_attr, & size );
            KMP_CHECK_SYSFAIL( "pthread_attr_getstacksize", status );
        #else
            size = __kmp_sys_min_stksize;
        #endif /* _POSIX_THREAD_ATTR_STACKSIZE */
    #endif /* KMP_THREAD_ATTR */

    if ( __kmp_monitor_stksize == 0 ) {
        __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE;
    }
    if ( __kmp_monitor_stksize < __kmp_sys_min_stksize ) {
        __kmp_monitor_stksize = __kmp_sys_min_stksize;
    }

    KA_TRACE( 10, ( "__kmp_create_monitor: default stacksize = %lu bytes, "
                    "requested stacksize = %lu bytes\n",
                    size, __kmp_monitor_stksize ) );

    retry:

    /* Set stack size for this thread now. */

    #ifdef _POSIX_THREAD_ATTR_STACKSIZE
        KA_TRACE( 10, ( "__kmp_create_monitor: setting stacksize = %lu bytes\n",
                        __kmp_monitor_stksize ) );
        status = pthread_attr_setstacksize( & thread_attr, __kmp_monitor_stksize );
        if ( status != 0 ) {
            if ( auto_adj_size ) {
                __kmp_monitor_stksize *= 2;
                goto retry;
            }
            __kmp_msg(
                kmp_ms_warning,  // should this be fatal?  BB
                KMP_MSG( CantSetMonitorStackSize, (long int) __kmp_monitor_stksize ),
                KMP_ERR( status ),
                KMP_HNT( ChangeMonitorStackSize ),
                __kmp_msg_null
            );
        }; // if
    #endif /* _POSIX_THREAD_ATTR_STACKSIZE */

    status = pthread_create( &handle, & thread_attr, __kmp_launch_monitor, (void *) th );

    if ( status != 0 ) {
        #ifdef _POSIX_THREAD_ATTR_STACKSIZE
            if ( status == EINVAL ) {
                if ( auto_adj_size && ( __kmp_monitor_stksize < (size_t)0x40000000 ) ) {
                    __kmp_monitor_stksize *= 2;
                    goto retry;
                }
                __kmp_msg(
                    kmp_ms_fatal,
                    KMP_MSG( CantSetMonitorStackSize, __kmp_monitor_stksize ),
                    KMP_ERR( status ),
                    KMP_HNT( IncreaseMonitorStackSize ),
                    __kmp_msg_null
                );
            }; // if
            if ( status == ENOMEM ) {
                __kmp_msg(
                    kmp_ms_fatal,
                    KMP_MSG( CantSetMonitorStackSize, __kmp_monitor_stksize ),
                    KMP_ERR( status ),
                    KMP_HNT( DecreaseMonitorStackSize ),
                    __kmp_msg_null
                );
            }; // if
        #endif /* _POSIX_THREAD_ATTR_STACKSIZE */
        if ( status == EAGAIN ) {
            __kmp_msg(
                kmp_ms_fatal,
                KMP_MSG( NoResourcesForMonitorThread ),
                KMP_ERR( status ),
                KMP_HNT( DecreaseNumberOfThreadsInUse ),
                __kmp_msg_null
            );
        }; // if
        KMP_SYSFAIL( "pthread_create", status );
    }; // if

    th->th.th_info.ds.ds_thread = handle;

    #if KMP_REAL_TIME_FIX
        // Wait until the monitor thread has really started and set its *priority*.
        KMP_DEBUG_ASSERT( sizeof( kmp_uint32 ) == sizeof( __kmp_global.g.g_time.dt.t_value ) );
        __kmp_wait_yield_4(
            (kmp_uint32 volatile *) & __kmp_global.g.g_time.dt.t_value, -1, & __kmp_neq_4, NULL
        );
    #endif // KMP_REAL_TIME_FIX

    #ifdef KMP_THREAD_ATTR
        status = pthread_attr_destroy( & thread_attr );
        if ( status != 0 ) {
            __kmp_msg(
                kmp_ms_warning,
                KMP_MSG( CantDestroyThreadAttrs ),
                KMP_ERR( status ),
                __kmp_msg_null
            );
        }; // if
    #endif

    KMP_MB();       /* Flush all pending memory write invalidates.  */

    KA_TRACE( 10, ( "__kmp_create_monitor: monitor created %#.8lx\n", th->th.th_info.ds.ds_thread ) );

} // __kmp_create_monitor

void
__kmp_exit_thread(
    int exit_status
) {
    pthread_exit( (void *)(intptr_t) exit_status );
} // __kmp_exit_thread

void __kmp_resume_monitor();

void
__kmp_reap_monitor( kmp_info_t *th )
{
    int          status, i;
    void        *exit_val;

    KA_TRACE( 10, ("__kmp_reap_monitor: try to reap monitor thread with handle %#.8lx\n",
                   th->th.th_info.ds.ds_thread ) );

    // If monitor has been created, its tid and gtid should be KMP_GTID_MONITOR.
    // If both tid and gtid are 0, it means the monitor did not ever start.
    // If both tid and gtid are KMP_GTID_DNE, the monitor has been shut down.
    KMP_DEBUG_ASSERT( th->th.th_info.ds.ds_tid == th->th.th_info.ds.ds_gtid );
    if ( th->th.th_info.ds.ds_gtid != KMP_GTID_MONITOR ) {
        return;
    }; // if

    KMP_MB();       /* Flush all pending memory write invalidates.  */


    /* First, check to see whether the monitor thread exists. This could prevent a hang,
       but if the monitor dies after the pthread_kill call and before the pthread_join
       call, it will still hang. */

    status = pthread_kill( th->th.th_info.ds.ds_thread, 0 );
    if (status == ESRCH) {

        KA_TRACE( 10, ("__kmp_reap_monitor: monitor does not exist, returning\n") );

    } else
    {
        __kmp_resume_monitor();   // Wake up the monitor thread
        status = pthread_join( th->th.th_info.ds.ds_thread, & exit_val);
        if (exit_val != th) {
            __kmp_msg(
                kmp_ms_fatal,
                KMP_MSG( ReapMonitorError ),
                KMP_ERR( status ),
                __kmp_msg_null
            );
        }
    }

    th->th.th_info.ds.ds_tid  = KMP_GTID_DNE;
    th->th.th_info.ds.ds_gtid = KMP_GTID_DNE;

    KA_TRACE( 10, ("__kmp_reap_monitor: done reaping monitor thread with handle %#.8lx\n",
                   th->th.th_info.ds.ds_thread ) );

    KMP_MB();       /* Flush all pending memory write invalidates.  */

}

void
__kmp_reap_worker( kmp_info_t *th )
{
    int          status;
    void        *exit_val;

    KMP_MB();       /* Flush all pending memory write invalidates.  */

    KA_TRACE( 10, ("__kmp_reap_worker: try to reap T#%d\n", th->th.th_info.ds.ds_gtid ) );

    /* First, check to see whether the worker thread exists. This could prevent a hang,
       but if the worker dies after the pthread_kill call and before the pthread_join
       call, it will still hang. */

    {
        status = pthread_kill( th->th.th_info.ds.ds_thread, 0 );
        if (status == ESRCH) {
            KA_TRACE( 10, ("__kmp_reap_worker: worker T#%d does not exist, returning\n",
                           th->th.th_info.ds.ds_gtid ) );
        }
        else {
            KA_TRACE( 10, ("__kmp_reap_worker: try to join with worker T#%d\n",
                           th->th.th_info.ds.ds_gtid ) );

            status = pthread_join( th->th.th_info.ds.ds_thread, & exit_val);
#ifdef KMP_DEBUG
            /* Don't expose these to the user until we understand when they trigger */
            if ( status != 0 ) {
                __kmp_msg(
                    kmp_ms_fatal,
                    KMP_MSG( ReapWorkerError ),
                    KMP_ERR( status ),
                    __kmp_msg_null
                );
            }
            if ( exit_val != th ) {
                KA_TRACE( 10, ( "__kmp_reap_worker: worker T#%d did not reap properly, "
                                "exit_val = %p\n",
                                th->th.th_info.ds.ds_gtid, exit_val ) );
            }
#endif /* KMP_DEBUG */
        }
    }

    KA_TRACE( 10, ("__kmp_reap_worker: done reaping T#%d\n", th->th.th_info.ds.ds_gtid ) );

    KMP_MB();       /* Flush all pending memory write invalidates.  */
}


/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */

#if KMP_HANDLE_SIGNALS


static void
__kmp_null_handler( int signo )
{
    //  Do nothing, for doing SIG_IGN-type actions.
} // __kmp_null_handler


static void
__kmp_team_handler( int signo )
{
    if ( __kmp_global.g.g_abort == 0 ) {
        /* Stage 1 signal handler, let's shut down all of the threads */
        #ifdef KMP_DEBUG
            __kmp_debug_printf( "__kmp_team_handler: caught signal = %d\n", signo );
        #endif
        switch ( signo ) {
            case SIGHUP  :
            case SIGINT  :
            case SIGQUIT :
            case SIGILL  :
            case SIGABRT :
            case SIGFPE  :
            case SIGBUS  :
            case SIGSEGV :
            #ifdef SIGSYS
                case SIGSYS :
            #endif
            case SIGTERM :
                if ( __kmp_debug_buf ) {
                    __kmp_dump_debug_buffer( );
                }; // if
                KMP_MB();       // Flush all pending memory write invalidates.
                TCW_4( __kmp_global.g.g_abort, signo );
                KMP_MB();       // Flush all pending memory write invalidates.
                TCW_4( __kmp_global.g.g_done, TRUE );
                KMP_MB();       // Flush all pending memory write invalidates.
                break;
            default:
                #ifdef KMP_DEBUG
                    __kmp_debug_printf( "__kmp_team_handler: unknown signal type" );
                #endif
                break;
        }; // switch
    }; // if
} // __kmp_team_handler


static
void __kmp_sigaction( int signum, const struct sigaction * act, struct sigaction * oldact ) {
    int rc = sigaction( signum, act, oldact );
    KMP_CHECK_SYSFAIL_ERRNO( "sigaction", rc );
}


static void
__kmp_install_one_handler( int sig, sig_func_t handler_func, int parallel_init )
{
    KMP_MB();       // Flush all pending memory write invalidates.
    KB_TRACE( 60, ( "__kmp_install_one_handler( %d, ..., %d )\n", sig, parallel_init ) );
    if ( parallel_init ) {
        struct sigaction new_action;
        struct sigaction old_action;
        new_action.sa_handler = handler_func;
        new_action.sa_flags   = 0;
        sigfillset( & new_action.sa_mask );
        __kmp_sigaction( sig, & new_action, & old_action );
        if ( old_action.sa_handler == __kmp_sighldrs[ sig ].sa_handler ) {
            sigaddset( & __kmp_sigset, sig );
        } else {
            // Restore/keep user's handler if one previously installed.
            __kmp_sigaction( sig, & old_action, NULL );
        }; // if
    } else {
        // Save initial/system signal handlers to see if user handlers installed.
        __kmp_sigaction( sig, NULL, & __kmp_sighldrs[ sig ] );
    }; // if
    KMP_MB();       // Flush all pending memory write invalidates.
} // __kmp_install_one_handler


static void
__kmp_remove_one_handler( int sig )
{
    KB_TRACE( 60, ( "__kmp_remove_one_handler( %d )\n", sig ) );
    if ( sigismember( & __kmp_sigset, sig ) ) {
        struct sigaction old;
        KMP_MB();       // Flush all pending memory write invalidates.
        __kmp_sigaction( sig, & __kmp_sighldrs[ sig ], & old );
        if ( ( old.sa_handler != __kmp_team_handler ) && ( old.sa_handler != __kmp_null_handler ) ) {
            // Restore the user's signal handler.
            KB_TRACE( 10, ( "__kmp_remove_one_handler: oops, not our handler, restoring: sig=%d\n", sig ) );
            __kmp_sigaction( sig, & old, NULL );
        }; // if
        sigdelset( & __kmp_sigset, sig );
        KMP_MB();       // Flush all pending memory write invalidates.
    }; // if
} // __kmp_remove_one_handler


void
__kmp_install_signals( int parallel_init )
{
    KB_TRACE( 10, ( "__kmp_install_signals( %d )\n", parallel_init ) );
    if ( __kmp_handle_signals || ! parallel_init ) {
        // If ! parallel_init, we do not install handlers, just save original handlers.
        // Let us do it even if __kmp_handle_signals is 0.
        sigemptyset( & __kmp_sigset );
        __kmp_install_one_handler( SIGHUP,  __kmp_team_handler, parallel_init );
        __kmp_install_one_handler( SIGINT,  __kmp_team_handler, parallel_init );
        __kmp_install_one_handler( SIGQUIT, __kmp_team_handler, parallel_init );
        __kmp_install_one_handler( SIGILL,  __kmp_team_handler, parallel_init );
        __kmp_install_one_handler( SIGABRT, __kmp_team_handler, parallel_init );
        __kmp_install_one_handler( SIGFPE,  __kmp_team_handler, parallel_init );
        __kmp_install_one_handler( SIGBUS,  __kmp_team_handler, parallel_init );
        __kmp_install_one_handler( SIGSEGV, __kmp_team_handler, parallel_init );
        #ifdef SIGSYS
            __kmp_install_one_handler( SIGSYS,  __kmp_team_handler, parallel_init );
        #endif // SIGSYS
        __kmp_install_one_handler( SIGTERM, __kmp_team_handler, parallel_init );
        #ifdef SIGPIPE
            __kmp_install_one_handler( SIGPIPE, __kmp_team_handler, parallel_init );
        #endif // SIGPIPE
    }; // if
} // __kmp_install_signals


void
__kmp_remove_signals( void )
{
    int    sig;
    KB_TRACE( 10, ( "__kmp_remove_signals()\n" ) );
    for ( sig = 1; sig < NSIG; ++ sig ) {
        __kmp_remove_one_handler( sig );
    }; // for sig
} // __kmp_remove_signals


#endif // KMP_HANDLE_SIGNALS

/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */

void
__kmp_enable( int new_state )
{
    #ifdef KMP_CANCEL_THREADS
        int status, old_state;
        status = pthread_setcancelstate( new_state, & old_state );
        KMP_CHECK_SYSFAIL( "pthread_setcancelstate", status );
        KMP_DEBUG_ASSERT( old_state == PTHREAD_CANCEL_DISABLE );
    #endif
}

void
__kmp_disable( int * old_state )
{
    #ifdef KMP_CANCEL_THREADS
        int status;
        status = pthread_setcancelstate( PTHREAD_CANCEL_DISABLE, old_state );
        KMP_CHECK_SYSFAIL( "pthread_setcancelstate", status );
    #endif
}

/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */

static void
__kmp_atfork_prepare (void)
{
    /*  nothing to do  */
}

static void
__kmp_atfork_parent (void)
{
    /*  nothing to do  */
}

/*
    Reset the library so execution in the child starts "all over again" with
    clean data structures in initial states.  Don't worry about freeing memory
    allocated by parent, just abandon it to be safe.
*/
static void
__kmp_atfork_child (void)
{
    /* TODO make sure this is done right for nested/sibling */
    // ATT:  Memory leaks are here? TODO: Check it and fix.
    /* KMP_ASSERT( 0 ); */

    ++__kmp_fork_count;

    __kmp_init_runtime = FALSE;
    __kmp_init_monitor = 0;
    __kmp_init_parallel = FALSE;
    __kmp_init_middle = FALSE;
    __kmp_init_serial = FALSE;
    TCW_4(__kmp_init_gtid, FALSE);
    __kmp_init_common = FALSE;

    TCW_4(__kmp_init_user_locks, FALSE);
#if ! KMP_USE_DYNAMIC_LOCK
    __kmp_user_lock_table.used = 1;
    __kmp_user_lock_table.allocated = 0;
    __kmp_user_lock_table.table = NULL;
    __kmp_lock_blocks = NULL;
#endif

    __kmp_all_nth = 0;
    TCW_4(__kmp_nth, 0);

    /* Must actually zero all the *cache arguments passed to __kmpc_threadprivate here
       so threadprivate doesn't use stale data */
    KA_TRACE( 10, ( "__kmp_atfork_child: checking cache address list %p\n",
                    __kmp_threadpriv_cache_list ) );

    while ( __kmp_threadpriv_cache_list != NULL ) {

        if ( *__kmp_threadpriv_cache_list -> addr != NULL ) {
            KC_TRACE( 50, ( "__kmp_atfork_child: zeroing cache at address %p\n",
                            &(*__kmp_threadpriv_cache_list -> addr) ) );

            *__kmp_threadpriv_cache_list -> addr = NULL;
        }
        __kmp_threadpriv_cache_list = __kmp_threadpriv_cache_list -> next;
    }

    __kmp_init_runtime = FALSE;

    /* reset statically initialized locks */
    __kmp_init_bootstrap_lock( &__kmp_initz_lock );
    __kmp_init_bootstrap_lock( &__kmp_stdio_lock );
    __kmp_init_bootstrap_lock( &__kmp_console_lock );

    /* This is necessary to make sure no stale data is left around */
    /* AC: customers complain that we use unsafe routines in the atfork
       handler. Mathworks: dlsym() is unsafe. We call dlsym and dlopen
       in dynamic_link when checking for the presence of the shared
       tbbmalloc library. The suggestion is to make the library
       initialization lazier, similar to what is done for __kmpc_begin(). */
    // TODO: synchronize all static initializations with regular library
    //       startup; look at kmp_global.c and etc.
    //__kmp_internal_begin ();

}

void
__kmp_register_atfork(void) {
    if ( __kmp_need_register_atfork ) {
        int status = pthread_atfork( __kmp_atfork_prepare, __kmp_atfork_parent, __kmp_atfork_child );
        KMP_CHECK_SYSFAIL( "pthread_atfork", status );
        __kmp_need_register_atfork = FALSE;
    }
}
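// Illustrative sketch (not part of the library): the same hook mechanism in a
// standalone program. The child handler runs in the forked child before
// fork() returns there, which is exactly how __kmp_atfork_child gets invoked.
//
//     #include <pthread.h>
//
//     static void child_reset( void ) {
//         // Reinitialize process-wide state the child must not share.
//     }
//
//     int register_reset_hook( void ) {
//         // prepare and parent handlers may be NULL.
//         return pthread_atfork( NULL, NULL, child_reset );
//     }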

void
__kmp_suspend_initialize( void )
{
    int status;
    status = pthread_mutexattr_init( &__kmp_suspend_mutex_attr );
    KMP_CHECK_SYSFAIL( "pthread_mutexattr_init", status );
    status = pthread_condattr_init( &__kmp_suspend_cond_attr );
    KMP_CHECK_SYSFAIL( "pthread_condattr_init", status );
}

static void
__kmp_suspend_initialize_thread( kmp_info_t *th )
{
    if ( th->th.th_suspend_init_count <= __kmp_fork_count ) {
        /* this means we haven't initialized the suspension pthread objects for this thread
           in this instance of the process */
        int status;
        status = pthread_cond_init( &th->th.th_suspend_cv.c_cond, &__kmp_suspend_cond_attr );
        KMP_CHECK_SYSFAIL( "pthread_cond_init", status );
        status = pthread_mutex_init( &th->th.th_suspend_mx.m_mutex, & __kmp_suspend_mutex_attr );
        KMP_CHECK_SYSFAIL( "pthread_mutex_init", status );
        *(volatile int*)&th->th.th_suspend_init_count = __kmp_fork_count + 1;
    };
}

void
__kmp_suspend_uninitialize_thread( kmp_info_t *th )
{
    if(th->th.th_suspend_init_count > __kmp_fork_count) {
        /* this means we have initialized the suspension pthread objects for this thread
           in this instance of the process */
        int status;

        status = pthread_cond_destroy( &th->th.th_suspend_cv.c_cond );
        if ( status != 0 && status != EBUSY ) {
            KMP_SYSFAIL( "pthread_cond_destroy", status );
        };
        status = pthread_mutex_destroy( &th->th.th_suspend_mx.m_mutex );
        if ( status != 0 && status != EBUSY ) {
            KMP_SYSFAIL( "pthread_mutex_destroy", status );
        };
        --th->th.th_suspend_init_count;
        KMP_DEBUG_ASSERT(th->th.th_suspend_init_count == __kmp_fork_count);
    }
}

/* This routine puts the calling thread to sleep after setting the
 * sleep bit for the indicated flag variable to true.
 */
template <class C>
static inline void __kmp_suspend_template( int th_gtid, C *flag )
{
    KMP_TIME_BLOCK(USER_suspend);
    kmp_info_t *th = __kmp_threads[th_gtid];
    int status;
    typename C::flag_t old_spin;

    KF_TRACE( 30, ("__kmp_suspend_template: T#%d enter for flag = %p\n", th_gtid, flag->get() ) );

    __kmp_suspend_initialize_thread( th );

    status = pthread_mutex_lock( &th->th.th_suspend_mx.m_mutex );
    KMP_CHECK_SYSFAIL( "pthread_mutex_lock", status );

    KF_TRACE( 10, ( "__kmp_suspend_template: T#%d setting sleep bit for spin(%p)\n",
                    th_gtid, flag->get() ) );

    /* TODO: shouldn't this use release semantics to ensure that __kmp_suspend_initialize_thread
       gets called first?
    */
    old_spin = flag->set_sleeping();

    KF_TRACE( 5, ( "__kmp_suspend_template: T#%d set sleep bit for spin(%p)==%d\n",
                   th_gtid, flag->get(), *(flag->get()) ) );

    if ( flag->done_check_val(old_spin) ) {
        old_spin = flag->unset_sleeping();
        KF_TRACE( 5, ( "__kmp_suspend_template: T#%d false alarm, reset sleep bit for spin(%p)\n",
                       th_gtid, flag->get()) );
    } else {
        /* Encapsulate in a loop as the documentation states that this may
         * "with low probability" return when the condition variable has
         * not been signaled or broadcast
         */
        int deactivated = FALSE;
        TCW_PTR(th->th.th_sleep_loc, (void *)flag);
        while ( flag->is_sleeping() ) {
#ifdef DEBUG_SUSPEND
            char buffer[128];
            __kmp_suspend_count++;
            __kmp_print_cond( buffer, &th->th.th_suspend_cv );
            __kmp_printf( "__kmp_suspend_template: suspending T#%d: %s\n", th_gtid, buffer );
#endif
            // Mark the thread as no longer active (only in the first iteration of the loop).
            if ( ! deactivated ) {
                th->th.th_active = FALSE;
                if ( th->th.th_active_in_pool ) {
                    th->th.th_active_in_pool = FALSE;
                    KMP_TEST_THEN_DEC32(
                      (kmp_int32 *) &__kmp_thread_pool_active_nth );
                    KMP_DEBUG_ASSERT( TCR_4(__kmp_thread_pool_active_nth) >= 0 );
                }
                deactivated = TRUE;
            }

#if USE_SUSPEND_TIMEOUT
            struct timespec now;
            struct timeval  tval;
            int msecs;

            status = gettimeofday( &tval, NULL );
            KMP_CHECK_SYSFAIL_ERRNO( "gettimeofday", status );
            TIMEVAL_TO_TIMESPEC( &tval, &now );

            msecs = (4*__kmp_dflt_blocktime) + 200;
            now.tv_sec  += msecs / 1000;
            now.tv_nsec += (msecs % 1000)*1000000;   // milliseconds -> nanoseconds
            if ( now.tv_nsec >= 1000000000 ) {       // normalize any carry into tv_sec
                now.tv_sec  += 1;
                now.tv_nsec -= 1000000000;
            }

            KF_TRACE( 15, ( "__kmp_suspend_template: T#%d about to perform pthread_cond_timedwait\n",
                            th_gtid ) );
            status = pthread_cond_timedwait( &th->th.th_suspend_cv.c_cond, &th->th.th_suspend_mx.m_mutex, & now );
#else
            KF_TRACE( 15, ( "__kmp_suspend_template: T#%d about to perform pthread_cond_wait\n",
                            th_gtid ) );

            status = pthread_cond_wait( &th->th.th_suspend_cv.c_cond, &th->th.th_suspend_mx.m_mutex );
#endif

            if ( (status != 0) && (status != EINTR) && (status != ETIMEDOUT) ) {
                KMP_SYSFAIL( "pthread_cond_wait", status );
            }
#ifdef KMP_DEBUG
            if (status == ETIMEDOUT) {
                if ( flag->is_sleeping() ) {
                    KF_TRACE( 100, ( "__kmp_suspend_template: T#%d timeout wakeup\n", th_gtid ) );
                } else {
                    KF_TRACE( 2, ( "__kmp_suspend_template: T#%d timeout wakeup, sleep bit not set!\n",
                                   th_gtid ) );
                }
            } else if ( flag->is_sleeping() ) {
                KF_TRACE( 100, ( "__kmp_suspend_template: T#%d spurious wakeup\n", th_gtid ) );
            }
#endif
        } // while

        // Mark the thread as active again (if it was previously marked as inactive)
        if ( deactivated ) {
            th->th.th_active = TRUE;
            if ( TCR_4(th->th.th_in_pool) ) {
                KMP_TEST_THEN_INC32( (kmp_int32 *) &__kmp_thread_pool_active_nth );
                th->th.th_active_in_pool = TRUE;
            }
        }
    }

#ifdef DEBUG_SUSPEND
    {
        char buffer[128];
        __kmp_print_cond( buffer, &th->th.th_suspend_cv);
        __kmp_printf( "__kmp_suspend_template: T#%d has awakened: %s\n", th_gtid, buffer );
    }
#endif

    status = pthread_mutex_unlock( &th->th.th_suspend_mx.m_mutex );
    KMP_CHECK_SYSFAIL( "pthread_mutex_unlock", status );

    KF_TRACE( 30, ("__kmp_suspend_template: T#%d exit\n", th_gtid ) );
}

void __kmp_suspend_32(int th_gtid, kmp_flag_32 *flag) {
    __kmp_suspend_template(th_gtid, flag);
}
void __kmp_suspend_64(int th_gtid, kmp_flag_64 *flag) {
    __kmp_suspend_template(th_gtid, flag);
}
void __kmp_suspend_oncore(int th_gtid, kmp_flag_oncore *flag) {
    __kmp_suspend_template(th_gtid, flag);
}
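
/*
    The wait above follows the standard POSIX "predicate loop" idiom. A
    minimal standalone sketch of the sleeping side (illustrative only; `lk',
    `cv' and `sleeping' are hypothetical names, not part of the library):

        static pthread_mutex_t lk = PTHREAD_MUTEX_INITIALIZER;
        static pthread_cond_t  cv = PTHREAD_COND_INITIALIZER;
        static int sleeping = 0;

        static void wait_until_woken( void )
        {
            pthread_mutex_lock( &lk );
            sleeping = 1;                       // analogous to flag->set_sleeping()
            while ( sleeping ) {                // re-check: spurious returns are allowed
                pthread_cond_wait( &cv, &lk );  // atomically releases lk while waiting
            }
            pthread_mutex_unlock( &lk );
        }

    The enclosing while loop is essential: pthread_cond_wait() may return even
    though no thread signaled, so the predicate must be re-tested, exactly as
    __kmp_suspend_template() re-tests flag->is_sleeping().
*/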


/* This routine signals the thread specified by target_gtid to wake up
 * after setting the sleep bit indicated by the flag argument to FALSE.
 * The target thread must already have called __kmp_suspend_template().
 */
template <class C>
static inline void __kmp_resume_template( int target_gtid, C *flag )
{
    kmp_info_t *th = __kmp_threads[target_gtid];
    int status;

#ifdef KMP_DEBUG
    int gtid = TCR_4(__kmp_init_gtid) ? __kmp_get_gtid() : -1;
#endif

    KF_TRACE( 30, ( "__kmp_resume_template: T#%d wants to wakeup T#%d enter\n", gtid, target_gtid ) );
    KMP_DEBUG_ASSERT( gtid != target_gtid );

    __kmp_suspend_initialize_thread( th );

    status = pthread_mutex_lock( &th->th.th_suspend_mx.m_mutex );
    KMP_CHECK_SYSFAIL( "pthread_mutex_lock", status );

    if (!flag) {
        flag = (C *)th->th.th_sleep_loc;
    }

    if (!flag) {
        KF_TRACE( 5, ( "__kmp_resume_template: T#%d exiting, thread T#%d already awake: flag(%p)\n",
                       gtid, target_gtid, NULL ) );
        status = pthread_mutex_unlock( &th->th.th_suspend_mx.m_mutex );
        KMP_CHECK_SYSFAIL( "pthread_mutex_unlock", status );
        return;
    }
    else {
        typename C::flag_t old_spin = flag->unset_sleeping();
        if ( ! flag->is_sleeping_val(old_spin) ) {
            KF_TRACE( 5, ( "__kmp_resume_template: T#%d exiting, thread T#%d already awake: flag(%p): "
                           "%u => %u\n",
                           gtid, target_gtid, flag->get(), old_spin, *flag->get() ) );

            status = pthread_mutex_unlock( &th->th.th_suspend_mx.m_mutex );
            KMP_CHECK_SYSFAIL( "pthread_mutex_unlock", status );
            return;
        }
        KF_TRACE( 5, ( "__kmp_resume_template: T#%d about to wakeup T#%d, reset sleep bit for flag's loc(%p): "
                       "%u => %u\n",
                       gtid, target_gtid, flag->get(), old_spin, *flag->get() ) );
    }
    TCW_PTR(th->th.th_sleep_loc, NULL);

#ifdef DEBUG_SUSPEND
    {
        char buffer[128];
        __kmp_print_cond( buffer, &th->th.th_suspend_cv );
        __kmp_printf( "__kmp_resume_template: T#%d resuming T#%d: %s\n", gtid, target_gtid, buffer );
    }
#endif

    status = pthread_cond_signal( &th->th.th_suspend_cv.c_cond );
    KMP_CHECK_SYSFAIL( "pthread_cond_signal", status );
    status = pthread_mutex_unlock( &th->th.th_suspend_mx.m_mutex );
    KMP_CHECK_SYSFAIL( "pthread_mutex_unlock", status );
    KF_TRACE( 30, ( "__kmp_resume_template: T#%d exiting after signaling wake up for T#%d\n",
                    gtid, target_gtid ) );
}

void __kmp_resume_32(int target_gtid, kmp_flag_32 *flag) {
    __kmp_resume_template(target_gtid, flag);
}
void __kmp_resume_64(int target_gtid, kmp_flag_64 *flag) {
    __kmp_resume_template(target_gtid, flag);
}
void __kmp_resume_oncore(int target_gtid, kmp_flag_oncore *flag) {
    __kmp_resume_template(target_gtid, flag);
}
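
/*
    And the matching wake-up side of the sketch above (again illustrative only):

        static void wake_sleeper( void )
        {
            pthread_mutex_lock( &lk );
            sleeping = 0;                  // clear the predicate first...
            pthread_cond_signal( &cv );    // ...then signal, still holding lk
            pthread_mutex_unlock( &lk );
        }

    Clearing the predicate and signaling under the same mutex the waiter holds
    around its check is what makes the hand-shake race-free; this mirrors the
    ordering of flag->unset_sleeping() and pthread_cond_signal() in
    __kmp_resume_template().
*/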

void
__kmp_resume_monitor()
{
    KMP_TIME_BLOCK(USER_resume);
    int status;
#ifdef KMP_DEBUG
    int gtid = TCR_4(__kmp_init_gtid) ? __kmp_get_gtid() : -1;
    KF_TRACE( 30, ( "__kmp_resume_monitor: T#%d wants to wakeup T#%d enter\n",
                    gtid, KMP_GTID_MONITOR ) );
    KMP_DEBUG_ASSERT( gtid != KMP_GTID_MONITOR );
#endif
    status = pthread_mutex_lock( &__kmp_wait_mx.m_mutex );
    KMP_CHECK_SYSFAIL( "pthread_mutex_lock", status );
#ifdef DEBUG_SUSPEND
    {
        char buffer[128];
        __kmp_print_cond( buffer, &__kmp_wait_cv.c_cond );
        __kmp_printf( "__kmp_resume_monitor: T#%d resuming T#%d: %s\n", gtid, KMP_GTID_MONITOR, buffer );
    }
#endif
    status = pthread_cond_signal( &__kmp_wait_cv.c_cond );
    KMP_CHECK_SYSFAIL( "pthread_cond_signal", status );
    status = pthread_mutex_unlock( &__kmp_wait_mx.m_mutex );
    KMP_CHECK_SYSFAIL( "pthread_mutex_unlock", status );
    KF_TRACE( 30, ( "__kmp_resume_monitor: T#%d exiting after signaling wake up for T#%d\n",
                    gtid, KMP_GTID_MONITOR ) );
}

/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */

void
__kmp_yield( int cond )
{
    if (cond && __kmp_yielding_on) {
        sched_yield();
    }
}

/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */

void
__kmp_gtid_set_specific( int gtid )
{
    int status;
    KMP_ASSERT( __kmp_init_runtime );
    status = pthread_setspecific( __kmp_gtid_threadprivate_key, (void*)(intptr_t)(gtid+1) );
    KMP_CHECK_SYSFAIL( "pthread_setspecific", status );
}

int
__kmp_gtid_get_specific()
{
    int gtid;
    if ( !__kmp_init_runtime ) {
        KA_TRACE( 50, ("__kmp_gtid_get_specific: runtime shutdown, returning KMP_GTID_SHUTDOWN\n" ) );
        return KMP_GTID_SHUTDOWN;
    }
    gtid = (int)(size_t)pthread_getspecific( __kmp_gtid_threadprivate_key );
    if ( gtid == 0 ) {
        gtid = KMP_GTID_DNE;
    }
    else {
        gtid--;
    }
    KA_TRACE( 50, ("__kmp_gtid_get_specific: key:%d gtid:%d\n",
                   __kmp_gtid_threadprivate_key, gtid ));
    return gtid;
}
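
/*
    Note on the +1 bias used above: pthread_getspecific() returns NULL (i.e. 0)
    both for a key that was never set and for a value that was set to 0, so
    storing gtid+1 lets gtid 0 be distinguished from "not set". Round trip
    (illustrative only):

        __kmp_gtid_set_specific( 0 );        // stores (void *)1 under the key
        int g = __kmp_gtid_get_specific();   // reads 1, returns 1 - 1 == 0

    while a thread that never called __kmp_gtid_set_specific() reads 0 and gets
    KMP_GTID_DNE back.
*/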

/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */

double
__kmp_read_cpu_time( void )
{
    /*clock_t t;*/
    struct tms buffer;

    /*t =*/ times( & buffer );

    return (buffer.tms_utime + buffer.tms_cutime) / (double) CLOCKS_PER_SEC;
}

int
__kmp_read_system_info( struct kmp_sys_info *info )
{
    int status;
    struct rusage r_usage;

    memset( info, 0, sizeof( *info ) );

    status = getrusage( RUSAGE_SELF, &r_usage);
    KMP_CHECK_SYSFAIL_ERRNO( "getrusage", status );

    info->maxrss  = r_usage.ru_maxrss;  /* the maximum resident set size utilized (in kilobytes)     */
    info->minflt  = r_usage.ru_minflt;  /* the number of page faults serviced without any I/O        */
    info->majflt  = r_usage.ru_majflt;  /* the number of page faults serviced that required I/O      */
    info->nswap   = r_usage.ru_nswap;   /* the number of times a process was "swapped" out of memory */
    info->inblock = r_usage.ru_inblock; /* the number of times the file system had to perform input  */
    info->oublock = r_usage.ru_oublock; /* the number of times the file system had to perform output */
    info->nvcsw   = r_usage.ru_nvcsw;   /* the number of voluntary context switches                  */
    info->nivcsw  = r_usage.ru_nivcsw;  /* the number of involuntary (forced) context switches       */

    return (status != 0);
}

/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */


void
__kmp_read_system_time( double *delta )
{
    double t_ns;
    struct timeval tval;
    struct timespec stop;
    int status;

    status = gettimeofday( &tval, NULL );
    KMP_CHECK_SYSFAIL_ERRNO( "gettimeofday", status );
    TIMEVAL_TO_TIMESPEC( &tval, &stop );
    t_ns = TS2NS(stop) - TS2NS(__kmp_sys_timer_data.start);
    *delta = (t_ns * 1e-9);
}

void
__kmp_clear_system_time( void )
{
    struct timeval tval;
    int status;
    status = gettimeofday( &tval, NULL );
    KMP_CHECK_SYSFAIL_ERRNO( "gettimeofday", status );
    TIMEVAL_TO_TIMESPEC( &tval, &__kmp_sys_timer_data.start );
}

/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */

#ifdef BUILD_TV

void
__kmp_tv_threadprivate_store( kmp_info_t *th, void *global_addr, void *thread_addr )
{
    struct tv_data *p;

    p = (struct tv_data *) __kmp_allocate( sizeof( *p ) );

    p->u.tp.global_addr = global_addr;
    p->u.tp.thread_addr = thread_addr;

    p->type = (void *) 1;

    p->next = th->th.th_local.tv_data;
    th->th.th_local.tv_data = p;

    if ( p->next == 0 ) {
        int rc = pthread_setspecific( __kmp_tv_key, p );
        KMP_CHECK_SYSFAIL( "pthread_setspecific", rc );
    }
}

#endif /* BUILD_TV */

/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */

static int
__kmp_get_xproc( void ) {

    int r = 0;

    #if KMP_OS_LINUX

        r = sysconf( _SC_NPROCESSORS_ONLN );

    #elif KMP_OS_DARWIN

        // Bug C77011 High "OpenMP Threads and number of active cores".

        // Find the number of available CPUs.
        kern_return_t          rc;
        host_basic_info_data_t info;
        mach_msg_type_number_t num = HOST_BASIC_INFO_COUNT;
        rc = host_info( mach_host_self(), HOST_BASIC_INFO, (host_info_t) & info, & num );
        if ( rc == 0 && num == HOST_BASIC_INFO_COUNT ) {
            // Cannot use KA_TRACE() here because this code works before trace support is
            // initialized.
            r = info.avail_cpus;
        } else {
            KMP_WARNING( CantGetNumAvailCPU );
            KMP_INFORM( AssumedNumCPU );
        }; // if

    #elif KMP_OS_FREEBSD

        int mib[] = { CTL_HW, HW_NCPU };
        size_t len = sizeof( r );
        if ( sysctl( mib, 2, &r, &len, NULL, 0 ) < 0 ) {
            r = 0;
            KMP_WARNING( CantGetNumAvailCPU );
            KMP_INFORM( AssumedNumCPU );
        }

    #else

        #error "Unknown or unsupported OS."

    #endif

    return r > 0 ? r : 2; /* guess value of 2 if OS told us 0 */

} // __kmp_get_xproc

int
__kmp_read_from_file( char const *path, char const *format, ... )
{
    int result;
    va_list args;

    va_start(args, format);
    FILE *f = fopen(path, "rb");
    if ( f == NULL ) {
        va_end(args);
        return 0;
    }
    result = vfscanf(f, format, args);
    fclose(f);
    va_end(args);

    return result;
}
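
/*
    Example use of __kmp_read_from_file() (illustrative only; this path and
    variable are hypothetical):

        int max_threads = 0;
        if ( __kmp_read_from_file( "/proc/sys/kernel/threads-max", "%d", &max_threads ) == 1 ) {
            // max_threads now holds the kernel-wide thread limit
        }

    The return value is the vfscanf() result, i.e. the number of items matched,
    or 0 if the file could not be opened.
*/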

void
__kmp_runtime_initialize( void )
{
    int status;
    pthread_mutexattr_t mutex_attr;
    pthread_condattr_t cond_attr;

    if ( __kmp_init_runtime ) {
        return;
    }; // if

    #if ( KMP_ARCH_X86 || KMP_ARCH_X86_64 )
        if ( ! __kmp_cpuinfo.initialized ) {
            __kmp_query_cpuid( &__kmp_cpuinfo );
        }; // if
    #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

    __kmp_xproc = __kmp_get_xproc();

    if ( sysconf( _SC_THREADS ) ) {

        /* Query the maximum number of threads */
        __kmp_sys_max_nth = sysconf( _SC_THREAD_THREADS_MAX );
        if ( __kmp_sys_max_nth == -1 ) {
            /* Unlimited threads for NPTL */
            __kmp_sys_max_nth = INT_MAX;
        }
        else if ( __kmp_sys_max_nth <= 1 ) {
            /* Can't tell, just use PTHREAD_THREADS_MAX */
            __kmp_sys_max_nth = KMP_MAX_NTH;
        }

        /* Query the minimum stack size */
        __kmp_sys_min_stksize = sysconf( _SC_THREAD_STACK_MIN );
        if ( __kmp_sys_min_stksize <= 1 ) {
            __kmp_sys_min_stksize = KMP_MIN_STKSIZE;
        }
    }

    /* Set up minimum number of threads to switch to TLS gtid */
    __kmp_tls_gtid_min = KMP_TLS_GTID_MIN;


    #ifdef BUILD_TV
        {
            int rc = pthread_key_create( & __kmp_tv_key, 0 );
            KMP_CHECK_SYSFAIL( "pthread_key_create", rc );
        }
    #endif

    status = pthread_key_create( &__kmp_gtid_threadprivate_key, __kmp_internal_end_dest );
    KMP_CHECK_SYSFAIL( "pthread_key_create", status );
    status = pthread_mutexattr_init( & mutex_attr );
    KMP_CHECK_SYSFAIL( "pthread_mutexattr_init", status );
    status = pthread_mutex_init( & __kmp_wait_mx.m_mutex, & mutex_attr );
    KMP_CHECK_SYSFAIL( "pthread_mutex_init", status );
    status = pthread_condattr_init( & cond_attr );
    KMP_CHECK_SYSFAIL( "pthread_condattr_init", status );
    status = pthread_cond_init( & __kmp_wait_cv.c_cond, & cond_attr );
    KMP_CHECK_SYSFAIL( "pthread_cond_init", status );
#if USE_ITT_BUILD
    __kmp_itt_initialize();
#endif /* USE_ITT_BUILD */

    __kmp_init_runtime = TRUE;
}

void
__kmp_runtime_destroy( void )
{
    int status;

    if ( ! __kmp_init_runtime ) {
        return; // Nothing to do.
    };

#if USE_ITT_BUILD
    __kmp_itt_destroy();
#endif /* USE_ITT_BUILD */

    status = pthread_key_delete( __kmp_gtid_threadprivate_key );
    KMP_CHECK_SYSFAIL( "pthread_key_delete", status );
    #ifdef BUILD_TV
        status = pthread_key_delete( __kmp_tv_key );
        KMP_CHECK_SYSFAIL( "pthread_key_delete", status );
    #endif

    status = pthread_mutex_destroy( & __kmp_wait_mx.m_mutex );
    if ( status != 0 && status != EBUSY ) {
        KMP_SYSFAIL( "pthread_mutex_destroy", status );
    }
    status = pthread_cond_destroy( & __kmp_wait_cv.c_cond );
    if ( status != 0 && status != EBUSY ) {
        KMP_SYSFAIL( "pthread_cond_destroy", status );
    }
    #if KMP_AFFINITY_SUPPORTED
        __kmp_affinity_uninitialize();
    #endif

    __kmp_init_runtime = FALSE;
}


/* Put the thread to sleep for a time period */
/* NOTE: not currently used anywhere */
void
__kmp_thread_sleep( int millis )
{
    sleep( ( millis + 500 ) / 1000 );
}

/* Calculate the elapsed wall clock time for the user */
void
__kmp_elapsed( double *t )
{
    int status;
# ifdef FIX_SGI_CLOCK
    struct timespec ts;

    status = clock_gettime( CLOCK_PROCESS_CPUTIME_ID, &ts );
    KMP_CHECK_SYSFAIL_ERRNO( "clock_gettime", status );
    *t = (double) ts.tv_nsec * (1.0 / (double) NSEC_PER_SEC) +
        (double) ts.tv_sec;
# else
    struct timeval tv;

    status = gettimeofday( & tv, NULL );
    KMP_CHECK_SYSFAIL_ERRNO( "gettimeofday", status );
    *t = (double) tv.tv_usec * (1.0 / (double) USEC_PER_SEC) +
        (double) tv.tv_sec;
# endif
}

/* Calculate the elapsed wall clock tick for the user */
void
__kmp_elapsed_tick( double *t )
{
    *t = 1 / (double) CLOCKS_PER_SEC;
}

/*
    Determine whether the given address is mapped into the current address space.
*/

int
__kmp_is_address_mapped( void * addr ) {

    int found = 0;
    int rc;

    #if KMP_OS_LINUX

        /*
            On Linux* OS, read the /proc/<pid>/maps pseudo-file to get all the address ranges mapped
            into the address space.
        */

        char * name = __kmp_str_format( "/proc/%d/maps", getpid() );
        FILE * file = NULL;

        file = fopen( name, "r" );
        KMP_ASSERT( file != NULL );

        for ( ; ; ) {

            void * beginning = NULL;
            void * ending = NULL;
            char perms[ 5 ];

            rc = fscanf( file, "%p-%p %4s %*[^\n]\n", & beginning, & ending, perms );
            if ( rc == EOF ) {
                break;
            }; // if
            KMP_ASSERT( rc == 3 && strlen( perms ) == 4 ); // Make sure all fields are read.

            // Ending address is not included in the region, but beginning is.
            if ( ( addr >= beginning ) && ( addr < ending ) ) {
                perms[ 2 ] = 0; // 3rd and 4th characters do not matter.
                if ( strcmp( perms, "rw" ) == 0 ) {
                    // Memory we are looking for should be readable and writable.
                    found = 1;
                }; // if
                break;
            }; // if

        }; // forever

        // Free resources.
        fclose( file );
        KMP_INTERNAL_FREE( name );

    #elif KMP_OS_DARWIN

        /*
            On OS X*, the /proc pseudo filesystem is not available. Try to read memory using the vm
            interface.
        */

        int buffer;
        vm_size_t count;
        rc =
            vm_read_overwrite(
                mach_task_self(),           // Task to read memory of.
                (vm_address_t)( addr ),     // Address to read from.
                1,                          // Number of bytes to be read.
                (vm_address_t)( & buffer ), // Address of buffer to save read bytes in.
                & count                     // Address of var to save number of read bytes in.
            );
        if ( rc == 0 ) {
            // Memory successfully read.
            found = 1;
        }; // if

    #elif KMP_OS_FREEBSD

        // FIXME(FreeBSD*): Implement this
        found = 1;

    #else

        #error "Unknown or unsupported OS"

    #endif

    return found;

} // __kmp_is_address_mapped
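
/*
    For reference, each line of /proc/<pid>/maps has the general form

        00400000-0040b000 r-xp 00000000 08:01 1234   /bin/cat
        ^begin   ^end     ^perms

    which is why the fscanf() call above reads two pointers and a 4-character
    permission string, then discards the rest of the line.
*/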

#ifdef USE_LOAD_BALANCE


# if KMP_OS_DARWIN

// The function returns the rounded value of the system load average
// during a given time interval, which depends on the value of the
// __kmp_load_balance_interval variable (default is 60 sec; other values
// may be 300 sec or 900 sec).
// It returns -1 in case of error.
int
__kmp_get_load_balance( int max )
{
    double averages[3];
    int ret_avg = 0;

    int res = getloadavg( averages, 3 );

    // Check __kmp_load_balance_interval to determine which of the averages to use.
    // getloadavg() may return fewer samples than requested, i.e. fewer than 3.
    if ( __kmp_load_balance_interval < 180 && ( res >= 1 ) ) {
        ret_avg = averages[0];// 1 min
    } else if ( ( __kmp_load_balance_interval >= 180
                  && __kmp_load_balance_interval < 600 ) && ( res >= 2 ) ) {
        ret_avg = averages[1];// 5 min
    } else if ( ( __kmp_load_balance_interval >= 600 ) && ( res == 3 ) ) {
        ret_avg = averages[2];// 15 min
    } else {// Error occurred
        return -1;
    }

    return ret_avg;
}
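
/*
    getloadavg() itself is simple (illustrative sketch only):

        double avg[3];
        int n = getloadavg( avg, 3 );  // number of samples written, or -1 on error
        // avg[0], avg[1], avg[2] are the 1-, 5- and 15-minute load averages

    which is why the code above checks `res' before trusting a particular slot.
*/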

# else // Linux* OS

// The function returns the number of running (not sleeping) threads, or -1 in case of error.
// An error can be reported if the Linux* OS kernel is too old (no "/proc" support).
// Counting of running threads stops if max running threads is encountered.
int
__kmp_get_load_balance( int max )
{
    static int permanent_error = 0;

    static int    glb_running_threads = 0; /* Saved count of the running threads for the thread balance algorithm */
    static double glb_call_time = 0;       /* Thread balance algorithm call time */

    int running_threads = 0;              // Number of running threads in the system.

    DIR  *          proc_dir   = NULL;    // Handle of "/proc/" directory.
    struct dirent * proc_entry = NULL;

    kmp_str_buf_t   task_path;            // "/proc/<pid>/task/<tid>/" path.
    DIR  *          task_dir   = NULL;    // Handle of "/proc/<pid>/task/<tid>/" directory.
    struct dirent * task_entry = NULL;
    int             task_path_fixed_len;

    kmp_str_buf_t   stat_path;            // "/proc/<pid>/task/<tid>/stat" path.
    int             stat_file = -1;
    int             stat_path_fixed_len;

    int total_processes = 0;              // Total number of processes in system.
    int total_threads   = 0;              // Total number of threads in system.

    double call_time = 0.0;

    __kmp_str_buf_init( & task_path );
    __kmp_str_buf_init( & stat_path );

    __kmp_elapsed( & call_time );

    if ( glb_call_time &&
            ( call_time - glb_call_time < __kmp_load_balance_interval ) ) {
        running_threads = glb_running_threads;
        goto finish;
    }

    glb_call_time = call_time;

    // Do not spend time on scanning "/proc/" if we have a permanent error.
    if ( permanent_error ) {
        running_threads = -1;
        goto finish;
    }; // if

    if ( max <= 0 ) {
        max = INT_MAX;
    }; // if

    // Open "/proc/" directory.
    proc_dir = opendir( "/proc" );
    if ( proc_dir == NULL ) {
        // Cannot open "/proc/". Probably the kernel does not support it. Return an error now and
        // in subsequent calls.
        running_threads = -1;
        permanent_error = 1;
        goto finish;
    }; // if

    // Initialize fixed part of task_path. This part will not change.
    __kmp_str_buf_cat( & task_path, "/proc/", 6 );
    task_path_fixed_len = task_path.used;    // Remember number of used characters.

    proc_entry = readdir( proc_dir );
    while ( proc_entry != NULL ) {
        // Proc entry is a directory and name starts with a digit. Assume it is a process'
        // directory.
        if ( proc_entry->d_type == DT_DIR && isdigit( proc_entry->d_name[ 0 ] ) ) {

            ++ total_processes;
            // Make sure init process is the very first in "/proc", so we can replace
            // strcmp( proc_entry->d_name, "1" ) == 0 with simpler total_processes == 1.
            // We are going to check that total_processes == 1 => d_name == "1" is true (where
            // "=>" is implication). Since C++ does not have => operator, let us replace it with its
            // equivalent: a => b == ! a || b.
            KMP_DEBUG_ASSERT( total_processes != 1 || strcmp( proc_entry->d_name, "1" ) == 0 );

            // Construct task_path.
            task_path.used = task_path_fixed_len;    // Reset task_path to "/proc/".
            __kmp_str_buf_cat( & task_path, proc_entry->d_name, strlen( proc_entry->d_name ) );
            __kmp_str_buf_cat( & task_path, "/task", 5 );

            task_dir = opendir( task_path.str );
            if ( task_dir == NULL ) {
                // Process can finish between reading "/proc/" directory entry and opening process'
                // "task/" directory. So, in general case we should not complain, but have to skip
                // this process and read the next one.
                // But on systems with no "task/" support we would spend a lot of time scanning the
                // "/proc/" tree again and again without any benefit. The "init" process (its pid is
                // 1) should always exist, so if we cannot open "/proc/1/task/", it means "task/"
                // is not supported by the kernel. Report an error now and in the future.
                if ( strcmp( proc_entry->d_name, "1" ) == 0 ) {
                    running_threads = -1;
                    permanent_error = 1;
                    goto finish;
                }; // if
            } else {
                // Construct fixed part of stat file path.
                __kmp_str_buf_clear( & stat_path );
                __kmp_str_buf_cat( & stat_path, task_path.str, task_path.used );
                __kmp_str_buf_cat( & stat_path, "/", 1 );
                stat_path_fixed_len = stat_path.used;

                task_entry = readdir( task_dir );
                while ( task_entry != NULL ) {
                    // It is a directory and name starts with a digit.
                    if ( proc_entry->d_type == DT_DIR && isdigit( task_entry->d_name[ 0 ] ) ) {

                        ++ total_threads;

                        // Construct complete stat file path. Easiest way would be:
                        //  __kmp_str_buf_print( & stat_path, "%s/%s/stat", task_path.str, task_entry->d_name );
                        // but a series of __kmp_str_buf_cat() calls works a bit faster.
                        stat_path.used = stat_path_fixed_len;    // Reset stat path to its fixed part.
                        __kmp_str_buf_cat( & stat_path, task_entry->d_name, strlen( task_entry->d_name ) );
                        __kmp_str_buf_cat( & stat_path, "/stat", 5 );

                        // Note: Low-level API (open/read/close) is used. High-level API
                        // (fopen/fclose) works ~ 30 % slower.
                        stat_file = open( stat_path.str, O_RDONLY );
                        if ( stat_file == -1 ) {
                            // We cannot report an error because task (thread) can terminate just
                            // before reading this file.
                        } else {
                            /*
                                Content of "stat" file looks like:

                                    24285 (program) S ...

                                It is a single line (if the program name does not include funny
                                symbols). The first number is a thread id, then the name of the
                                executable file in parentheses, then the state of the thread. We
                                need just the thread state.

                                Good news: The length of the program name is 15 characters max.
                                Longer names are truncated.

                                Thus, we need a rather short buffer: 15 chars for the program name +
                                2 parentheses + 3 spaces + ~7 digits of pid = 37.

                                Bad news: The program name may contain special symbols like space,
                                closing parenthesis, or even new line. This makes parsing the "stat"
                                file not 100 % reliable. In case of funny program names parsing
                                may fail (report an incorrect thread state).

                                Parsing the "status" file looks more promising (due to a different
                                file structure and escaping of special symbols) but reading and
                                parsing of the "status" file works slower.

                                -- ln
                            */
                            char buffer[ 65 ];
                            int len;
                            len = read( stat_file, buffer, sizeof( buffer ) - 1 );
                            if ( len >= 0 ) {
                                buffer[ len ] = 0;
                                // Using scanf:
                                //     sscanf( buffer, "%*d (%*s) %c ", & state );
                                // looks very nice, but searching for a closing parenthesis works a
                                // bit faster.
                                char * close_parent = strstr( buffer, ") " );
                                if ( close_parent != NULL ) {
                                    char state = * ( close_parent + 2 );
                                    if ( state == 'R' ) {
                                        ++ running_threads;
                                        if ( running_threads >= max ) {
                                            goto finish;
                                        }; // if
                                    }; // if
                                }; // if
                            }; // if
                            close( stat_file );
                            stat_file = -1;
                        }; // if
                    }; // if
                    task_entry = readdir( task_dir );
                }; // while
                closedir( task_dir );
                task_dir = NULL;
            }; // if
        }; // if
        proc_entry = readdir( proc_dir );
    }; // while

    //
    // There _might_ be a timing hole where the thread executing this
    // code gets skipped in the load balance, and running_threads is 0.
    // Assert in the debug builds only!!!
    //
    KMP_DEBUG_ASSERT( running_threads > 0 );
    if ( running_threads <= 0 ) {
        running_threads = 1;
    }

    finish: // Clean up and exit.
        if ( proc_dir != NULL ) {
            closedir( proc_dir );
        }; // if
        __kmp_str_buf_free( & task_path );
        if ( task_dir != NULL ) {
            closedir( task_dir );
        }; // if
        __kmp_str_buf_free( & stat_path );
        if ( stat_file != -1 ) {
            close( stat_file );
        }; // if

    glb_running_threads = running_threads;

    return running_threads;

} // __kmp_get_load_balance

# endif // KMP_OS_DARWIN

#endif // USE_LOAD_BALANCE


#if KMP_COMPILER_GCC && !(KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_PPC64 || KMP_ARCH_AARCH64)

int __kmp_invoke_microtask( microtask_t pkfn, int gtid, int tid, int argc,
        void *p_argv[] )
{
    int argc_full = argc + 2;
    int i;
    ffi_cif cif;
    ffi_type *types[argc_full];
    void *args[argc_full];
    void *idp[2];

    /* We're only passing pointers to the target. */
    for (i = 0; i < argc_full; i++)
        types[i] = &ffi_type_pointer;

    /* Ugly double-indirection, but that's how it goes... */
    idp[0] = &gtid;
    idp[1] = &tid;
    args[0] = &idp[0];
    args[1] = &idp[1];

    for (i = 0; i < argc; i++)
        args[2 + i] = &p_argv[i];

    if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, argc_full,
                &ffi_type_void, types) != FFI_OK)
        abort();

    ffi_call(&cif, (void (*)(void))pkfn, NULL, args);

    return 1;
}
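
/*
    A minimal standalone sketch of the same libffi pattern (illustrative only;
    `add' is a hypothetical function, not part of the library):

        int add( int a, int b ) { return a + b; }

        void call_add_via_ffi( void )
        {
            ffi_cif   cif;
            ffi_type *types[2]  = { &ffi_type_sint, &ffi_type_sint };
            int       a = 2, b = 3;
            void     *values[2] = { &a, &b };
            ffi_arg   result;

            if ( ffi_prep_cif( &cif, FFI_DEFAULT_ABI, 2, &ffi_type_sint, types ) == FFI_OK ) {
                ffi_call( &cif, FFI_FN( add ), &result, values );  // result == 5
            }
        }

    __kmp_invoke_microtask() above is the same idea with a void return type and
    every argument passed as a pointer.
*/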

#endif // KMP_COMPILER_GCC && !(KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_PPC64 || KMP_ARCH_AARCH64)

#if KMP_ARCH_PPC64 || KMP_ARCH_AARCH64

// We really only need the case with 1 argument, because clang always builds
// a struct of pointers to the shared variables referenced in the outlined function.
int
__kmp_invoke_microtask( microtask_t pkfn,
                        int gtid, int tid,
                        int argc, void *p_argv[] ) {
  switch (argc) {
  default:
    fprintf(stderr, "Too many args to microtask: %d!\n", argc);
    fflush(stderr);
    exit(-1);
  case 0:
    (*pkfn)(&gtid, &tid);
    break;
  case 1:
    (*pkfn)(&gtid, &tid, p_argv[0]);
    break;
  case 2:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1]);
    break;
  case 3:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2]);
    break;
  case 4:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3]);
    break;
  case 5:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4]);
    break;
  case 6:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5]);
    break;
  case 7:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6]);
    break;
  case 8:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7]);
    break;
  case 9:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8]);
    break;
  case 10:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9]);
    break;
  case 11:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10]);
    break;
  case 12:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
            p_argv[11]);
    break;
  case 13:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
            p_argv[11], p_argv[12]);
    break;
  case 14:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
            p_argv[11], p_argv[12], p_argv[13]);
    break;
  case 15:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
            p_argv[11], p_argv[12], p_argv[13], p_argv[14]);
    break;
  }

  return 1;
}
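
/*
    Why argc == 1 is the common case here (illustrative only; the names below
    are hypothetical, not what the compiler actually emits): for a region like

        #pragma omp parallel shared(a, b)

    clang typically outlines the body into a microtask that receives a single
    pointer to a structure of pointers to the shared variables, e.g.

        struct shared_vars { int *a; double *b; };

        void outlined( int *gtid, int *tid, struct shared_vars *s )
        {
            // body uses *s->a and *s->b
        }

    so __kmp_invoke_microtask() is usually called with argc == 1 and p_argv[0]
    pointing at such a structure.
*/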

#endif

// end of file //