/*
 * kmp_lock.h -- lock header file
 * $Revision: 43473 $
 * $Date: 2014-09-26 15:02:57 -0500 (Fri, 26 Sep 2014) $
 */


//===----------------------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//


#ifndef KMP_LOCK_H
#define KMP_LOCK_H

#include <limits.h>    // CHAR_BIT
#include <stddef.h>    // offsetof

#include "kmp_os.h"
#include "kmp_debug.h"

#ifdef __cplusplus
extern "C" {
#endif // __cplusplus

// ----------------------------------------------------------------------------
// Have to copy these definitions from kmp.h because kmp.h cannot be included
// due to circular dependencies. Will undef these at end of file.

#define KMP_PAD(type, sz) (sizeof(type) + (sz - ((sizeof(type) - 1) % (sz)) - 1))
#define KMP_GTID_DNE (-2)
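//
// Editorial note (worked example, not from the original source): KMP_PAD
// rounds sizeof(type) up to the next multiple of sz.  For a hypothetical
// 40-byte type T and a 64-byte cache line:
//
//     KMP_PAD(T, 64) = 40 + (64 - ((40 - 1) % 64) - 1) = 40 + 24 = 64
//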

// Forward declaration of ident and ident_t

struct ident;
typedef struct ident ident_t;

// End of copied code.
// ----------------------------------------------------------------------------

//
// We need to know the size of the area we can assume that the compiler(s)
// allocated for objects of type omp_lock_t and omp_nest_lock_t. The Intel
// compiler always allocates a pointer-sized area, as does visual studio.
//
// gcc, however, only allocates 4 bytes for regular locks, even on 64-bit
// Intel archs. It allocates at least 8 bytes for nested locks (more on
// recent versions), but we are bounded by the pointer-sized chunks that
// the Intel compiler allocates.
//

#if KMP_OS_LINUX && defined(KMP_GOMP_COMPAT)
# define OMP_LOCK_T_SIZE        sizeof(int)
# define OMP_NEST_LOCK_T_SIZE   sizeof(void *)
#else
# define OMP_LOCK_T_SIZE        sizeof(void *)
# define OMP_NEST_LOCK_T_SIZE   sizeof(void *)
#endif

//
// The Intel compiler allocates a 32-byte chunk for a critical section.
// Both gcc and visual studio only allocate enough space for a pointer.
// Sometimes we know that the space was allocated by the Intel compiler.
//
#define OMP_CRITICAL_SIZE       sizeof(void *)
#define INTEL_CRITICAL_SIZE     32

//
// lock flags
//
typedef kmp_uint32 kmp_lock_flags_t;

#define kmp_lf_critical_section 1

//
// When a lock table is used, the indices are of kmp_lock_index_t
//
typedef kmp_uint32 kmp_lock_index_t;

//
// When memory allocated for locks is on the lock pool (free list),
// it is treated as structs of this type.
//
struct kmp_lock_pool {
    union kmp_user_lock *next;
    kmp_lock_index_t index;
};

typedef struct kmp_lock_pool kmp_lock_pool_t;


extern void __kmp_validate_locks( void );


// ----------------------------------------------------------------------------
//
// There are 5 lock implementations:
//
// 1. Test and set locks.
// 2. futex locks (Linux* OS on x86 and Intel(R) Many Integrated Core architecture)
// 3. Ticket (Lamport bakery) locks.
// 4. Queuing locks (with separate spin fields).
// 5. DRDPA (Dynamically Reconfigurable Distributed Polling Area) locks
//
// and 3 lock purposes:
//
// 1. Bootstrap locks -- Used for a few locks available at library startup-shutdown time.
//    These do not require non-negative global thread ID's.
// 2. Internal RTL locks -- Used everywhere else in the RTL
// 3. User locks (includes critical sections)
//
// ----------------------------------------------------------------------------


// ============================================================================
// Lock implementations.
// ============================================================================


// ----------------------------------------------------------------------------
// Test and set locks.
//
// Non-nested test and set locks differ from the other lock kinds (except
// futex) in that we use the memory allocated by the compiler for the lock,
// rather than a pointer to it.
//
// On lin32, lin_32e, and win_32, the space allocated may be as small as 4
// bytes, so we have to use a lock table for nested locks, and avoid accessing
// the depth_locked field for non-nested locks.
//
// Information normally available to the tools, such as lock location,
// lock usage (normal lock vs. critical section), etc. is not available with
// test and set locks.
// ----------------------------------------------------------------------------

struct kmp_base_tas_lock {
    volatile kmp_int32 poll;    // 0 => unlocked
                                // locked: (gtid+1) of owning thread
    kmp_int32 depth_locked;     // depth locked, for nested locks only
};

typedef struct kmp_base_tas_lock kmp_base_tas_lock_t;

union kmp_tas_lock {
    kmp_base_tas_lock_t lk;
    kmp_lock_pool_t pool;       // make certain struct is large enough
    double lk_align;            // use worst case alignment
                                // no cache line padding
};

typedef union kmp_tas_lock kmp_tas_lock_t;

//
// Static initializer for test and set lock variables. Usage:
//    kmp_tas_lock_t xlock = KMP_TAS_LOCK_INITIALIZER( xlock );
//
#define KMP_TAS_LOCK_INITIALIZER( lock ) { { 0, 0 } }

extern void __kmp_acquire_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid );
extern int __kmp_test_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid );
extern void __kmp_release_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid );
extern void __kmp_init_tas_lock( kmp_tas_lock_t *lck );
extern void __kmp_destroy_tas_lock( kmp_tas_lock_t *lck );

extern void __kmp_acquire_nested_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid );
extern int __kmp_test_nested_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid );
extern void __kmp_release_nested_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid );
extern void __kmp_init_nested_tas_lock( kmp_tas_lock_t *lck );
extern void __kmp_destroy_nested_tas_lock( kmp_tas_lock_t *lck );
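
//
// Illustrative usage sketch (editorial addition; assumes the caller already
// holds a valid global thread id in `gtid', obtained elsewhere from the
// runtime):
//
//     kmp_tas_lock_t xlock = KMP_TAS_LOCK_INITIALIZER( xlock );
//     __kmp_acquire_tas_lock( &xlock, gtid );     // spins until ownership is obtained
//     /* ... critical region ... */
//     __kmp_release_tas_lock( &xlock, gtid );
//     __kmp_destroy_tas_lock( &xlock );
//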


#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)

// ----------------------------------------------------------------------------
// futex locks. futex locks are only available on Linux* OS.
//
// Like non-nested test and set locks, non-nested futex locks use the memory
// allocated by the compiler for the lock, rather than a pointer to it.
//
// Information normally available to the tools, such as lock location,
// lock usage (normal lock vs. critical section), etc. is not available with
// futex locks. With non-nested futex locks, the lock owner is not
// even available.
// ----------------------------------------------------------------------------

struct kmp_base_futex_lock {
    volatile kmp_int32 poll;    // 0 => unlocked
                                // locked: 2*(gtid+1) of owning thread
    kmp_int32 depth_locked;     // depth locked, for nested locks only
};

typedef struct kmp_base_futex_lock kmp_base_futex_lock_t;

union kmp_futex_lock {
    kmp_base_futex_lock_t lk;
    kmp_lock_pool_t pool;       // make certain struct is large enough
    double lk_align;            // use worst case alignment
                                // no cache line padding
};

typedef union kmp_futex_lock kmp_futex_lock_t;

//
// Static initializer for futex lock variables. Usage:
//    kmp_futex_lock_t xlock = KMP_FUTEX_LOCK_INITIALIZER( xlock );
//
#define KMP_FUTEX_LOCK_INITIALIZER( lock ) { { 0, 0 } }

extern void __kmp_acquire_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid );
extern int __kmp_test_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid );
extern void __kmp_release_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid );
extern void __kmp_init_futex_lock( kmp_futex_lock_t *lck );
extern void __kmp_destroy_futex_lock( kmp_futex_lock_t *lck );

extern void __kmp_acquire_nested_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid );
extern int __kmp_test_nested_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid );
extern void __kmp_release_nested_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid );
extern void __kmp_init_nested_futex_lock( kmp_futex_lock_t *lck );
extern void __kmp_destroy_nested_futex_lock( kmp_futex_lock_t *lck );

#endif // KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)


// ----------------------------------------------------------------------------
// Ticket locks.
// ----------------------------------------------------------------------------

struct kmp_base_ticket_lock {
    // `initialized' must be the first entry in the lock data structure!
    volatile union kmp_ticket_lock * initialized;  // points to the lock union if in initialized state
    ident_t const * location;         // Source code location of omp_init_lock().
    volatile kmp_uint32 next_ticket;  // ticket number to give to next thread which acquires
    volatile kmp_uint32 now_serving;  // ticket number for thread which holds the lock
    volatile kmp_int32 owner_id;      // (gtid+1) of owning thread, 0 if unlocked
    kmp_int32 depth_locked;           // depth locked, for nested locks only
    kmp_lock_flags_t flags;           // lock specifics, e.g. critical section lock
};

typedef struct kmp_base_ticket_lock kmp_base_ticket_lock_t;

union KMP_ALIGN_CACHE kmp_ticket_lock {
    kmp_base_ticket_lock_t lk;      // This field must be first to allow static initializing.
    kmp_lock_pool_t pool;
    double lk_align;                // use worst case alignment
    char lk_pad[ KMP_PAD( kmp_base_ticket_lock_t, CACHE_LINE ) ];
};

typedef union kmp_ticket_lock kmp_ticket_lock_t;

//
// Static initializer for simple ticket lock variables. Usage:
//    kmp_ticket_lock_t xlock = KMP_TICKET_LOCK_INITIALIZER( xlock );
// Note the macro argument: the lock variable itself is needed so that its
// `initialized' field can point back at the lock union.
//
#define KMP_TICKET_LOCK_INITIALIZER( lock ) { { (kmp_ticket_lock_t *) & (lock), NULL, 0, 0, 0, -1 } }
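
//
// Conceptual sketch of the bakery scheme implied by the fields above
// (editorial addition; the actual routines live in kmp_lock.cpp):
//
//     my_ticket = atomic fetch-and-increment of lck->lk.next_ticket;
//     while ( lck->lk.now_serving != my_ticket )
//         ;                          // spin (with yielding / back-off)
//     /* critical region; the releasing thread advances now_serving by one */
//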

extern void __kmp_acquire_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid );
extern int __kmp_test_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid );
extern int __kmp_test_ticket_lock_with_cheks( kmp_ticket_lock_t *lck, kmp_int32 gtid );
extern void __kmp_release_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid );
extern void __kmp_init_ticket_lock( kmp_ticket_lock_t *lck );
extern void __kmp_destroy_ticket_lock( kmp_ticket_lock_t *lck );

extern void __kmp_acquire_nested_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid );
extern int __kmp_test_nested_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid );
extern void __kmp_release_nested_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid );
extern void __kmp_init_nested_ticket_lock( kmp_ticket_lock_t *lck );
extern void __kmp_destroy_nested_ticket_lock( kmp_ticket_lock_t *lck );


// ----------------------------------------------------------------------------
// Queuing locks.
// ----------------------------------------------------------------------------

#if KMP_USE_ADAPTIVE_LOCKS

struct kmp_adaptive_lock_info;

typedef struct kmp_adaptive_lock_info kmp_adaptive_lock_info_t;

#if KMP_DEBUG_ADAPTIVE_LOCKS

struct kmp_adaptive_lock_statistics {
    /* So we can get stats from locks that haven't been destroyed. */
    kmp_adaptive_lock_info_t * next;
    kmp_adaptive_lock_info_t * prev;

    /* Other statistics */
    kmp_uint32 successfulSpeculations;
    kmp_uint32 hardFailedSpeculations;
    kmp_uint32 softFailedSpeculations;
    kmp_uint32 nonSpeculativeAcquires;
    kmp_uint32 nonSpeculativeAcquireAttempts;
    kmp_uint32 lemmingYields;
};

typedef struct kmp_adaptive_lock_statistics kmp_adaptive_lock_statistics_t;

extern void __kmp_print_speculative_stats();
extern void __kmp_init_speculative_stats();

#endif // KMP_DEBUG_ADAPTIVE_LOCKS

struct kmp_adaptive_lock_info
{
    /* Values used for adaptivity.
     * Although these are accessed from multiple threads we don't access them atomically,
     * because if we miss updates it probably doesn't matter much. (It just affects our
     * decision about whether to try speculation on the lock).
     */
    kmp_uint32 volatile badness;
    kmp_uint32 volatile acquire_attempts;
    /* Parameters of the lock. */
    kmp_uint32 max_badness;
    kmp_uint32 max_soft_retries;

#if KMP_DEBUG_ADAPTIVE_LOCKS
    kmp_adaptive_lock_statistics_t volatile stats;
#endif
};
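
//
// Illustrative reading of these fields (an editorial assumption, not a quote
// of kmp_lock.cpp): the recorded badness and acquire_attempts feed the
// decision of whether a speculative acquire is worth trying at all, while
// max_soft_retries bounds how often a soft-failed speculation is retried
// before falling back to the underlying queuing lock, e.g.
//
//     if ( speculation still looks profitable given lck->badness )
//         retry the speculative acquire up to lck->max_soft_retries times;
//     else
//         acquire the embedded queuing lock non-speculatively;
//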

#endif // KMP_USE_ADAPTIVE_LOCKS


struct kmp_base_queuing_lock {

    // `initialized' must be the first entry in the lock data structure!
    volatile union kmp_queuing_lock *initialized;  // Points to the lock union if in initialized state.

    ident_t const * location;     // Source code location of omp_init_lock().

    KMP_ALIGN( 8 )                // tail_id must be 8-byte aligned!

    volatile kmp_int32 tail_id;   // (gtid+1) of thread at tail of wait queue, 0 if empty
                                  // Must be no padding here since head/tail used in 8-byte CAS
    volatile kmp_int32 head_id;   // (gtid+1) of thread at head of wait queue, 0 if empty
                                  // Decl order assumes little endian
    // bakery-style lock
    volatile kmp_uint32 next_ticket;  // ticket number to give to next thread which acquires
    volatile kmp_uint32 now_serving;  // ticket number for thread which holds the lock
    volatile kmp_int32 owner_id;      // (gtid+1) of owning thread, 0 if unlocked
    kmp_int32 depth_locked;           // depth locked, for nested locks only

    kmp_lock_flags_t flags;           // lock specifics, e.g. critical section lock
};

typedef struct kmp_base_queuing_lock kmp_base_queuing_lock_t;

KMP_BUILD_ASSERT( offsetof( kmp_base_queuing_lock_t, tail_id ) % 8 == 0 );

union KMP_ALIGN_CACHE kmp_queuing_lock {
    kmp_base_queuing_lock_t lk;     // This field must be first to allow static initializing.
    kmp_lock_pool_t pool;
    double lk_align;                // use worst case alignment
    char lk_pad[ KMP_PAD( kmp_base_queuing_lock_t, CACHE_LINE ) ];
};

typedef union kmp_queuing_lock kmp_queuing_lock_t;

extern void __kmp_acquire_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid );
extern int __kmp_test_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid );
extern void __kmp_release_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid );
extern void __kmp_init_queuing_lock( kmp_queuing_lock_t *lck );
extern void __kmp_destroy_queuing_lock( kmp_queuing_lock_t *lck );

extern void __kmp_acquire_nested_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid );
extern int __kmp_test_nested_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid );
extern void __kmp_release_nested_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid );
extern void __kmp_init_nested_queuing_lock( kmp_queuing_lock_t *lck );
extern void __kmp_destroy_nested_queuing_lock( kmp_queuing_lock_t *lck );
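
//
// Conceptual sketch (editorial assumption) of the queuing scheme implied by
// the fields above: an arriving thread publishes its (gtid+1) at tail_id and
// then spins on a separate per-thread spin flag rather than on the lock word
// itself, so each waiter spins in its own cache line; on release, ownership
// is handed to the thread named by head_id.
//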

#if KMP_USE_ADAPTIVE_LOCKS

// ----------------------------------------------------------------------------
// Adaptive locks.
// ----------------------------------------------------------------------------
struct kmp_base_adaptive_lock {
    kmp_base_queuing_lock qlk;
    KMP_ALIGN(CACHE_LINE)
    kmp_adaptive_lock_info_t adaptive;   // Information for the speculative adaptive lock
};

typedef struct kmp_base_adaptive_lock kmp_base_adaptive_lock_t;

union KMP_ALIGN_CACHE kmp_adaptive_lock {
    kmp_base_adaptive_lock_t lk;
    kmp_lock_pool_t pool;
    double lk_align;
    char lk_pad[ KMP_PAD(kmp_base_adaptive_lock_t, CACHE_LINE) ];
};
typedef union kmp_adaptive_lock kmp_adaptive_lock_t;

# define GET_QLK_PTR(l) ((kmp_queuing_lock_t *) & (l)->lk.qlk)
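
// Illustrative use of GET_QLK_PTR (editorial sketch; `alck' and `gtid' are
// hypothetical): the non-speculative fallback path can operate directly on
// the queuing lock embedded in a kmp_adaptive_lock_t, e.g.
//
//     __kmp_acquire_queuing_lock( GET_QLK_PTR( alck ), gtid );
//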

#endif // KMP_USE_ADAPTIVE_LOCKS

// ----------------------------------------------------------------------------
// DRDPA ticket locks.
// ----------------------------------------------------------------------------

struct kmp_base_drdpa_lock {
    //
    // All of the fields on the first cache line are only written when
    // initializing or reconfiguring the lock.  These are relatively rare
    // operations, so data from the first cache line will usually stay
    // resident in the cache of each thread trying to acquire the lock.
    //
    // initialized must be the first entry in the lock data structure!
    //
    KMP_ALIGN_CACHE

    volatile union kmp_drdpa_lock * initialized;    // points to the lock union if in initialized state
    ident_t const * location;                       // Source code location of omp_init_lock().
    volatile struct kmp_lock_poll {
        kmp_uint64 poll;
    } * volatile polls;
    volatile kmp_uint64 mask;                       // is 2**num_polls-1 for mod op
    kmp_uint64 cleanup_ticket;                      // thread with cleanup ticket
    volatile struct kmp_lock_poll * old_polls;      // will deallocate old_polls
    kmp_uint32 num_polls;                           // must be power of 2

    //
    // next_ticket needs to exist in a separate cache line, as it is
    // invalidated every time a thread takes a new ticket.
    //
    KMP_ALIGN_CACHE

    volatile kmp_uint64 next_ticket;

    //
    // now_serving is used to store our ticket value while we hold the lock.
    // It has a slightly different meaning in the DRDPA ticket locks (where
    // it is written by the acquiring thread) than it does in the simple
    // ticket locks (where it is written by the releasing thread).
    //
    // Since now_serving is only read and written in the critical section,
    // it is non-volatile, but it needs to exist on a separate cache line,
    // as it is invalidated at every lock acquire.
    //
    // Likewise, the vars used for nested locks (owner_id and depth_locked)
    // are only written by the thread owning the lock, so they are put in
    // this cache line.  owner_id is read by other threads, so it must be
    // declared volatile.
    //
    KMP_ALIGN_CACHE

    kmp_uint64 now_serving;         // doesn't have to be volatile
    volatile kmp_uint32 owner_id;   // (gtid+1) of owning thread, 0 if unlocked
    kmp_int32 depth_locked;         // depth locked
    kmp_lock_flags_t flags;         // lock specifics, e.g. critical section lock
};

typedef struct kmp_base_drdpa_lock kmp_base_drdpa_lock_t;

union KMP_ALIGN_CACHE kmp_drdpa_lock {
    kmp_base_drdpa_lock_t lk;      // This field must be first to allow static initializing.
    kmp_lock_pool_t pool;
    double lk_align;               // use worst case alignment
    char lk_pad[ KMP_PAD( kmp_base_drdpa_lock_t, CACHE_LINE ) ];
};

typedef union kmp_drdpa_lock kmp_drdpa_lock_t;
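
//
// Conceptual sketch of the distributed polling scheme implied by the fields
// above (editorial assumption; the actual routines live in kmp_lock.cpp):
// each acquirer spins on its own slot of the polling area instead of on a
// single shared now_serving word.
//
//     ticket = atomic fetch-and-increment of lck->lk.next_ticket;
//     while ( polls[ ticket & mask ].poll < ticket )
//         ;                          // spin on this thread's own poll slot
//     /* critical region; the release publishes the next ticket value into
//        the successor's poll slot */
//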

extern void __kmp_acquire_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid );
extern int __kmp_test_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid );
extern void __kmp_release_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid );
extern void __kmp_init_drdpa_lock( kmp_drdpa_lock_t *lck );
extern void __kmp_destroy_drdpa_lock( kmp_drdpa_lock_t *lck );

extern void __kmp_acquire_nested_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid );
extern int __kmp_test_nested_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid );
extern void __kmp_release_nested_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid );
extern void __kmp_init_nested_drdpa_lock( kmp_drdpa_lock_t *lck );
extern void __kmp_destroy_nested_drdpa_lock( kmp_drdpa_lock_t *lck );


// ============================================================================
// Lock purposes.
// ============================================================================


// ----------------------------------------------------------------------------
// Bootstrap locks.
// ----------------------------------------------------------------------------

// Bootstrap locks -- very few locks used at library initialization time.
// Bootstrap locks are currently implemented as ticket locks.
// They could also be implemented as test and set locks, but they cannot use
// the other lock kinds, which require gtids that are not available at
// initialization time.

typedef kmp_ticket_lock_t kmp_bootstrap_lock_t;

#define KMP_BOOTSTRAP_LOCK_INITIALIZER( lock ) KMP_TICKET_LOCK_INITIALIZER( (lock) )

static inline void
__kmp_acquire_bootstrap_lock( kmp_bootstrap_lock_t *lck )
{
    __kmp_acquire_ticket_lock( lck, KMP_GTID_DNE );
}

static inline int
__kmp_test_bootstrap_lock( kmp_bootstrap_lock_t *lck )
{
    return __kmp_test_ticket_lock( lck, KMP_GTID_DNE );
}

static inline void
__kmp_release_bootstrap_lock( kmp_bootstrap_lock_t *lck )
{
    __kmp_release_ticket_lock( lck, KMP_GTID_DNE );
}

static inline void
__kmp_init_bootstrap_lock( kmp_bootstrap_lock_t *lck )
{
    __kmp_init_ticket_lock( lck );
}

static inline void
__kmp_destroy_bootstrap_lock( kmp_bootstrap_lock_t *lck )
{
    __kmp_destroy_ticket_lock( lck );
}

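//
// Illustrative usage sketch (editorial addition; `my_lock' is a hypothetical
// variable): bootstrap locks can be statically initialized and used before
// gtids exist, since the wrappers above pass KMP_GTID_DNE.
//
//     static kmp_bootstrap_lock_t my_lock = KMP_BOOTSTRAP_LOCK_INITIALIZER( my_lock );
//     __kmp_acquire_bootstrap_lock( &my_lock );
//     /* one-time startup / shutdown work */
//     __kmp_release_bootstrap_lock( &my_lock );
//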

// ----------------------------------------------------------------------------
// Internal RTL locks.
// ----------------------------------------------------------------------------

//
// Internal RTL locks are also implemented as ticket locks, for now.
//
// FIXME - We should go through and figure out which lock kind works best for
// each internal lock, and use the type declaration and function calls for
// that explicit lock kind (and get rid of this section).
//

typedef kmp_ticket_lock_t kmp_lock_t;

static inline void
__kmp_acquire_lock( kmp_lock_t *lck, kmp_int32 gtid )
{
    __kmp_acquire_ticket_lock( lck, gtid );
}

static inline int
__kmp_test_lock( kmp_lock_t *lck, kmp_int32 gtid )
{
    return __kmp_test_ticket_lock( lck, gtid );
}

static inline void
__kmp_release_lock( kmp_lock_t *lck, kmp_int32 gtid )
{
    __kmp_release_ticket_lock( lck, gtid );
}

static inline void
__kmp_init_lock( kmp_lock_t *lck )
{
    __kmp_init_ticket_lock( lck );
}

static inline void
__kmp_destroy_lock( kmp_lock_t *lck )
{
    __kmp_destroy_ticket_lock( lck );
}


// ----------------------------------------------------------------------------
// User locks.
// ----------------------------------------------------------------------------

//
// Do not allocate objects of type union kmp_user_lock!!!
// This will waste space unless __kmp_user_lock_kind == lk_drdpa.
// Instead, check the value of __kmp_user_lock_kind and allocate objects of
// the type of the appropriate union member, and cast their addresses to
// kmp_user_lock_p.
//

enum kmp_lock_kind {
    lk_default = 0,
    lk_tas,
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
    lk_futex,
#endif
    lk_ticket,
    lk_queuing,
    lk_drdpa,
#if KMP_USE_ADAPTIVE_LOCKS
    lk_adaptive
#endif // KMP_USE_ADAPTIVE_LOCKS
};

typedef enum kmp_lock_kind kmp_lock_kind_t;

extern kmp_lock_kind_t __kmp_user_lock_kind;

union kmp_user_lock {
    kmp_tas_lock_t tas;
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
    kmp_futex_lock_t futex;
#endif
    kmp_ticket_lock_t ticket;
    kmp_queuing_lock_t queuing;
    kmp_drdpa_lock_t drdpa;
#if KMP_USE_ADAPTIVE_LOCKS
    kmp_adaptive_lock_t adaptive;
#endif // KMP_USE_ADAPTIVE_LOCKS
    kmp_lock_pool_t pool;
};

typedef union kmp_user_lock *kmp_user_lock_p;

extern size_t __kmp_base_user_lock_size;
extern size_t __kmp_user_lock_size;
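
//
// Illustrative allocation sketch (editorial addition; `lck' is hypothetical
// and a plain malloc stands in for the runtime's own allocator -- the real
// entry point is __kmp_user_lock_allocate(), declared further below): storage
// for one lock of the currently selected kind is sized with
// __kmp_user_lock_size and then handled through kmp_user_lock_p.
//
//     kmp_user_lock_p lck = (kmp_user_lock_p) malloc( __kmp_user_lock_size );
//     __kmp_init_user_lock_with_checks( lck );    // invokes the bound init routine
//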

extern kmp_int32 ( *__kmp_get_user_lock_owner_ )( kmp_user_lock_p lck );

static inline kmp_int32
__kmp_get_user_lock_owner( kmp_user_lock_p lck )
{
    KMP_DEBUG_ASSERT( __kmp_get_user_lock_owner_ != NULL );
    return ( *__kmp_get_user_lock_owner_ )( lck );
}

extern void ( *__kmp_acquire_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid );

#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)

#define __kmp_acquire_user_lock_with_checks(lck,gtid)                                       \
    if (__kmp_user_lock_kind == lk_tas) {                                                   \
        if ( __kmp_env_consistency_check ) {                                                \
            char const * const func = "omp_set_lock";                                       \
            if ( ( sizeof ( kmp_tas_lock_t ) <= OMP_LOCK_T_SIZE )                           \
                && lck->tas.lk.depth_locked != -1 ) {                                       \
                KMP_FATAL( LockNestableUsedAsSimple, func );                                \
            }                                                                               \
            if ( ( gtid >= 0 ) && ( lck->tas.lk.poll - 1 == gtid ) ) {                      \
                KMP_FATAL( LockIsAlreadyOwned, func );                                      \
            }                                                                               \
        }                                                                                   \
        if ( ( lck->tas.lk.poll != 0 ) ||                                                   \
          ( ! KMP_COMPARE_AND_STORE_ACQ32( &(lck->tas.lk.poll), 0, gtid + 1 ) ) ) {         \
            kmp_uint32 spins;                                                               \
            KMP_FSYNC_PREPARE( lck );                                                       \
            KMP_INIT_YIELD( spins );                                                        \
            if ( TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc) ) { \
                KMP_YIELD( TRUE );                                                          \
            } else {                                                                        \
                KMP_YIELD_SPIN( spins );                                                    \
            }                                                                               \
            while ( ( lck->tas.lk.poll != 0 ) ||                                            \
              ( ! KMP_COMPARE_AND_STORE_ACQ32( &(lck->tas.lk.poll), 0, gtid + 1 ) ) ) {     \
                if ( TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc) ) { \
                    KMP_YIELD( TRUE );                                                      \
                } else {                                                                    \
                    KMP_YIELD_SPIN( spins );                                                \
                }                                                                           \
            }                                                                               \
        }                                                                                   \
        KMP_FSYNC_ACQUIRED( lck );                                                          \
    } else {                                                                                \
        KMP_DEBUG_ASSERT( __kmp_acquire_user_lock_with_checks_ != NULL );                   \
        ( *__kmp_acquire_user_lock_with_checks_ )( lck, gtid );                             \
    }

#else
static inline void
__kmp_acquire_user_lock_with_checks( kmp_user_lock_p lck, kmp_int32 gtid )
{
    KMP_DEBUG_ASSERT( __kmp_acquire_user_lock_with_checks_ != NULL );
    ( *__kmp_acquire_user_lock_with_checks_ )( lck, gtid );
}
#endif

extern int ( *__kmp_test_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid );

#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)

#include "kmp_i18n.h"                   /* AC: KMP_FATAL definition */
extern int __kmp_env_consistency_check; /* AC: copy from kmp.h here */
static inline int
__kmp_test_user_lock_with_checks( kmp_user_lock_p lck, kmp_int32 gtid )
{
    if ( __kmp_user_lock_kind == lk_tas ) {
        if ( __kmp_env_consistency_check ) {
            char const * const func = "omp_test_lock";
            if ( ( sizeof ( kmp_tas_lock_t ) <= OMP_LOCK_T_SIZE )
                && lck->tas.lk.depth_locked != -1 ) {
                KMP_FATAL( LockNestableUsedAsSimple, func );
            }
        }
        return ( ( lck->tas.lk.poll == 0 ) &&
          KMP_COMPARE_AND_STORE_ACQ32( &(lck->tas.lk.poll), 0, gtid + 1 ) );
    } else {
        KMP_DEBUG_ASSERT( __kmp_test_user_lock_with_checks_ != NULL );
        return ( *__kmp_test_user_lock_with_checks_ )( lck, gtid );
    }
}
#else
static inline int
__kmp_test_user_lock_with_checks( kmp_user_lock_p lck, kmp_int32 gtid )
{
    KMP_DEBUG_ASSERT( __kmp_test_user_lock_with_checks_ != NULL );
    return ( *__kmp_test_user_lock_with_checks_ )( lck, gtid );
}
#endif

extern void ( *__kmp_release_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid );

static inline void
__kmp_release_user_lock_with_checks( kmp_user_lock_p lck, kmp_int32 gtid )
{
    KMP_DEBUG_ASSERT( __kmp_release_user_lock_with_checks_ != NULL );
    ( *__kmp_release_user_lock_with_checks_ ) ( lck, gtid );
}

extern void ( *__kmp_init_user_lock_with_checks_ )( kmp_user_lock_p lck );

static inline void
__kmp_init_user_lock_with_checks( kmp_user_lock_p lck )
{
    KMP_DEBUG_ASSERT( __kmp_init_user_lock_with_checks_ != NULL );
    ( *__kmp_init_user_lock_with_checks_ )( lck );
}

//
// We need a non-checking version of destroy lock for when the RTL is
// doing the cleanup as it can't always tell if the lock is nested or not.
//
extern void ( *__kmp_destroy_user_lock_ )( kmp_user_lock_p lck );

static inline void
__kmp_destroy_user_lock( kmp_user_lock_p lck )
{
    KMP_DEBUG_ASSERT( __kmp_destroy_user_lock_ != NULL );
    ( *__kmp_destroy_user_lock_ )( lck );
}

extern void ( *__kmp_destroy_user_lock_with_checks_ )( kmp_user_lock_p lck );

static inline void
__kmp_destroy_user_lock_with_checks( kmp_user_lock_p lck )
{
    KMP_DEBUG_ASSERT( __kmp_destroy_user_lock_with_checks_ != NULL );
    ( *__kmp_destroy_user_lock_with_checks_ )( lck );
}

extern void ( *__kmp_acquire_nested_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid );

#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64)

#define __kmp_acquire_nested_user_lock_with_checks(lck,gtid)                                \
    if (__kmp_user_lock_kind == lk_tas) {                                                   \
        if ( __kmp_env_consistency_check ) {                                                \
            char const * const func = "omp_set_nest_lock";                                  \
            if ( ( sizeof ( kmp_tas_lock_t ) <= OMP_NEST_LOCK_T_SIZE )                      \
                && lck->tas.lk.depth_locked == -1 ) {                                       \
                KMP_FATAL( LockSimpleUsedAsNestable, func );                                \
            }                                                                               \
        }                                                                                   \
        if ( lck->tas.lk.poll - 1 == gtid ) {                                               \
            lck->tas.lk.depth_locked += 1;                                                  \
        } else {                                                                            \
            if ( ( lck->tas.lk.poll != 0 ) ||                                               \
              ( ! KMP_COMPARE_AND_STORE_ACQ32( &(lck->tas.lk.poll), 0, gtid + 1 ) ) ) {     \
                kmp_uint32 spins;                                                           \
                KMP_FSYNC_PREPARE( lck );                                                   \
                KMP_INIT_YIELD( spins );                                                    \
                if ( TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc) ) { \
                    KMP_YIELD( TRUE );                                                      \
                } else {                                                                    \
                    KMP_YIELD_SPIN( spins );                                                \
                }                                                                           \
                while ( ( lck->tas.lk.poll != 0 ) ||                                        \
                  ( ! KMP_COMPARE_AND_STORE_ACQ32( &(lck->tas.lk.poll), 0, gtid + 1 ) ) ) { \
                    if ( TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc) ) { \
                        KMP_YIELD( TRUE );                                                  \
                    } else {                                                                \
                        KMP_YIELD_SPIN( spins );                                            \
                    }                                                                       \
                }                                                                           \
            }                                                                               \
            lck->tas.lk.depth_locked = 1;                                                   \
        }                                                                                   \
        KMP_FSYNC_ACQUIRED( lck );                                                          \
    } else {                                                                                \
        KMP_DEBUG_ASSERT( __kmp_acquire_nested_user_lock_with_checks_ != NULL );            \
        ( *__kmp_acquire_nested_user_lock_with_checks_ )( lck, gtid );                      \
    }

#else
static inline void
__kmp_acquire_nested_user_lock_with_checks( kmp_user_lock_p lck, kmp_int32 gtid )
{
    KMP_DEBUG_ASSERT( __kmp_acquire_nested_user_lock_with_checks_ != NULL );
    ( *__kmp_acquire_nested_user_lock_with_checks_ )( lck, gtid );
}
#endif

extern int ( *__kmp_test_nested_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid );

#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64)
static inline int
__kmp_test_nested_user_lock_with_checks( kmp_user_lock_p lck, kmp_int32 gtid )
{
    if ( __kmp_user_lock_kind == lk_tas ) {
        int retval;
        if ( __kmp_env_consistency_check ) {
            char const * const func = "omp_test_nest_lock";
            if ( ( sizeof ( kmp_tas_lock_t ) <= OMP_NEST_LOCK_T_SIZE )
                && lck->tas.lk.depth_locked == -1 ) {
                KMP_FATAL( LockSimpleUsedAsNestable, func );
            }
        }
        KMP_DEBUG_ASSERT( gtid >= 0 );
        if ( lck->tas.lk.poll - 1 == gtid ) {   /* __kmp_get_tas_lock_owner( lck ) == gtid */
            return ++lck->tas.lk.depth_locked;  /* same owner, depth increased */
        }
        retval = ( ( lck->tas.lk.poll == 0 ) &&
          KMP_COMPARE_AND_STORE_ACQ32( &(lck->tas.lk.poll), 0, gtid + 1 ) );
        if ( retval ) {
            KMP_MB();
            lck->tas.lk.depth_locked = 1;
        }
        return retval;
    } else {
        KMP_DEBUG_ASSERT( __kmp_test_nested_user_lock_with_checks_ != NULL );
        return ( *__kmp_test_nested_user_lock_with_checks_ )( lck, gtid );
    }
}
#else
static inline int
__kmp_test_nested_user_lock_with_checks( kmp_user_lock_p lck, kmp_int32 gtid )
{
    KMP_DEBUG_ASSERT( __kmp_test_nested_user_lock_with_checks_ != NULL );
    return ( *__kmp_test_nested_user_lock_with_checks_ )( lck, gtid );
}
#endif

extern void ( *__kmp_release_nested_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid );

static inline void
__kmp_release_nested_user_lock_with_checks( kmp_user_lock_p lck, kmp_int32 gtid )
{
    KMP_DEBUG_ASSERT( __kmp_release_nested_user_lock_with_checks_ != NULL );
    ( *__kmp_release_nested_user_lock_with_checks_ )( lck, gtid );
}

extern void ( *__kmp_init_nested_user_lock_with_checks_ )( kmp_user_lock_p lck );

static inline void __kmp_init_nested_user_lock_with_checks( kmp_user_lock_p lck )
{
    KMP_DEBUG_ASSERT( __kmp_init_nested_user_lock_with_checks_ != NULL );
    ( *__kmp_init_nested_user_lock_with_checks_ )( lck );
}

extern void ( *__kmp_destroy_nested_user_lock_with_checks_ )( kmp_user_lock_p lck );

static inline void
__kmp_destroy_nested_user_lock_with_checks( kmp_user_lock_p lck )
{
    KMP_DEBUG_ASSERT( __kmp_destroy_nested_user_lock_with_checks_ != NULL );
    ( *__kmp_destroy_nested_user_lock_with_checks_ )( lck );
}

//
// user lock functions which do not necessarily exist for all lock kinds.
//
// The "set" functions usually have wrapper routines that check for a NULL set
// function pointer and call it if non-NULL.
//
// In some cases, it makes sense to have a "get" wrapper function check for a
// NULL get function pointer and return NULL / invalid value / error code if
// the function pointer is NULL.
//
// In other cases, the calling code really should differentiate between an
// unimplemented function and one that is implemented but returning NULL /
// invalid value. If this is the case, no get function wrapper exists.
//

extern int ( *__kmp_is_user_lock_initialized_ )( kmp_user_lock_p lck );

// no set function; fields are set during local allocation

extern const ident_t * ( *__kmp_get_user_lock_location_ )( kmp_user_lock_p lck );

static inline const ident_t *
__kmp_get_user_lock_location( kmp_user_lock_p lck )
{
    if ( __kmp_get_user_lock_location_ != NULL ) {
        return ( *__kmp_get_user_lock_location_ )( lck );
    }
    else {
        return NULL;
    }
}

extern void ( *__kmp_set_user_lock_location_ )( kmp_user_lock_p lck, const ident_t *loc );

static inline void
__kmp_set_user_lock_location( kmp_user_lock_p lck, const ident_t *loc )
{
    if ( __kmp_set_user_lock_location_ != NULL ) {
        ( *__kmp_set_user_lock_location_ )( lck, loc );
    }
}

extern kmp_lock_flags_t ( *__kmp_get_user_lock_flags_ )( kmp_user_lock_p lck );

extern void ( *__kmp_set_user_lock_flags_ )( kmp_user_lock_p lck, kmp_lock_flags_t flags );

static inline void
__kmp_set_user_lock_flags( kmp_user_lock_p lck, kmp_lock_flags_t flags )
{
    if ( __kmp_set_user_lock_flags_ != NULL ) {
        ( *__kmp_set_user_lock_flags_ )( lck, flags );
    }
}

//
// The function that sets up all of the vtbl pointers for kmp_user_lock_t.
//
extern void __kmp_set_user_lock_vptrs( kmp_lock_kind_t user_lock_kind );

//
// Macros for binding user lock functions.
//
#define KMP_BIND_USER_LOCK_TEMPLATE(nest, kind, suffix) {                                     \
    __kmp_acquire##nest##user_lock_with_checks_ = ( void (*)( kmp_user_lock_p, kmp_int32 ) ) \
                                                  __kmp_acquire##nest##kind##_##suffix;      \
    __kmp_release##nest##user_lock_with_checks_ = ( void (*)( kmp_user_lock_p, kmp_int32 ) ) \
                                                  __kmp_release##nest##kind##_##suffix;      \
    __kmp_test##nest##user_lock_with_checks_    = ( int  (*)( kmp_user_lock_p, kmp_int32 ) ) \
                                                  __kmp_test##nest##kind##_##suffix;         \
    __kmp_init##nest##user_lock_with_checks_    = ( void (*)( kmp_user_lock_p ) )            \
                                                  __kmp_init##nest##kind##_##suffix;         \
    __kmp_destroy##nest##user_lock_with_checks_ = ( void (*)( kmp_user_lock_p ) )            \
                                                  __kmp_destroy##nest##kind##_##suffix;      \
}

#define KMP_BIND_USER_LOCK(kind)                    KMP_BIND_USER_LOCK_TEMPLATE(_, kind, lock)
#define KMP_BIND_USER_LOCK_WITH_CHECKS(kind)        KMP_BIND_USER_LOCK_TEMPLATE(_, kind, lock_with_checks)
#define KMP_BIND_NESTED_USER_LOCK(kind)             KMP_BIND_USER_LOCK_TEMPLATE(_nested_, kind, lock)
#define KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(kind) KMP_BIND_USER_LOCK_TEMPLATE(_nested_, kind, lock_with_checks)
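
//
// For example (mechanical preprocessor expansion, shown for illustration),
// KMP_BIND_USER_LOCK(ticket) assigns, among other pointers:
//
//     __kmp_acquire_user_lock_with_checks_ =
//         ( void (*)( kmp_user_lock_p, kmp_int32 ) ) __kmp_acquire_ticket_lock;
//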

// ----------------------------------------------------------------------------
// User lock table & lock allocation
// ----------------------------------------------------------------------------

/*
    On 64-bit Linux* OS (and OS X*) the GNU compiler allocates only 4 bytes of memory for a lock
    variable, which is not enough to store a pointer, so we have to use lock indexes instead of
    pointers, and maintain a lock table to map indexes to pointers.


    Note: The first element of the table is not a pointer to a lock! It is a pointer to the
    previously allocated table (or NULL if it is the first table).

    Usage:

        if ( OMP_LOCK_T_SIZE < sizeof( <lock> ) ) { // or OMP_NEST_LOCK_T_SIZE
            Lock table is fully utilized. User locks are indexes, so table is
            used on user lock operation.
            Note: it may be the case (lin_32) that we don't need to use a lock
            table for regular locks, but do need the table for nested locks.
        }
        else {
            Lock table initialized but not actually used.
        }
*/

struct kmp_lock_table {
    kmp_lock_index_t used;       // Number of used elements
    kmp_lock_index_t allocated;  // Number of allocated elements
    kmp_user_lock_p * table;     // Lock table.
};

typedef struct kmp_lock_table kmp_lock_table_t;

extern kmp_lock_table_t __kmp_user_lock_table;
extern kmp_user_lock_p __kmp_lock_pool;
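
//
// Illustrative lookup sketch (editorial assumption; the real helper is
// __kmp_lookup_user_lock(), declared below): when indexes are in use, the
// value stored in the user's lock variable is a kmp_lock_index_t into the
// table rather than a pointer.
//
//     kmp_lock_index_t idx = *( (kmp_lock_index_t *) user_lock );
//     kmp_user_lock_p  lck = __kmp_user_lock_table.table[ idx ];
//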

struct kmp_block_of_locks {
    struct kmp_block_of_locks * next_block;
    void * locks;
};

typedef struct kmp_block_of_locks kmp_block_of_locks_t;

extern kmp_block_of_locks_t *__kmp_lock_blocks;
extern int __kmp_num_locks_in_block;

extern kmp_user_lock_p __kmp_user_lock_allocate( void **user_lock, kmp_int32 gtid, kmp_lock_flags_t flags );
extern void __kmp_user_lock_free( void **user_lock, kmp_int32 gtid, kmp_user_lock_p lck );
extern kmp_user_lock_p __kmp_lookup_user_lock( void **user_lock, char const *func );
extern void __kmp_cleanup_user_locks();

#define KMP_CHECK_USER_LOCK_INIT()                                      \
    {                                                                   \
        if ( ! TCR_4( __kmp_init_user_locks ) ) {                       \
            __kmp_acquire_bootstrap_lock( &__kmp_initz_lock );          \
            if ( ! TCR_4( __kmp_init_user_locks ) ) {                   \
                TCW_4( __kmp_init_user_locks, TRUE );                   \
            }                                                           \
            __kmp_release_bootstrap_lock( &__kmp_initz_lock );          \
        }                                                               \
    }

#undef KMP_PAD
#undef KMP_GTID_DNE

#ifdef __cplusplus
} // extern "C"
#endif // __cplusplus

#endif /* KMP_LOCK_H */