/*
 * kmp_lock.h -- lock header file
 */


//===----------------------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//

#ifndef KMP_LOCK_H
#define KMP_LOCK_H

#include <limits.h>    // CHAR_BIT
#include <stddef.h>    // offsetof

#include "kmp_os.h"
#include "kmp_debug.h"

#ifdef __cplusplus
extern "C" {
#endif // __cplusplus

// ----------------------------------------------------------------------------
// Have to copy these definitions from kmp.h because kmp.h cannot be included
// due to circular dependencies. Will undef these at end of file.

#define KMP_PAD(type, sz)  (sizeof(type) + (sz - ((sizeof(type) - 1) % (sz)) - 1))
#define KMP_GTID_DNE (-2)
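// For example (illustrative): with a 64-byte cache line, KMP_PAD pads a
// 40-byte type out to a full line: 40 + (64 - ((40 - 1) % 64) - 1) = 64,
// while a type that is already 64 bytes stays at 64.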

// Forward declaration of ident and ident_t

struct ident;
typedef struct ident ident_t;

// End of copied code.
// ----------------------------------------------------------------------------

//
// We need to know the size of the area that we can assume the compiler(s)
// allocated for objects of type omp_lock_t and omp_nest_lock_t. The Intel
// compiler always allocates a pointer-sized area, as does Visual Studio.
//
// gcc, however, only allocates 4 bytes for regular locks, even on 64-bit
// Intel archs. It allocates at least 8 bytes for nested locks (more on
// recent versions), but we are bounded by the pointer-sized chunks that
// the Intel compiler allocates.
//

#if KMP_OS_LINUX && defined(KMP_GOMP_COMPAT)
# define OMP_LOCK_T_SIZE        sizeof(int)
# define OMP_NEST_LOCK_T_SIZE   sizeof(void *)
#else
# define OMP_LOCK_T_SIZE        sizeof(void *)
# define OMP_NEST_LOCK_T_SIZE   sizeof(void *)
#endif

//
// The Intel compiler allocates a 32-byte chunk for a critical section.
// Both gcc and Visual Studio only allocate enough space for a pointer.
// Sometimes we know that the space was allocated by the Intel compiler.
//
#define OMP_CRITICAL_SIZE       sizeof(void *)
#define INTEL_CRITICAL_SIZE     32

//
// lock flags
//
typedef kmp_uint32 kmp_lock_flags_t;

#define kmp_lf_critical_section 1

//
// When a lock table is used, the indices are of kmp_lock_index_t
//
typedef kmp_uint32 kmp_lock_index_t;

//
// When memory allocated for locks is on the lock pool (free list),
// it is treated as structs of this type.
//
struct kmp_lock_pool {
    union kmp_user_lock *next;
    kmp_lock_index_t index;
};

typedef struct kmp_lock_pool kmp_lock_pool_t;


extern void __kmp_validate_locks( void );

// ----------------------------------------------------------------------------
//
// There are 5 lock implementations:
//
//       1. Test and set locks.
//       2. Futex locks (Linux* OS on x86 and Intel(R) Many Integrated Core architecture)
//       3. Ticket (Lamport bakery) locks.
//       4. Queuing locks (with separate spin fields).
//       5. DRDPA (Dynamically Reconfigurable Distributed Polling Area) locks
//
// and 3 lock purposes:
//
//       1. Bootstrap locks -- Used for a few locks available at library startup-shutdown time.
//          These do not require non-negative global thread ID's.
//       2. Internal RTL locks -- Used everywhere else in the RTL
//       3. User locks (includes critical sections)
//
// ----------------------------------------------------------------------------

// ============================================================================
// Lock implementations.
// ============================================================================


// ----------------------------------------------------------------------------
// Test and set locks.
//
// Non-nested test and set locks differ from the other lock kinds (except
// futex) in that we use the memory allocated by the compiler for the lock,
// rather than a pointer to it.
//
// On lin_32, lin_32e, and win_32, the space allocated may be as small as 4
// bytes, so we have to use a lock table for nested locks, and avoid accessing
// the depth_locked field for non-nested locks.
//
// Information normally available to the tools, such as lock location,
// lock usage (normal lock vs. critical section), etc. is not available with
// test and set locks.
// ----------------------------------------------------------------------------

struct kmp_base_tas_lock {
    volatile kmp_int32 poll;         // 0 => unlocked
                                     // locked: (gtid+1) of owning thread
    kmp_int32          depth_locked; // depth locked, for nested locks only
};

typedef struct kmp_base_tas_lock kmp_base_tas_lock_t;

union kmp_tas_lock {
    kmp_base_tas_lock_t lk;
    kmp_lock_pool_t pool;   // make certain struct is large enough
    double lk_align;        // use worst case alignment
                            // no cache line padding
};

typedef union kmp_tas_lock kmp_tas_lock_t;

//
// Static initializer for test and set lock variables. Usage:
//    kmp_tas_lock_t xlock = KMP_TAS_LOCK_INITIALIZER( xlock );
//
#define KMP_TAS_LOCK_INITIALIZER( lock ) { { 0, 0 } }

extern int __kmp_acquire_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid );
extern int __kmp_test_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid );
extern int __kmp_release_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid );
extern void __kmp_init_tas_lock( kmp_tas_lock_t *lck );
extern void __kmp_destroy_tas_lock( kmp_tas_lock_t *lck );

extern int __kmp_acquire_nested_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid );
extern int __kmp_test_nested_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid );
extern int __kmp_release_nested_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid );
extern void __kmp_init_nested_tas_lock( kmp_tas_lock_t *lck );
extern void __kmp_destroy_nested_tas_lock( kmp_tas_lock_t *lck );

#define KMP_LOCK_RELEASED       1
#define KMP_LOCK_STILL_HELD     0
#define KMP_LOCK_ACQUIRED_FIRST 1
#define KMP_LOCK_ACQUIRED_NEXT  0
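
//
// Illustrative usage sketch (assumes a valid non-negative gtid from the
// runtime; not part of the API itself):
//
//    kmp_tas_lock_t xlock = KMP_TAS_LOCK_INITIALIZER( xlock );
//    __kmp_acquire_tas_lock( &xlock, gtid );   // spins until acquired
//    /* ... critical section ... */
//    __kmp_release_tas_lock( &xlock, gtid );   // returns KMP_LOCK_RELEASED
//
// For the nested variants, acquire returns KMP_LOCK_ACQUIRED_FIRST on the
// first acquisition and KMP_LOCK_ACQUIRED_NEXT on re-acquisition, and
// release returns KMP_LOCK_STILL_HELD until the depth drops to zero.
//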

#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)

// ----------------------------------------------------------------------------
// futex locks. futex locks are only available on Linux* OS.
//
// Like non-nested test and set locks, non-nested futex locks use the memory
// allocated by the compiler for the lock, rather than a pointer to it.
//
// Information normally available to the tools, such as lock location,
// lock usage (normal lock vs. critical section), etc. is not available with
// futex locks. With non-nested futex locks, the lock owner is not even
// available.
// ----------------------------------------------------------------------------

struct kmp_base_futex_lock {
    volatile kmp_int32 poll;         // 0 => unlocked
                                     // locked: 2*(gtid+1) of owning thread
    kmp_int32          depth_locked; // depth locked, for nested locks only
};

typedef struct kmp_base_futex_lock kmp_base_futex_lock_t;

union kmp_futex_lock {
    kmp_base_futex_lock_t lk;
    kmp_lock_pool_t pool;   // make certain struct is large enough
    double lk_align;        // use worst case alignment
                            // no cache line padding
};

typedef union kmp_futex_lock kmp_futex_lock_t;

//
// Static initializer for futex lock variables. Usage:
//    kmp_futex_lock_t xlock = KMP_FUTEX_LOCK_INITIALIZER( xlock );
//
#define KMP_FUTEX_LOCK_INITIALIZER( lock ) { { 0, 0 } }

extern int __kmp_acquire_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid );
extern int __kmp_test_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid );
extern int __kmp_release_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid );
extern void __kmp_init_futex_lock( kmp_futex_lock_t *lck );
extern void __kmp_destroy_futex_lock( kmp_futex_lock_t *lck );

extern int __kmp_acquire_nested_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid );
extern int __kmp_test_nested_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid );
extern int __kmp_release_nested_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid );
extern void __kmp_init_nested_futex_lock( kmp_futex_lock_t *lck );
extern void __kmp_destroy_nested_futex_lock( kmp_futex_lock_t *lck );

#endif // KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)

// ----------------------------------------------------------------------------
// Ticket locks.
// ----------------------------------------------------------------------------

struct kmp_base_ticket_lock {
    // `initialized' must be the first entry in the lock data structure!
    volatile union kmp_ticket_lock * initialized; // points to the lock union if in initialized state
    ident_t const *     location;     // Source code location of omp_init_lock().
    volatile kmp_uint32 next_ticket;  // ticket number to give to next thread which acquires
    volatile kmp_uint32 now_serving;  // ticket number for thread which holds the lock
    volatile kmp_int32  owner_id;     // (gtid+1) of owning thread, 0 if unlocked
    kmp_int32           depth_locked; // depth locked, for nested locks only
    kmp_lock_flags_t    flags;        // lock specifics, e.g. critical section lock
};

typedef struct kmp_base_ticket_lock kmp_base_ticket_lock_t;

union KMP_ALIGN_CACHE kmp_ticket_lock {
    kmp_base_ticket_lock_t lk; // This field must be first to allow static initializing.
    kmp_lock_pool_t pool;
    double lk_align;           // use worst case alignment
    char lk_pad[ KMP_PAD( kmp_base_ticket_lock_t, CACHE_LINE ) ];
};

typedef union kmp_ticket_lock kmp_ticket_lock_t;

//
// Static initializer for simple ticket lock variables. Usage:
//    kmp_ticket_lock_t xlock = KMP_TICKET_LOCK_INITIALIZER( xlock );
// Note the macro argument: it is needed so that `initialized' can be set
// to point back at the lock variable itself.
//
#define KMP_TICKET_LOCK_INITIALIZER( lock ) { { (kmp_ticket_lock_t *) & (lock), NULL, 0, 0, 0, -1 } }
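
//
// Conceptually (a sketch of the bakery scheme, not the exact implementation):
// each acquiring thread atomically fetches-and-increments next_ticket and
// spins until now_serving equals its ticket; release increments now_serving.
// E.g. threads drawing tickets 0, 1, 2 are served strictly in that order,
// which makes the lock FIFO-fair.
//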
264
Jonathan Peyton0e6d4572015-10-16 16:52:58 +0000265extern int __kmp_acquire_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid );
Jim Cownie5e8470a2013-09-27 10:38:44 +0000266extern int __kmp_test_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid );
267extern int __kmp_test_ticket_lock_with_cheks( kmp_ticket_lock_t *lck, kmp_int32 gtid );
Andrey Churbanov8d09fac2015-04-29 15:52:19 +0000268extern int __kmp_release_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid );
Jim Cownie5e8470a2013-09-27 10:38:44 +0000269extern void __kmp_init_ticket_lock( kmp_ticket_lock_t *lck );
270extern void __kmp_destroy_ticket_lock( kmp_ticket_lock_t *lck );
271
Jonathan Peyton0e6d4572015-10-16 16:52:58 +0000272extern int __kmp_acquire_nested_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid );
Jim Cownie5e8470a2013-09-27 10:38:44 +0000273extern int __kmp_test_nested_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid );
Andrey Churbanov8d09fac2015-04-29 15:52:19 +0000274extern int __kmp_release_nested_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid );
Jim Cownie5e8470a2013-09-27 10:38:44 +0000275extern void __kmp_init_nested_ticket_lock( kmp_ticket_lock_t *lck );
276extern void __kmp_destroy_nested_ticket_lock( kmp_ticket_lock_t *lck );
277
278
// ----------------------------------------------------------------------------
// Queuing locks.
// ----------------------------------------------------------------------------

#if KMP_USE_ADAPTIVE_LOCKS

struct kmp_adaptive_lock_info;

typedef struct kmp_adaptive_lock_info kmp_adaptive_lock_info_t;

#if KMP_DEBUG_ADAPTIVE_LOCKS

struct kmp_adaptive_lock_statistics {
    /* So we can get stats from locks that haven't been destroyed. */
    kmp_adaptive_lock_info_t * next;
    kmp_adaptive_lock_info_t * prev;

    /* Other statistics */
    kmp_uint32 successfulSpeculations;
    kmp_uint32 hardFailedSpeculations;
    kmp_uint32 softFailedSpeculations;
    kmp_uint32 nonSpeculativeAcquires;
    kmp_uint32 nonSpeculativeAcquireAttempts;
    kmp_uint32 lemmingYields;
};

typedef struct kmp_adaptive_lock_statistics kmp_adaptive_lock_statistics_t;

extern void __kmp_print_speculative_stats();
extern void __kmp_init_speculative_stats();

#endif // KMP_DEBUG_ADAPTIVE_LOCKS

struct kmp_adaptive_lock_info
{
    /* Values used for adaptivity.
     * Although these are accessed from multiple threads we don't access them
     * atomically, because if we miss updates it probably doesn't matter much.
     * (It just affects our decision about whether to try speculation on the
     * lock.)
     */
    kmp_uint32 volatile badness;
    kmp_uint32 volatile acquire_attempts;
    /* Parameters of the lock. */
    kmp_uint32 max_badness;
    kmp_uint32 max_soft_retries;

#if KMP_DEBUG_ADAPTIVE_LOCKS
    kmp_adaptive_lock_statistics_t volatile stats;
#endif
};
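
//
// A conceptual summary (an assumption about the policy implemented in
// kmp_lock.cpp, recorded here for orientation): speculative acquisition is
// attempted only while `badness' is low relative to `max_badness'; failed
// speculations raise badness, so a lock that speculates poorly gradually
// falls back to normal queuing acquisition.
//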

#endif // KMP_USE_ADAPTIVE_LOCKS


struct kmp_base_queuing_lock {

    // `initialized' must be the first entry in the lock data structure!
    volatile union kmp_queuing_lock *initialized; // Points to the lock union if in initialized state.

    ident_t const *     location;     // Source code location of omp_init_lock().

    KMP_ALIGN( 8 )                    // tail_id must be 8-byte aligned!

    volatile kmp_int32  tail_id;      // (gtid+1) of thread at tail of wait queue, 0 if empty
                                      // Must be no padding here since head/tail used in 8-byte CAS
    volatile kmp_int32  head_id;      // (gtid+1) of thread at head of wait queue, 0 if empty
                                      // Decl order assumes little endian
    // bakery-style lock
    volatile kmp_uint32 next_ticket;  // ticket number to give to next thread which acquires
    volatile kmp_uint32 now_serving;  // ticket number for thread which holds the lock
    volatile kmp_int32  owner_id;     // (gtid+1) of owning thread, 0 if unlocked
    kmp_int32           depth_locked; // depth locked, for nested locks only

    kmp_lock_flags_t    flags;        // lock specifics, e.g. critical section lock
};

typedef struct kmp_base_queuing_lock kmp_base_queuing_lock_t;

KMP_BUILD_ASSERT( offsetof( kmp_base_queuing_lock_t, tail_id ) % 8 == 0 );
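
//
// The assert above guarantees that the adjacent tail_id/head_id pair can be
// updated together, e.g. (a sketch, assuming the 64-bit primitives from
// kmp_os.h) with a single 8-byte compare-and-swap over
// (volatile kmp_int64 *)&lck->lk.tail_id, which is why no padding may be
// inserted between the two fields.
//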

union KMP_ALIGN_CACHE kmp_queuing_lock {
    kmp_base_queuing_lock_t lk; // This field must be first to allow static initializing.
    kmp_lock_pool_t pool;
    double lk_align;            // use worst case alignment
    char lk_pad[ KMP_PAD( kmp_base_queuing_lock_t, CACHE_LINE ) ];
};

typedef union kmp_queuing_lock kmp_queuing_lock_t;

extern int __kmp_acquire_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid );
extern int __kmp_test_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid );
extern int __kmp_release_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid );
extern void __kmp_init_queuing_lock( kmp_queuing_lock_t *lck );
extern void __kmp_destroy_queuing_lock( kmp_queuing_lock_t *lck );

extern int __kmp_acquire_nested_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid );
extern int __kmp_test_nested_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid );
extern int __kmp_release_nested_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid );
extern void __kmp_init_nested_queuing_lock( kmp_queuing_lock_t *lck );
extern void __kmp_destroy_nested_queuing_lock( kmp_queuing_lock_t *lck );

#if KMP_USE_ADAPTIVE_LOCKS

// ----------------------------------------------------------------------------
// Adaptive locks.
// ----------------------------------------------------------------------------
struct kmp_base_adaptive_lock {
    kmp_base_queuing_lock qlk;
    KMP_ALIGN(CACHE_LINE)
    kmp_adaptive_lock_info_t adaptive; // Information for the speculative adaptive lock
};

typedef struct kmp_base_adaptive_lock kmp_base_adaptive_lock_t;

union KMP_ALIGN_CACHE kmp_adaptive_lock {
    kmp_base_adaptive_lock_t lk;
    kmp_lock_pool_t pool;
    double lk_align;
    char lk_pad[ KMP_PAD(kmp_base_adaptive_lock_t, CACHE_LINE) ];
};
typedef union kmp_adaptive_lock kmp_adaptive_lock_t;

# define GET_QLK_PTR(l) ((kmp_queuing_lock_t *) & (l)->lk.qlk)

#endif // KMP_USE_ADAPTIVE_LOCKS

// ----------------------------------------------------------------------------
// DRDPA ticket locks.
// ----------------------------------------------------------------------------

struct kmp_base_drdpa_lock {
    //
    // All of the fields on the first cache line are only written when
    // initializing or reconfiguring the lock.  These are relatively rare
    // operations, so data from the first cache line will usually stay
    // resident in the cache of each thread trying to acquire the lock.
    //
    // initialized must be the first entry in the lock data structure!
    //
    KMP_ALIGN_CACHE

    volatile union kmp_drdpa_lock * initialized; // points to the lock union if in initialized state
    ident_t const * location;                    // Source code location of omp_init_lock().
    volatile struct kmp_lock_poll {
        kmp_uint64 poll;
    } * volatile polls;
    volatile kmp_uint64 mask;                    // is 2**num_polls-1 for mod op
    kmp_uint64 cleanup_ticket;                   // thread with cleanup ticket
    volatile struct kmp_lock_poll * old_polls;   // will deallocate old_polls
    kmp_uint32 num_polls;                        // must be power of 2

    //
    // next_ticket needs to exist in a separate cache line, as it is
    // invalidated every time a thread takes a new ticket.
    //
    KMP_ALIGN_CACHE

    volatile kmp_uint64 next_ticket;

    //
    // now_serving is used to store our ticket value while we hold the lock.
    // It has a slightly different meaning in the DRDPA ticket locks (where
    // it is written by the acquiring thread) than it does in the simple
    // ticket locks (where it is written by the releasing thread).
    //
    // Since now_serving is only read and written in the critical section,
    // it is non-volatile, but it needs to exist on a separate cache line,
    // as it is invalidated at every lock acquire.
    //
    // Likewise, the vars used for nested locks (owner_id and depth_locked)
    // are only written by the thread owning the lock, so they are put in
    // this cache line.  owner_id is read by other threads, so it must be
    // declared volatile.
    //
    KMP_ALIGN_CACHE

    kmp_uint64 now_serving;       // doesn't have to be volatile
    volatile kmp_uint32 owner_id; // (gtid+1) of owning thread, 0 if unlocked
    kmp_int32 depth_locked;       // depth locked
    kmp_lock_flags_t flags;       // lock specifics, e.g. critical section lock
};

typedef struct kmp_base_drdpa_lock kmp_base_drdpa_lock_t;

union KMP_ALIGN_CACHE kmp_drdpa_lock {
    kmp_base_drdpa_lock_t lk; // This field must be first to allow static initializing.
    kmp_lock_pool_t pool;
    double lk_align;          // use worst case alignment
    char lk_pad[ KMP_PAD( kmp_base_drdpa_lock_t, CACHE_LINE ) ];
};

typedef union kmp_drdpa_lock kmp_drdpa_lock_t;

extern int __kmp_acquire_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid );
extern int __kmp_test_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid );
extern int __kmp_release_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid );
extern void __kmp_init_drdpa_lock( kmp_drdpa_lock_t *lck );
extern void __kmp_destroy_drdpa_lock( kmp_drdpa_lock_t *lck );

extern int __kmp_acquire_nested_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid );
extern int __kmp_test_nested_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid );
extern int __kmp_release_nested_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid );
extern void __kmp_init_nested_drdpa_lock( kmp_drdpa_lock_t *lck );
extern void __kmp_destroy_nested_drdpa_lock( kmp_drdpa_lock_t *lck );

// ============================================================================
// Lock purposes.
// ============================================================================


// ----------------------------------------------------------------------------
// Bootstrap locks.
// ----------------------------------------------------------------------------

// Bootstrap locks -- very few locks used at library initialization time.
// Bootstrap locks are currently implemented as ticket locks.
// They could also be implemented as test and set locks, but cannot be
// implemented with other lock kinds as they require gtids, which are not
// available at initialization time.

typedef kmp_ticket_lock_t kmp_bootstrap_lock_t;

#define KMP_BOOTSTRAP_LOCK_INITIALIZER( lock ) KMP_TICKET_LOCK_INITIALIZER( (lock) )

static inline int
__kmp_acquire_bootstrap_lock( kmp_bootstrap_lock_t *lck )
{
    return __kmp_acquire_ticket_lock( lck, KMP_GTID_DNE );
}

static inline int
__kmp_test_bootstrap_lock( kmp_bootstrap_lock_t *lck )
{
    return __kmp_test_ticket_lock( lck, KMP_GTID_DNE );
}

static inline void
__kmp_release_bootstrap_lock( kmp_bootstrap_lock_t *lck )
{
    __kmp_release_ticket_lock( lck, KMP_GTID_DNE );
}

static inline void
__kmp_init_bootstrap_lock( kmp_bootstrap_lock_t *lck )
{
    __kmp_init_ticket_lock( lck );
}

static inline void
__kmp_destroy_bootstrap_lock( kmp_bootstrap_lock_t *lck )
{
    __kmp_destroy_ticket_lock( lck );
}
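
//
// Illustrative usage (a sketch: bootstrap locks pass KMP_GTID_DNE, so they
// work before gtids exist):
//
//    static kmp_bootstrap_lock_t init_lock = KMP_BOOTSTRAP_LOCK_INITIALIZER( init_lock );
//    __kmp_acquire_bootstrap_lock( &init_lock );
//    /* ... startup-time critical section ... */
//    __kmp_release_bootstrap_lock( &init_lock );
//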


// ----------------------------------------------------------------------------
// Internal RTL locks.
// ----------------------------------------------------------------------------

//
// Internal RTL locks are also implemented as ticket locks, for now.
//
// FIXME - We should go through and figure out which lock kind works best for
// each internal lock, and use the type declaration and function calls for
// that explicit lock kind (and get rid of this section).
//

typedef kmp_ticket_lock_t kmp_lock_t;

static inline int
__kmp_acquire_lock( kmp_lock_t *lck, kmp_int32 gtid )
{
    return __kmp_acquire_ticket_lock( lck, gtid );
}

static inline int
__kmp_test_lock( kmp_lock_t *lck, kmp_int32 gtid )
{
    return __kmp_test_ticket_lock( lck, gtid );
}

static inline void
__kmp_release_lock( kmp_lock_t *lck, kmp_int32 gtid )
{
    __kmp_release_ticket_lock( lck, gtid );
}

static inline void
__kmp_init_lock( kmp_lock_t *lck )
{
    __kmp_init_ticket_lock( lck );
}

static inline void
__kmp_destroy_lock( kmp_lock_t *lck )
{
    __kmp_destroy_ticket_lock( lck );
}

// ----------------------------------------------------------------------------
// User locks.
// ----------------------------------------------------------------------------

//
// Do not allocate objects of type union kmp_user_lock!!!
// This will waste space unless __kmp_user_lock_kind == lk_drdpa.
// Instead, check the value of __kmp_user_lock_kind and allocate objects of
// the type of the appropriate union member, and cast their addresses to
// kmp_user_lock_p.
//
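//
// (In practice, allocation goes through __kmp_user_lock_allocate(), declared
// below, which sizes the allocation by __kmp_user_lock_size for exactly this
// reason.)
//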

enum kmp_lock_kind {
    lk_default = 0,
    lk_tas,
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
    lk_futex,
#endif
    lk_ticket,
    lk_queuing,
    lk_drdpa,
#if KMP_USE_ADAPTIVE_LOCKS
    lk_adaptive
#endif // KMP_USE_ADAPTIVE_LOCKS
};

typedef enum kmp_lock_kind kmp_lock_kind_t;

extern kmp_lock_kind_t __kmp_user_lock_kind;

union kmp_user_lock {
    kmp_tas_lock_t     tas;
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
    kmp_futex_lock_t   futex;
#endif
    kmp_ticket_lock_t  ticket;
    kmp_queuing_lock_t queuing;
    kmp_drdpa_lock_t   drdpa;
#if KMP_USE_ADAPTIVE_LOCKS
    kmp_adaptive_lock_t adaptive;
#endif // KMP_USE_ADAPTIVE_LOCKS
    kmp_lock_pool_t    pool;
};

typedef union kmp_user_lock *kmp_user_lock_p;

#if ! KMP_USE_DYNAMIC_LOCK

extern size_t __kmp_base_user_lock_size;
extern size_t __kmp_user_lock_size;

extern kmp_int32 ( *__kmp_get_user_lock_owner_ )( kmp_user_lock_p lck );

static inline kmp_int32
__kmp_get_user_lock_owner( kmp_user_lock_p lck )
{
    KMP_DEBUG_ASSERT( __kmp_get_user_lock_owner_ != NULL );
    return ( *__kmp_get_user_lock_owner_ )( lck );
}

extern int ( *__kmp_acquire_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid );

#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)

#define __kmp_acquire_user_lock_with_checks(lck,gtid)                                               \
    if (__kmp_user_lock_kind == lk_tas) {                                                           \
        if ( __kmp_env_consistency_check ) {                                                        \
            char const * const func = "omp_set_lock";                                               \
            if ( ( sizeof ( kmp_tas_lock_t ) <= OMP_LOCK_T_SIZE )                                   \
                && lck->tas.lk.depth_locked != -1 ) {                                               \
                KMP_FATAL( LockNestableUsedAsSimple, func );                                        \
            }                                                                                       \
            if ( ( gtid >= 0 ) && ( lck->tas.lk.poll - 1 == gtid ) ) {                              \
                KMP_FATAL( LockIsAlreadyOwned, func );                                              \
            }                                                                                       \
        }                                                                                           \
        if ( ( lck->tas.lk.poll != 0 ) ||                                                           \
          ( ! KMP_COMPARE_AND_STORE_ACQ32( &(lck->tas.lk.poll), 0, gtid + 1 ) ) ) {                 \
            kmp_uint32 spins;                                                                       \
            KMP_FSYNC_PREPARE( lck );                                                               \
            KMP_INIT_YIELD( spins );                                                                \
            if ( TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc) ) {         \
                KMP_YIELD( TRUE );                                                                  \
            } else {                                                                                \
                KMP_YIELD_SPIN( spins );                                                            \
            }                                                                                       \
            while ( ( lck->tas.lk.poll != 0 ) ||                                                    \
              ( ! KMP_COMPARE_AND_STORE_ACQ32( &(lck->tas.lk.poll), 0, gtid + 1 ) ) ) {             \
                if ( TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc) ) {     \
                    KMP_YIELD( TRUE );                                                              \
                } else {                                                                            \
                    KMP_YIELD_SPIN( spins );                                                        \
                }                                                                                   \
            }                                                                                       \
        }                                                                                           \
        KMP_FSYNC_ACQUIRED( lck );                                                                  \
    } else {                                                                                        \
        KMP_DEBUG_ASSERT( __kmp_acquire_user_lock_with_checks_ != NULL );                           \
        ( *__kmp_acquire_user_lock_with_checks_ )( lck, gtid );                                     \
    }

#else
static inline int
__kmp_acquire_user_lock_with_checks( kmp_user_lock_p lck, kmp_int32 gtid )
{
    KMP_DEBUG_ASSERT( __kmp_acquire_user_lock_with_checks_ != NULL );
    return ( *__kmp_acquire_user_lock_with_checks_ )( lck, gtid );
}
#endif

extern int ( *__kmp_test_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid );

#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)

#include "kmp_i18n.h"                   /* AC: KMP_FATAL definition */
extern int __kmp_env_consistency_check; /* AC: copy from kmp.h here */
static inline int
__kmp_test_user_lock_with_checks( kmp_user_lock_p lck, kmp_int32 gtid )
{
    if ( __kmp_user_lock_kind == lk_tas ) {
        if ( __kmp_env_consistency_check ) {
            char const * const func = "omp_test_lock";
            if ( ( sizeof ( kmp_tas_lock_t ) <= OMP_LOCK_T_SIZE )
                && lck->tas.lk.depth_locked != -1 ) {
                KMP_FATAL( LockNestableUsedAsSimple, func );
            }
        }
        return ( ( lck->tas.lk.poll == 0 ) &&
          KMP_COMPARE_AND_STORE_ACQ32( &(lck->tas.lk.poll), 0, gtid + 1 ) );
    } else {
        KMP_DEBUG_ASSERT( __kmp_test_user_lock_with_checks_ != NULL );
        return ( *__kmp_test_user_lock_with_checks_ )( lck, gtid );
    }
}
#else
static inline int
__kmp_test_user_lock_with_checks( kmp_user_lock_p lck, kmp_int32 gtid )
{
    KMP_DEBUG_ASSERT( __kmp_test_user_lock_with_checks_ != NULL );
    return ( *__kmp_test_user_lock_with_checks_ )( lck, gtid );
}
#endif

extern int ( *__kmp_release_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid );

static inline void
__kmp_release_user_lock_with_checks( kmp_user_lock_p lck, kmp_int32 gtid )
{
    KMP_DEBUG_ASSERT( __kmp_release_user_lock_with_checks_ != NULL );
    ( *__kmp_release_user_lock_with_checks_ ) ( lck, gtid );
}

extern void ( *__kmp_init_user_lock_with_checks_ )( kmp_user_lock_p lck );

static inline void
__kmp_init_user_lock_with_checks( kmp_user_lock_p lck )
{
    KMP_DEBUG_ASSERT( __kmp_init_user_lock_with_checks_ != NULL );
    ( *__kmp_init_user_lock_with_checks_ )( lck );
}

//
// We need a non-checking version of destroy lock for when the RTL is
// doing the cleanup as it can't always tell if the lock is nested or not.
//
extern void ( *__kmp_destroy_user_lock_ )( kmp_user_lock_p lck );

static inline void
__kmp_destroy_user_lock( kmp_user_lock_p lck )
{
    KMP_DEBUG_ASSERT( __kmp_destroy_user_lock_ != NULL );
    ( *__kmp_destroy_user_lock_ )( lck );
}

extern void ( *__kmp_destroy_user_lock_with_checks_ )( kmp_user_lock_p lck );

static inline void
__kmp_destroy_user_lock_with_checks( kmp_user_lock_p lck )
{
    KMP_DEBUG_ASSERT( __kmp_destroy_user_lock_with_checks_ != NULL );
    ( *__kmp_destroy_user_lock_with_checks_ )( lck );
}

extern int ( *__kmp_acquire_nested_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid );

#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64)

#define __kmp_acquire_nested_user_lock_with_checks(lck,gtid,depth)                                  \
    if (__kmp_user_lock_kind == lk_tas) {                                                           \
        if ( __kmp_env_consistency_check ) {                                                        \
            char const * const func = "omp_set_nest_lock";                                          \
            if ( ( sizeof ( kmp_tas_lock_t ) <= OMP_NEST_LOCK_T_SIZE )                              \
                && lck->tas.lk.depth_locked == -1 ) {                                               \
                KMP_FATAL( LockSimpleUsedAsNestable, func );                                        \
            }                                                                                       \
        }                                                                                           \
        if ( lck->tas.lk.poll - 1 == gtid ) {                                                       \
            lck->tas.lk.depth_locked += 1;                                                          \
            *depth = KMP_LOCK_ACQUIRED_NEXT;                                                        \
        } else {                                                                                    \
            if ( ( lck->tas.lk.poll != 0 ) ||                                                       \
              ( ! KMP_COMPARE_AND_STORE_ACQ32( &(lck->tas.lk.poll), 0, gtid + 1 ) ) ) {             \
                kmp_uint32 spins;                                                                   \
                KMP_FSYNC_PREPARE( lck );                                                           \
                KMP_INIT_YIELD( spins );                                                            \
                if ( TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc) ) {     \
                    KMP_YIELD( TRUE );                                                              \
                } else {                                                                            \
                    KMP_YIELD_SPIN( spins );                                                        \
                }                                                                                   \
                while ( ( lck->tas.lk.poll != 0 ) ||                                                \
                  ( ! KMP_COMPARE_AND_STORE_ACQ32( &(lck->tas.lk.poll), 0, gtid + 1 ) ) ) {         \
                    if ( TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc) ) { \
                        KMP_YIELD( TRUE );                                                          \
                    } else {                                                                        \
                        KMP_YIELD_SPIN( spins );                                                    \
                    }                                                                               \
                }                                                                                   \
            }                                                                                       \
            lck->tas.lk.depth_locked = 1;                                                           \
            *depth = KMP_LOCK_ACQUIRED_FIRST;                                                       \
        }                                                                                           \
        KMP_FSYNC_ACQUIRED( lck );                                                                  \
    } else {                                                                                        \
        KMP_DEBUG_ASSERT( __kmp_acquire_nested_user_lock_with_checks_ != NULL );                    \
        *depth = ( *__kmp_acquire_nested_user_lock_with_checks_ )( lck, gtid );                     \
    }

#else
static inline void
__kmp_acquire_nested_user_lock_with_checks( kmp_user_lock_p lck, kmp_int32 gtid, int* depth )
{
    KMP_DEBUG_ASSERT( __kmp_acquire_nested_user_lock_with_checks_ != NULL );
    *depth = ( *__kmp_acquire_nested_user_lock_with_checks_ )( lck, gtid );
}
#endif

extern int ( *__kmp_test_nested_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid );

#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64)
static inline int
__kmp_test_nested_user_lock_with_checks( kmp_user_lock_p lck, kmp_int32 gtid )
{
    if ( __kmp_user_lock_kind == lk_tas ) {
        int retval;
        if ( __kmp_env_consistency_check ) {
            char const * const func = "omp_test_nest_lock";
            if ( ( sizeof ( kmp_tas_lock_t ) <= OMP_NEST_LOCK_T_SIZE )
                && lck->tas.lk.depth_locked == -1 ) {
                KMP_FATAL( LockSimpleUsedAsNestable, func );
            }
        }
        KMP_DEBUG_ASSERT( gtid >= 0 );
        if ( lck->tas.lk.poll - 1 == gtid ) {  /* __kmp_get_tas_lock_owner( lck ) == gtid */
            return ++lck->tas.lk.depth_locked; /* same owner, depth increased */
        }
        retval = ( ( lck->tas.lk.poll == 0 ) &&
          KMP_COMPARE_AND_STORE_ACQ32( &(lck->tas.lk.poll), 0, gtid + 1 ) );
        if ( retval ) {
            KMP_MB();
            lck->tas.lk.depth_locked = 1;
        }
        return retval;
    } else {
        KMP_DEBUG_ASSERT( __kmp_test_nested_user_lock_with_checks_ != NULL );
        return ( *__kmp_test_nested_user_lock_with_checks_ )( lck, gtid );
    }
}
#else
static inline int
__kmp_test_nested_user_lock_with_checks( kmp_user_lock_p lck, kmp_int32 gtid )
{
    KMP_DEBUG_ASSERT( __kmp_test_nested_user_lock_with_checks_ != NULL );
    return ( *__kmp_test_nested_user_lock_with_checks_ )( lck, gtid );
}
#endif

extern int ( *__kmp_release_nested_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid );

static inline int
__kmp_release_nested_user_lock_with_checks( kmp_user_lock_p lck, kmp_int32 gtid )
{
    KMP_DEBUG_ASSERT( __kmp_release_nested_user_lock_with_checks_ != NULL );
    return ( *__kmp_release_nested_user_lock_with_checks_ )( lck, gtid );
}

extern void ( *__kmp_init_nested_user_lock_with_checks_ )( kmp_user_lock_p lck );

static inline void
__kmp_init_nested_user_lock_with_checks( kmp_user_lock_p lck )
{
    KMP_DEBUG_ASSERT( __kmp_init_nested_user_lock_with_checks_ != NULL );
    ( *__kmp_init_nested_user_lock_with_checks_ )( lck );
}

extern void ( *__kmp_destroy_nested_user_lock_with_checks_ )( kmp_user_lock_p lck );

static inline void
__kmp_destroy_nested_user_lock_with_checks( kmp_user_lock_p lck )
{
    KMP_DEBUG_ASSERT( __kmp_destroy_nested_user_lock_with_checks_ != NULL );
    ( *__kmp_destroy_nested_user_lock_with_checks_ )( lck );
}

//
// user lock functions which do not necessarily exist for all lock kinds.
//
// The "set" functions usually have wrapper routines that check for a NULL set
// function pointer and call it if non-NULL.
//
// In some cases, it makes sense to have a "get" wrapper function check for a
// NULL get function pointer and return NULL / invalid value / error code if
// the function pointer is NULL.
//
// In other cases, the calling code really should differentiate between an
// unimplemented function and one that is implemented but returning NULL /
// invalid value.  If this is the case, no get function wrapper exists.
//

extern int ( *__kmp_is_user_lock_initialized_ )( kmp_user_lock_p lck );

// no set function; fields set during local allocation

extern const ident_t * ( *__kmp_get_user_lock_location_ )( kmp_user_lock_p lck );

static inline const ident_t *
__kmp_get_user_lock_location( kmp_user_lock_p lck )
{
    if ( __kmp_get_user_lock_location_ != NULL ) {
        return ( *__kmp_get_user_lock_location_ )( lck );
    }
    else {
        return NULL;
    }
}

extern void ( *__kmp_set_user_lock_location_ )( kmp_user_lock_p lck, const ident_t *loc );

static inline void
__kmp_set_user_lock_location( kmp_user_lock_p lck, const ident_t *loc )
{
    if ( __kmp_set_user_lock_location_ != NULL ) {
        ( *__kmp_set_user_lock_location_ )( lck, loc );
    }
}

extern kmp_lock_flags_t ( *__kmp_get_user_lock_flags_ )( kmp_user_lock_p lck );

extern void ( *__kmp_set_user_lock_flags_ )( kmp_user_lock_p lck, kmp_lock_flags_t flags );

static inline void
__kmp_set_user_lock_flags( kmp_user_lock_p lck, kmp_lock_flags_t flags )
{
    if ( __kmp_set_user_lock_flags_ != NULL ) {
        ( *__kmp_set_user_lock_flags_ )( lck, flags );
    }
}

//
// The function which sets up all of the vtbl pointers for kmp_user_lock_t.
//
extern void __kmp_set_user_lock_vptrs( kmp_lock_kind_t user_lock_kind );

//
// Macros for binding user lock functions.
//
#define KMP_BIND_USER_LOCK_TEMPLATE(nest, kind, suffix) {                                    \
    __kmp_acquire##nest##user_lock_with_checks_ = ( int (*)( kmp_user_lock_p, kmp_int32 ) )  \
                                                  __kmp_acquire##nest##kind##_##suffix;      \
    __kmp_release##nest##user_lock_with_checks_ = ( int (*)( kmp_user_lock_p, kmp_int32 ) )  \
                                                  __kmp_release##nest##kind##_##suffix;      \
    __kmp_test##nest##user_lock_with_checks_    = ( int (*)( kmp_user_lock_p, kmp_int32 ) )  \
                                                  __kmp_test##nest##kind##_##suffix;         \
    __kmp_init##nest##user_lock_with_checks_    = ( void (*)( kmp_user_lock_p ) )            \
                                                  __kmp_init##nest##kind##_##suffix;         \
    __kmp_destroy##nest##user_lock_with_checks_ = ( void (*)( kmp_user_lock_p ) )            \
                                                  __kmp_destroy##nest##kind##_##suffix;      \
}

#define KMP_BIND_USER_LOCK(kind)                    KMP_BIND_USER_LOCK_TEMPLATE(_, kind, lock)
#define KMP_BIND_USER_LOCK_WITH_CHECKS(kind)        KMP_BIND_USER_LOCK_TEMPLATE(_, kind, lock_with_checks)
#define KMP_BIND_NESTED_USER_LOCK(kind)             KMP_BIND_USER_LOCK_TEMPLATE(_nested_, kind, lock)
#define KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(kind) KMP_BIND_USER_LOCK_TEMPLATE(_nested_, kind, lock_with_checks)
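
//
// For example, KMP_BIND_USER_LOCK(ticket) expands to assignments that point
// __kmp_acquire_user_lock_with_checks_, __kmp_release_user_lock_with_checks_,
// etc. at __kmp_acquire_ticket_lock, __kmp_release_ticket_lock, etc., each
// cast to the generic kmp_user_lock_p signature.
//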

// ----------------------------------------------------------------------------
// User lock table & lock allocation
// ----------------------------------------------------------------------------

/*
    On 64-bit Linux* OS (and OS X*) the GNU compiler allocates only 4 bytes of
    memory for the lock variable, which is not enough to store a pointer, so
    we have to use lock indexes instead of pointers, and maintain a lock table
    to map indexes to pointers.

    Note: The first element of the table is not a pointer to a lock!  It is a
    pointer to the previously allocated table (or NULL if it is the first
    table).

    Usage:

        if ( OMP_LOCK_T_SIZE < sizeof( <lock> ) ) { // or OMP_NEST_LOCK_T_SIZE
            Lock table is fully utilized.  User locks are indexes, so the
            table is used on every user lock operation.
            Note: it may be the case (lin_32) that we don't need to use a lock
            table for regular locks, but do need the table for nested locks.
        }
        else {
            Lock table is initialized but not actually used.
        }
*/

struct kmp_lock_table {
    kmp_lock_index_t  used;      // Number of used elements
    kmp_lock_index_t  allocated; // Number of allocated elements
    kmp_user_lock_p * table;     // Lock table.
};

typedef struct kmp_lock_table kmp_lock_table_t;

extern kmp_lock_table_t __kmp_user_lock_table;
extern kmp_user_lock_p __kmp_lock_pool;
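
//
// Illustrative lookup (a sketch of the index scheme described above, assuming
// the index was stored in the user's lock variable): the lock pointer is
// recovered as __kmp_user_lock_table.table[ index ], with element 0 reserved
// for the pointer to the previously allocated table.
//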

struct kmp_block_of_locks {
    struct kmp_block_of_locks * next_block;
    void *                      locks;
};

typedef struct kmp_block_of_locks kmp_block_of_locks_t;

extern kmp_block_of_locks_t *__kmp_lock_blocks;
extern int __kmp_num_locks_in_block;

extern kmp_user_lock_p __kmp_user_lock_allocate( void **user_lock, kmp_int32 gtid, kmp_lock_flags_t flags );
extern void __kmp_user_lock_free( void **user_lock, kmp_int32 gtid, kmp_user_lock_p lck );
extern kmp_user_lock_p __kmp_lookup_user_lock( void **user_lock, char const *func );
extern void __kmp_cleanup_user_locks();

#define KMP_CHECK_USER_LOCK_INIT()                                  \
        {                                                           \
            if ( ! TCR_4( __kmp_init_user_locks ) ) {               \
                __kmp_acquire_bootstrap_lock( &__kmp_initz_lock );  \
                if ( ! TCR_4( __kmp_init_user_locks ) ) {           \
                    TCW_4( __kmp_init_user_locks, TRUE );           \
                }                                                   \
                __kmp_release_bootstrap_lock( &__kmp_initz_lock );  \
            }                                                       \
        }

#endif // ! KMP_USE_DYNAMIC_LOCK

#undef KMP_PAD
#undef KMP_GTID_DNE

#if KMP_USE_DYNAMIC_LOCK

#define DYNA_HAS_FUTEX      (KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM))
#define DYNA_HAS_HLE        (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_MIC)
#define DYNA_USE_FAST_FUTEX 0 && DYNA_HAS_FUTEX
#define DYNA_USE_FAST_TAS   1 && DYNA_HAS_FUTEX

// List of lock definitions; all nested locks are indirect lock types.
// The hle lock is an xchg lock prefixed with XACQUIRE/XRELEASE.
#if DYNA_HAS_FUTEX
# if DYNA_HAS_HLE
#  define FOREACH_D_LOCK(m, a) m(tas, a) m(futex, a) m(hle, a)
#  define DYNA_LAST_D_LOCK_SEQ lockseq_hle
# else
#  define FOREACH_D_LOCK(m, a) m(tas, a) m(futex, a)
#  define DYNA_LAST_D_LOCK_SEQ lockseq_futex
# endif // DYNA_HAS_HLE
# if KMP_USE_ADAPTIVE_LOCKS
#  define FOREACH_I_LOCK(m, a) m(ticket, a) m(queuing, a) m(adaptive, a) m(drdpa, a) \
                               m(nested_tas, a) m(nested_futex, a) m(nested_ticket, a) \
                               m(nested_queuing, a) m(nested_drdpa, a)
# else
#  define FOREACH_I_LOCK(m, a) m(ticket, a) m(queuing, a) m(drdpa, a) \
                               m(nested_tas, a) m(nested_futex, a) m(nested_ticket, a) \
                               m(nested_queuing, a) m(nested_drdpa, a)
# endif // KMP_USE_ADAPTIVE_LOCKS
#else
# if DYNA_HAS_HLE
#  define FOREACH_D_LOCK(m, a) m(tas, a) m(hle, a)
#  define DYNA_LAST_D_LOCK_SEQ lockseq_hle
# else
#  define FOREACH_D_LOCK(m, a) m(tas, a)
#  define DYNA_LAST_D_LOCK_SEQ lockseq_tas
# endif // DYNA_HAS_HLE
# if KMP_USE_ADAPTIVE_LOCKS
#  define FOREACH_I_LOCK(m, a) m(ticket, a) m(queuing, a) m(adaptive, a) m(drdpa, a) \
                               m(nested_tas, a) m(nested_ticket, a) \
                               m(nested_queuing, a) m(nested_drdpa, a)
# else
#  define FOREACH_I_LOCK(m, a) m(ticket, a) m(queuing, a) m(drdpa, a) \
                               m(nested_tas, a) m(nested_ticket, a) \
                               m(nested_queuing, a) m(nested_drdpa, a)
# endif // KMP_USE_ADAPTIVE_LOCKS
#endif // DYNA_HAS_FUTEX

// Information used in dynamic dispatch
#define DYNA_LOCK_VALUE_SHIFT 8
#define DYNA_LOCK_TYPE_MASK   ((1<<DYNA_LOCK_VALUE_SHIFT)-1)
#define DYNA_NUM_D_LOCKS      DYNA_LAST_D_LOCK_SEQ
#define DYNA_NUM_I_LOCKS      (locktag_nested_drdpa+1)

// Base type for dynamic locks.
typedef kmp_uint32 kmp_dyna_lock_t;

// Lock sequence that enumerates all lock kinds.
// Always make this enumeration consistent with kmp_lockseq_t in the include directory.
typedef enum {
    lockseq_indirect = 0,
#define expand_seq(l,a) lockseq_##l,
    FOREACH_D_LOCK(expand_seq, 0)
    FOREACH_I_LOCK(expand_seq, 0)
#undef expand_seq
} kmp_dyna_lockseq_t;
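
// For example (illustrative), when futex and hle are both available
// FOREACH_D_LOCK(expand_seq, 0) expands to
// "lockseq_tas, lockseq_futex, lockseq_hle," so the direct lock sequences
// follow lockseq_indirect in declaration order, and the indirect sequences
// (lockseq_ticket, ...) follow them.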

// Enumerates indirect lock tags.
typedef enum {
#define expand_tag(l,a) locktag_##l,
    FOREACH_I_LOCK(expand_tag, 0)
#undef expand_tag
} kmp_indirect_locktag_t;

// Utility macros that extract information from lock sequences.
#define DYNA_IS_D_LOCK(seq) (seq >= lockseq_tas && seq <= DYNA_LAST_D_LOCK_SEQ)
#define DYNA_IS_I_LOCK(seq) (seq >= lockseq_ticket && seq <= lockseq_nested_drdpa)
#define DYNA_GET_I_TAG(seq) (kmp_indirect_locktag_t)(seq - lockseq_ticket)
#define DYNA_GET_D_TAG(seq) (seq<<1 | 1)
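
// Note (an observation about the encoding): DYNA_GET_D_TAG makes every direct
// lock tag odd, while indirect lock words store an index shifted left by one
// (low bit clear), so the low bit of a lock word distinguishes the two kinds;
// DYNA_EXTRACT_D_TAG below relies on this to yield 0 for indirect locks.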

// Enumerates direct lock tags starting from indirect tag.
typedef enum {
#define expand_tag(l,a) locktag_##l = DYNA_GET_D_TAG(lockseq_##l),
    FOREACH_D_LOCK(expand_tag, 0)
#undef expand_tag
} kmp_direct_locktag_t;

// Indirect lock type
typedef struct {
    kmp_user_lock_p lock;
    kmp_indirect_locktag_t type;
} kmp_indirect_lock_t;

// Function tables for direct locks. Set/unset/test differentiate functions
// with/without consistency checking.
extern void (*__kmp_direct_init_ops[])(kmp_dyna_lock_t *, kmp_dyna_lockseq_t);
extern void (*__kmp_direct_destroy_ops[])(kmp_dyna_lock_t *);
extern void (*(*__kmp_direct_set_ops))(kmp_dyna_lock_t *, kmp_int32);
extern void (*(*__kmp_direct_unset_ops))(kmp_dyna_lock_t *, kmp_int32);
extern int  (*(*__kmp_direct_test_ops))(kmp_dyna_lock_t *, kmp_int32);

// Function tables for indirect locks. Set/unset/test differentiate functions
// with/without consistency checking.
extern void (*__kmp_indirect_init_ops[])(kmp_user_lock_p);
extern void (*__kmp_indirect_destroy_ops[])(kmp_user_lock_p);
extern void (*(*__kmp_indirect_set_ops))(kmp_user_lock_p, kmp_int32);
extern void (*(*__kmp_indirect_unset_ops))(kmp_user_lock_p, kmp_int32);
extern int  (*(*__kmp_indirect_test_ops))(kmp_user_lock_p, kmp_int32);

// Extracts direct lock tag from a user lock pointer
#define DYNA_EXTRACT_D_TAG(l) (*((kmp_dyna_lock_t *)(l)) & DYNA_LOCK_TYPE_MASK & -(*((kmp_dyna_lock_t *)(l)) & 1))

// Extracts indirect lock index from a user lock pointer
#define DYNA_EXTRACT_I_INDEX(l) (*(kmp_lock_index_t *)(l) >> 1)

// Returns function pointer to the direct lock function with l (kmp_dyna_lock_t *) and op (operation type).
#define DYNA_D_LOCK_FUNC(l, op) __kmp_direct_##op##_ops[DYNA_EXTRACT_D_TAG(l)]

// Returns function pointer to the indirect lock function with l (kmp_indirect_lock_t *) and op (operation type).
#define DYNA_I_LOCK_FUNC(l, op) __kmp_indirect_##op##_ops[((kmp_indirect_lock_t *)(l))->type]
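
// Illustrative dispatch (a sketch; assumes l points to an initialized direct
// lock word and gtid is valid):
//
//    DYNA_D_LOCK_FUNC(l, set)(l, gtid);    // acquire via __kmp_direct_set_ops
//    DYNA_D_LOCK_FUNC(l, unset)(l, gtid);  // release via __kmp_direct_unset_ops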

// Initializes a direct lock with the given lock pointer and lock sequence.
#define DYNA_INIT_D_LOCK(l, seq) __kmp_direct_init_ops[DYNA_GET_D_TAG(seq)]((kmp_dyna_lock_t *)l, seq)

// Initializes an indirect lock with the given lock pointer and lock sequence.
#define DYNA_INIT_I_LOCK(l, seq) __kmp_direct_init_ops[0]((kmp_dyna_lock_t *)(l), seq)

// Returns "free" lock value for the given lock type.
#define DYNA_LOCK_FREE(type) (locktag_##type)

// Returns "busy" lock value for the given lock type.
#define DYNA_LOCK_BUSY(v, type) ((v)<<DYNA_LOCK_VALUE_SHIFT | locktag_##type)

// Returns lock value after removing (shifting) lock tag.
#define DYNA_LOCK_STRIP(v) ((v)>>DYNA_LOCK_VALUE_SHIFT)
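
// Worked example of the encoding (illustrative): a direct lock word keeps its
// tag in the low DYNA_LOCK_VALUE_SHIFT (= 8) bits and the payload above them.
// For a tas lock held by gtid, DYNA_LOCK_BUSY(gtid+1, tas) yields
// ((gtid+1) << 8) | locktag_tas, DYNA_LOCK_STRIP recovers gtid+1, and
// DYNA_LOCK_FREE(tas) is just locktag_tas (zero payload => unlocked).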

// Updates __kmp_user_lock_seq with the given lock type.
#define DYNA_STORE_LOCK_SEQ(type) (__kmp_user_lock_seq = lockseq_##type)

// Internal entries for hinted lock initializers.
extern void __kmp_init_lock_hinted(void **, int);
extern void __kmp_init_nest_lock_hinted(void **, int);

// Initializes global states and data structures for managing dynamic user locks.
extern void __kmp_init_dynamic_user_locks();

// Allocates and returns an indirect lock with the given indirect lock tag.
extern kmp_indirect_lock_t * __kmp_allocate_indirect_lock(void **, kmp_int32, kmp_indirect_locktag_t);

// Cleans up global states and data structures for managing dynamic user locks.
extern void __kmp_cleanup_indirect_user_locks();

// Default user lock sequence when not using hinted locks.
extern kmp_dyna_lockseq_t __kmp_user_lock_seq;

// Jump table for "set lock location", available only for indirect locks.
extern void (*__kmp_indirect_set_location[DYNA_NUM_I_LOCKS])(kmp_user_lock_p, const ident_t *);
#define DYNA_SET_I_LOCK_LOCATION(lck, loc) {                        \
    if (__kmp_indirect_set_location[(lck)->type] != NULL)           \
        __kmp_indirect_set_location[(lck)->type]((lck)->lock, loc); \
}

// Jump table for "set lock flags", available only for indirect locks.
extern void (*__kmp_indirect_set_flags[DYNA_NUM_I_LOCKS])(kmp_user_lock_p, kmp_lock_flags_t);
#define DYNA_SET_I_LOCK_FLAGS(lck, flag) {                        \
    if (__kmp_indirect_set_flags[(lck)->type] != NULL)            \
        __kmp_indirect_set_flags[(lck)->type]((lck)->lock, flag); \
}

// Jump table for "get lock location", available only for indirect locks.
extern const ident_t * (*__kmp_indirect_get_location[DYNA_NUM_I_LOCKS])(kmp_user_lock_p);
#define DYNA_GET_I_LOCK_LOCATION(lck) ( __kmp_indirect_get_location[(lck)->type] != NULL        \
                                        ? __kmp_indirect_get_location[(lck)->type]((lck)->lock) \
                                        : NULL )

// Jump table for "get lock flags", available only for indirect locks.
extern kmp_lock_flags_t (*__kmp_indirect_get_flags[DYNA_NUM_I_LOCKS])(kmp_user_lock_p);
#define DYNA_GET_I_LOCK_FLAGS(lck) ( __kmp_indirect_get_flags[(lck)->type] != NULL        \
                                     ? __kmp_indirect_get_flags[(lck)->type]((lck)->lock) \
                                     : 0 )

//
// Lock table for indirect locks.
//
// A simple linear structure is used to keep pointers to allocated indirect locks.
extern kmp_indirect_lock_t **__kmp_indirect_lock_table;
// Current size of the lock table; it may increase but never shrinks.
extern kmp_lock_index_t __kmp_indirect_lock_table_size;
// Next index to be used for a new indirect lock (= number of indirect locks allocated).
extern kmp_lock_index_t __kmp_indirect_lock_table_next;
// Number of locks in a lock block, which is fixed to "1" now.
// TODO: No lock block implementation now. If we do support it, we need to manage
// lock block data structures for each indirect lock type.
extern int __kmp_num_locks_in_block;

// Fast lock table lookup without consistency checking
#define DYNA_LOOKUP_I_LOCK(l) ( (OMP_LOCK_T_SIZE < sizeof(void *))                   \
                                ? __kmp_indirect_lock_table[DYNA_EXTRACT_I_INDEX(l)] \
                                : *((kmp_indirect_lock_t **)l) )

// Used once in kmp_error.c
extern kmp_int32 __kmp_get_user_lock_owner(kmp_user_lock_p, kmp_uint32);

#else // KMP_USE_DYNAMIC_LOCK

# define DYNA_LOCK_BUSY(v, type) (v)
# define DYNA_LOCK_FREE(type)    0
# define DYNA_LOCK_STRIP(v)      (v)
# define DYNA_STORE_LOCK_SEQ(seq)

#endif // KMP_USE_DYNAMIC_LOCK

#ifdef __cplusplus
} // extern "C"
#endif // __cplusplus

#endif /* KMP_LOCK_H */