/*
 * kmp_lock.h -- lock header file
 */


//===----------------------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//


#ifndef KMP_LOCK_H
#define KMP_LOCK_H

#include <limits.h> // CHAR_BIT
#include <stddef.h> // offsetof

#include "kmp_debug.h"
#include "kmp_os.h"

#ifdef __cplusplus
#include <atomic>

extern "C" {
#endif // __cplusplus

// ----------------------------------------------------------------------------
// Have to copy these definitions from kmp.h because kmp.h cannot be included
// due to circular dependencies. Will undef these at the end of the file.

#define KMP_PAD(type, sz)                                                      \
  (sizeof(type) + (sz - ((sizeof(type) - 1) % (sz)) - 1))
#define KMP_GTID_DNE (-2)
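
// Worked example (illustrative, not part of the original source): KMP_PAD
// rounds sizeof(type) up to the next multiple of sz, so for a 20-byte type
// padded to a 64-byte cache line it gives
//   20 + (64 - ((20 - 1) % 64) - 1) = 20 + 44 = 64,
// and a type that is already 64 bytes stays 64. The lk_pad members below use
// this to keep each lock on its own set of cache lines.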

// Forward declaration of ident and ident_t

struct ident;
typedef struct ident ident_t;

// End of copied code.
// ----------------------------------------------------------------------------

// We need to know the size of the area we can assume that the compiler(s)
// allocated for objects of type omp_lock_t and omp_nest_lock_t. The Intel
// compiler always allocates a pointer-sized area, as does Visual Studio.
//
// gcc, however, only allocates 4 bytes for regular locks, even on 64-bit
// Intel archs. It allocates at least 8 bytes for a nested lock (more on
// recent versions), but we are bounded by the pointer-sized chunks that
// the Intel compiler allocates.

#if KMP_OS_LINUX && defined(KMP_GOMP_COMPAT)
#define OMP_LOCK_T_SIZE sizeof(int)
#define OMP_NEST_LOCK_T_SIZE sizeof(void *)
#else
#define OMP_LOCK_T_SIZE sizeof(void *)
#define OMP_NEST_LOCK_T_SIZE sizeof(void *)
#endif
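
// Illustrative note: the inline TAS fast paths later in this file guard on
// exactly this size relation, e.g.
//   if (sizeof(kmp_tas_lock_t) <= OMP_LOCK_T_SIZE) { /* lock is inline */ }
// so a lock only lives directly inside the omp_lock_t object when it fits.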

// The Intel compiler allocates a 32-byte chunk for a critical section.
// Both gcc and Visual Studio only allocate enough space for a pointer.
// Sometimes we know that the space was allocated by the Intel compiler.
#define OMP_CRITICAL_SIZE sizeof(void *)
#define INTEL_CRITICAL_SIZE 32

// lock flags
typedef kmp_uint32 kmp_lock_flags_t;

#define kmp_lf_critical_section 1

// When a lock table is used, the indices are of kmp_lock_index_t
typedef kmp_uint32 kmp_lock_index_t;

// When memory allocated for locks is on the lock pool (free list),
// it is treated as structs of this type.
struct kmp_lock_pool {
  union kmp_user_lock *next;
  kmp_lock_index_t index;
};

typedef struct kmp_lock_pool kmp_lock_pool_t;
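
// Free-list sketch (illustrative, not the RTL code): a lock returned to the
// pool is threaded through its own storage via the pool view, e.g.
//   lck->pool.next = __kmp_lock_pool; // push onto the global free list
//   __kmp_lock_pool = lck;
// __kmp_lock_pool is declared later in this file.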

extern void __kmp_validate_locks(void);

// ----------------------------------------------------------------------------
// There are 5 lock implementations:
// 1. Test and set locks.
// 2. futex locks (Linux* OS on x86 and Intel(R) Many Integrated Core
//    architecture)
// 3. Ticket (Lamport bakery) locks.
// 4. Queuing locks (with separate spin fields).
// 5. DRDPA (Dynamically Reconfigurable Distributed Polling Area) locks
//
// and 3 lock purposes:
// 1. Bootstrap locks -- Used for a few locks available at library
//    startup-shutdown time. These do not require non-negative global thread
//    IDs.
// 2. Internal RTL locks -- Used everywhere else in the RTL
// 3. User locks (includes critical sections)
// ----------------------------------------------------------------------------

// ============================================================================
// Lock implementations.
//
// Test and set locks.
//
// Non-nested test and set locks differ from the other lock kinds (except
// futex) in that we use the memory allocated by the compiler for the lock,
// rather than a pointer to it.
//
// On lin32, lin_32e, and win_32, the space allocated may be as small as 4
// bytes, so we have to use a lock table for nested locks, and avoid accessing
// the depth_locked field for non-nested locks.
//
// Information normally available to the tools, such as lock location, lock
// usage (normal lock vs. critical section), etc. is not available with test
// and set locks.
// ----------------------------------------------------------------------------

struct kmp_base_tas_lock {
  volatile kmp_int32 poll; // 0 => unlocked; locked: (gtid+1) of owning thread
  kmp_int32 depth_locked; // depth locked, for nested locks only
};

typedef struct kmp_base_tas_lock kmp_base_tas_lock_t;

union kmp_tas_lock {
  kmp_base_tas_lock_t lk;
  kmp_lock_pool_t pool; // make certain struct is large enough
  double lk_align; // use worst case alignment; no cache line padding
};

typedef union kmp_tas_lock kmp_tas_lock_t;

// Static initializer for test and set lock variables. Usage:
//   kmp_tas_lock_t xlock = KMP_TAS_LOCK_INITIALIZER( xlock );
#define KMP_TAS_LOCK_INITIALIZER(lock)                                         \
  { {0, 0} }

extern int __kmp_acquire_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid);
extern int __kmp_test_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid);
extern int __kmp_release_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid);
extern void __kmp_init_tas_lock(kmp_tas_lock_t *lck);
extern void __kmp_destroy_tas_lock(kmp_tas_lock_t *lck);
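
// Minimal usage sketch (illustrative; assumes a valid gtid obtained from the
// runtime):
//   kmp_tas_lock_t l = KMP_TAS_LOCK_INITIALIZER(l);
//   __kmp_acquire_tas_lock(&l, gtid); // spins until acquired
//   /* critical section */
//   __kmp_release_tas_lock(&l, gtid);
//   __kmp_destroy_tas_lock(&l);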

extern int __kmp_acquire_nested_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid);
extern int __kmp_test_nested_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid);
extern int __kmp_release_nested_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid);
extern void __kmp_init_nested_tas_lock(kmp_tas_lock_t *lck);
extern void __kmp_destroy_nested_tas_lock(kmp_tas_lock_t *lck);

#define KMP_LOCK_RELEASED 1
#define KMP_LOCK_STILL_HELD 0
#define KMP_LOCK_ACQUIRED_FIRST 1
#define KMP_LOCK_ACQUIRED_NEXT 0
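
// Return-value protocol (illustrative summary): a nested acquire returns
// KMP_LOCK_ACQUIRED_FIRST when the calling thread takes the lock and
// KMP_LOCK_ACQUIRED_NEXT when it merely bumps its own recursion depth; a
// nested release returns KMP_LOCK_RELEASED only when the depth drops to
// zero, and KMP_LOCK_STILL_HELD otherwise.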

#define KMP_USE_FUTEX                                                          \
  (KMP_OS_LINUX && !KMP_OS_CNK &&                                              \
   (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64))

#if KMP_USE_FUTEX

// ----------------------------------------------------------------------------
// futex locks. futex locks are only available on Linux* OS.
//
// Like non-nested test and set locks, non-nested futex locks use the memory
// allocated by the compiler for the lock, rather than a pointer to it.
//
// Information normally available to the tools, such as lock location, lock
// usage (normal lock vs. critical section), etc. is not available with futex
// locks. With non-nested futex locks, the lock owner is not even available.
// ----------------------------------------------------------------------------

struct kmp_base_futex_lock {
  volatile kmp_int32 poll; // 0 => unlocked
  // locked: 2*(gtid+1) of owning thread
  kmp_int32 depth_locked; // depth locked, for nested locks only
};

typedef struct kmp_base_futex_lock kmp_base_futex_lock_t;
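
// Encoding example (illustrative): with gtid == 3 the uncontended locked
// value of poll is 2 * (3 + 1) == 8; the implementation sets the low bit
// (making it 9) once a waiter blocks in the kernel, so the releasing thread
// knows it must issue a futex wake.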

union kmp_futex_lock {
  kmp_base_futex_lock_t lk;
  kmp_lock_pool_t pool; // make certain struct is large enough
  double lk_align; // use worst case alignment; no cache line padding
};

typedef union kmp_futex_lock kmp_futex_lock_t;

// Static initializer for futex lock variables. Usage:
//   kmp_futex_lock_t xlock = KMP_FUTEX_LOCK_INITIALIZER( xlock );
#define KMP_FUTEX_LOCK_INITIALIZER(lock)                                       \
  { {0, 0} }

extern int __kmp_acquire_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid);
extern int __kmp_test_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid);
extern int __kmp_release_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid);
extern void __kmp_init_futex_lock(kmp_futex_lock_t *lck);
extern void __kmp_destroy_futex_lock(kmp_futex_lock_t *lck);

extern int __kmp_acquire_nested_futex_lock(kmp_futex_lock_t *lck,
                                           kmp_int32 gtid);
extern int __kmp_test_nested_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid);
extern int __kmp_release_nested_futex_lock(kmp_futex_lock_t *lck,
                                           kmp_int32 gtid);
extern void __kmp_init_nested_futex_lock(kmp_futex_lock_t *lck);
extern void __kmp_destroy_nested_futex_lock(kmp_futex_lock_t *lck);

#endif // KMP_USE_FUTEX

// ----------------------------------------------------------------------------
// Ticket locks.

#ifdef __cplusplus

#ifdef _MSC_VER
// MSVC won't allow use of std::atomic<> in a union since it has a non-trivial
// copy constructor.

struct kmp_base_ticket_lock {
  // `initialized' must be the first entry in the lock data structure!
  std::atomic_bool initialized;
  volatile union kmp_ticket_lock *self; // points to the lock union
  ident_t const *location; // Source code location of omp_init_lock().
  std::atomic_uint
      next_ticket; // ticket number to give to next thread which acquires
  std::atomic_uint
      now_serving; // ticket number for thread which holds the lock
  std::atomic_int owner_id; // (gtid+1) of owning thread, 0 if unlocked
  std::atomic_int depth_locked; // depth locked, for nested locks only
  kmp_lock_flags_t flags; // lock specifics, e.g. critical section lock
};
#else
struct kmp_base_ticket_lock {
  // `initialized' must be the first entry in the lock data structure!
  std::atomic<bool> initialized;
  volatile union kmp_ticket_lock *self; // points to the lock union
  ident_t const *location; // Source code location of omp_init_lock().
  std::atomic<unsigned>
      next_ticket; // ticket number to give to next thread which acquires
  std::atomic<unsigned>
      now_serving; // ticket number for thread which holds the lock
  std::atomic<int> owner_id; // (gtid+1) of owning thread, 0 if unlocked
  std::atomic<int> depth_locked; // depth locked, for nested locks only
  kmp_lock_flags_t flags; // lock specifics, e.g. critical section lock
};
#endif

#else // __cplusplus

struct kmp_base_ticket_lock;

#endif // !__cplusplus

typedef struct kmp_base_ticket_lock kmp_base_ticket_lock_t;

union KMP_ALIGN_CACHE kmp_ticket_lock {
  kmp_base_ticket_lock_t
      lk; // This field must be first to allow static initializing.
  kmp_lock_pool_t pool;
  double lk_align; // use worst case alignment
  char lk_pad[KMP_PAD(kmp_base_ticket_lock_t, CACHE_LINE)];
};

typedef union kmp_ticket_lock kmp_ticket_lock_t;
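
// The bakery algorithm in miniature (sketch only, not the RTL code): each
// acquirer takes a ticket and spins until its number is served.
//   my_ticket = lk.next_ticket.fetch_add(1);      // take a number
//   while (lk.now_serving.load() != my_ticket) {} // wait for your turn
//   /* critical section */
//   lk.now_serving.store(my_ticket + 1);          // serve the next thread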

// Static initializer for simple ticket lock variables. Usage:
//   kmp_ticket_lock_t xlock = KMP_TICKET_LOCK_INITIALIZER( xlock );
// Note the macro argument. It is important to make the variable properly
// initialized.
#define KMP_TICKET_LOCK_INITIALIZER(lock)                                      \
  {                                                                            \
    {                                                                          \
      ATOMIC_VAR_INIT(true), &(lock), NULL, ATOMIC_VAR_INIT(0U),               \
          ATOMIC_VAR_INIT(0U), ATOMIC_VAR_INIT(0), ATOMIC_VAR_INIT(-1)         \
    }                                                                          \
  }
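
// Field mapping for the initializer above, in declaration order (illustrative
// note): initialized = true, self = &(lock), location = NULL,
// next_ticket = 0, now_serving = 0, owner_id = 0 (unlocked),
// depth_locked = -1 (simple, non-nested lock); flags is zero-initialized.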

extern int __kmp_acquire_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid);
extern int __kmp_test_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid);
extern int __kmp_test_ticket_lock_with_cheks(kmp_ticket_lock_t *lck,
                                             kmp_int32 gtid);
extern int __kmp_release_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid);
extern void __kmp_init_ticket_lock(kmp_ticket_lock_t *lck);
extern void __kmp_destroy_ticket_lock(kmp_ticket_lock_t *lck);

extern int __kmp_acquire_nested_ticket_lock(kmp_ticket_lock_t *lck,
                                            kmp_int32 gtid);
extern int __kmp_test_nested_ticket_lock(kmp_ticket_lock_t *lck,
                                         kmp_int32 gtid);
extern int __kmp_release_nested_ticket_lock(kmp_ticket_lock_t *lck,
                                            kmp_int32 gtid);
extern void __kmp_init_nested_ticket_lock(kmp_ticket_lock_t *lck);
extern void __kmp_destroy_nested_ticket_lock(kmp_ticket_lock_t *lck);

// ----------------------------------------------------------------------------
// Queuing locks.

#if KMP_USE_ADAPTIVE_LOCKS

struct kmp_adaptive_lock_info;

typedef struct kmp_adaptive_lock_info kmp_adaptive_lock_info_t;

#if KMP_DEBUG_ADAPTIVE_LOCKS

struct kmp_adaptive_lock_statistics {
  /* So we can get stats from locks that haven't been destroyed. */
  kmp_adaptive_lock_info_t *next;
  kmp_adaptive_lock_info_t *prev;

  /* Other statistics */
  kmp_uint32 successfulSpeculations;
  kmp_uint32 hardFailedSpeculations;
  kmp_uint32 softFailedSpeculations;
  kmp_uint32 nonSpeculativeAcquires;
  kmp_uint32 nonSpeculativeAcquireAttempts;
  kmp_uint32 lemmingYields;
};

typedef struct kmp_adaptive_lock_statistics kmp_adaptive_lock_statistics_t;

extern void __kmp_print_speculative_stats();
extern void __kmp_init_speculative_stats();

#endif // KMP_DEBUG_ADAPTIVE_LOCKS

struct kmp_adaptive_lock_info {
  /* Values used for adaptivity.
     Although these are accessed from multiple threads we don't access them
     atomically, because if we miss updates it probably doesn't matter much.
     (It just affects our decision about whether to try speculation on the
     lock.) */
  kmp_uint32 volatile badness;
  kmp_uint32 volatile acquire_attempts;
  /* Parameters of the lock. */
  kmp_uint32 max_badness;
  kmp_uint32 max_soft_retries;

#if KMP_DEBUG_ADAPTIVE_LOCKS
  kmp_adaptive_lock_statistics_t volatile stats;
#endif
};

#endif // KMP_USE_ADAPTIVE_LOCKS

struct kmp_base_queuing_lock {

  // `initialized' must be the first entry in the lock data structure!
  volatile union kmp_queuing_lock
      *initialized; // Points to the lock union if in initialized state.

  ident_t const *location; // Source code location of omp_init_lock().

  KMP_ALIGN(8) // tail_id must be 8-byte aligned!

  volatile kmp_int32
      tail_id; // (gtid+1) of thread at tail of wait queue, 0 if empty
  // Must be no padding here since head/tail used in 8-byte CAS
  volatile kmp_int32
      head_id; // (gtid+1) of thread at head of wait queue, 0 if empty
  // Decl order assumes little endian
  // bakery-style lock
  volatile kmp_uint32
      next_ticket; // ticket number to give to next thread which acquires
  volatile kmp_uint32
      now_serving; // ticket number for thread which holds the lock
  volatile kmp_int32 owner_id; // (gtid+1) of owning thread, 0 if unlocked
  kmp_int32 depth_locked; // depth locked, for nested locks only

  kmp_lock_flags_t flags; // lock specifics, e.g. critical section lock
};

typedef struct kmp_base_queuing_lock kmp_base_queuing_lock_t;

KMP_BUILD_ASSERT(offsetof(kmp_base_queuing_lock_t, tail_id) % 8 == 0);
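
// Why the assert matters (illustrative): tail_id and head_id form one
// aligned 8-byte pair, so the queuing-lock code can update both ends of the
// wait queue atomically with a single 8-byte compare-and-swap instead of
// taking an auxiliary lock.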

union KMP_ALIGN_CACHE kmp_queuing_lock {
  kmp_base_queuing_lock_t
      lk; // This field must be first to allow static initializing.
  kmp_lock_pool_t pool;
  double lk_align; // use worst case alignment
  char lk_pad[KMP_PAD(kmp_base_queuing_lock_t, CACHE_LINE)];
};

typedef union kmp_queuing_lock kmp_queuing_lock_t;

extern int __kmp_acquire_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid);
extern int __kmp_test_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid);
extern int __kmp_release_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid);
extern void __kmp_init_queuing_lock(kmp_queuing_lock_t *lck);
extern void __kmp_destroy_queuing_lock(kmp_queuing_lock_t *lck);

extern int __kmp_acquire_nested_queuing_lock(kmp_queuing_lock_t *lck,
                                             kmp_int32 gtid);
extern int __kmp_test_nested_queuing_lock(kmp_queuing_lock_t *lck,
                                          kmp_int32 gtid);
extern int __kmp_release_nested_queuing_lock(kmp_queuing_lock_t *lck,
                                             kmp_int32 gtid);
extern void __kmp_init_nested_queuing_lock(kmp_queuing_lock_t *lck);
extern void __kmp_destroy_nested_queuing_lock(kmp_queuing_lock_t *lck);

#if KMP_USE_ADAPTIVE_LOCKS

// ----------------------------------------------------------------------------
// Adaptive locks.
struct kmp_base_adaptive_lock {
  kmp_base_queuing_lock qlk;
  KMP_ALIGN(CACHE_LINE)
  kmp_adaptive_lock_info_t
      adaptive; // Information for the speculative adaptive lock
};

typedef struct kmp_base_adaptive_lock kmp_base_adaptive_lock_t;

union KMP_ALIGN_CACHE kmp_adaptive_lock {
  kmp_base_adaptive_lock_t lk;
  kmp_lock_pool_t pool;
  double lk_align;
  char lk_pad[KMP_PAD(kmp_base_adaptive_lock_t, CACHE_LINE)];
};
typedef union kmp_adaptive_lock kmp_adaptive_lock_t;

#define GET_QLK_PTR(l) ((kmp_queuing_lock_t *)&(l)->lk.qlk)
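
// GET_QLK_PTR is valid because qlk is the first member of
// kmp_base_adaptive_lock, so an adaptive lock can be handed to the queuing
// lock routines when speculation is not used (illustrative):
//   __kmp_acquire_queuing_lock(GET_QLK_PTR(lck), gtid);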

#endif // KMP_USE_ADAPTIVE_LOCKS

// ----------------------------------------------------------------------------
// DRDPA ticket locks.
struct kmp_base_drdpa_lock {
  // All of the fields on the first cache line are only written when
  // initializing or reconfiguring the lock. These are relatively rare
  // operations, so data from the first cache line will usually stay resident
  // in the cache of each thread trying to acquire the lock.
  //
  // initialized must be the first entry in the lock data structure!
  KMP_ALIGN_CACHE

  volatile union kmp_drdpa_lock
      *initialized; // points to the lock union if in initialized state
  ident_t const *location; // Source code location of omp_init_lock().
  volatile struct kmp_lock_poll { kmp_uint64 poll; } *volatile polls;
  volatile kmp_uint64 mask; // is 2**num_polls-1 for mod op
  kmp_uint64 cleanup_ticket; // thread with cleanup ticket
  volatile struct kmp_lock_poll *old_polls; // will deallocate old_polls
  kmp_uint32 num_polls; // must be power of 2

  // next_ticket needs to exist in a separate cache line, as it is
  // invalidated every time a thread takes a new ticket.
  KMP_ALIGN_CACHE

  volatile kmp_uint64 next_ticket;

  // now_serving is used to store our ticket value while we hold the lock. It
  // has a slightly different meaning in the DRDPA ticket locks (where it is
  // written by the acquiring thread) than it does in the simple ticket locks
  // (where it is written by the releasing thread).
  //
  // Since now_serving is only read and written in the critical section,
  // it is non-volatile, but it needs to exist on a separate cache line,
  // as it is invalidated at every lock acquire.
  //
  // Likewise, the vars used for nested locks (owner_id and depth_locked) are
  // only written by the thread owning the lock, so they are put in this cache
  // line. owner_id is read by other threads, so it must be declared volatile.
  KMP_ALIGN_CACHE
  kmp_uint64 now_serving; // doesn't have to be volatile
  volatile kmp_uint32 owner_id; // (gtid+1) of owning thread, 0 if unlocked
  kmp_int32 depth_locked; // depth locked
  kmp_lock_flags_t flags; // lock specifics, e.g. critical section lock
};

typedef struct kmp_base_drdpa_lock kmp_base_drdpa_lock_t;

union KMP_ALIGN_CACHE kmp_drdpa_lock {
  kmp_base_drdpa_lock_t
      lk; // This field must be first to allow static initializing.
  kmp_lock_pool_t pool;
  double lk_align; // use worst case alignment
  char lk_pad[KMP_PAD(kmp_base_drdpa_lock_t, CACHE_LINE)];
};

typedef union kmp_drdpa_lock kmp_drdpa_lock_t;

extern int __kmp_acquire_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid);
extern int __kmp_test_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid);
extern int __kmp_release_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid);
extern void __kmp_init_drdpa_lock(kmp_drdpa_lock_t *lck);
extern void __kmp_destroy_drdpa_lock(kmp_drdpa_lock_t *lck);

extern int __kmp_acquire_nested_drdpa_lock(kmp_drdpa_lock_t *lck,
                                           kmp_int32 gtid);
extern int __kmp_test_nested_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid);
extern int __kmp_release_nested_drdpa_lock(kmp_drdpa_lock_t *lck,
                                           kmp_int32 gtid);
extern void __kmp_init_nested_drdpa_lock(kmp_drdpa_lock_t *lck);
extern void __kmp_destroy_nested_drdpa_lock(kmp_drdpa_lock_t *lck);

// ============================================================================
// Lock purposes.
// ============================================================================

// Bootstrap locks.
//
// Bootstrap locks -- very few locks used at library initialization time.
// Bootstrap locks are currently implemented as ticket locks.
// They could also be implemented as test and set locks, but cannot be
// implemented with other lock kinds as they require gtids, which are not
// available at initialization time.

typedef kmp_ticket_lock_t kmp_bootstrap_lock_t;

#define KMP_BOOTSTRAP_LOCK_INITIALIZER(lock) KMP_TICKET_LOCK_INITIALIZER((lock))

static inline int __kmp_acquire_bootstrap_lock(kmp_bootstrap_lock_t *lck) {
  return __kmp_acquire_ticket_lock(lck, KMP_GTID_DNE);
}

static inline int __kmp_test_bootstrap_lock(kmp_bootstrap_lock_t *lck) {
  return __kmp_test_ticket_lock(lck, KMP_GTID_DNE);
}

static inline void __kmp_release_bootstrap_lock(kmp_bootstrap_lock_t *lck) {
  __kmp_release_ticket_lock(lck, KMP_GTID_DNE);
}

static inline void __kmp_init_bootstrap_lock(kmp_bootstrap_lock_t *lck) {
  __kmp_init_ticket_lock(lck);
}

static inline void __kmp_destroy_bootstrap_lock(kmp_bootstrap_lock_t *lck) {
  __kmp_destroy_ticket_lock(lck);
}
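
// Usage sketch (illustrative): bootstrap locks run before gtids exist, which
// is why the wrappers above pass KMP_GTID_DNE.
//   static kmp_bootstrap_lock_t init_lock =
//       KMP_BOOTSTRAP_LOCK_INITIALIZER(init_lock);
//   __kmp_acquire_bootstrap_lock(&init_lock);
//   /* one-time startup work */
//   __kmp_release_bootstrap_lock(&init_lock);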

// Internal RTL locks.
//
// Internal RTL locks are also implemented as ticket locks, for now.
//
// FIXME - We should go through and figure out which lock kind works best for
// each internal lock, and use the type declaration and function calls for
// that explicit lock kind (and get rid of this section).

typedef kmp_ticket_lock_t kmp_lock_t;

static inline int __kmp_acquire_lock(kmp_lock_t *lck, kmp_int32 gtid) {
  return __kmp_acquire_ticket_lock(lck, gtid);
}

static inline int __kmp_test_lock(kmp_lock_t *lck, kmp_int32 gtid) {
  return __kmp_test_ticket_lock(lck, gtid);
}

static inline void __kmp_release_lock(kmp_lock_t *lck, kmp_int32 gtid) {
  __kmp_release_ticket_lock(lck, gtid);
}

static inline void __kmp_init_lock(kmp_lock_t *lck) {
  __kmp_init_ticket_lock(lck);
}

static inline void __kmp_destroy_lock(kmp_lock_t *lck) {
  __kmp_destroy_ticket_lock(lck);
}

// User locks.
//
// Do not allocate objects of type union kmp_user_lock!!! This will waste space
// unless __kmp_user_lock_kind == lk_drdpa. Instead, check the value of
// __kmp_user_lock_kind and allocate objects of the type of the appropriate
// union member, and cast their addresses to kmp_user_lock_p.

enum kmp_lock_kind {
  lk_default = 0,
  lk_tas,
#if KMP_USE_FUTEX
  lk_futex,
#endif
#if KMP_USE_DYNAMIC_LOCK && KMP_USE_TSX
  lk_hle,
  lk_rtm,
#endif
  lk_ticket,
  lk_queuing,
  lk_drdpa,
#if KMP_USE_ADAPTIVE_LOCKS
  lk_adaptive
#endif // KMP_USE_ADAPTIVE_LOCKS
};

typedef enum kmp_lock_kind kmp_lock_kind_t;

extern kmp_lock_kind_t __kmp_user_lock_kind;

union kmp_user_lock {
  kmp_tas_lock_t tas;
#if KMP_USE_FUTEX
  kmp_futex_lock_t futex;
#endif
  kmp_ticket_lock_t ticket;
  kmp_queuing_lock_t queuing;
  kmp_drdpa_lock_t drdpa;
#if KMP_USE_ADAPTIVE_LOCKS
  kmp_adaptive_lock_t adaptive;
#endif // KMP_USE_ADAPTIVE_LOCKS
  kmp_lock_pool_t pool;
};

typedef union kmp_user_lock *kmp_user_lock_p;
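
// Allocation sketch per the warning above (illustrative; assumes the
// runtime's internal __kmp_allocate allocator from kmp.h):
//   kmp_user_lock_p lck;
//   if (__kmp_user_lock_kind == lk_ticket)
//     lck = (kmp_user_lock_p)__kmp_allocate(sizeof(kmp_ticket_lock_t));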

#if !KMP_USE_DYNAMIC_LOCK

extern size_t __kmp_base_user_lock_size;
extern size_t __kmp_user_lock_size;

extern kmp_int32 (*__kmp_get_user_lock_owner_)(kmp_user_lock_p lck);

static inline kmp_int32 __kmp_get_user_lock_owner(kmp_user_lock_p lck) {
  KMP_DEBUG_ASSERT(__kmp_get_user_lock_owner_ != NULL);
  return (*__kmp_get_user_lock_owner_)(lck);
}

extern int (*__kmp_acquire_user_lock_with_checks_)(kmp_user_lock_p lck,
                                                   kmp_int32 gtid);

#if KMP_OS_LINUX &&                                                            \
    (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)

#define __kmp_acquire_user_lock_with_checks(lck, gtid)                         \
  if (__kmp_user_lock_kind == lk_tas) {                                        \
    if (__kmp_env_consistency_check) {                                         \
      char const *const func = "omp_set_lock";                                 \
      if ((sizeof(kmp_tas_lock_t) <= OMP_LOCK_T_SIZE) &&                       \
          lck->tas.lk.depth_locked != -1) {                                    \
        KMP_FATAL(LockNestableUsedAsSimple, func);                             \
      }                                                                        \
      if ((gtid >= 0) && (lck->tas.lk.poll - 1 == gtid)) {                     \
        KMP_FATAL(LockIsAlreadyOwned, func);                                   \
      }                                                                        \
    }                                                                          \
    if ((lck->tas.lk.poll != 0) ||                                             \
        (!KMP_COMPARE_AND_STORE_ACQ32(&(lck->tas.lk.poll), 0, gtid + 1))) {    \
      kmp_uint32 spins;                                                        \
      KMP_FSYNC_PREPARE(lck);                                                  \
      KMP_INIT_YIELD(spins);                                                   \
      if (TCR_4(__kmp_nth) >                                                   \
          (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)) {               \
        KMP_YIELD(TRUE);                                                       \
      } else {                                                                 \
        KMP_YIELD_SPIN(spins);                                                 \
      }                                                                        \
      while (                                                                  \
          (lck->tas.lk.poll != 0) ||                                           \
          (!KMP_COMPARE_AND_STORE_ACQ32(&(lck->tas.lk.poll), 0, gtid + 1))) {  \
        if (TCR_4(__kmp_nth) >                                                 \
            (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)) {             \
          KMP_YIELD(TRUE);                                                     \
        } else {                                                               \
          KMP_YIELD_SPIN(spins);                                               \
        }                                                                      \
      }                                                                        \
    }                                                                          \
    KMP_FSYNC_ACQUIRED(lck);                                                   \
  } else {                                                                     \
    KMP_DEBUG_ASSERT(__kmp_acquire_user_lock_with_checks_ != NULL);            \
    (*__kmp_acquire_user_lock_with_checks_)(lck, gtid);                        \
  }

#else
static inline int __kmp_acquire_user_lock_with_checks(kmp_user_lock_p lck,
                                                      kmp_int32 gtid) {
  KMP_DEBUG_ASSERT(__kmp_acquire_user_lock_with_checks_ != NULL);
  return (*__kmp_acquire_user_lock_with_checks_)(lck, gtid);
}
#endif

extern int (*__kmp_test_user_lock_with_checks_)(kmp_user_lock_p lck,
                                                kmp_int32 gtid);

#if KMP_OS_LINUX &&                                                            \
    (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)

#include "kmp_i18n.h" /* AC: KMP_FATAL definition */
extern int __kmp_env_consistency_check; /* AC: copy from kmp.h here */
static inline int __kmp_test_user_lock_with_checks(kmp_user_lock_p lck,
                                                   kmp_int32 gtid) {
  if (__kmp_user_lock_kind == lk_tas) {
    if (__kmp_env_consistency_check) {
      char const *const func = "omp_test_lock";
      if ((sizeof(kmp_tas_lock_t) <= OMP_LOCK_T_SIZE) &&
          lck->tas.lk.depth_locked != -1) {
        KMP_FATAL(LockNestableUsedAsSimple, func);
      }
    }
    return ((lck->tas.lk.poll == 0) &&
            KMP_COMPARE_AND_STORE_ACQ32(&(lck->tas.lk.poll), 0, gtid + 1));
  } else {
    KMP_DEBUG_ASSERT(__kmp_test_user_lock_with_checks_ != NULL);
    return (*__kmp_test_user_lock_with_checks_)(lck, gtid);
  }
}
#else
static inline int __kmp_test_user_lock_with_checks(kmp_user_lock_p lck,
                                                   kmp_int32 gtid) {
  KMP_DEBUG_ASSERT(__kmp_test_user_lock_with_checks_ != NULL);
  return (*__kmp_test_user_lock_with_checks_)(lck, gtid);
}
#endif

extern int (*__kmp_release_user_lock_with_checks_)(kmp_user_lock_p lck,
                                                   kmp_int32 gtid);

static inline void __kmp_release_user_lock_with_checks(kmp_user_lock_p lck,
                                                       kmp_int32 gtid) {
  KMP_DEBUG_ASSERT(__kmp_release_user_lock_with_checks_ != NULL);
  (*__kmp_release_user_lock_with_checks_)(lck, gtid);
}

extern void (*__kmp_init_user_lock_with_checks_)(kmp_user_lock_p lck);

static inline void __kmp_init_user_lock_with_checks(kmp_user_lock_p lck) {
  KMP_DEBUG_ASSERT(__kmp_init_user_lock_with_checks_ != NULL);
  (*__kmp_init_user_lock_with_checks_)(lck);
}

// We need a non-checking version of destroy lock for when the RTL is
// doing the cleanup as it can't always tell if the lock is nested or not.
extern void (*__kmp_destroy_user_lock_)(kmp_user_lock_p lck);

static inline void __kmp_destroy_user_lock(kmp_user_lock_p lck) {
  KMP_DEBUG_ASSERT(__kmp_destroy_user_lock_ != NULL);
  (*__kmp_destroy_user_lock_)(lck);
}

extern void (*__kmp_destroy_user_lock_with_checks_)(kmp_user_lock_p lck);

static inline void __kmp_destroy_user_lock_with_checks(kmp_user_lock_p lck) {
  KMP_DEBUG_ASSERT(__kmp_destroy_user_lock_with_checks_ != NULL);
  (*__kmp_destroy_user_lock_with_checks_)(lck);
}

extern int (*__kmp_acquire_nested_user_lock_with_checks_)(kmp_user_lock_p lck,
                                                          kmp_int32 gtid);

#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64)

#define __kmp_acquire_nested_user_lock_with_checks(lck, gtid, depth)           \
  if (__kmp_user_lock_kind == lk_tas) {                                        \
    if (__kmp_env_consistency_check) {                                         \
      char const *const func = "omp_set_nest_lock";                            \
      if ((sizeof(kmp_tas_lock_t) <= OMP_NEST_LOCK_T_SIZE) &&                  \
          lck->tas.lk.depth_locked == -1) {                                    \
        KMP_FATAL(LockSimpleUsedAsNestable, func);                             \
      }                                                                        \
    }                                                                          \
    if (lck->tas.lk.poll - 1 == gtid) {                                        \
      lck->tas.lk.depth_locked += 1;                                           \
      *depth = KMP_LOCK_ACQUIRED_NEXT;                                         \
    } else {                                                                   \
      if ((lck->tas.lk.poll != 0) ||                                           \
          (!KMP_COMPARE_AND_STORE_ACQ32(&(lck->tas.lk.poll), 0, gtid + 1))) {  \
        kmp_uint32 spins;                                                      \
        KMP_FSYNC_PREPARE(lck);                                                \
        KMP_INIT_YIELD(spins);                                                 \
        if (TCR_4(__kmp_nth) >                                                 \
            (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)) {             \
          KMP_YIELD(TRUE);                                                     \
        } else {                                                               \
          KMP_YIELD_SPIN(spins);                                               \
        }                                                                      \
        while ((lck->tas.lk.poll != 0) ||                                      \
               (!KMP_COMPARE_AND_STORE_ACQ32(&(lck->tas.lk.poll), 0,           \
                                             gtid + 1))) {                     \
          if (TCR_4(__kmp_nth) >                                               \
              (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)) {           \
            KMP_YIELD(TRUE);                                                   \
          } else {                                                             \
            KMP_YIELD_SPIN(spins);                                             \
          }                                                                    \
        }                                                                      \
      }                                                                        \
      lck->tas.lk.depth_locked = 1;                                            \
      *depth = KMP_LOCK_ACQUIRED_FIRST;                                        \
    }                                                                          \
    KMP_FSYNC_ACQUIRED(lck);                                                   \
  } else {                                                                     \
    KMP_DEBUG_ASSERT(__kmp_acquire_nested_user_lock_with_checks_ != NULL);     \
    *depth = (*__kmp_acquire_nested_user_lock_with_checks_)(lck, gtid);        \
  }

#else
static inline void
__kmp_acquire_nested_user_lock_with_checks(kmp_user_lock_p lck, kmp_int32 gtid,
                                           int *depth) {
  KMP_DEBUG_ASSERT(__kmp_acquire_nested_user_lock_with_checks_ != NULL);
  *depth = (*__kmp_acquire_nested_user_lock_with_checks_)(lck, gtid);
}
#endif

extern int (*__kmp_test_nested_user_lock_with_checks_)(kmp_user_lock_p lck,
                                                       kmp_int32 gtid);

#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64)
static inline int __kmp_test_nested_user_lock_with_checks(kmp_user_lock_p lck,
                                                          kmp_int32 gtid) {
  if (__kmp_user_lock_kind == lk_tas) {
    int retval;
    if (__kmp_env_consistency_check) {
      char const *const func = "omp_test_nest_lock";
      if ((sizeof(kmp_tas_lock_t) <= OMP_NEST_LOCK_T_SIZE) &&
          lck->tas.lk.depth_locked == -1) {
        KMP_FATAL(LockSimpleUsedAsNestable, func);
      }
    }
    KMP_DEBUG_ASSERT(gtid >= 0);
    if (lck->tas.lk.poll - 1 ==
        gtid) { /* __kmp_get_tas_lock_owner( lck ) == gtid */
      return ++lck->tas.lk.depth_locked; /* same owner, depth increased */
    }
    retval = ((lck->tas.lk.poll == 0) &&
              KMP_COMPARE_AND_STORE_ACQ32(&(lck->tas.lk.poll), 0, gtid + 1));
    if (retval) {
      KMP_MB();
      lck->tas.lk.depth_locked = 1;
    }
    return retval;
  } else {
    KMP_DEBUG_ASSERT(__kmp_test_nested_user_lock_with_checks_ != NULL);
    return (*__kmp_test_nested_user_lock_with_checks_)(lck, gtid);
  }
}
#else
static inline int __kmp_test_nested_user_lock_with_checks(kmp_user_lock_p lck,
                                                          kmp_int32 gtid) {
  KMP_DEBUG_ASSERT(__kmp_test_nested_user_lock_with_checks_ != NULL);
  return (*__kmp_test_nested_user_lock_with_checks_)(lck, gtid);
}
#endif

extern int (*__kmp_release_nested_user_lock_with_checks_)(kmp_user_lock_p lck,
                                                          kmp_int32 gtid);

static inline int
__kmp_release_nested_user_lock_with_checks(kmp_user_lock_p lck,
                                           kmp_int32 gtid) {
  KMP_DEBUG_ASSERT(__kmp_release_nested_user_lock_with_checks_ != NULL);
  return (*__kmp_release_nested_user_lock_with_checks_)(lck, gtid);
}

extern void (*__kmp_init_nested_user_lock_with_checks_)(kmp_user_lock_p lck);

static inline void
__kmp_init_nested_user_lock_with_checks(kmp_user_lock_p lck) {
  KMP_DEBUG_ASSERT(__kmp_init_nested_user_lock_with_checks_ != NULL);
  (*__kmp_init_nested_user_lock_with_checks_)(lck);
}

extern void (*__kmp_destroy_nested_user_lock_with_checks_)(kmp_user_lock_p lck);

static inline void
__kmp_destroy_nested_user_lock_with_checks(kmp_user_lock_p lck) {
  KMP_DEBUG_ASSERT(__kmp_destroy_nested_user_lock_with_checks_ != NULL);
  (*__kmp_destroy_nested_user_lock_with_checks_)(lck);
}

// user lock functions which do not necessarily exist for all lock kinds.
//
// The "set" functions usually have wrapper routines that check for a NULL set
// function pointer and call it if non-NULL.
//
// In some cases, it makes sense to have a "get" wrapper function check for a
// NULL get function pointer and return NULL / invalid value / error code if
// the function pointer is NULL.
//
// In other cases, the calling code really should differentiate between an
// unimplemented function and one that is implemented but returning NULL /
// invalid value. If this is the case, no get function wrapper exists.

extern int (*__kmp_is_user_lock_initialized_)(kmp_user_lock_p lck);

// no set function; fields set during local allocation

extern const ident_t *(*__kmp_get_user_lock_location_)(kmp_user_lock_p lck);

static inline const ident_t *__kmp_get_user_lock_location(kmp_user_lock_p lck) {
  if (__kmp_get_user_lock_location_ != NULL) {
    return (*__kmp_get_user_lock_location_)(lck);
  } else {
    return NULL;
  }
}

extern void (*__kmp_set_user_lock_location_)(kmp_user_lock_p lck,
                                             const ident_t *loc);

static inline void __kmp_set_user_lock_location(kmp_user_lock_p lck,
                                                const ident_t *loc) {
  if (__kmp_set_user_lock_location_ != NULL) {
    (*__kmp_set_user_lock_location_)(lck, loc);
  }
}

extern kmp_lock_flags_t (*__kmp_get_user_lock_flags_)(kmp_user_lock_p lck);

extern void (*__kmp_set_user_lock_flags_)(kmp_user_lock_p lck,
                                          kmp_lock_flags_t flags);

static inline void __kmp_set_user_lock_flags(kmp_user_lock_p lck,
                                             kmp_lock_flags_t flags) {
  if (__kmp_set_user_lock_flags_ != NULL) {
    (*__kmp_set_user_lock_flags_)(lck, flags);
  }
}

// The function that sets up all of the vtbl pointers for kmp_user_lock_t.
extern void __kmp_set_user_lock_vptrs(kmp_lock_kind_t user_lock_kind);

// Macros for binding user lock functions.
#define KMP_BIND_USER_LOCK_TEMPLATE(nest, kind, suffix)                        \
  {                                                                            \
    __kmp_acquire##nest##user_lock_with_checks_ = (int (*)(                    \
        kmp_user_lock_p, kmp_int32))__kmp_acquire##nest##kind##_##suffix;      \
    __kmp_release##nest##user_lock_with_checks_ = (int (*)(                    \
        kmp_user_lock_p, kmp_int32))__kmp_release##nest##kind##_##suffix;      \
    __kmp_test##nest##user_lock_with_checks_ = (int (*)(                       \
        kmp_user_lock_p, kmp_int32))__kmp_test##nest##kind##_##suffix;         \
    __kmp_init##nest##user_lock_with_checks_ =                                 \
        (void (*)(kmp_user_lock_p))__kmp_init##nest##kind##_##suffix;          \
    __kmp_destroy##nest##user_lock_with_checks_ =                              \
        (void (*)(kmp_user_lock_p))__kmp_destroy##nest##kind##_##suffix;       \
  }

#define KMP_BIND_USER_LOCK(kind) KMP_BIND_USER_LOCK_TEMPLATE(_, kind, lock)
#define KMP_BIND_USER_LOCK_WITH_CHECKS(kind)                                   \
  KMP_BIND_USER_LOCK_TEMPLATE(_, kind, lock_with_checks)
#define KMP_BIND_NESTED_USER_LOCK(kind)                                        \
  KMP_BIND_USER_LOCK_TEMPLATE(_nested_, kind, lock)
#define KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(kind)                            \
  KMP_BIND_USER_LOCK_TEMPLATE(_nested_, kind, lock_with_checks)
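
// Example expansion (illustrative): KMP_BIND_USER_LOCK(ticket) produces
// assignments such as
//   __kmp_acquire_user_lock_with_checks_ =
//       (int (*)(kmp_user_lock_p, kmp_int32))__kmp_acquire_ticket_lock;
// i.e. the five vtbl pointers are aimed at the ticket lock routines declared
// earlier in this file.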

// User lock table & lock allocation
/* On 64-bit Linux* OS (and OS X*) the GNU compiler allocates only 4 bytes of
   memory for a lock variable, which is not enough to store a pointer, so we
   have to use lock indexes instead of pointers and maintain a lock table to
   map indexes to pointers.


   Note: The first element of the table is not a pointer to a lock! It is a
   pointer to the previously allocated table (or NULL if it is the first
   table).

   Usage:

   if ( OMP_LOCK_T_SIZE < sizeof( <lock> ) ) { // or OMP_NEST_LOCK_T_SIZE
     Lock table is fully utilized. User locks are indexes, so the table is
     used on every user lock operation.
     Note: it may be the case (lin_32) that we don't need to use a lock
     table for regular locks, but do need the table for nested locks.
   }
   else {
     Lock table initialized but not actually used.
   }
*/

struct kmp_lock_table {
  kmp_lock_index_t used; // Number of used elements
  kmp_lock_index_t allocated; // Number of allocated elements
  kmp_user_lock_p *table; // Lock table.
};

typedef struct kmp_lock_table kmp_lock_table_t;

extern kmp_lock_table_t __kmp_user_lock_table;
extern kmp_user_lock_p __kmp_lock_pool;

struct kmp_block_of_locks {
  struct kmp_block_of_locks *next_block;
  void *locks;
};

typedef struct kmp_block_of_locks kmp_block_of_locks_t;

extern kmp_block_of_locks_t *__kmp_lock_blocks;
extern int __kmp_num_locks_in_block;

extern kmp_user_lock_p __kmp_user_lock_allocate(void **user_lock,
                                                kmp_int32 gtid,
                                                kmp_lock_flags_t flags);
extern void __kmp_user_lock_free(void **user_lock, kmp_int32 gtid,
                                 kmp_user_lock_p lck);
extern kmp_user_lock_p __kmp_lookup_user_lock(void **user_lock,
                                              char const *func);
extern void __kmp_cleanup_user_locks();

#define KMP_CHECK_USER_LOCK_INIT()                                             \
  {                                                                            \
    if (!TCR_4(__kmp_init_user_locks)) {                                       \
      __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);                         \
      if (!TCR_4(__kmp_init_user_locks)) {                                     \
        TCW_4(__kmp_init_user_locks, TRUE);                                    \
      }                                                                        \
      __kmp_release_bootstrap_lock(&__kmp_initz_lock);                         \
    }                                                                          \
  }
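
// The macro above is the double-checked initialization idiom: the first,
// unsynchronized TCR_4 read skips the bootstrap lock on the hot path, and
// the second check under __kmp_initz_lock ensures the flag is set exactly
// once.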

#endif // KMP_USE_DYNAMIC_LOCK

#undef KMP_PAD
#undef KMP_GTID_DNE

#if KMP_USE_DYNAMIC_LOCK
// KMP_USE_DYNAMIC_LOCK enables dynamic dispatch of lock functions without
// breaking the current compatibility. Essential functionality of this new code
// is dynamic dispatch, but it also implements (or enables implementation of)
// hinted user locks and critical sections, which will be part of OMP 4.5 soon.
//
// The lock type is decided at creation time (i.e., lock initialization), and
// each subsequent lock function call on the created lock object requires type
// extraction and a call through a jump table using the extracted type. This
// type information is stored in two different ways depending on the size of
// the lock object, and we differentiate lock types by this size requirement -
// direct and indirect locks.
//
// Direct locks:
// A direct lock object fits into the space created by the compiler for an
// omp_lock_t object, and the TAS/Futex locks fall into this category. We use
// the low byte of the lock object as the storage for the lock type, and an
// appropriate bit operation is required to access the data meaningful to the
// lock algorithms. Also, to differentiate a direct lock from an indirect
// lock, 1 is written to the LSB of the lock object. The newly introduced
// "hle" lock is also a direct lock.
//
// Indirect locks:
// An indirect lock object requires more space than the compiler-generated
// space, and it should be allocated from the heap. Depending on the size of
// the compiler-generated space for the lock (i.e., size of omp_lock_t), this
// omp_lock_t object stores either the address of the heap-allocated indirect
// lock (void * fits in the object) or an index to the indirect lock table
// entry that holds the address. Ticket/Queuing/DRDPA/Adaptive locks fall into
// this category, and the newly introduced "rtm" lock is also an indirect
// lock, implemented on top of the Queuing lock. When the omp_lock_t object
// holds an index (not a lock address), 0 is written to the LSB to
// differentiate the lock from a direct lock, and the remaining part is the
// actual index into the indirect lock table.

#include <stdint.h> // for uintptr_t

// Shortcuts
#define KMP_USE_INLINED_TAS                                                    \
  (KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM)) && 1
#define KMP_USE_INLINED_FUTEX KMP_USE_FUTEX && 0

1059// List of lock definitions; all nested locks are indirect locks.
1060// hle lock is xchg lock prefixed with XACQUIRE/XRELEASE.
1061// All nested locks are indirect lock types.
Jonathan Peytondae13d82015-12-11 21:57:06 +00001062#if KMP_USE_TSX
Jonathan Peyton30419822017-05-12 18:01:32 +00001063#if KMP_USE_FUTEX
1064#define KMP_FOREACH_D_LOCK(m, a) m(tas, a) m(futex, a) m(hle, a)
1065#define KMP_FOREACH_I_LOCK(m, a) \
1066 m(ticket, a) m(queuing, a) m(adaptive, a) m(drdpa, a) m(rtm, a) \
1067 m(nested_tas, a) m(nested_futex, a) m(nested_ticket, a) \
1068 m(nested_queuing, a) m(nested_drdpa, a)
Andrey Churbanov5c56fb52015-02-20 18:05:17 +00001069#else
Jonathan Peyton30419822017-05-12 18:01:32 +00001070#define KMP_FOREACH_D_LOCK(m, a) m(tas, a) m(hle, a)
1071#define KMP_FOREACH_I_LOCK(m, a) \
1072 m(ticket, a) m(queuing, a) m(adaptive, a) m(drdpa, a) m(rtm, a) \
1073 m(nested_tas, a) m(nested_ticket, a) m(nested_queuing, a) \
1074 m(nested_drdpa, a)
1075#endif // KMP_USE_FUTEX
1076#define KMP_LAST_D_LOCK lockseq_hle
1077#else
1078#if KMP_USE_FUTEX
1079#define KMP_FOREACH_D_LOCK(m, a) m(tas, a) m(futex, a)
1080#define KMP_FOREACH_I_LOCK(m, a) \
1081 m(ticket, a) m(queuing, a) m(drdpa, a) m(nested_tas, a) m(nested_futex, a) \
1082 m(nested_ticket, a) m(nested_queuing, a) m(nested_drdpa, a)
1083#define KMP_LAST_D_LOCK lockseq_futex
1084#else
1085#define KMP_FOREACH_D_LOCK(m, a) m(tas, a)
1086#define KMP_FOREACH_I_LOCK(m, a) \
1087 m(ticket, a) m(queuing, a) m(drdpa, a) m(nested_tas, a) m(nested_ticket, a) \
1088 m(nested_queuing, a) m(nested_drdpa, a)
1089#define KMP_LAST_D_LOCK lockseq_tas
1090#endif // KMP_USE_FUTEX
Jonathan Peytondae13d82015-12-11 21:57:06 +00001091#endif // KMP_USE_TSX

// Information used in dynamic dispatch
// Number of low bits to be used as the tag for direct locks
#define KMP_LOCK_SHIFT 8
#define KMP_FIRST_D_LOCK lockseq_tas
#define KMP_FIRST_I_LOCK lockseq_ticket
#define KMP_LAST_I_LOCK lockseq_nested_drdpa
// Number of indirect lock types
#define KMP_NUM_I_LOCKS (locktag_nested_drdpa + 1)

// Base type for dynamic locks.
typedef kmp_uint32 kmp_dyna_lock_t;

// Lock sequence that enumerates all lock kinds. Always keep this enumeration
// consistent with kmp_lockseq_t in the include directory.
typedef enum {
  lockseq_indirect = 0,
#define expand_seq(l, a) lockseq_##l,
  KMP_FOREACH_D_LOCK(expand_seq, 0) KMP_FOREACH_I_LOCK(expand_seq, 0)
#undef expand_seq
} kmp_dyna_lockseq_t;
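
// For reference only: with KMP_USE_TSX and KMP_USE_FUTEX both enabled, the
// X-macro expansion above is equivalent to the following sketch (the real
// enumerators always come from KMP_FOREACH_*_LOCK, never from this list):
#if 0
typedef enum {
  lockseq_indirect = 0,
  lockseq_tas, lockseq_futex, lockseq_hle, // direct lock sequences (1..3)
  lockseq_ticket, lockseq_queuing, lockseq_adaptive, lockseq_drdpa,
  lockseq_rtm, lockseq_nested_tas, lockseq_nested_futex, lockseq_nested_ticket,
  lockseq_nested_queuing, lockseq_nested_drdpa // indirect lock sequences
} kmp_dyna_lockseq_t;
#endif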

// Enumerates indirect lock tags.
typedef enum {
#define expand_tag(l, a) locktag_##l,
  KMP_FOREACH_I_LOCK(expand_tag, 0)
#undef expand_tag
} kmp_indirect_locktag_t;

// Utility macros that extract information from lock sequences.
#define KMP_IS_D_LOCK(seq) \
  ((seq) >= KMP_FIRST_D_LOCK && (seq) <= KMP_LAST_D_LOCK)
#define KMP_IS_I_LOCK(seq) \
  ((seq) >= KMP_FIRST_I_LOCK && (seq) <= KMP_LAST_I_LOCK)
#define KMP_GET_I_TAG(seq) (kmp_indirect_locktag_t)((seq) - KMP_FIRST_I_LOCK)
#define KMP_GET_D_TAG(seq) ((seq) << 1 | 1)
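
// Worked example (assuming KMP_USE_TSX and KMP_USE_FUTEX, so tas/futex/hle
// enumerate as 1/2/3): KMP_GET_D_TAG shifts a 1 into the LSB, giving odd
// direct tags, e.g. KMP_GET_D_TAG(lockseq_tas) == 3 and
// KMP_GET_D_TAG(lockseq_futex) == 5, while KMP_GET_I_TAG(lockseq_ticket) == 0
// is the first indirect tag.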

// Enumerates direct lock tags starting from the indirect tag.
typedef enum {
#define expand_tag(l, a) locktag_##l = KMP_GET_D_TAG(lockseq_##l),
  KMP_FOREACH_D_LOCK(expand_tag, 0)
#undef expand_tag
} kmp_direct_locktag_t;

// Indirect lock type
typedef struct {
  kmp_user_lock_p lock;
  kmp_indirect_locktag_t type;
} kmp_indirect_lock_t;

// Function tables for direct locks. Set/unset/test differentiate functions
// with/without consistency checking.
extern void (*__kmp_direct_init[])(kmp_dyna_lock_t *, kmp_dyna_lockseq_t);
extern void (*__kmp_direct_destroy[])(kmp_dyna_lock_t *);
extern void (*(*__kmp_direct_set))(kmp_dyna_lock_t *, kmp_int32);
extern int (*(*__kmp_direct_unset))(kmp_dyna_lock_t *, kmp_int32);
extern int (*(*__kmp_direct_test))(kmp_dyna_lock_t *, kmp_int32);

// Function tables for indirect locks. Set/unset/test differentiate functions
// with/without consistency checking.
extern void (*__kmp_indirect_init[])(kmp_user_lock_p);
extern void (*__kmp_indirect_destroy[])(kmp_user_lock_p);
extern void (*(*__kmp_indirect_set))(kmp_user_lock_p, kmp_int32);
extern int (*(*__kmp_indirect_unset))(kmp_user_lock_p, kmp_int32);
extern int (*(*__kmp_indirect_test))(kmp_user_lock_p, kmp_int32);

// Extracts the direct lock tag from a user lock pointer. Evaluates to 0 for
// an indirect lock word (LSB clear), since -(word & 1) is then an all-zero
// mask.
#define KMP_EXTRACT_D_TAG(l) \
  (*((kmp_dyna_lock_t *)(l)) & ((1 << KMP_LOCK_SHIFT) - 1) & \
   -(*((kmp_dyna_lock_t *)(l)) & 1))

// Extracts the indirect lock index from a user lock pointer
#define KMP_EXTRACT_I_INDEX(l) (*(kmp_lock_index_t *)(l) >> 1)
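
// Worked example of the two extractions above; the values are illustrative
// only:
#if 0
kmp_dyna_lock_t w_direct = (5 << KMP_LOCK_SHIFT) | locktag_tas; // LSB == 1
kmp_dyna_lock_t w_indirect = 42 << 1;                           // LSB == 0
// KMP_EXTRACT_D_TAG(&w_direct)     -> locktag_tas
// KMP_EXTRACT_D_TAG(&w_indirect)   -> 0 (entry 0 of the direct function
//                                     tables is used for indirect locks;
//                                     see KMP_INIT_I_LOCK below)
// KMP_EXTRACT_I_INDEX(&w_indirect) -> 42
#endif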

// Returns the function pointer to the direct lock function for operation op,
// where l is a kmp_dyna_lock_t *.
#define KMP_D_LOCK_FUNC(l, op) __kmp_direct_##op[KMP_EXTRACT_D_TAG(l)]

// Returns the function pointer to the indirect lock function for operation
// op, where l is a kmp_indirect_lock_t *.
#define KMP_I_LOCK_FUNC(l, op) \
  __kmp_indirect_##op[((kmp_indirect_lock_t *)(l))->type]
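
// A sketch (not a runtime entry point) of how these dispatch macros are
// typically used; gtid is the caller's global thread id:
#if 0
static void sketch_acquire_release(kmp_dyna_lock_t *lck, kmp_int32 gtid) {
  KMP_D_LOCK_FUNC(lck, set)(lck, gtid);   // dispatch "set" on the tag in *lck
  KMP_D_LOCK_FUNC(lck, unset)(lck, gtid); // dispatch "unset" the same way
}
#endif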

// Initializes a direct lock with the given lock pointer and lock sequence.
#define KMP_INIT_D_LOCK(l, seq) \
  __kmp_direct_init[KMP_GET_D_TAG(seq)]((kmp_dyna_lock_t *)l, seq)

// Initializes an indirect lock with the given lock pointer and lock sequence.
#define KMP_INIT_I_LOCK(l, seq) \
  __kmp_direct_init[0]((kmp_dyna_lock_t *)(l), seq)

// Returns the "free" lock value for the given lock type.
#define KMP_LOCK_FREE(type) (locktag_##type)

// Returns the "busy" lock value for the given lock type.
#define KMP_LOCK_BUSY(v, type) ((v) << KMP_LOCK_SHIFT | locktag_##type)

// Returns the lock value after removing (shifting out) the lock tag.
#define KMP_LOCK_STRIP(v) ((v) >> KMP_LOCK_SHIFT)
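
// Worked example with a TAS lock (KMP_LOCK_SHIFT == 8): an unheld word is
// KMP_LOCK_FREE(tas) == locktag_tas, a holder might be encoded as
// KMP_LOCK_BUSY(gtid + 1, tas) == ((gtid + 1) << 8 | locktag_tas), and
// KMP_LOCK_STRIP of that value recovers gtid + 1. What the upper bits mean
// is up to each lock algorithm; gtid + 1 is only an illustration.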

// Initializes global states and data structures for managing dynamic user
// locks.
extern void __kmp_init_dynamic_user_locks();

// Allocates and returns an indirect lock with the given indirect lock tag.
extern kmp_indirect_lock_t *
__kmp_allocate_indirect_lock(void **, kmp_int32, kmp_indirect_locktag_t);

// Cleans up global states and data structures for managing dynamic user locks.
extern void __kmp_cleanup_indirect_user_locks();

// Default user lock sequence when not using hinted locks.
extern kmp_dyna_lockseq_t __kmp_user_lock_seq;

// Jump table for "set lock location", available only for indirect locks.
extern void (*__kmp_indirect_set_location[KMP_NUM_I_LOCKS])(kmp_user_lock_p,
                                                            const ident_t *);
#define KMP_SET_I_LOCK_LOCATION(lck, loc) \
  { \
    if (__kmp_indirect_set_location[(lck)->type] != NULL) \
      __kmp_indirect_set_location[(lck)->type]((lck)->lock, loc); \
  }

// Jump table for "set lock flags", available only for indirect locks.
extern void (*__kmp_indirect_set_flags[KMP_NUM_I_LOCKS])(kmp_user_lock_p,
                                                         kmp_lock_flags_t);
#define KMP_SET_I_LOCK_FLAGS(lck, flag) \
  { \
    if (__kmp_indirect_set_flags[(lck)->type] != NULL) \
      __kmp_indirect_set_flags[(lck)->type]((lck)->lock, flag); \
  }

// Jump table for "get lock location", available only for indirect locks.
extern const ident_t *(*__kmp_indirect_get_location[KMP_NUM_I_LOCKS])(
    kmp_user_lock_p);
#define KMP_GET_I_LOCK_LOCATION(lck) \
  (__kmp_indirect_get_location[(lck)->type] != NULL \
       ? __kmp_indirect_get_location[(lck)->type]((lck)->lock) \
       : NULL)

// Jump table for "get lock flags", available only for indirect locks.
extern kmp_lock_flags_t (*__kmp_indirect_get_flags[KMP_NUM_I_LOCKS])(
    kmp_user_lock_p);
#define KMP_GET_I_LOCK_FLAGS(lck) \
  (__kmp_indirect_get_flags[(lck)->type] != NULL \
       ? __kmp_indirect_get_flags[(lck)->type]((lck)->lock) \
       : NULL)

// Number of kmp_indirect_lock_t objects to be allocated together
#define KMP_I_LOCK_CHUNK 1024

// Lock table for indirect locks.
typedef struct kmp_indirect_lock_table {
  kmp_indirect_lock_t **table; // blocks of indirect locks allocated
  kmp_lock_index_t size; // size of the indirect lock table
  kmp_lock_index_t next; // index to the next lock to be allocated
} kmp_indirect_lock_table_t;

extern kmp_indirect_lock_table_t __kmp_i_lock_table;

// Returns the indirect lock associated with the given index.
#define KMP_GET_I_LOCK(index) \
  (*(__kmp_i_lock_table.table + (index) / KMP_I_LOCK_CHUNK) + \
   (index) % KMP_I_LOCK_CHUNK)
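
// For example, with KMP_I_LOCK_CHUNK == 1024, index 1500 resolves to entry
// 476 of block 1: __kmp_i_lock_table.table[1] + 476.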

// Number of locks in a lock block, which is fixed to "1" now.
// TODO: No lock block implementation for now. If we add support, we will need
// to manage a lock block data structure for each indirect lock type.
extern int __kmp_num_locks_in_block;

// Fast lock table lookup without consistency checking
#define KMP_LOOKUP_I_LOCK(l) \
  ((OMP_LOCK_T_SIZE < sizeof(void *)) ? KMP_GET_I_LOCK(KMP_EXTRACT_I_INDEX(l)) \
                                      : *((kmp_indirect_lock_t **)(l)))

// Used once in kmp_error.cpp
extern kmp_int32 __kmp_get_user_lock_owner(kmp_user_lock_p, kmp_uint32);

#else // KMP_USE_DYNAMIC_LOCK

#define KMP_LOCK_BUSY(v, type) (v)
#define KMP_LOCK_FREE(type) 0
#define KMP_LOCK_STRIP(v) (v)

#endif // KMP_USE_DYNAMIC_LOCK

// Data structure for using backoff within spin locks.
typedef struct {
  kmp_uint32 step; // current step
  kmp_uint32 max_backoff; // upper bound of outer delay loop
  kmp_uint32 min_tick; // size of inner delay loop in ticks (machine-dependent)
} kmp_backoff_t;

// Runtime's default backoff parameters
extern kmp_backoff_t __kmp_spin_backoff_params;

// Backoff function
extern void __kmp_spin_backoff(kmp_backoff_t *);
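
// A minimal sketch of how a spin loop might use this API, assuming the
// truncated-exponential policy the fields above suggest; `lock_is_busy` and
// `lck` are hypothetical:
#if 0
kmp_backoff_t backoff = __kmp_spin_backoff_params; // fresh state per acquire
while (lock_is_busy(lck)) {
  // Delay for roughly `step` units, then grow the step up to `max_backoff`.
  __kmp_spin_backoff(&backoff);
}
#endif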

#ifdef __cplusplus
} // extern "C"
#endif // __cplusplus

#endif /* KMP_LOCK_H */