/*
 * kmp_lock.h -- lock header file
 */

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef KMP_LOCK_H
#define KMP_LOCK_H

#include <limits.h> // CHAR_BIT
#include <stddef.h> // offsetof

#include "kmp_debug.h"
#include "kmp_os.h"

#ifdef __cplusplus
#include <atomic>

extern "C" {
#endif // __cplusplus

// ----------------------------------------------------------------------------
// Have to copy these definitions from kmp.h because kmp.h cannot be included
// due to circular dependencies. Will undef these at end of file.

#define KMP_PAD(type, sz)                                                      \
  (sizeof(type) + (sz - ((sizeof(type) - 1) % (sz)) - 1))
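// For illustration only: KMP_PAD(T, sz) rounds sizeof(T) up to the next
// multiple of sz; e.g. for a hypothetical 20-byte type and a 64-byte cache
// line, 20 + (64 - ((20 - 1) % 64) - 1) == 20 + 44 == 64.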
#define KMP_GTID_DNE (-2)

// Forward declaration of ident and ident_t

struct ident;
typedef struct ident ident_t;

// End of copied code.
// ----------------------------------------------------------------------------

// We need to know the size of the area that we can assume the compiler(s)
// allocated for objects of type omp_lock_t and omp_nest_lock_t. The Intel
// compiler always allocates a pointer-sized area, as does Visual Studio.
//
// gcc, however, only allocates 4 bytes for regular locks, even on 64-bit
// Intel architectures. It allocates at least 8 bytes for nested locks (more
// on recent versions), but we are bounded by the pointer-sized chunks that
// the Intel compiler allocates.

#if KMP_OS_LINUX && defined(KMP_GOMP_COMPAT)
#define OMP_LOCK_T_SIZE sizeof(int)
#define OMP_NEST_LOCK_T_SIZE sizeof(void *)
#else
#define OMP_LOCK_T_SIZE sizeof(void *)
#define OMP_NEST_LOCK_T_SIZE sizeof(void *)
#endif

// The Intel compiler allocates a 32-byte chunk for a critical section.
// Both gcc and Visual Studio only allocate enough space for a pointer.
// Sometimes we know that the space was allocated by the Intel compiler.
#define OMP_CRITICAL_SIZE sizeof(void *)
#define INTEL_CRITICAL_SIZE 32

// lock flags
typedef kmp_uint32 kmp_lock_flags_t;

#define kmp_lf_critical_section 1

// When a lock table is used, the indices are of type kmp_lock_index_t.
typedef kmp_uint32 kmp_lock_index_t;

// When memory allocated for a lock is on the lock pool (free list),
// it is treated as a struct of this type.
struct kmp_lock_pool {
  union kmp_user_lock *next;
  kmp_lock_index_t index;
};

typedef struct kmp_lock_pool kmp_lock_pool_t;

extern void __kmp_validate_locks(void);

// ----------------------------------------------------------------------------
// There are 5 lock implementations:
// 1. Test and set locks.
// 2. futex locks (Linux* OS on x86 and
//    Intel(R) Many Integrated Core Architecture)
// 3. Ticket (Lamport bakery) locks.
// 4. Queuing locks (with separate spin fields).
// 5. DRDPA (Dynamically Reconfigurable Distributed Polling Area) locks
//
// and 3 lock purposes:
// 1. Bootstrap locks -- Used for a few locks available at library
//    startup-shutdown time.
//    These do not require non-negative global thread IDs.
// 2. Internal RTL locks -- Used everywhere else in the RTL.
// 3. User locks (includes critical sections).
// ----------------------------------------------------------------------------

// ============================================================================
// Lock implementations.
//
// Test and set locks.
//
// Non-nested test and set locks differ from the other lock kinds (except
// futex) in that we use the memory allocated by the compiler for the lock,
// rather than a pointer to it.
//
// On lin32, lin_32e, and win_32, the space allocated may be as small as 4
// bytes, so we have to use a lock table for nested locks, and avoid accessing
// the depth_locked field for non-nested locks.
//
// Information normally available to the tools, such as lock location, lock
// usage (normal lock vs. critical section), etc., is not available with test
// and set locks.
// ----------------------------------------------------------------------------

struct kmp_base_tas_lock {
  // KMP_LOCK_FREE(tas) => unlocked; locked: (gtid+1) of owning thread
  std::atomic<kmp_int32> poll;
  kmp_int32 depth_locked; // depth locked, for nested locks only
};

typedef struct kmp_base_tas_lock kmp_base_tas_lock_t;

union kmp_tas_lock {
  kmp_base_tas_lock_t lk;
  kmp_lock_pool_t pool; // make certain struct is large enough
  double lk_align; // use worst case alignment; no cache line padding
};

typedef union kmp_tas_lock kmp_tas_lock_t;

// Static initializer for test and set lock variables. Usage:
//    kmp_tas_lock_t xlock = KMP_TAS_LOCK_INITIALIZER( xlock );
#define KMP_TAS_LOCK_INITIALIZER(lock)                                         \
  {                                                                            \
    { ATOMIC_VAR_INIT(KMP_LOCK_FREE(tas)), 0 }                                 \
  }

extern int __kmp_acquire_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid);
extern int __kmp_test_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid);
extern int __kmp_release_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid);
extern void __kmp_init_tas_lock(kmp_tas_lock_t *lck);
extern void __kmp_destroy_tas_lock(kmp_tas_lock_t *lck);

extern int __kmp_acquire_nested_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid);
extern int __kmp_test_nested_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid);
extern int __kmp_release_nested_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid);
extern void __kmp_init_nested_tas_lock(kmp_tas_lock_t *lck);
extern void __kmp_destroy_nested_tas_lock(kmp_tas_lock_t *lck);

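// For illustration, a minimal (hypothetical) use of the TAS lock API from
// runtime code that already has a global thread id gtid:
//
//    kmp_tas_lock_t lock;
//    __kmp_init_tas_lock(&lock);
//    __kmp_acquire_tas_lock(&lock, gtid); // spins until acquired
//    /* ... critical section ... */
//    __kmp_release_tas_lock(&lock, gtid);
//    __kmp_destroy_tas_lock(&lock);
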
#define KMP_LOCK_RELEASED 1
#define KMP_LOCK_STILL_HELD 0
#define KMP_LOCK_ACQUIRED_FIRST 1
#define KMP_LOCK_ACQUIRED_NEXT 0
#ifndef KMP_USE_FUTEX
#define KMP_USE_FUTEX                                                          \
  (KMP_OS_LINUX && !KMP_OS_CNK &&                                              \
   (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64))
#endif
#if KMP_USE_FUTEX

// ----------------------------------------------------------------------------
// futex locks. futex locks are only available on Linux* OS.
//
// Like non-nested test and set locks, non-nested futex locks use the memory
// allocated by the compiler for the lock, rather than a pointer to it.
//
// Information normally available to the tools, such as lock location, lock
// usage (normal lock vs. critical section), etc., is not available with futex
// locks. With non-nested futex locks, the lock owner is not even available.
// ----------------------------------------------------------------------------

struct kmp_base_futex_lock {
  volatile kmp_int32 poll; // KMP_LOCK_FREE(futex) => unlocked
  // locked: 2*(gtid+1) of owning thread
  kmp_int32 depth_locked; // depth locked, for nested locks only
};

typedef struct kmp_base_futex_lock kmp_base_futex_lock_t;

union kmp_futex_lock {
  kmp_base_futex_lock_t lk;
  kmp_lock_pool_t pool; // make certain struct is large enough
  double lk_align; // use worst case alignment
  // no cache line padding
};

typedef union kmp_futex_lock kmp_futex_lock_t;

// Static initializer for futex lock variables. Usage:
//    kmp_futex_lock_t xlock = KMP_FUTEX_LOCK_INITIALIZER( xlock );
#define KMP_FUTEX_LOCK_INITIALIZER(lock)                                       \
  {                                                                            \
    { KMP_LOCK_FREE(futex), 0 }                                                \
  }

extern int __kmp_acquire_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid);
extern int __kmp_test_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid);
extern int __kmp_release_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid);
extern void __kmp_init_futex_lock(kmp_futex_lock_t *lck);
extern void __kmp_destroy_futex_lock(kmp_futex_lock_t *lck);

extern int __kmp_acquire_nested_futex_lock(kmp_futex_lock_t *lck,
                                           kmp_int32 gtid);
extern int __kmp_test_nested_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid);
extern int __kmp_release_nested_futex_lock(kmp_futex_lock_t *lck,
                                           kmp_int32 gtid);
extern void __kmp_init_nested_futex_lock(kmp_futex_lock_t *lck);
extern void __kmp_destroy_nested_futex_lock(kmp_futex_lock_t *lck);

#endif // KMP_USE_FUTEX

// ----------------------------------------------------------------------------
// Ticket locks.

#ifdef __cplusplus

#ifdef _MSC_VER
// MSVC won't allow use of std::atomic<> in a union since it has a non-trivial
// copy constructor.

struct kmp_base_ticket_lock {
  // `initialized' must be the first entry in the lock data structure!
  std::atomic_bool initialized;
  volatile union kmp_ticket_lock *self; // points to the lock union
  ident_t const *location; // Source code location of omp_init_lock().
  std::atomic_uint
      next_ticket; // ticket number to give to next thread which acquires
  std::atomic_uint now_serving; // ticket number for thread which holds the lock
  std::atomic_int owner_id; // (gtid+1) of owning thread, 0 if unlocked
  std::atomic_int depth_locked; // depth locked, for nested locks only
  kmp_lock_flags_t flags; // lock specifics, e.g. critical section lock
};
#else
struct kmp_base_ticket_lock {
  // `initialized' must be the first entry in the lock data structure!
  std::atomic<bool> initialized;
  volatile union kmp_ticket_lock *self; // points to the lock union
  ident_t const *location; // Source code location of omp_init_lock().
  std::atomic<unsigned>
      next_ticket; // ticket number to give to next thread which acquires
  std::atomic<unsigned>
      now_serving; // ticket number for thread which holds the lock
  std::atomic<int> owner_id; // (gtid+1) of owning thread, 0 if unlocked
  std::atomic<int> depth_locked; // depth locked, for nested locks only
  kmp_lock_flags_t flags; // lock specifics, e.g. critical section lock
};
#endif

#else // __cplusplus

struct kmp_base_ticket_lock;

#endif // !__cplusplus

typedef struct kmp_base_ticket_lock kmp_base_ticket_lock_t;

union KMP_ALIGN_CACHE kmp_ticket_lock {
  kmp_base_ticket_lock_t
      lk; // This field must be first to allow static initializing.
  kmp_lock_pool_t pool;
  double lk_align; // use worst case alignment
  char lk_pad[KMP_PAD(kmp_base_ticket_lock_t, CACHE_LINE)];
};

typedef union kmp_ticket_lock kmp_ticket_lock_t;

// Static initializer for simple ticket lock variables. Usage:
//    kmp_ticket_lock_t xlock = KMP_TICKET_LOCK_INITIALIZER( xlock );
// Note the macro argument: it is required to initialize the variable
// properly (the lock's self field must point back to the lock union).
#define KMP_TICKET_LOCK_INITIALIZER(lock)                                      \
  {                                                                            \
    {                                                                          \
      ATOMIC_VAR_INIT(true)                                                    \
      , &(lock), NULL, ATOMIC_VAR_INIT(0U), ATOMIC_VAR_INIT(0U),               \
          ATOMIC_VAR_INIT(0), ATOMIC_VAR_INIT(-1)                              \
    }                                                                          \
  }

extern int __kmp_acquire_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid);
extern int __kmp_test_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid);
extern int __kmp_test_ticket_lock_with_cheks(kmp_ticket_lock_t *lck,
                                             kmp_int32 gtid);
extern int __kmp_release_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid);
extern void __kmp_init_ticket_lock(kmp_ticket_lock_t *lck);
extern void __kmp_destroy_ticket_lock(kmp_ticket_lock_t *lck);

extern int __kmp_acquire_nested_ticket_lock(kmp_ticket_lock_t *lck,
                                            kmp_int32 gtid);
extern int __kmp_test_nested_ticket_lock(kmp_ticket_lock_t *lck,
                                         kmp_int32 gtid);
extern int __kmp_release_nested_ticket_lock(kmp_ticket_lock_t *lck,
                                            kmp_int32 gtid);
extern void __kmp_init_nested_ticket_lock(kmp_ticket_lock_t *lck);
extern void __kmp_destroy_nested_ticket_lock(kmp_ticket_lock_t *lck);

// ----------------------------------------------------------------------------
// Queuing locks.

#if KMP_USE_ADAPTIVE_LOCKS

struct kmp_adaptive_lock_info;

typedef struct kmp_adaptive_lock_info kmp_adaptive_lock_info_t;

#if KMP_DEBUG_ADAPTIVE_LOCKS

struct kmp_adaptive_lock_statistics {
  /* So we can get stats from locks that haven't been destroyed. */
  kmp_adaptive_lock_info_t *next;
  kmp_adaptive_lock_info_t *prev;

  /* Other statistics */
  kmp_uint32 successfulSpeculations;
  kmp_uint32 hardFailedSpeculations;
  kmp_uint32 softFailedSpeculations;
  kmp_uint32 nonSpeculativeAcquires;
  kmp_uint32 nonSpeculativeAcquireAttempts;
  kmp_uint32 lemmingYields;
};

typedef struct kmp_adaptive_lock_statistics kmp_adaptive_lock_statistics_t;

extern void __kmp_print_speculative_stats();
extern void __kmp_init_speculative_stats();

#endif // KMP_DEBUG_ADAPTIVE_LOCKS

struct kmp_adaptive_lock_info {
  /* Values used for adaptivity.
     Although these are accessed from multiple threads we don't access them
     atomically, because if we miss updates it probably doesn't matter much. (It
     just affects our decision about whether to try speculation on the lock). */
  kmp_uint32 volatile badness;
  kmp_uint32 volatile acquire_attempts;
  /* Parameters of the lock. */
  kmp_uint32 max_badness;
  kmp_uint32 max_soft_retries;

#if KMP_DEBUG_ADAPTIVE_LOCKS
  kmp_adaptive_lock_statistics_t volatile stats;
#endif
};

#endif // KMP_USE_ADAPTIVE_LOCKS

struct kmp_base_queuing_lock {

  // `initialized' must be the first entry in the lock data structure!
  volatile union kmp_queuing_lock
      *initialized; // Points to the lock union if in initialized state.

  ident_t const *location; // Source code location of omp_init_lock().

  KMP_ALIGN(8) // tail_id must be 8-byte aligned!

  volatile kmp_int32
      tail_id; // (gtid+1) of thread at tail of wait queue, 0 if empty
  // Must be no padding here since head/tail used in 8-byte CAS
  volatile kmp_int32
      head_id; // (gtid+1) of thread at head of wait queue, 0 if empty
  // Decl order assumes little endian
  // bakery-style lock
  volatile kmp_uint32
      next_ticket; // ticket number to give to next thread which acquires
  volatile kmp_uint32
      now_serving; // ticket number for thread which holds the lock
  volatile kmp_int32 owner_id; // (gtid+1) of owning thread, 0 if unlocked
  kmp_int32 depth_locked; // depth locked, for nested locks only

  kmp_lock_flags_t flags; // lock specifics, e.g. critical section lock
};

typedef struct kmp_base_queuing_lock kmp_base_queuing_lock_t;

KMP_BUILD_ASSERT(offsetof(kmp_base_queuing_lock_t, tail_id) % 8 == 0);

union KMP_ALIGN_CACHE kmp_queuing_lock {
  kmp_base_queuing_lock_t
      lk; // This field must be first to allow static initializing.
  kmp_lock_pool_t pool;
  double lk_align; // use worst case alignment
  char lk_pad[KMP_PAD(kmp_base_queuing_lock_t, CACHE_LINE)];
};

typedef union kmp_queuing_lock kmp_queuing_lock_t;

extern int __kmp_acquire_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid);
extern int __kmp_test_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid);
extern int __kmp_release_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid);
extern void __kmp_init_queuing_lock(kmp_queuing_lock_t *lck);
extern void __kmp_destroy_queuing_lock(kmp_queuing_lock_t *lck);

extern int __kmp_acquire_nested_queuing_lock(kmp_queuing_lock_t *lck,
                                             kmp_int32 gtid);
extern int __kmp_test_nested_queuing_lock(kmp_queuing_lock_t *lck,
                                          kmp_int32 gtid);
extern int __kmp_release_nested_queuing_lock(kmp_queuing_lock_t *lck,
                                             kmp_int32 gtid);
extern void __kmp_init_nested_queuing_lock(kmp_queuing_lock_t *lck);
extern void __kmp_destroy_nested_queuing_lock(kmp_queuing_lock_t *lck);

#if KMP_USE_ADAPTIVE_LOCKS

// ----------------------------------------------------------------------------
// Adaptive locks.
struct kmp_base_adaptive_lock {
  kmp_base_queuing_lock qlk;
  KMP_ALIGN(CACHE_LINE)
  kmp_adaptive_lock_info_t
      adaptive; // Information for the speculative adaptive lock
};

typedef struct kmp_base_adaptive_lock kmp_base_adaptive_lock_t;

union KMP_ALIGN_CACHE kmp_adaptive_lock {
  kmp_base_adaptive_lock_t lk;
  kmp_lock_pool_t pool;
  double lk_align;
  char lk_pad[KMP_PAD(kmp_base_adaptive_lock_t, CACHE_LINE)];
};
typedef union kmp_adaptive_lock kmp_adaptive_lock_t;

#define GET_QLK_PTR(l) ((kmp_queuing_lock_t *)&(l)->lk.qlk)

#endif // KMP_USE_ADAPTIVE_LOCKS

// ----------------------------------------------------------------------------
// DRDPA ticket locks.
struct kmp_base_drdpa_lock {
  // All of the fields on the first cache line are only written when
  // initializing or reconfiguring the lock. These are relatively rare
  // operations, so data from the first cache line will usually stay resident
  // in the cache of each thread trying to acquire the lock.
  //
  // initialized must be the first entry in the lock data structure!
  KMP_ALIGN_CACHE

  volatile union kmp_drdpa_lock
      *initialized; // points to the lock union if in initialized state
  ident_t const *location; // Source code location of omp_init_lock().
  std::atomic<std::atomic<kmp_uint64> *> polls;
  std::atomic<kmp_uint64> mask; // is 2**num_polls-1 for mod op
  kmp_uint64 cleanup_ticket; // thread with cleanup ticket
  std::atomic<kmp_uint64> *old_polls; // will deallocate old_polls
  kmp_uint32 num_polls; // must be power of 2

  // next_ticket needs to exist in a separate cache line, as it is
  // invalidated every time a thread takes a new ticket.
  KMP_ALIGN_CACHE

  std::atomic<kmp_uint64> next_ticket;

  // now_serving is used to store our ticket value while we hold the lock. It
  // has a slightly different meaning in the DRDPA ticket locks (where it is
  // written by the acquiring thread) than it does in the simple ticket locks
  // (where it is written by the releasing thread).
  //
  // Since now_serving is only read and written in the critical section,
  // it is non-volatile, but it needs to exist on a separate cache line,
  // as it is invalidated at every lock acquire.
  //
  // Likewise, the vars used for nested locks (owner_id and depth_locked) are
  // only written by the thread owning the lock, so they are put in this cache
  // line. owner_id is read by other threads, so it must be declared volatile.
  KMP_ALIGN_CACHE
  kmp_uint64 now_serving; // doesn't have to be volatile
  volatile kmp_uint32 owner_id; // (gtid+1) of owning thread, 0 if unlocked
  kmp_int32 depth_locked; // depth locked
  kmp_lock_flags_t flags; // lock specifics, e.g. critical section lock
};

typedef struct kmp_base_drdpa_lock kmp_base_drdpa_lock_t;

union KMP_ALIGN_CACHE kmp_drdpa_lock {
  kmp_base_drdpa_lock_t
      lk; // This field must be first to allow static initializing.
  kmp_lock_pool_t pool;
  double lk_align; // use worst case alignment
  char lk_pad[KMP_PAD(kmp_base_drdpa_lock_t, CACHE_LINE)];
};

typedef union kmp_drdpa_lock kmp_drdpa_lock_t;

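// For illustration, a sketch (not the actual implementation) of how the
// polling area is indexed on acquire: with mask == num_polls - 1 and
// num_polls a power of 2, each ticket maps to its own poll location:
//
//    kmp_uint64 ticket = lck->lk.next_ticket++; // atomic fetch-add
//    std::atomic<kmp_uint64> *polls = lck->lk.polls;
//    while (polls[ticket & lck->lk.mask] < ticket)
//      ; // the real code spins with yields/back-off here
//    lck->lk.now_serving = ticket; // remembered for the release path
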
extern int __kmp_acquire_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid);
extern int __kmp_test_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid);
extern int __kmp_release_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid);
extern void __kmp_init_drdpa_lock(kmp_drdpa_lock_t *lck);
extern void __kmp_destroy_drdpa_lock(kmp_drdpa_lock_t *lck);

extern int __kmp_acquire_nested_drdpa_lock(kmp_drdpa_lock_t *lck,
                                           kmp_int32 gtid);
extern int __kmp_test_nested_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid);
extern int __kmp_release_nested_drdpa_lock(kmp_drdpa_lock_t *lck,
                                           kmp_int32 gtid);
extern void __kmp_init_nested_drdpa_lock(kmp_drdpa_lock_t *lck);
extern void __kmp_destroy_nested_drdpa_lock(kmp_drdpa_lock_t *lck);

// ============================================================================
// Lock purposes.
// ============================================================================

// Bootstrap locks.
//
// Bootstrap locks -- very few locks used at library initialization time.
// Bootstrap locks are currently implemented as ticket locks.
// They could also be implemented as test and set locks, but cannot be
// implemented with other lock kinds as they require gtids, which are not
// available at initialization time.

typedef kmp_ticket_lock_t kmp_bootstrap_lock_t;

#define KMP_BOOTSTRAP_LOCK_INITIALIZER(lock) KMP_TICKET_LOCK_INITIALIZER((lock))
#define KMP_BOOTSTRAP_LOCK_INIT(lock)                                          \
  kmp_bootstrap_lock_t lock = KMP_TICKET_LOCK_INITIALIZER(lock)

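// For illustration, a hypothetical bootstrap lock guarding one-time setup;
// note that no gtid is required (KMP_GTID_DNE is used internally):
//
//    static KMP_BOOTSTRAP_LOCK_INIT(my_init_lock);
//    ...
//    __kmp_acquire_bootstrap_lock(&my_init_lock);
//    /* one-time initialization */
//    __kmp_release_bootstrap_lock(&my_init_lock);
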
static inline int __kmp_acquire_bootstrap_lock(kmp_bootstrap_lock_t *lck) {
  return __kmp_acquire_ticket_lock(lck, KMP_GTID_DNE);
}

static inline int __kmp_test_bootstrap_lock(kmp_bootstrap_lock_t *lck) {
  return __kmp_test_ticket_lock(lck, KMP_GTID_DNE);
}

static inline void __kmp_release_bootstrap_lock(kmp_bootstrap_lock_t *lck) {
  __kmp_release_ticket_lock(lck, KMP_GTID_DNE);
}

static inline void __kmp_init_bootstrap_lock(kmp_bootstrap_lock_t *lck) {
  __kmp_init_ticket_lock(lck);
}

static inline void __kmp_destroy_bootstrap_lock(kmp_bootstrap_lock_t *lck) {
  __kmp_destroy_ticket_lock(lck);
}

// Internal RTL locks.
//
// Internal RTL locks are also implemented as ticket locks, for now.
//
// FIXME - We should go through and figure out which lock kind works best for
// each internal lock, and use the type declaration and function calls for
// that explicit lock kind (and get rid of this section).

typedef kmp_ticket_lock_t kmp_lock_t;

#define KMP_LOCK_INIT(lock) kmp_lock_t lock = KMP_TICKET_LOCK_INITIALIZER(lock)

static inline int __kmp_acquire_lock(kmp_lock_t *lck, kmp_int32 gtid) {
  return __kmp_acquire_ticket_lock(lck, gtid);
}

static inline int __kmp_test_lock(kmp_lock_t *lck, kmp_int32 gtid) {
  return __kmp_test_ticket_lock(lck, gtid);
}

static inline void __kmp_release_lock(kmp_lock_t *lck, kmp_int32 gtid) {
  __kmp_release_ticket_lock(lck, gtid);
}

static inline void __kmp_init_lock(kmp_lock_t *lck) {
  __kmp_init_ticket_lock(lck);
}

static inline void __kmp_destroy_lock(kmp_lock_t *lck) {
  __kmp_destroy_ticket_lock(lck);
}

// User locks.
//
// Do not allocate objects of type union kmp_user_lock!!! This will waste space
// unless __kmp_user_lock_kind == lk_drdpa. Instead, check the value of
// __kmp_user_lock_kind and allocate objects of the type of the appropriate
// union member, and cast their addresses to kmp_user_lock_p.

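// For illustration, a hypothetical allocation that follows this rule
// (__kmp_allocate is the runtime's allocator, declared in kmp.h):
//
//    kmp_user_lock_p lck;
//    if (__kmp_user_lock_kind == lk_ticket) {
//      lck = (kmp_user_lock_p)__kmp_allocate(sizeof(kmp_ticket_lock_t));
//    }
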
enum kmp_lock_kind {
  lk_default = 0,
  lk_tas,
#if KMP_USE_FUTEX
  lk_futex,
#endif
#if KMP_USE_DYNAMIC_LOCK && KMP_USE_TSX
  lk_hle,
  lk_rtm,
#endif
  lk_ticket,
  lk_queuing,
  lk_drdpa,
#if KMP_USE_ADAPTIVE_LOCKS
  lk_adaptive
#endif // KMP_USE_ADAPTIVE_LOCKS
};

typedef enum kmp_lock_kind kmp_lock_kind_t;

extern kmp_lock_kind_t __kmp_user_lock_kind;

union kmp_user_lock {
  kmp_tas_lock_t tas;
#if KMP_USE_FUTEX
  kmp_futex_lock_t futex;
#endif
  kmp_ticket_lock_t ticket;
  kmp_queuing_lock_t queuing;
  kmp_drdpa_lock_t drdpa;
#if KMP_USE_ADAPTIVE_LOCKS
  kmp_adaptive_lock_t adaptive;
#endif // KMP_USE_ADAPTIVE_LOCKS
  kmp_lock_pool_t pool;
};

typedef union kmp_user_lock *kmp_user_lock_p;

#if !KMP_USE_DYNAMIC_LOCK

extern size_t __kmp_base_user_lock_size;
extern size_t __kmp_user_lock_size;

extern kmp_int32 (*__kmp_get_user_lock_owner_)(kmp_user_lock_p lck);

static inline kmp_int32 __kmp_get_user_lock_owner(kmp_user_lock_p lck) {
  KMP_DEBUG_ASSERT(__kmp_get_user_lock_owner_ != NULL);
  return (*__kmp_get_user_lock_owner_)(lck);
}

extern int (*__kmp_acquire_user_lock_with_checks_)(kmp_user_lock_p lck,
                                                   kmp_int32 gtid);

#if KMP_OS_LINUX &&                                                            \
    (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)

#define __kmp_acquire_user_lock_with_checks(lck, gtid)                         \
  if (__kmp_user_lock_kind == lk_tas) {                                        \
    if (__kmp_env_consistency_check) {                                         \
      char const *const func = "omp_set_lock";                                 \
      if ((sizeof(kmp_tas_lock_t) <= OMP_LOCK_T_SIZE) &&                       \
          lck->tas.lk.depth_locked != -1) {                                    \
        KMP_FATAL(LockNestableUsedAsSimple, func);                             \
      }                                                                        \
      if ((gtid >= 0) && (lck->tas.lk.poll - 1 == gtid)) {                     \
        KMP_FATAL(LockIsAlreadyOwned, func);                                   \
      }                                                                        \
    }                                                                          \
    if (lck->tas.lk.poll != 0 ||                                               \
        !__kmp_atomic_compare_store_acq(&lck->tas.lk.poll, 0, gtid + 1)) {     \
      kmp_uint32 spins;                                                        \
      KMP_FSYNC_PREPARE(lck);                                                  \
      KMP_INIT_YIELD(spins);                                                   \
      do {                                                                     \
        KMP_YIELD_OVERSUB_ELSE_SPIN(spins);                                    \
      } while (                                                                \
          lck->tas.lk.poll != 0 ||                                             \
          !__kmp_atomic_compare_store_acq(&lck->tas.lk.poll, 0, gtid + 1));    \
    }                                                                          \
    KMP_FSYNC_ACQUIRED(lck);                                                   \
  } else {                                                                     \
    KMP_DEBUG_ASSERT(__kmp_acquire_user_lock_with_checks_ != NULL);            \
    (*__kmp_acquire_user_lock_with_checks_)(lck, gtid);                        \
  }

#else
static inline int __kmp_acquire_user_lock_with_checks(kmp_user_lock_p lck,
                                                      kmp_int32 gtid) {
  KMP_DEBUG_ASSERT(__kmp_acquire_user_lock_with_checks_ != NULL);
  return (*__kmp_acquire_user_lock_with_checks_)(lck, gtid);
}
#endif

extern int (*__kmp_test_user_lock_with_checks_)(kmp_user_lock_p lck,
                                                kmp_int32 gtid);

#if KMP_OS_LINUX &&                                                            \
    (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)

#include "kmp_i18n.h" /* AC: KMP_FATAL definition */
extern int __kmp_env_consistency_check; /* AC: copy from kmp.h here */
static inline int __kmp_test_user_lock_with_checks(kmp_user_lock_p lck,
                                                   kmp_int32 gtid) {
  if (__kmp_user_lock_kind == lk_tas) {
    if (__kmp_env_consistency_check) {
      char const *const func = "omp_test_lock";
      if ((sizeof(kmp_tas_lock_t) <= OMP_LOCK_T_SIZE) &&
          lck->tas.lk.depth_locked != -1) {
        KMP_FATAL(LockNestableUsedAsSimple, func);
      }
    }
    return ((lck->tas.lk.poll == 0) &&
            __kmp_atomic_compare_store_acq(&lck->tas.lk.poll, 0, gtid + 1));
  } else {
    KMP_DEBUG_ASSERT(__kmp_test_user_lock_with_checks_ != NULL);
    return (*__kmp_test_user_lock_with_checks_)(lck, gtid);
  }
}
#else
static inline int __kmp_test_user_lock_with_checks(kmp_user_lock_p lck,
                                                   kmp_int32 gtid) {
  KMP_DEBUG_ASSERT(__kmp_test_user_lock_with_checks_ != NULL);
  return (*__kmp_test_user_lock_with_checks_)(lck, gtid);
}
#endif

extern int (*__kmp_release_user_lock_with_checks_)(kmp_user_lock_p lck,
                                                   kmp_int32 gtid);

static inline void __kmp_release_user_lock_with_checks(kmp_user_lock_p lck,
                                                       kmp_int32 gtid) {
  KMP_DEBUG_ASSERT(__kmp_release_user_lock_with_checks_ != NULL);
  (*__kmp_release_user_lock_with_checks_)(lck, gtid);
}

extern void (*__kmp_init_user_lock_with_checks_)(kmp_user_lock_p lck);

static inline void __kmp_init_user_lock_with_checks(kmp_user_lock_p lck) {
  KMP_DEBUG_ASSERT(__kmp_init_user_lock_with_checks_ != NULL);
  (*__kmp_init_user_lock_with_checks_)(lck);
}

// We need a non-checking version of destroy lock for when the RTL is
// doing the cleanup as it can't always tell if the lock is nested or not.
extern void (*__kmp_destroy_user_lock_)(kmp_user_lock_p lck);

static inline void __kmp_destroy_user_lock(kmp_user_lock_p lck) {
  KMP_DEBUG_ASSERT(__kmp_destroy_user_lock_ != NULL);
  (*__kmp_destroy_user_lock_)(lck);
}

extern void (*__kmp_destroy_user_lock_with_checks_)(kmp_user_lock_p lck);

static inline void __kmp_destroy_user_lock_with_checks(kmp_user_lock_p lck) {
  KMP_DEBUG_ASSERT(__kmp_destroy_user_lock_with_checks_ != NULL);
  (*__kmp_destroy_user_lock_with_checks_)(lck);
}

extern int (*__kmp_acquire_nested_user_lock_with_checks_)(kmp_user_lock_p lck,
                                                          kmp_int32 gtid);

#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64)

#define __kmp_acquire_nested_user_lock_with_checks(lck, gtid, depth)           \
  if (__kmp_user_lock_kind == lk_tas) {                                        \
    if (__kmp_env_consistency_check) {                                         \
      char const *const func = "omp_set_nest_lock";                            \
      if ((sizeof(kmp_tas_lock_t) <= OMP_NEST_LOCK_T_SIZE) &&                  \
          lck->tas.lk.depth_locked == -1) {                                    \
        KMP_FATAL(LockSimpleUsedAsNestable, func);                             \
      }                                                                        \
    }                                                                          \
    if (lck->tas.lk.poll - 1 == gtid) {                                        \
      lck->tas.lk.depth_locked += 1;                                           \
      *depth = KMP_LOCK_ACQUIRED_NEXT;                                         \
    } else {                                                                   \
      if ((lck->tas.lk.poll != 0) ||                                           \
          !__kmp_atomic_compare_store_acq(&lck->tas.lk.poll, 0, gtid + 1)) {   \
        kmp_uint32 spins;                                                      \
        KMP_FSYNC_PREPARE(lck);                                                \
        KMP_INIT_YIELD(spins);                                                 \
        do {                                                                   \
          KMP_YIELD_OVERSUB_ELSE_SPIN(spins);                                  \
        } while (                                                              \
            (lck->tas.lk.poll != 0) ||                                         \
            !__kmp_atomic_compare_store_acq(&lck->tas.lk.poll, 0, gtid + 1));  \
      }                                                                        \
      lck->tas.lk.depth_locked = 1;                                            \
      *depth = KMP_LOCK_ACQUIRED_FIRST;                                        \
    }                                                                          \
    KMP_FSYNC_ACQUIRED(lck);                                                   \
  } else {                                                                     \
    KMP_DEBUG_ASSERT(__kmp_acquire_nested_user_lock_with_checks_ != NULL);     \
    *depth = (*__kmp_acquire_nested_user_lock_with_checks_)(lck, gtid);        \
  }

#else
static inline void
__kmp_acquire_nested_user_lock_with_checks(kmp_user_lock_p lck, kmp_int32 gtid,
                                           int *depth) {
  KMP_DEBUG_ASSERT(__kmp_acquire_nested_user_lock_with_checks_ != NULL);
  *depth = (*__kmp_acquire_nested_user_lock_with_checks_)(lck, gtid);
}
#endif

extern int (*__kmp_test_nested_user_lock_with_checks_)(kmp_user_lock_p lck,
                                                       kmp_int32 gtid);

#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64)
static inline int __kmp_test_nested_user_lock_with_checks(kmp_user_lock_p lck,
                                                          kmp_int32 gtid) {
  if (__kmp_user_lock_kind == lk_tas) {
    int retval;
    if (__kmp_env_consistency_check) {
      char const *const func = "omp_test_nest_lock";
      if ((sizeof(kmp_tas_lock_t) <= OMP_NEST_LOCK_T_SIZE) &&
          lck->tas.lk.depth_locked == -1) {
        KMP_FATAL(LockSimpleUsedAsNestable, func);
      }
    }
    KMP_DEBUG_ASSERT(gtid >= 0);
    if (lck->tas.lk.poll - 1 ==
        gtid) { /* __kmp_get_tas_lock_owner( lck ) == gtid */
      return ++lck->tas.lk.depth_locked; /* same owner, depth increased */
    }
    retval = ((lck->tas.lk.poll == 0) &&
              __kmp_atomic_compare_store_acq(&lck->tas.lk.poll, 0, gtid + 1));
    if (retval) {
      KMP_MB();
      lck->tas.lk.depth_locked = 1;
    }
    return retval;
  } else {
    KMP_DEBUG_ASSERT(__kmp_test_nested_user_lock_with_checks_ != NULL);
    return (*__kmp_test_nested_user_lock_with_checks_)(lck, gtid);
  }
}
#else
static inline int __kmp_test_nested_user_lock_with_checks(kmp_user_lock_p lck,
                                                          kmp_int32 gtid) {
  KMP_DEBUG_ASSERT(__kmp_test_nested_user_lock_with_checks_ != NULL);
  return (*__kmp_test_nested_user_lock_with_checks_)(lck, gtid);
}
#endif

extern int (*__kmp_release_nested_user_lock_with_checks_)(kmp_user_lock_p lck,
                                                          kmp_int32 gtid);

static inline int
__kmp_release_nested_user_lock_with_checks(kmp_user_lock_p lck,
                                           kmp_int32 gtid) {
  KMP_DEBUG_ASSERT(__kmp_release_nested_user_lock_with_checks_ != NULL);
  return (*__kmp_release_nested_user_lock_with_checks_)(lck, gtid);
}

extern void (*__kmp_init_nested_user_lock_with_checks_)(kmp_user_lock_p lck);

static inline void
__kmp_init_nested_user_lock_with_checks(kmp_user_lock_p lck) {
  KMP_DEBUG_ASSERT(__kmp_init_nested_user_lock_with_checks_ != NULL);
  (*__kmp_init_nested_user_lock_with_checks_)(lck);
}

extern void (*__kmp_destroy_nested_user_lock_with_checks_)(kmp_user_lock_p lck);

static inline void
__kmp_destroy_nested_user_lock_with_checks(kmp_user_lock_p lck) {
  KMP_DEBUG_ASSERT(__kmp_destroy_nested_user_lock_with_checks_ != NULL);
  (*__kmp_destroy_nested_user_lock_with_checks_)(lck);
}

// user lock functions which do not necessarily exist for all lock kinds.
//
// The "set" functions usually have wrapper routines that check for a NULL set
// function pointer and call it if non-NULL.
//
// In some cases, it makes sense to have a "get" wrapper function check for a
// NULL get function pointer and return NULL / invalid value / error code if
// the function pointer is NULL.
//
// In other cases, the calling code really should differentiate between an
// unimplemented function and one that is implemented but returning NULL /
// invalid value. If this is the case, no get function wrapper exists.

extern int (*__kmp_is_user_lock_initialized_)(kmp_user_lock_p lck);

// no set function; fields set during local allocation

extern const ident_t *(*__kmp_get_user_lock_location_)(kmp_user_lock_p lck);

static inline const ident_t *__kmp_get_user_lock_location(kmp_user_lock_p lck) {
  if (__kmp_get_user_lock_location_ != NULL) {
    return (*__kmp_get_user_lock_location_)(lck);
  } else {
    return NULL;
  }
}

extern void (*__kmp_set_user_lock_location_)(kmp_user_lock_p lck,
                                             const ident_t *loc);

static inline void __kmp_set_user_lock_location(kmp_user_lock_p lck,
                                                const ident_t *loc) {
  if (__kmp_set_user_lock_location_ != NULL) {
    (*__kmp_set_user_lock_location_)(lck, loc);
  }
}

extern kmp_lock_flags_t (*__kmp_get_user_lock_flags_)(kmp_user_lock_p lck);

extern void (*__kmp_set_user_lock_flags_)(kmp_user_lock_p lck,
                                          kmp_lock_flags_t flags);

static inline void __kmp_set_user_lock_flags(kmp_user_lock_p lck,
                                             kmp_lock_flags_t flags) {
  if (__kmp_set_user_lock_flags_ != NULL) {
    (*__kmp_set_user_lock_flags_)(lck, flags);
  }
}

// The function which sets up all of the vtbl pointers for kmp_user_lock_t.
extern void __kmp_set_user_lock_vptrs(kmp_lock_kind_t user_lock_kind);

// Macros for binding user lock functions.
#define KMP_BIND_USER_LOCK_TEMPLATE(nest, kind, suffix)                        \
  {                                                                            \
    __kmp_acquire##nest##user_lock_with_checks_ = (int (*)(                    \
        kmp_user_lock_p, kmp_int32))__kmp_acquire##nest##kind##_##suffix;      \
    __kmp_release##nest##user_lock_with_checks_ = (int (*)(                    \
        kmp_user_lock_p, kmp_int32))__kmp_release##nest##kind##_##suffix;      \
    __kmp_test##nest##user_lock_with_checks_ = (int (*)(                       \
        kmp_user_lock_p, kmp_int32))__kmp_test##nest##kind##_##suffix;         \
    __kmp_init##nest##user_lock_with_checks_ =                                 \
        (void (*)(kmp_user_lock_p))__kmp_init##nest##kind##_##suffix;          \
    __kmp_destroy##nest##user_lock_with_checks_ =                              \
        (void (*)(kmp_user_lock_p))__kmp_destroy##nest##kind##_##suffix;       \
  }

#define KMP_BIND_USER_LOCK(kind) KMP_BIND_USER_LOCK_TEMPLATE(_, kind, lock)
#define KMP_BIND_USER_LOCK_WITH_CHECKS(kind)                                   \
  KMP_BIND_USER_LOCK_TEMPLATE(_, kind, lock_with_checks)
#define KMP_BIND_NESTED_USER_LOCK(kind)                                        \
  KMP_BIND_USER_LOCK_TEMPLATE(_nested_, kind, lock)
#define KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(kind)                            \
  KMP_BIND_USER_LOCK_TEMPLATE(_nested_, kind, lock_with_checks)

// User lock table & lock allocation
/* On 64-bit Linux* OS (and OS X*) the GNU compiler allocates only 4 bytes of
   memory for the lock variable, which is not enough to store a pointer, so we
   have to use lock indexes instead of pointers and maintain a lock table to
   map indexes to pointers.

   Note: The first element of the table is not a pointer to a lock! It is a
   pointer to the previously allocated table (or NULL if it is the first
   table).

   Usage:

   if ( OMP_LOCK_T_SIZE < sizeof( <lock> ) ) { // or OMP_NEST_LOCK_T_SIZE
     Lock table is fully utilized. User locks are indexes, so the table is
     used on every user lock operation.
     Note: it may be the case (lin_32) that we don't need to use a lock
     table for regular locks, but do need the table for nested locks.
   }
   else {
     Lock table initialized but not actually used.
   }
*/

struct kmp_lock_table {
  kmp_lock_index_t used; // Number of used elements
  kmp_lock_index_t allocated; // Number of allocated elements
  kmp_user_lock_p *table; // Lock table.
};

typedef struct kmp_lock_table kmp_lock_table_t;

extern kmp_lock_table_t __kmp_user_lock_table;
extern kmp_user_lock_p __kmp_lock_pool;

struct kmp_block_of_locks {
  struct kmp_block_of_locks *next_block;
  void *locks;
};

typedef struct kmp_block_of_locks kmp_block_of_locks_t;

extern kmp_block_of_locks_t *__kmp_lock_blocks;
extern int __kmp_num_locks_in_block;

extern kmp_user_lock_p __kmp_user_lock_allocate(void **user_lock,
                                                kmp_int32 gtid,
                                                kmp_lock_flags_t flags);
extern void __kmp_user_lock_free(void **user_lock, kmp_int32 gtid,
                                 kmp_user_lock_p lck);
extern kmp_user_lock_p __kmp_lookup_user_lock(void **user_lock,
                                              char const *func);
extern void __kmp_cleanup_user_locks();

#define KMP_CHECK_USER_LOCK_INIT()                                             \
  {                                                                            \
    if (!TCR_4(__kmp_init_user_locks)) {                                       \
      __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);                         \
      if (!TCR_4(__kmp_init_user_locks)) {                                     \
        TCW_4(__kmp_init_user_locks, TRUE);                                    \
      }                                                                        \
      __kmp_release_bootstrap_lock(&__kmp_initz_lock);                         \
    }                                                                          \
  }

#endif // KMP_USE_DYNAMIC_LOCK


#undef KMP_PAD
#undef KMP_GTID_DNE

#if KMP_USE_DYNAMIC_LOCK
// KMP_USE_DYNAMIC_LOCK enables dynamic dispatch of lock functions without
// breaking the current compatibility. Essential functionality of this new code
// is dynamic dispatch, but it also implements (or enables implementation of)
// hinted user locks and critical sections which will be part of OMP 4.5 soon.
//
// Lock type can be decided at creation time (i.e., lock initialization), and
// subsequent lock function calls on the created lock object require type
// extraction and a call through a jump table using the extracted type. This
// type information is stored in two different ways depending on the size of
// the lock object, and we differentiate lock types by this size requirement -
// direct and indirect locks.
//
// Direct locks:
// A direct lock object fits into the space created by the compiler for an
// omp_lock_t object, and TAS/Futex locks fall into this category. We use the
// low byte of the lock object as the storage for the lock type, and an
// appropriate bit operation is required to access the data meaningful to the
// lock algorithms. Also, to differentiate a direct lock from an indirect lock,
// 1 is written to the LSB of the lock object. The newly introduced "hle" lock
// is also a direct lock.
//
// Indirect locks:
// An indirect lock object requires more space than the compiler-generated
// space, and it should be allocated from the heap. Depending on the size of
// the compiler-generated space for the lock (i.e., size of omp_lock_t), this
// omp_lock_t object stores either the address of the heap-allocated indirect
// lock (void * fits in the object) or an index to the indirect lock table
// entry that holds the address. Ticket/Queuing/DRDPA/Adaptive locks fall into
// this category, and the newly introduced "rtm" lock is also an indirect lock
// which was implemented on top of the Queuing lock. When the omp_lock_t object
// holds an index (not a lock address), 0 is written to the LSB to
// differentiate the lock from a direct lock, and the remaining part is the
// actual index to the indirect lock table.

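// For illustration, a sketch of the tag layout described above (not the
// actual dispatch code): a direct lock stores (seq << 1) | 1, so its LSB is
// 1; an indirect lock index is stored as (index << 1), so its LSB is 0:
//
//    kmp_uint32 word = *(kmp_uint32 *)user_lock;
//    if (word & 1) {
//      /* direct: low byte tags the lock type */
//    } else {
//      /* indirect: word >> 1 indexes the indirect lock table */
//    }
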
#include <stdint.h> // for uintptr_t

// Shortcuts
#define KMP_USE_INLINED_TAS                                                    \
  (KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM)) && 1
#define KMP_USE_INLINED_FUTEX KMP_USE_FUTEX && 0

// List of lock definitions; all nested locks are indirect lock types.
// The hle lock is an xchg lock prefixed with XACQUIRE/XRELEASE.
#if KMP_USE_TSX
Jonathan Peyton30419822017-05-12 18:01:32 +00001043#if KMP_USE_FUTEX
1044#define KMP_FOREACH_D_LOCK(m, a) m(tas, a) m(futex, a) m(hle, a)
1045#define KMP_FOREACH_I_LOCK(m, a) \
1046 m(ticket, a) m(queuing, a) m(adaptive, a) m(drdpa, a) m(rtm, a) \
1047 m(nested_tas, a) m(nested_futex, a) m(nested_ticket, a) \
1048 m(nested_queuing, a) m(nested_drdpa, a)
Andrey Churbanov5c56fb52015-02-20 18:05:17 +00001049#else
Jonathan Peyton30419822017-05-12 18:01:32 +00001050#define KMP_FOREACH_D_LOCK(m, a) m(tas, a) m(hle, a)
1051#define KMP_FOREACH_I_LOCK(m, a) \
1052 m(ticket, a) m(queuing, a) m(adaptive, a) m(drdpa, a) m(rtm, a) \
1053 m(nested_tas, a) m(nested_ticket, a) m(nested_queuing, a) \
1054 m(nested_drdpa, a)
1055#endif // KMP_USE_FUTEX
1056#define KMP_LAST_D_LOCK lockseq_hle
1057#else
1058#if KMP_USE_FUTEX
1059#define KMP_FOREACH_D_LOCK(m, a) m(tas, a) m(futex, a)
1060#define KMP_FOREACH_I_LOCK(m, a) \
1061 m(ticket, a) m(queuing, a) m(drdpa, a) m(nested_tas, a) m(nested_futex, a) \
1062 m(nested_ticket, a) m(nested_queuing, a) m(nested_drdpa, a)
1063#define KMP_LAST_D_LOCK lockseq_futex
1064#else
1065#define KMP_FOREACH_D_LOCK(m, a) m(tas, a)
1066#define KMP_FOREACH_I_LOCK(m, a) \
1067 m(ticket, a) m(queuing, a) m(drdpa, a) m(nested_tas, a) m(nested_ticket, a) \
1068 m(nested_queuing, a) m(nested_drdpa, a)
1069#define KMP_LAST_D_LOCK lockseq_tas
1070#endif // KMP_USE_FUTEX
Jonathan Peytondae13d82015-12-11 21:57:06 +00001071#endif // KMP_USE_TSX

// Information used in dynamic dispatch
#define KMP_LOCK_SHIFT                                                         \
  8 // number of low bits to be used as tag for direct locks
#define KMP_FIRST_D_LOCK lockseq_tas
#define KMP_FIRST_I_LOCK lockseq_ticket
#define KMP_LAST_I_LOCK lockseq_nested_drdpa
#define KMP_NUM_I_LOCKS                                                        \
  (locktag_nested_drdpa + 1) // number of indirect lock types

// Base type for dynamic locks.
typedef kmp_uint32 kmp_dyna_lock_t;

// Lock sequence that enumerates all lock kinds. Always make this enumeration
// consistent with kmp_lockseq_t in the include directory.
typedef enum {
  lockseq_indirect = 0,
#define expand_seq(l, a) lockseq_##l,
  KMP_FOREACH_D_LOCK(expand_seq, 0) KMP_FOREACH_I_LOCK(expand_seq, 0)
#undef expand_seq
} kmp_dyna_lockseq_t;
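
// For reference (derived from the macros above, assuming KMP_USE_TSX and
// KMP_USE_FUTEX are both disabled), this enumeration expands to:
//   lockseq_indirect = 0, lockseq_tas = 1, lockseq_ticket = 2,
//   lockseq_queuing = 3, lockseq_drdpa = 4, lockseq_nested_tas = 5,
//   lockseq_nested_ticket = 6, lockseq_nested_queuing = 7,
//   lockseq_nested_drdpa = 8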

// Enumerates indirect lock tags.
typedef enum {
#define expand_tag(l, a) locktag_##l,
  KMP_FOREACH_I_LOCK(expand_tag, 0)
#undef expand_tag
} kmp_indirect_locktag_t;

// Utility macros that extract information from lock sequences.
#define KMP_IS_D_LOCK(seq)                                                     \
  ((seq) >= KMP_FIRST_D_LOCK && (seq) <= KMP_LAST_D_LOCK)
#define KMP_IS_I_LOCK(seq)                                                     \
  ((seq) >= KMP_FIRST_I_LOCK && (seq) <= KMP_LAST_I_LOCK)
#define KMP_GET_I_TAG(seq) (kmp_indirect_locktag_t)((seq)-KMP_FIRST_I_LOCK)
#define KMP_GET_D_TAG(seq) ((seq) << 1 | 1)

// Enumerates direct lock tags, derived from the corresponding lock sequences
// via KMP_GET_D_TAG (so the LSB of every direct lock tag is 1).
typedef enum {
#define expand_tag(l, a) locktag_##l = KMP_GET_D_TAG(lockseq_##l),
  KMP_FOREACH_D_LOCK(expand_tag, 0)
#undef expand_tag
} kmp_direct_locktag_t;
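
// For example (in the minimal configuration above, where lockseq_tas == 1),
// locktag_tas == KMP_GET_D_TAG(lockseq_tas) == (1 << 1 | 1) == 3, i.e.,
// binary 11 - the LSB is set, as for every direct lock tag.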

// Indirect lock type
typedef struct {
  kmp_user_lock_p lock;
  kmp_indirect_locktag_t type;
} kmp_indirect_lock_t;

// Function tables for direct locks. Set/unset/test differentiate functions
// with/without consistency checking.
extern void (*__kmp_direct_init[])(kmp_dyna_lock_t *, kmp_dyna_lockseq_t);
extern void (**__kmp_direct_destroy)(kmp_dyna_lock_t *);
extern int (**__kmp_direct_set)(kmp_dyna_lock_t *, kmp_int32);
extern int (**__kmp_direct_unset)(kmp_dyna_lock_t *, kmp_int32);
extern int (**__kmp_direct_test)(kmp_dyna_lock_t *, kmp_int32);

// Function tables for indirect locks. Set/unset/test differentiate functions
// with/without consistency checking.
extern void (*__kmp_indirect_init[])(kmp_user_lock_p);
extern void (**__kmp_indirect_destroy)(kmp_user_lock_p);
extern int (**__kmp_indirect_set)(kmp_user_lock_p, kmp_int32);
extern int (**__kmp_indirect_unset)(kmp_user_lock_p, kmp_int32);
extern int (**__kmp_indirect_test)(kmp_user_lock_p, kmp_int32);

// Extracts direct lock tag from a user lock pointer
#define KMP_EXTRACT_D_TAG(l)                                                   \
  (*((kmp_dyna_lock_t *)(l)) & ((1 << KMP_LOCK_SHIFT) - 1) &                   \
   -(*((kmp_dyna_lock_t *)(l)) & 1))

// Extracts indirect lock index from a user lock pointer
#define KMP_EXTRACT_I_INDEX(l) (*(kmp_lock_index_t *)(l) >> 1)
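
// Worked example (a sketch based on the macros above): for a lock word
// w == KMP_LOCK_BUSY(1, tas) == 0x103, the LSB is 1, so -(w & 1) is all ones
// and KMP_EXTRACT_D_TAG yields w & 0xFF == locktag_tas. For an indirect lock
// word w == (5 << 1) == 10, the LSB is 0, so KMP_EXTRACT_D_TAG yields 0 and
// KMP_EXTRACT_I_INDEX recovers the table index 10 >> 1 == 5.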

// Returns function pointer to the direct lock function with l (kmp_dyna_lock_t
// *) and op (operation type).
#define KMP_D_LOCK_FUNC(l, op) __kmp_direct_##op[KMP_EXTRACT_D_TAG(l)]

// Returns function pointer to the indirect lock function with l
// (kmp_indirect_lock_t *) and op (operation type).
#define KMP_I_LOCK_FUNC(l, op)                                                 \
  __kmp_indirect_##op[((kmp_indirect_lock_t *)(l))->type]
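
// Typical usage (an illustrative sketch; gtid is the caller's global thread
// id, and the lock pointers are assumed to be already initialized):
//   kmp_dyna_lock_t *lck = ...;               // tagged direct lock word
//   KMP_D_LOCK_FUNC(lck, set)(lck, gtid);     // acquire via jump table
//   KMP_D_LOCK_FUNC(lck, unset)(lck, gtid);   // release via jump table
//   kmp_indirect_lock_t *ilk = ...;           // heap-allocated indirect lock
//   KMP_I_LOCK_FUNC(ilk, set)(ilk->lock, gtid); // note: inner lock pointer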

// Initializes a direct lock with the given lock pointer and lock sequence.
#define KMP_INIT_D_LOCK(l, seq)                                                \
  __kmp_direct_init[KMP_GET_D_TAG(seq)]((kmp_dyna_lock_t *)l, seq)

// Initializes an indirect lock with the given lock pointer and lock sequence.
#define KMP_INIT_I_LOCK(l, seq)                                                \
  __kmp_direct_init[0]((kmp_dyna_lock_t *)(l), seq)

// Returns "free" lock value for the given lock type.
#define KMP_LOCK_FREE(type) (locktag_##type)

// Returns "busy" lock value for the given lock type.
#define KMP_LOCK_BUSY(v, type) ((v) << KMP_LOCK_SHIFT | locktag_##type)

// Returns lock value after removing (shifting out) the lock tag.
#define KMP_LOCK_STRIP(v) ((v) >> KMP_LOCK_SHIFT)
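
// Worked example: KMP_LOCK_BUSY(v, tas) stores ((v) << 8) | locktag_tas, so
// the tag survives in the low byte while the lock algorithm's payload lives
// in the upper bits; KMP_LOCK_STRIP then recovers v, and KMP_LOCK_FREE(tas)
// is the tag-only word representing an unlocked TAS lock.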

// Initializes global states and data structures for managing dynamic user
// locks.
extern void __kmp_init_dynamic_user_locks();

// Allocates and returns an indirect lock with the given indirect lock tag.
extern kmp_indirect_lock_t *
__kmp_allocate_indirect_lock(void **, kmp_int32, kmp_indirect_locktag_t);

// Cleans up global states and data structures for managing dynamic user locks.
extern void __kmp_cleanup_indirect_user_locks();

// Default user lock sequence when not using hinted locks.
extern kmp_dyna_lockseq_t __kmp_user_lock_seq;

// Jump table for "set lock location", available only for indirect locks.
extern void (*__kmp_indirect_set_location[KMP_NUM_I_LOCKS])(kmp_user_lock_p,
                                                            const ident_t *);
#define KMP_SET_I_LOCK_LOCATION(lck, loc)                                      \
  {                                                                            \
    if (__kmp_indirect_set_location[(lck)->type] != NULL)                      \
      __kmp_indirect_set_location[(lck)->type]((lck)->lock, loc);              \
  }

// Jump table for "set lock flags", available only for indirect locks.
extern void (*__kmp_indirect_set_flags[KMP_NUM_I_LOCKS])(kmp_user_lock_p,
                                                         kmp_lock_flags_t);
#define KMP_SET_I_LOCK_FLAGS(lck, flag)                                        \
  {                                                                            \
    if (__kmp_indirect_set_flags[(lck)->type] != NULL)                         \
      __kmp_indirect_set_flags[(lck)->type]((lck)->lock, flag);                \
  }

// Jump table for "get lock location", available only for indirect locks.
extern const ident_t *(*__kmp_indirect_get_location[KMP_NUM_I_LOCKS])(
    kmp_user_lock_p);
#define KMP_GET_I_LOCK_LOCATION(lck)                                           \
  (__kmp_indirect_get_location[(lck)->type] != NULL                            \
       ? __kmp_indirect_get_location[(lck)->type]((lck)->lock)                 \
       : NULL)

// Jump table for "get lock flags", available only for indirect locks.
extern kmp_lock_flags_t (*__kmp_indirect_get_flags[KMP_NUM_I_LOCKS])(
    kmp_user_lock_p);
#define KMP_GET_I_LOCK_FLAGS(lck)                                              \
  (__kmp_indirect_get_flags[(lck)->type] != NULL                               \
       ? __kmp_indirect_get_flags[(lck)->type]((lck)->lock)                    \
       : NULL)

#define KMP_I_LOCK_CHUNK                                                       \
  1024 // number of kmp_indirect_lock_t objects to be allocated together

// Lock table for indirect locks.
typedef struct kmp_indirect_lock_table {
  kmp_indirect_lock_t **table; // blocks of indirect locks allocated
  kmp_lock_index_t size; // size of the indirect lock table
  kmp_lock_index_t next; // index to the next lock to be allocated
} kmp_indirect_lock_table_t;

extern kmp_indirect_lock_table_t __kmp_i_lock_table;

// Returns the indirect lock associated with the given index.
#define KMP_GET_I_LOCK(index)                                                  \
  (*(__kmp_i_lock_table.table + (index) / KMP_I_LOCK_CHUNK) +                  \
   (index) % KMP_I_LOCK_CHUNK)
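
// Worked example: with KMP_I_LOCK_CHUNK == 1024, index 1500 resolves to
// entry 1500 % 1024 == 476 of block 1500 / 1024 == 1, i.e.,
//   __kmp_i_lock_table.table[1][476]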

// Number of locks in a lock block, which is fixed to "1" now.
// TODO: No lock block implementation exists yet. If one is added, we will
// need to manage a lock block data structure for each indirect lock type.
extern int __kmp_num_locks_in_block;

// Fast lock table lookup without consistency checking
#define KMP_LOOKUP_I_LOCK(l)                                                   \
  ((OMP_LOCK_T_SIZE < sizeof(void *)) ? KMP_GET_I_LOCK(KMP_EXTRACT_I_INDEX(l)) \
                                      : *((kmp_indirect_lock_t **)(l)))
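
// That is, when omp_lock_t is too small to hold a pointer (e.g., GOMP
// compatibility on Linux, where it is an int), the object holds a shifted
// table index and the lookup goes through KMP_GET_I_LOCK; otherwise the
// object holds the kmp_indirect_lock_t address itself and is dereferenced
// directly.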

// Used once in kmp_error.cpp
extern kmp_int32 __kmp_get_user_lock_owner(kmp_user_lock_p, kmp_uint32);

#else // KMP_USE_DYNAMIC_LOCK

#define KMP_LOCK_BUSY(v, type) (v)
#define KMP_LOCK_FREE(type) 0
#define KMP_LOCK_STRIP(v) (v)

#endif // KMP_USE_DYNAMIC_LOCK

// Data structure for using backoff within spin locks.
typedef struct {
  kmp_uint32 step; // current step
  kmp_uint32 max_backoff; // upper bound of outer delay loop
  kmp_uint32 min_tick; // size of inner delay loop in ticks (machine-dependent)
} kmp_backoff_t;
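
// Illustrative sketch of intended use (assumed semantics; the actual spin
// loops live in kmp_lock.cpp, and try_acquire is a hypothetical stand-in):
//   kmp_backoff_t backoff = __kmp_spin_backoff_params; // per-acquire copy
//   while (!try_acquire(lck))
//     __kmp_spin_backoff(&backoff); // delay, growing step toward max_backoff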

// Runtime's default backoff parameters
extern kmp_backoff_t __kmp_spin_backoff_params;

// Backoff function
extern void __kmp_spin_backoff(kmp_backoff_t *);

#ifdef __cplusplus
} // extern "C"
#endif // __cplusplus

#endif /* KMP_LOCK_H */