/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_OSQ_LOCK_H
#define __LINUX_OSQ_LOCK_H

#include <linux/atomic.h>

/*
 * An MCS-like lock especially tailored for optimistic spinning for sleeping
 * lock implementations (mutex, rwsem, etc).
 */
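
/*
 * Illustrative sketch only, loosely modeled on how the mutex and rwsem
 * slowpaths use this facility (the lock type and the owner_running()
 * helper below are hypothetical): the optimistic-spin path brackets its
 * spin-on-owner loop with osq_lock()/osq_unlock(), so only one waiter at
 * a time busy-waits on the lock owner while later arrivals queue up
 * MCS-style on per-CPU nodes instead of hammering the same cacheline:
 *
 *	if (!osq_lock(&lock->osq))
 *		return false;			(could not queue, block instead)
 *
 *	while (owner_running(lock))		(hypothetical helper)
 *		cpu_relax();
 *
 *	osq_unlock(&lock->osq);
 *	return true;
 */
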
struct optimistic_spin_node {
	struct optimistic_spin_node *next, *prev;
	int locked; /* 1 if lock acquired */
	int cpu; /* encoded CPU # + 1 value */
};

struct optimistic_spin_queue {
	/*
	 * Stores an encoded value of the CPU # of the tail node in the queue.
	 * If the queue is empty, then it's set to OSQ_UNLOCKED_VAL.
	 */
	atomic_t tail;
};
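
/*
 * Worked example of the tail encoding described above (a sketch; the real
 * encode/decode helpers live in kernel/locking/osq_lock.c): the tail holds
 * "CPU number of the tail node + 1", so CPU 0 is stored as 1, CPU 7 as 8,
 * and the value 0 (OSQ_UNLOCKED_VAL) is reserved to mean "queue empty".
 */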

#define OSQ_UNLOCKED_VAL (0)

/* Init macro and function. */
#define OSQ_LOCK_UNLOCKED { ATOMIC_INIT(OSQ_UNLOCKED_VAL) }

static inline void osq_lock_init(struct optimistic_spin_queue *lock)
{
	atomic_set(&lock->tail, OSQ_UNLOCKED_VAL);
}
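
/*
 * Initialization sketch (struct my_sleeping_lock is a hypothetical
 * embedder): an OSQ embedded in a larger lock type can be set up either
 * statically with OSQ_LOCK_UNLOCKED or at runtime with osq_lock_init():
 *
 *	struct my_sleeping_lock {
 *		struct optimistic_spin_queue osq;
 *	};
 *
 *	static struct my_sleeping_lock s = { .osq = OSQ_LOCK_UNLOCKED };
 *
 *	void my_sleeping_lock_init(struct my_sleeping_lock *l)
 *	{
 *		osq_lock_init(&l->osq);
 *	}
 */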

extern bool osq_lock(struct optimistic_spin_queue *lock);
extern void osq_unlock(struct optimistic_spin_queue *lock);

static inline bool osq_is_locked(struct optimistic_spin_queue *lock)
{
	return atomic_read(&lock->tail) != OSQ_UNLOCKED_VAL;
}
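
/*
 * Usage sketch for osq_is_locked() (the wrapper below is hypothetical,
 * patterned after the old rwsem_has_spinner() check): it lets a sleeping
 * lock cheaply test whether any optimistic spinners are queued, e.g. to
 * avoid extra work on the wakeup path when a spinner is already poised
 * to take the lock:
 *
 *	static inline bool my_lock_has_spinner(struct my_sleeping_lock *l)
 *	{
 *		return osq_is_locked(&l->osq);
 *	}
 */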

#endif