blob: 703ea5c30a33f84bd60fcc19415b48c53084f5c7 [file] [log] [blame]
Jason Low90631822014-07-14 10:27:49 -07001#ifndef __LINUX_OSQ_LOCK_H
2#define __LINUX_OSQ_LOCK_H
3
4/*
5 * An MCS like lock especially tailored for optimistic spinning for sleeping
6 * lock implementations (mutex, rwsem, etc).
7 */
/*
 * One queue node per spinning CPU. Nodes are linked into a doubly-linked
 * list; the queue head (struct optimistic_spin_queue below) tracks only
 * the tail, so earlier nodes are reached via these next/prev pointers.
 */
struct optimistic_spin_node {
	struct optimistic_spin_node *next, *prev;
	int locked; /* 1 if lock acquired */
	int cpu; /* encoded CPU # + 1 value */
};
Jason Low90631822014-07-14 10:27:49 -070013
/*
 * The lock word embedded in the user (mutex, rwsem, ...). It holds only
 * the encoded tail CPU; the node list itself lives in the per-CPU
 * optimistic_spin_node structures above.
 */
struct optimistic_spin_queue {
	/*
	 * Stores an encoded value of the CPU # of the tail node in the queue.
	 * If the queue is empty, then it's set to OSQ_UNLOCKED_VAL.
	 */
	atomic_t tail;
};
21
/*
 * Tail value meaning "no CPU queued". 0 cannot collide with a real CPU
 * because queued CPUs are stored as CPU # + 1 (see optimistic_spin_node).
 */
#define OSQ_UNLOCKED_VAL (0)

/* Init macro and function. */
#define OSQ_LOCK_UNLOCKED { ATOMIC_INIT(OSQ_UNLOCKED_VAL) }
26
/* Run-time counterpart of OSQ_LOCK_UNLOCKED: reset @lock to the empty state. */
static inline void osq_lock_init(struct optimistic_spin_queue *lock)
{
	atomic_set(&lock->tail, OSQ_UNLOCKED_VAL);
}
31
/*
 * Acquire/release, implemented out of line. osq_lock() returns bool:
 * presumably true on acquisition and false when the optimistic spin is
 * abandoned -- confirm against the osq_lock() implementation.
 */
extern bool osq_lock(struct optimistic_spin_queue *lock);
extern void osq_unlock(struct optimistic_spin_queue *lock);
34
Waiman Long59aabfc2015-04-30 17:12:16 -040035static inline bool osq_is_locked(struct optimistic_spin_queue *lock)
36{
37 return atomic_read(&lock->tail) != OSQ_UNLOCKED_VAL;
38}
39
Jason Low90631822014-07-14 10:27:49 -070040#endif