#ifndef __LINUX_OSQ_LOCK_H
#define __LINUX_OSQ_LOCK_H

/*
 * An MCS-like lock especially tailored for optimistic spinning for sleeping
 * lock implementations (mutex, rwsem, etc.).
 */
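
/*
 * Each spinner waits on its own per-CPU queue node, spinning on that
 * node's ->locked field rather than on a shared cacheline; this is what
 * makes the queue MCS-like. The implementation lives in
 * kernel/locking/osq_lock.c.
 */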
struct optimistic_spin_node {
	struct optimistic_spin_node *next, *prev;
	int locked; /* 1 if lock acquired */
	int cpu; /* encoded CPU # + 1 value */
};

struct optimistic_spin_queue {
	/*
	 * Stores an encoded value of the CPU # of the tail node in the queue.
	 * If the queue is empty, then it's set to OSQ_UNLOCKED_VAL.
	 */
	atomic_t tail;
};

#define OSQ_UNLOCKED_VAL (0)

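/*
 * The tail encoding is smp_processor_id() + 1, so that CPU 0 can be
 * distinguished from OSQ_UNLOCKED_VAL (0), i.e.:
 *
 *	tail == 0        -> queue empty
 *	tail == cpu + 1  -> CPU 'cpu' holds the tail node
 */
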
/* Init macro and function. */
#define OSQ_LOCK_UNLOCKED { ATOMIC_INIT(OSQ_UNLOCKED_VAL) }

static inline void osq_lock_init(struct optimistic_spin_queue *lock)
{
	atomic_set(&lock->tail, OSQ_UNLOCKED_VAL);
}

extern bool osq_lock(struct optimistic_spin_queue *lock);
extern void osq_unlock(struct optimistic_spin_queue *lock);

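/*
 * A minimal usage sketch for a sleeping lock's optimistic-spin slowpath;
 * 'sem', its ->osq field, and owner_running() are illustrative names, not
 * part of this API:
 *
 *	if (!osq_lock(&sem->osq))
 *		return false;		// queue contended: go sleep instead
 *	while (owner_running(sem))
 *		cpu_relax();		// spin while the owner is on a CPU
 *	osq_unlock(&sem->osq);
 */
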
static inline bool osq_is_locked(struct optimistic_spin_queue *lock)
{
	return atomic_read(&lock->tail) != OSQ_UNLOCKED_VAL;
}
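
/*
 * Note that osq_is_locked() is only a momentary snapshot of the tail; it
 * can serve as a heuristic (e.g. to avoid piling onto a queue that already
 * has spinners), but the result may be stale by the time the caller acts
 * on it.
 */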

#endif /* __LINUX_OSQ_LOCK_H */