#ifndef __LINUX_LOCKREF_H
#define __LINUX_LOCKREF_H

/*
 * Locked reference counts.
 *
 * These are different from just plain atomic refcounts in that they
 * are atomic with respect to the spinlock that goes with them. In
 * particular, there can be implementations that don't actually get
 * the spinlock for the common decrement/increment operations, but they
 * still have to check that the operation is done semantically as if
 * the spinlock had been taken (using a cmpxchg operation that covers
 * both the lock and the count word, or using memory transactions, for
 * example).
 */
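
/*
 * As an illustrative sketch of that lockless fast path (the real loop
 * lives in lib/lockref.c and is more careful about retries), a "get"
 * looks roughly like:
 *
 *	struct lockref old, new;
 *
 *	old.lock_count = READ_ONCE(lockref->lock_count);
 *	while (arch_spin_value_unlocked(old.lock.rlock.raw_lock)) {
 *		new = old;
 *		new.count++;
 *		if (cmpxchg64(&lockref->lock_count, old.lock_count,
 *			      new.lock_count) == old.lock_count)
 *			return;		// count bumped, lock never taken
 *		old.lock_count = READ_ONCE(lockref->lock_count);
 *	}
 *	// the lock is held by someone: fall back to spin_lock()
 */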

#include <linux/spinlock.h>
#include <generated/bounds.h>

#define USE_CMPXCHG_LOCKREF \
	(IS_ENABLED(CONFIG_ARCH_USE_CMPXCHG_LOCKREF) && \
	 IS_ENABLED(CONFIG_SMP) && SPINLOCK_SIZE <= 4)

struct lockref {
	union {
#if USE_CMPXCHG_LOCKREF
		/*
		 * The <lock, count> pair seen as a single 64-bit word,
		 * so both can be updated by one cmpxchg.
		 */
		aligned_u64 lock_count;
#endif
		struct {
			spinlock_t lock;
			unsigned int count;
		};
	};
};
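
/*
 * Initialization is just initializing the two halves (sketch; the
 * enclosing object "foo" is hypothetical):
 *
 *	spin_lock_init(&foo->ref.lock);
 *	foo->ref.count = 1;		// reference held by the creator
 */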

/*
 * The "not_zero"/"not_dead" and "or_lock" variants return non-zero on
 * success; the "or_lock" variants otherwise return 0 with the spinlock
 * held.
 */
extern void lockref_get(struct lockref *);
extern int lockref_get_not_zero(struct lockref *);
extern int lockref_get_or_lock(struct lockref *);
extern int lockref_put_or_lock(struct lockref *);
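
/*
 * Typical put path (sketch; "foo" is hypothetical):
 *
 *	if (lockref_put_or_lock(&foo->ref))
 *		return;		// dropped one reference of several
 *	// last reference: the lock is now held and count is still 1
 *	... tear the object down ...
 *	spin_unlock(&foo->ref.lock);
 */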

extern void lockref_mark_dead(struct lockref *);	/* caller must hold the lock */
extern int lockref_get_not_dead(struct lockref *);
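
/*
 * Sketch of the dead-marking protocol (this is how e.g. the dcache
 * uses it; "foo" is hypothetical):
 *
 *	spin_lock(&foo->ref.lock);
 *	lockref_mark_dead(&foo->ref);	// count goes negative
 *	spin_unlock(&foo->ref.lock);
 *
 *	// a concurrent lockless lookup now fails instead of
 *	// resurrecting the dying object:
 *	if (!lockref_get_not_dead(&foo->ref))
 *		return NULL;
 */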

/* Must be called under spinlock for reliable results */
static inline int __lockref_is_dead(const struct lockref *l)
{
	return ((int)l->count < 0);
}
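
/*
 * For example (sketch; "foo" is hypothetical), take a reference only
 * if the object is still alive, with the lock already held:
 *
 *	spin_lock(&foo->ref.lock);
 *	if (__lockref_is_dead(&foo->ref)) {
 *		spin_unlock(&foo->ref.lock);
 *		return NULL;
 *	}
 *	foo->ref.count++;	// safe: we hold the lock
 *	spin_unlock(&foo->ref.lock);
 */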

#endif /* __LINUX_LOCKREF_H */