blob: b10b122dd09989be9bcf70f3700d22ffce0b3623 [file] [log] [blame]
Waiman Long0f8f2aa2013-08-28 18:13:26 -07001#ifndef __LINUX_LOCKREF_H
2#define __LINUX_LOCKREF_H
3
4/*
5 * Locked reference counts.
6 *
7 * These are different from just plain atomic refcounts in that they
8 * are atomic with respect to the spinlock that goes with them. In
9 * particular, there can be implementations that don't actually get
10 * the spinlock for the common decrement/increment operations, but they
11 * still have to check that the operation is done semantically as if
12 * the spinlock had been taken (using a cmpxchg operation that covers
13 * both the lock and the count word, or using memory transactions, for
14 * example).
15 */
16
17#include <linux/spinlock.h>
Peter Zijlstra57f42572013-11-14 14:31:54 -080018#include <generated/bounds.h>
19
/*
 * Use the lockless cmpxchg-based fast path only when the architecture
 * opts in (CONFIG_ARCH_USE_CMPXCHG_LOCKREF), the kernel is SMP, and the
 * spinlock fits in 4 bytes — so that the lock and the 4-byte count
 * together fit in a single 64-bit word that can be updated atomically
 * with one cmpxchg (see the aligned_u64 lock_count overlay in
 * struct lockref).
 */
#define USE_CMPXCHG_LOCKREF \
	(IS_ENABLED(CONFIG_ARCH_USE_CMPXCHG_LOCKREF) && \
	 IS_ENABLED(CONFIG_SMP) && SPINLOCK_SIZE <= 4)
Waiman Long0f8f2aa2013-08-28 18:13:26 -070023
struct lockref {
	union {
#if USE_CMPXCHG_LOCKREF
		/*
		 * Overlay of the spinlock and the count in one 64-bit
		 * word, so both can be read and updated together with a
		 * single cmpxchg on the lockless fast path.
		 */
		aligned_u64 lock_count;
#endif
		struct {
			spinlock_t lock;	/* protects count */
			int count;		/* reference count; negative
						 * means "dead" — see
						 * lockref_mark_dead() and
						 * __lockref_is_dead() */
		};
	};
};
35
/* Unconditionally take a reference (increments count). */
extern void lockref_get(struct lockref *);

/*
 * Drop a reference and return the new count value.
 * NOTE(review): the failure semantics (negative return when the
 * lockless update cannot be performed) live in lib/lockref.c —
 * confirm against the implementation before relying on them.
 */
extern int lockref_put_return(struct lockref *);

/* Take a reference only if count is non-zero; non-zero return on success. */
extern int lockref_get_not_zero(struct lockref *);

/*
 * Take a reference, or — presumably when the fast path cannot decide —
 * return with the spinlock held so the caller can resolve it; verify
 * the exact contract against lib/lockref.c.
 */
extern int lockref_get_or_lock(struct lockref *);

/*
 * Drop a reference, or return with the spinlock held (e.g. when the
 * count would reach zero) so the caller can tear the object down.
 */
extern int lockref_put_or_lock(struct lockref *);

/* Mark the lockref dead (count goes negative); further gets must fail. */
extern void lockref_mark_dead(struct lockref *);

/* Take a reference only if the lockref has not been marked dead. */
extern int lockref_get_not_dead(struct lockref *);
44
Steven Whitehousee66cf162013-10-15 15:18:08 +010045/* Must be called under spinlock for reliable results */
46static inline int __lockref_is_dead(const struct lockref *l)
47{
48 return ((int)l->count < 0);
49}
50
Waiman Long0f8f2aa2013-08-28 18:13:26 -070051#endif /* __LINUX_LOCKREF_H */