#ifndef _TOOLS_LINUX_REFCOUNT_H
#define _TOOLS_LINUX_REFCOUNT_H

/*
 * Variant of atomic_t specialized for reference counts.
 *
 * The interface matches the atomic_t interface (to aid in porting) but only
 * provides the few functions one should use for reference counting.
 *
 * It differs in that the counter saturates at UINT_MAX and will not move once
 * there. This avoids wrapping the counter and causing 'spurious'
 * use-after-free issues.
 *
 * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
 * and provide only what is strictly required for refcounts.
 *
 * The increments are fully relaxed; these will not provide ordering. The
 * rationale is that whatever is used to obtain the object we're increasing the
 * reference count on will provide the ordering. For locked data structures,
 * it's the lock acquire; for RCU/lockless data structures it's the dependent
 * load.
 *
 * Do note that inc_not_zero() provides a control dependency which will order
 * future stores against the increment; this ensures we'll never modify the
 * object if we did not in fact acquire a reference.
 *
 * The decrements provide release ordering, such that all the prior loads and
 * stores will be issued before; they also provide a control dependency, which
 * will order us against the subsequent free().
 *
 * The control dependency is against the load of the cmpxchg (ll/sc) that
 * succeeded. This means the stores aren't fully ordered, but this is fine
 * because the 1->0 transition indicates no concurrency.
 *
 * Note that the allocator is responsible for ordering things between free()
 * and alloc().
 */

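/*
 * A usage sketch of the rules above, not part of this API: looking an object
 * up under a lock. The names ('struct obj', 'obj_lock', 'obj_find') are
 * hypothetical; the point is that the lock acquire already provides all the
 * ordering the relaxed increment relies on.
 *
 *	struct obj *obj_get(int key)
 *	{
 *		struct obj *o;
 *
 *		pthread_mutex_lock(&obj_lock);
 *		o = obj_find(key);		// cannot be freed while we hold the lock
 *		if (o)
 *			refcount_inc(&o->ref);	// relaxed increment, ordered by the lock acquire
 *		pthread_mutex_unlock(&obj_lock);
 *		return o;
 *	}
 */
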
#include <linux/atomic.h>
#include <linux/kernel.h>

#ifdef NDEBUG
#define REFCOUNT_WARN(cond, str) (void)(cond)
#define __refcount_check
#else
#define REFCOUNT_WARN(cond, str) BUG_ON(cond)
#define __refcount_check	__must_check
#endif

typedef struct refcount_struct {
	atomic_t refs;
} refcount_t;

#define REFCOUNT_INIT(n)	{ .refs = ATOMIC_INIT(n), }

static inline void refcount_set(refcount_t *r, unsigned int n)
{
	atomic_set(&r->refs, n);
}

static inline unsigned int refcount_read(const refcount_t *r)
{
	return atomic_read(&r->refs);
}

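/*
 * A sketch of embedding and initializing a refcount_t, not part of this API
 * ('struct obj' is hypothetical); the creator starts out owning the single
 * reference:
 *
 *	struct obj {
 *		refcount_t	ref;
 *	};
 *
 *	static struct obj boot_obj = { .ref = REFCOUNT_INIT(1), };
 *
 * For a dynamically allocated object, refcount_set(&o->ref, 1) does the same
 * job, and refcount_read(&o->ref) reports the current count (a snapshot for
 * debugging, not something to base synchronization decisions on).
 */
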
/*
 * Similar to atomic_inc_not_zero(), will saturate at UINT_MAX and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 */
static inline __refcount_check
bool refcount_inc_not_zero(refcount_t *r)
{
	unsigned int old, new, val = atomic_read(&r->refs);

	for (;;) {
		new = val + 1;

		if (!val)
			return false;	/* count already hit 0, do not resurrect the object */

		if (unlikely(!new))
			return true;	/* val was UINT_MAX: saturated, leave the counter alone */

		old = atomic_cmpxchg_relaxed(&r->refs, val, new);
		if (old == val)
			break;

		val = old;	/* someone else changed the count, retry with the new value */
	}

	REFCOUNT_WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");

	return true;
}

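/*
 * A sketch of the lockless acquisition case, not part of this API: the object
 * is found via an RCU/lockless lookup ('obj_lookup_rcu' is hypothetical), so
 * only refcount_inc_not_zero() may take the reference, because the object
 * might already be on its way to being freed:
 *
 *	struct obj *obj_get_lockless(int key)
 *	{
 *		struct obj *o = obj_lookup_rcu(key);	// dependent load keeps 'o' stable
 *
 *		if (o && !refcount_inc_not_zero(&o->ref))
 *			o = NULL;			// count already hit zero, too late
 *		return o;
 *	}
 */
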
/*
 * Similar to atomic_inc(), will saturate at UINT_MAX and WARN.
 *
 * Provides no memory ordering; it is assumed the caller already has a
 * reference on the object, and it will WARN when this is not so.
 */
static inline void refcount_inc(refcount_t *r)
{
	REFCOUNT_WARN(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n");
}

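/*
 * A sketch for refcount_inc(), not part of this API: handing the object to a
 * second user while we already hold a reference of our own ('start_worker' is
 * a hypothetical consumer that does its own put later):
 *
 *	void obj_share(struct obj *o)
 *	{
 *		refcount_inc(&o->ref);	// we hold a reference, so the count cannot be 0 here
 *		start_worker(o);
 *	}
 */
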
/*
 * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
 * decrement when saturated at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 */
static inline __refcount_check
bool refcount_sub_and_test(unsigned int i, refcount_t *r)
{
	unsigned int old, new, val = atomic_read(&r->refs);

	for (;;) {
		if (unlikely(val == UINT_MAX))
			return false;	/* saturated counters are never decremented */

		new = val - i;
		if (new > val) {	/* unsigned wrap-around means we dropped below 0 */
			REFCOUNT_WARN(new > val, "refcount_t: underflow; use-after-free.\n");
			return false;
		}

		old = atomic_cmpxchg_release(&r->refs, val, new);
		if (old == val)
			break;

		val = old;	/* someone else changed the count, retry with the new value */
	}

	return !new;	/* true only when we dropped the count to zero */
}

static inline __refcount_check
bool refcount_dec_and_test(refcount_t *r)
{
	return refcount_sub_and_test(1, r);
}

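/*
 * A sketch of the matching put path, not part of this API: the release
 * ordering of the decrement publishes our prior stores before the count can
 * hit zero, and the control dependency orders the free() after the 1->0
 * transition, so only the last user frees the object:
 *
 *	void obj_put(struct obj *o)
 *	{
 *		if (refcount_dec_and_test(&o->ref))
 *			free(o);
 *	}
 */
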

#endif /* _TOOLS_LINUX_REFCOUNT_H */