// SPDX-License-Identifier: GPL-2.0
/*
 * Variant of atomic_t specialized for reference counts.
 *
 * The interface matches the atomic_t interface (to aid in porting) but only
 * provides the few functions one should use for reference counting.
 *
 * It differs in that the counter saturates at UINT_MAX and will not move once
 * there. This avoids wrapping the counter and causing 'spurious'
 * use-after-free issues.
 *
 * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
 * and provide only what is strictly required for refcounts.
 *
 * The increments are fully relaxed; these will not provide ordering. The
 * rationale is that whatever is used to obtain the object we're increasing the
 * reference count on will provide the ordering. For locked data structures,
 * it's the lock acquire, for RCU/lockless data structures it's the dependent
 * load.
 *
 * Do note that inc_not_zero() provides a control dependency which will order
 * future stores against the inc; this ensures we'll never modify the object
 * if we did not in fact acquire a reference.
 *
 * The decrements will provide release order, such that all the prior loads and
 * stores will be issued before; they also provide a control dependency, which
 * will order us against the subsequent free().
 *
 * The control dependency is against the load of the cmpxchg (ll/sc) that
 * succeeded. This means the stores aren't fully ordered, but this is fine
 * because the 1->0 transition indicates no concurrency.
 *
 * Note that the allocator is responsible for ordering things between free()
 * and alloc().
 */

#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/spinlock.h>
#include <linux/bug.h>
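
/*
 * Illustrative sketch, not part of the original file: a minimal object
 * lifecycle built on the checked primitives below. 'struct foo',
 * foo_alloc() and foo_put() are made-up names. The lock acquire (or RCU
 * dependent load) that hands out 'f' is what orders the relaxed
 * increments; the release in the final decrement orders all prior loads
 * and stores before kfree():
 *
 *	struct foo {
 *		refcount_t ref;
 *	};
 *
 *	static struct foo *foo_alloc(void)
 *	{
 *		struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);
 *
 *		if (f)
 *			refcount_set(&f->ref, 1);
 *		return f;
 *	}
 *
 *	static void foo_put(struct foo *f)
 *	{
 *		if (refcount_dec_and_test_checked(&f->ref))
 *			kfree(f);
 *	}
 */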

/**
 * refcount_add_not_zero_checked - add a value to a refcount unless it is 0
 * @i: the value to add to the refcount
 * @r: the refcount
 *
 * Will saturate at UINT_MAX and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time. In these
 * cases, refcount_inc(), or one of its variants, should instead be used to
 * increment a reference count.
 *
 * Return: false if the passed refcount is 0, true otherwise
 */
bool refcount_add_not_zero_checked(unsigned int i, refcount_t *r)
{
	unsigned int new, val = atomic_read(&r->refs);

	do {
		if (!val)
			return false;

		if (unlikely(val == UINT_MAX))
			return true;

		new = val + i;
		if (new < val)
			new = UINT_MAX;

	} while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));

	WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");

	return true;
}
EXPORT_SYMBOL(refcount_add_not_zero_checked);
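
/*
 * Illustrative sketch with made-up names: taking 'n' references in one
 * operation, e.g. before handing the same object to 'n' consumers. The
 * not-zero check keeps this safe against an object whose last reference
 * is being dropped concurrently:
 *
 *	if (!refcount_add_not_zero_checked(n, &f->ref))
 *		return -ENOENT;
 *	for (i = 0; i < n; i++)
 *		hand_out(f);
 */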

/**
 * refcount_add_checked - add a value to a refcount
 * @i: the value to add to the refcount
 * @r: the refcount
 *
 * Similar to atomic_add(), but will saturate at UINT_MAX and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time. In these
 * cases, refcount_inc(), or one of its variants, should instead be used to
 * increment a reference count.
 */
void refcount_add_checked(unsigned int i, refcount_t *r)
{
	WARN_ONCE(!refcount_add_not_zero_checked(i, r), "refcount_t: addition on 0; use-after-free.\n");
}
EXPORT_SYMBOL(refcount_add_checked);

/**
 * refcount_inc_not_zero_checked - increment a refcount unless it is 0
 * @r: the refcount to increment
 *
 * Similar to atomic_inc_not_zero(), but will saturate at UINT_MAX and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Return: true if the increment was successful, false otherwise
 */
bool refcount_inc_not_zero_checked(refcount_t *r)
{
	unsigned int new, val = atomic_read(&r->refs);

	do {
		new = val + 1;

		if (!val)
			return false;

		if (unlikely(!new))
			return true;

	} while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));

	WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");

	return true;
}
EXPORT_SYMBOL(refcount_inc_not_zero_checked);
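
/*
 * Illustrative sketch with made-up names: the canonical lockless lookup
 * this function enables. RCU keeps the object memory stable; a failed
 * increment means we raced with the final put and must treat the object
 * as already gone:
 *
 *	rcu_read_lock();
 *	f = rcu_dereference(foo_table[i]);
 *	if (f && !refcount_inc_not_zero_checked(&f->ref))
 *		f = NULL;
 *	rcu_read_unlock();
 */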

/**
 * refcount_inc_checked - increment a refcount
 * @r: the refcount to increment
 *
 * Similar to atomic_inc(), but will saturate at UINT_MAX and WARN.
 *
 * Provides no memory ordering; it is assumed the caller already has a
 * reference on the object.
 *
 * Will WARN if the refcount is 0, as this represents a possible use-after-free
 * condition.
 */
void refcount_inc_checked(refcount_t *r)
{
	WARN_ONCE(!refcount_inc_not_zero_checked(r), "refcount_t: increment on 0; use-after-free.\n");
}
EXPORT_SYMBOL(refcount_inc_checked);
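
/*
 * Illustrative sketch with made-up names: a plain get() helper. This is
 * only valid while the caller already holds a reference that keeps the
 * count above 0:
 *
 *	static struct foo *foo_get(struct foo *f)
 *	{
 *		refcount_inc_checked(&f->ref);
 *		return f;
 *	}
 */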

/**
 * refcount_sub_and_test_checked - subtract from a refcount and test if it is 0
 * @i: amount to subtract from the refcount
 * @r: the refcount
 *
 * Similar to atomic_dec_and_test(), but it will WARN, return false and
 * ultimately leak on underflow, and will fail to decrement when saturated
 * at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time. In these
 * cases, refcount_dec(), or one of its variants, should instead be used to
 * decrement a reference count.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
bool refcount_sub_and_test_checked(unsigned int i, refcount_t *r)
{
	unsigned int new, val = atomic_read(&r->refs);

	do {
		if (unlikely(val == UINT_MAX))
			return false;

		new = val - i;
		if (new > val) {
			WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
			return false;
		}

	} while (!atomic_try_cmpxchg_release(&r->refs, &val, new));

	return !new;
}
EXPORT_SYMBOL(refcount_sub_and_test_checked);
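
/*
 * Illustrative sketch with made-up names: dropping 'n' references in one
 * operation, matching a prior refcount_add_not_zero_checked(n, ...):
 *
 *	if (refcount_sub_and_test_checked(n, &f->ref))
 *		kfree(f);
 */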

/**
 * refcount_dec_and_test_checked - decrement a refcount and test if it is 0
 * @r: the refcount
 *
 * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
 * decrement when saturated at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
bool refcount_dec_and_test_checked(refcount_t *r)
{
	return refcount_sub_and_test_checked(1, r);
}
EXPORT_SYMBOL(refcount_dec_and_test_checked);
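
/*
 * Illustrative sketch with made-up names, assuming 'struct foo' carries
 * a struct rcu_head named 'rcu': when lookups use inc_not_zero() under
 * RCU, the final free must be deferred past the grace period:
 *
 *	if (refcount_dec_and_test_checked(&f->ref))
 *		kfree_rcu(f, rcu);
 */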

/**
 * refcount_dec_checked - decrement a refcount
 * @r: the refcount
 *
 * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
 * when saturated at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before.
 */
void refcount_dec_checked(refcount_t *r)
{
	WARN_ONCE(refcount_dec_and_test_checked(r), "refcount_t: decrement hit 0; leaking memory.\n");
}
EXPORT_SYMBOL(refcount_dec_checked);

/**
 * refcount_dec_if_one - decrement a refcount if it is 1
 * @r: the refcount
 *
 * No atomic_t counterpart; it attempts a 1 -> 0 transition and returns the
 * success thereof.
 *
 * Like all decrement operations, it provides release memory order and provides
 * a control dependency.
 *
 * It can be used like a try-delete operator; this explicit case is provided
 * instead of a generic cmpxchg, because exposing cmpxchg would allow
 * implementing unsafe operations.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
bool refcount_dec_if_one(refcount_t *r)
{
	int val = 1;

	return atomic_try_cmpxchg_release(&r->refs, &val, 0);
}
EXPORT_SYMBOL(refcount_dec_if_one);
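
/*
 * Illustrative sketch with made-up names: a try-delete operation that
 * only succeeds when we hold the last reference, e.g. an explicit
 * destroy that must fail with -EBUSY while the object is shared:
 *
 *	if (!refcount_dec_if_one(&f->ref))
 *		return -EBUSY;
 *	foo_destroy(f);
 */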

/**
 * refcount_dec_not_one - decrement a refcount if it is not 1
 * @r: the refcount
 *
 * No atomic_t counterpart; it decrements unless the value is 1, in which case
 * it will return false.
 *
 * Was often done like: atomic_add_unless(&var, -1, 1)
 *
 * Return: true if the decrement operation was successful, false otherwise
 */
bool refcount_dec_not_one(refcount_t *r)
{
	unsigned int new, val = atomic_read(&r->refs);

	do {
		if (unlikely(val == UINT_MAX))
			return true;

		if (val == 1)
			return false;

		new = val - 1;
		if (new > val) {
			WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
			return true;
		}

	} while (!atomic_try_cmpxchg_release(&r->refs, &val, new));

	return true;
}
EXPORT_SYMBOL(refcount_dec_not_one);
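
/*
 * Illustrative sketch with made-up names: refcount_dec_not_one() is the
 * lock-free fast path used by the dec-and-lock helpers below; callers
 * can open-code the same structure:
 *
 *	if (refcount_dec_not_one(&f->ref))
 *		return;
 *	foo_final_put_slowpath(f);
 */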

/**
 * refcount_dec_and_mutex_lock - return holding mutex if able to decrement
 *                               refcount to 0
 * @r: the refcount
 * @lock: the mutex to be locked
 *
 * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
 * to decrement when saturated at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 *
 * Return: true and hold mutex if able to decrement refcount to 0, false
 * otherwise
 */
bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
{
	if (refcount_dec_not_one(r))
		return false;

	mutex_lock(lock);
	if (!refcount_dec_and_test(r)) {
		mutex_unlock(lock);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(refcount_dec_and_mutex_lock);
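
/*
 * Illustrative sketch with made-up names: the classic use is unhashing
 * an object exactly once, on the final put. On success the mutex is
 * held and must be dropped by the caller:
 *
 *	if (refcount_dec_and_mutex_lock(&f->ref, &foo_lock)) {
 *		list_del(&f->list);
 *		mutex_unlock(&foo_lock);
 *		kfree(f);
 *	}
 */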

/**
 * refcount_dec_and_lock - return holding spinlock if able to decrement
 *                         refcount to 0
 * @r: the refcount
 * @lock: the spinlock to be locked
 *
 * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
 * decrement when saturated at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 *
 * Return: true and hold spinlock if able to decrement refcount to 0, false
 * otherwise
 */
bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
{
	if (refcount_dec_not_one(r))
		return false;

	spin_lock(lock);
	if (!refcount_dec_and_test(r)) {
		spin_unlock(lock);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(refcount_dec_and_lock);

/**
 * refcount_dec_and_lock_irqsave - return holding spinlock with disabled
 *                                 interrupts if able to decrement refcount to 0
 * @r: the refcount
 * @lock: the spinlock to be locked
 * @flags: saved IRQ-flags if the lock is acquired
 *
 * Same as refcount_dec_and_lock() above except that the spinlock is acquired
 * with disabled interrupts.
 *
 * Return: true and hold spinlock if able to decrement refcount to 0, false
 * otherwise
 */
bool refcount_dec_and_lock_irqsave(refcount_t *r, spinlock_t *lock,
				   unsigned long *flags)
{
	if (refcount_dec_not_one(r))
		return false;

	spin_lock_irqsave(lock, *flags);
	if (!refcount_dec_and_test(r)) {
		spin_unlock_irqrestore(lock, *flags);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(refcount_dec_and_lock_irqsave);
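
/*
 * Illustrative sketch with made-up names: same pattern as above, for a
 * spinlock that is also taken from interrupt context:
 *
 *	unsigned long flags;
 *
 *	if (refcount_dec_and_lock_irqsave(&f->ref, &foo_lock, &flags)) {
 *		list_del(&f->list);
 *		spin_unlock_irqrestore(&foo_lock, flags);
 *		kfree(f);
 *	}
 */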