/*
 * Variant of atomic_t specialized for reference counts.
 *
 * The interface matches the atomic_t interface (to aid in porting) but only
 * provides the few functions one should use for reference counting.
 *
 * It differs in that the counter saturates at UINT_MAX and will not move once
 * there. This avoids wrapping the counter and causing 'spurious'
 * use-after-free issues.
 *
 * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
 * and provide only what is strictly required for refcounts.
 *
 * The increments are fully relaxed; these will not provide ordering. The
 * rationale is that whatever is used to obtain the object we're increasing the
 * reference count on will provide the ordering. For locked data structures,
 * it's the lock acquire; for RCU/lockless data structures it's the dependent
 * load.
 *
 * Do note that inc_not_zero() provides a control dependency which will order
 * future stores against the inc; this ensures we'll never modify the object
 * if we did not in fact acquire a reference.
 *
 * The decrements will provide release order, such that all the prior loads
 * and stores will be issued before; they also provide a control dependency,
 * which will order us against the subsequent free().
 *
 * The control dependency is against the load of the cmpxchg (ll/sc) that
 * succeeded. This means the stores aren't fully ordered, but this is fine
 * because the 1->0 transition indicates no concurrency.
 *
 * Note that the allocator is responsible for ordering things between free()
 * and alloc().
 */
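
/*
 * Example object for the usage sketches in this file (illustrative only;
 * struct foo and the foo_*() helpers are hypothetical and not part of the
 * kernel):
 *
 *	struct foo {
 *		refcount_t ref;
 *	};
 *
 *	struct foo *foo_alloc(void)
 *	{
 *		struct foo *p = kzalloc(sizeof(*p), GFP_KERNEL);
 *
 *		if (p)
 *			refcount_set(&p->ref, 1);
 *		return p;
 *	}
 */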

#include <linux/refcount.h>
#include <linux/bug.h>

/**
 * refcount_add_not_zero - add a value to a refcount unless it is 0
 * @i: the value to add to the refcount
 * @r: the refcount
 *
 * Will saturate at UINT_MAX and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time. In these
 * cases, refcount_inc(), or one of its variants, should instead be used to
 * increment a reference count.
 *
 * Return: false if the passed refcount is 0, true otherwise
 */
bool refcount_add_not_zero(unsigned int i, refcount_t *r)
{
	unsigned int new, val = atomic_read(&r->refs);

	do {
		if (!val)
			return false;

		if (unlikely(val == UINT_MAX))
			return true;

		new = val + i;
		if (new < val)
			new = UINT_MAX;

	} while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));

	WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");

	return true;
}
EXPORT_SYMBOL(refcount_add_not_zero);
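
/*
 * Sketch of a batched acquire (illustrative; obj and nr_consumers are
 * hypothetical): take several references in one atomic operation when
 * handing the object to multiple consumers, relying on RCU or similar to
 * keep the object memory stable during the attempt:
 *
 *	if (!refcount_add_not_zero(nr_consumers, &obj->ref))
 *		return -ENOENT;
 */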

/**
 * refcount_add - add a value to a refcount
 * @i: the value to add to the refcount
 * @r: the refcount
 *
 * Similar to atomic_add(), but will saturate at UINT_MAX and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time. In these
 * cases, refcount_inc(), or one of its variants, should instead be used to
 * increment a reference count.
 */
void refcount_add(unsigned int i, refcount_t *r)
{
	WARN_ONCE(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n");
}
EXPORT_SYMBOL(refcount_add);

/**
 * refcount_inc_not_zero - increment a refcount unless it is 0
 * @r: the refcount to increment
 *
 * Similar to atomic_inc_not_zero(), but will saturate at UINT_MAX and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Return: true if the increment was successful, false otherwise
 */
bool refcount_inc_not_zero(refcount_t *r)
{
	unsigned int new, val = atomic_read(&r->refs);

	do {
		new = val + 1;

		if (!val)
			return false;

		if (unlikely(!new))
			return true;

	} while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));

	WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");

	return true;
}
EXPORT_SYMBOL(refcount_inc_not_zero);
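
/*
 * Typical lockless-lookup sketch (illustrative; the RCU-protected table
 * and struct foo are hypothetical). A failed refcount_inc_not_zero()
 * means the object is already being torn down:
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(table[i]);
 *	if (p && !refcount_inc_not_zero(&p->ref))
 *		p = NULL;
 *	rcu_read_unlock();
 */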

/**
 * refcount_inc - increment a refcount
 * @r: the refcount to increment
 *
 * Similar to atomic_inc(), but will saturate at UINT_MAX and WARN.
 *
 * Provides no memory ordering; it is assumed the caller already has a
 * reference on the object.
 *
 * Will WARN if the refcount is 0, as this represents a possible use-after-free
 * condition.
 */
void refcount_inc(refcount_t *r)
{
	WARN_ONCE(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n");
}
EXPORT_SYMBOL(refcount_inc);

/**
 * refcount_sub_and_test - subtract from a refcount and test if it is 0
 * @i: amount to subtract from the refcount
 * @r: the refcount
 *
 * Similar to atomic_dec_and_test(), but it will WARN on underflow, return
 * false and ultimately leak the object, and it will fail to decrement when
 * saturated at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time. In these
 * cases, refcount_dec(), or one of its variants, should instead be used to
 * decrement a reference count.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
bool refcount_sub_and_test(unsigned int i, refcount_t *r)
{
	unsigned int new, val = atomic_read(&r->refs);

	do {
		if (unlikely(val == UINT_MAX))
			return false;

		new = val - i;
		if (new > val) {
			WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
			return false;
		}

	} while (!atomic_try_cmpxchg_release(&r->refs, &val, new));

	return !new;
}
EXPORT_SYMBOL(refcount_sub_and_test);

/**
 * refcount_dec_and_test - decrement a refcount and test if it is 0
 * @r: the refcount
 *
 * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
 * decrement when saturated at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
bool refcount_dec_and_test(refcount_t *r)
{
	return refcount_sub_and_test(1, r);
}
EXPORT_SYMBOL(refcount_dec_and_test);
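
/*
 * Canonical put-side sketch (illustrative; foo_free() is hypothetical).
 * The release ordering plus the control dependency documented above make
 * it safe to free the object once this returns true:
 *
 *	void foo_put(struct foo *p)
 *	{
 *		if (refcount_dec_and_test(&p->ref))
 *			foo_free(p);
 *	}
 */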

/**
 * refcount_dec - decrement a refcount
 * @r: the refcount
 *
 * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
 * when saturated at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before.
 */
void refcount_dec(refcount_t *r)
{
	WARN_ONCE(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
}
EXPORT_SYMBOL(refcount_dec);

/**
 * refcount_dec_if_one - decrement a refcount if it is 1
 * @r: the refcount
 *
 * No atomic_t counterpart; it attempts a 1 -> 0 transition and returns the
 * success thereof.
 *
 * Like all decrement operations, it provides release memory order and
 * provides a control dependency.
 *
 * It can be used like a try-delete operator; this explicit case is provided
 * instead of a generic cmpxchg, because a generic cmpxchg would allow
 * implementing unsafe operations.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
bool refcount_dec_if_one(refcount_t *r)
{
	int val = 1;

	return atomic_try_cmpxchg_release(&r->refs, &val, 0);
}
EXPORT_SYMBOL(refcount_dec_if_one);
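
/*
 * Try-delete sketch (illustrative; foo_unhash() and foo_free() are
 * hypothetical): tear the object down only if we hold the last reference,
 * and do nothing otherwise:
 *
 *	if (refcount_dec_if_one(&p->ref)) {
 *		foo_unhash(p);
 *		foo_free(p);
 *	}
 */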

/**
 * refcount_dec_not_one - decrement a refcount if it is not 1
 * @r: the refcount
 *
 * No atomic_t counterpart; it decrements unless the value is 1, in which case
 * it will return false.
 *
 * This was often open-coded as: atomic_add_unless(&var, -1, 1)
 *
 * Return: true if the decrement operation was successful, false otherwise
 */
bool refcount_dec_not_one(refcount_t *r)
{
	unsigned int new, val = atomic_read(&r->refs);

	do {
		if (unlikely(val == UINT_MAX))
			return true;

		if (val == 1)
			return false;

		new = val - 1;
		if (new > val) {
			WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
			return true;
		}

	} while (!atomic_try_cmpxchg_release(&r->refs, &val, new));

	return true;
}
EXPORT_SYMBOL(refcount_dec_not_one);

/**
 * refcount_dec_and_mutex_lock - return holding mutex if able to decrement
 *                               refcount to 0
 * @r: the refcount
 * @lock: the mutex to be locked
 *
 * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
 * to decrement when saturated at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 *
 * Return: true and hold mutex if able to decrement refcount to 0, false
 * otherwise
 */
bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
{
	if (refcount_dec_not_one(r))
		return false;

	mutex_lock(lock);
	if (!refcount_dec_and_test(r)) {
		mutex_unlock(lock);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(refcount_dec_and_mutex_lock);
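
/*
 * Sketch of the intended pattern (illustrative; foo_mutex, the list
 * linkage and foo_free() are hypothetical): drop a reference, and if it
 * was the last one, remove the object from a mutex-protected list before
 * freeing it. Note the mutex is held on a true return and must be dropped
 * by the caller:
 *
 *	if (refcount_dec_and_mutex_lock(&p->ref, &foo_mutex)) {
 *		list_del(&p->node);
 *		mutex_unlock(&foo_mutex);
 *		foo_free(p);
 *	}
 */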

/**
 * refcount_dec_and_lock - return holding spinlock if able to decrement
 *                         refcount to 0
 * @r: the refcount
 * @lock: the spinlock to be locked
 *
 * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
 * decrement when saturated at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 *
 * Return: true and hold spinlock if able to decrement refcount to 0, false
 * otherwise
 */
bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
{
	if (refcount_dec_not_one(r))
		return false;

	spin_lock(lock);
	if (!refcount_dec_and_test(r)) {
		spin_unlock(lock);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(refcount_dec_and_lock);