/*
 * kref.h - library routines for handling generic reference counted objects
 *
 * Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com>
 * Copyright (C) 2004 IBM Corp.
 *
 * based on kobject.h which was:
 * Copyright (C) 2002-2003 Patrick Mochel <mochel@osdl.org>
 * Copyright (C) 2002-2003 Open Source Development Labs
 *
 * This file is released under the GPLv2.
 *
 */

#ifndef _KREF_H_
#define _KREF_H_

#include <linux/bug.h>
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>

struct kref {
	atomic_t refcount;
};

/**
 * kref_init - initialize object.
 * @kref: object in question.
 */
static inline void kref_init(struct kref *kref)
{
	atomic_set(&kref->refcount, 1);
}
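
/*
 * Usage sketch (illustrative, not part of this header): a hypothetical
 * "struct my_data" embeds the kref; the type and field names are invented
 * for the example. The kref must be initialized before first use:
 *
 *	struct my_data {
 *		struct kref refcount;
 *	};
 *
 *	struct my_data *data = kmalloc(sizeof(*data), GFP_KERNEL);
 *	if (!data)
 *		return -ENOMEM;
 *	kref_init(&data->refcount);
 */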

/**
 * kref_get - increment refcount for object.
 * @kref: object.
 */
static inline void kref_get(struct kref *kref)
{
	/* If the refcount was 0 before the increment, we are racing with
	 * another thread that may be freeing this kref right now.
	 * In that case, use kref_get_unless_zero() instead.
	 */
	WARN_ON_ONCE(atomic_inc_return(&kref->refcount) < 2);
}
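
/*
 * Usage sketch (illustrative): take an extra reference before handing the
 * object to another context, so that each holder later does its own
 * kref_put(). "data" and give_to_other_thread() are invented names:
 *
 *	kref_get(&data->refcount);
 *	give_to_other_thread(data);
 */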

/**
 * kref_sub - subtract a number of refcounts for object.
 * @kref: object.
 * @count: number of refcounts to subtract.
 * @release: pointer to the function that will clean up the object when the
 *	     last reference to the object is released.
 *	     This pointer is required, and it is not acceptable to pass kfree
 *	     in as this function. If the caller does pass kfree to this
 *	     function, you will be publicly mocked mercilessly by the kref
 *	     maintainer, and anyone else who happens to notice it. You have
 *	     been warned.
 *
 * Subtract @count from the refcount, and if it reaches 0, call release().
 * Return 1 if the object was removed, otherwise return 0. Beware: even if
 * this function returns 0, you cannot count on the kref still being in
 * memory, since another thread may drop the last reference at any time.
 * Only use the return value to learn that the kref is now gone, never to
 * conclude that it is still present.
 */
static inline int kref_sub(struct kref *kref, unsigned int count,
	     void (*release)(struct kref *kref))
{
	WARN_ON(release == NULL);

	if (atomic_sub_and_test((int) count, &kref->refcount)) {
		release(kref);
		return 1;
	}
	return 0;
}
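
/*
 * Note: kref_put() below is simply kref_sub(kref, 1, release). Calling
 * kref_sub() directly is only useful when dropping several references at
 * once; names below are invented for illustration:
 *
 *	if (kref_sub(&data->refcount, nr_refs, my_release))
 *		pr_debug("last reference was dropped\n");
 */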

/**
 * kref_put - decrement refcount for object.
 * @kref: object.
 * @release: pointer to the function that will clean up the object when the
 *	     last reference to the object is released.
 *	     This pointer is required, and it is not acceptable to pass kfree
 *	     in as this function. If the caller does pass kfree to this
 *	     function, you will be publicly mocked mercilessly by the kref
 *	     maintainer, and anyone else who happens to notice it. You have
 *	     been warned.
 *
 * Decrement the refcount, and if it reaches 0, call release().
 * Return 1 if the object was removed, otherwise return 0. Beware: even if
 * this function returns 0, you cannot count on the kref still being in
 * memory, since another thread may drop the last reference at any time.
 * Only use the return value to learn that the kref is now gone, never to
 * conclude that it is still present.
 */
static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref))
{
	return kref_sub(kref, 1, release);
}
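
/*
 * Usage sketch (illustrative): the release function receives a pointer to
 * the embedded kref and recovers the enclosing object with container_of().
 * "struct my_data" and my_release() are invented names:
 *
 *	static void my_release(struct kref *kref)
 *	{
 *		struct my_data *data =
 *			container_of(kref, struct my_data, refcount);
 *
 *		kfree(data);
 *	}
 *
 *	kref_put(&data->refcount, my_release);
 */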

/**
 * kref_put_spinlock_irqsave - decrement refcount for object.
 * @kref: object.
 * @release: pointer to the function that will clean up the object when the
 *	     last reference to the object is released.
 *	     This pointer is required, and it is not acceptable to pass kfree
 *	     in as this function.
 * @lock: lock to take in the release case.
 *
 * Behaves identically to kref_put() with one exception: if the reference
 * count drops to zero, the lock is taken atomically with respect to dropping
 * the reference count. The release function has to call spin_unlock()
 * without _irqrestore; the interrupt flags are restored here once release()
 * has returned.
 */
static inline int kref_put_spinlock_irqsave(struct kref *kref,
		void (*release)(struct kref *kref),
		spinlock_t *lock)
{
	unsigned long flags;

	WARN_ON(release == NULL);
	if (atomic_add_unless(&kref->refcount, -1, 1))
		return 0;
	spin_lock_irqsave(lock, flags);
	if (atomic_dec_and_test(&kref->refcount)) {
		release(kref);
		local_irq_restore(flags);
		return 1;
	}
	spin_unlock_irqrestore(lock, flags);
	return 0;
}
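
/*
 * Usage sketch (illustrative): the release function runs with the lock held
 * and must drop it with plain spin_unlock(); the interrupt flags are
 * restored by kref_put_spinlock_irqsave() itself. my_list_lock and the
 * other names are invented:
 *
 *	static void my_release_locked(struct kref *kref)
 *	{
 *		struct my_data *data =
 *			container_of(kref, struct my_data, refcount);
 *
 *		list_del(&data->node);
 *		spin_unlock(&my_list_lock);
 *		kfree(data);
 *	}
 *
 *	kref_put_spinlock_irqsave(&data->refcount, my_release_locked,
 *				  &my_list_lock);
 */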
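/**
 * kref_put_mutex - decrement refcount for object, taking a mutex on the
 *		    final put.
 * @kref: object.
 * @release: pointer to the function that will clean up the object when the
 *	     last reference to the object is released.
 *	     This pointer is required, and it is not acceptable to pass kfree
 *	     in as this function.
 * @lock: mutex to take in the release case.
 *
 * Behaves like kref_put() except that if the reference count drops to zero,
 * @lock is acquired atomically with respect to dropping the reference count.
 * release() is then called with @lock held, and is responsible for unlocking
 * it.
 */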
static inline int kref_put_mutex(struct kref *kref,
				 void (*release)(struct kref *kref),
				 struct mutex *lock)
{
	WARN_ON(release == NULL);
	if (unlikely(!atomic_add_unless(&kref->refcount, -1, 1))) {
		mutex_lock(lock);
		if (unlikely(!atomic_dec_and_test(&kref->refcount))) {
			mutex_unlock(lock);
			return 0;
		}
		release(kref);
		return 1;
	}
	return 0;
}
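
/*
 * Usage sketch (illustrative): the release callback passed to
 * kref_put_mutex() runs with the mutex held and must unlock it itself.
 * my_list_mutex and the other names are invented:
 *
 *	static void my_release_mutex(struct kref *kref)
 *	{
 *		struct my_data *data =
 *			container_of(kref, struct my_data, refcount);
 *
 *		list_del(&data->node);
 *		mutex_unlock(&my_list_mutex);
 *		kfree(data);
 *	}
 *
 *	kref_put_mutex(&data->refcount, my_release_mutex, &my_list_mutex);
 */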

/**
 * kref_get_unless_zero - Increment refcount for object unless it is zero.
 * @kref: object.
 *
 * Return non-zero if the increment succeeded. Otherwise return 0.
 *
 * This function is intended to simplify locking around refcounting for
 * objects that can be looked up from a lookup structure, and which are
 * removed from that lookup structure in the object destructor.
 * Operations on such objects require at least a read lock around
 * lookup + kref_get, and a write lock around kref_put + removal from the
 * lookup structure. Furthermore, RCU implementations become extremely
 * tricky. With a lookup followed by kref_get_unless_zero() *with a return
 * value check*, locking in the kref_put path can be deferred to the actual
 * removal from the lookup structure, and RCU lookups become trivial.
 */
static inline int __must_check kref_get_unless_zero(struct kref *kref)
{
	return atomic_add_unless(&kref->refcount, 1, 0);
}
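
/*
 * Usage sketch (illustrative): a lockless lookup where the destructor
 * removes the object from the lookup structure. Only a successful
 * kref_get_unless_zero() grants a usable reference; my_lookup() and the
 * other names are invented:
 *
 *	rcu_read_lock();
 *	data = my_lookup(key);
 *	if (data && !kref_get_unless_zero(&data->refcount))
 *		data = NULL;
 *	rcu_read_unlock();
 */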
#endif /* _KREF_H_ */