#define pr_fmt(fmt) "%s: " fmt "\n", __func__

#include <linux/kernel.h>
#include <linux/percpu-refcount.h>

/*
 * Initially, a percpu refcount is just a set of percpu counters. Initially, we
 * don't try to detect the ref hitting 0 - which means that get/put can just
 * increment or decrement the local counter. Note that the counter on a
 * particular cpu can (and will) wrap - this is fine; when we go to shut down,
 * the percpu counters will all sum to the correct value.
 *
 * (More precisely: because modular arithmetic is commutative, the sum of all
 * the pcpu_count vars will be equal to what it would have been if all the gets
 * and puts were done to a single integer, even if some of the percpu integers
 * overflow or underflow).
 *
 * The real trick to implementing percpu refcounts is shutdown. We can't detect
 * the ref hitting 0 on every put - this would require global synchronization
 * and defeat the whole purpose of using percpu refs.
 *
 * What we do is require the user to keep track of the initial refcount; we know
 * the ref can't hit 0 before the user drops the initial ref, so as long as we
 * convert to non percpu mode before the initial ref is dropped everything
 * works.
 *
 * Converting to non percpu mode is done with some RCUish stuff in
 * percpu_ref_kill. Additionally, we need a bias value so that the atomic_t
 * can't hit 0 before we've added up all the percpu refs.
 */

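/*
 * Illustrative sketch of the intended lifecycle (struct foo, foo_release()
 * and foo->ref are hypothetical names, not part of this file).  The object
 * embeds a percpu_ref; gets and puts stay percpu until percpu_ref_kill()
 * drops the initial ref, after which foo_release() runs once the last
 * remaining ref is put:
 *
 *	static void foo_release(struct percpu_ref *ref)
 *	{
 *		struct foo *foo = container_of(ref, struct foo, ref);
 *
 *		kfree(foo);
 *	}
 *
 *	percpu_ref_init(&foo->ref, foo_release);
 *	percpu_ref_get(&foo->ref);
 *	percpu_ref_put(&foo->ref);
 *	percpu_ref_kill(&foo->ref);
 */
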
#define PCPU_COUNT_BIAS		(1U << 31)

/**
 * percpu_ref_init - initialize a percpu refcount
 * @ref: percpu_ref to initialize
 * @release: function which will be called when refcount hits 0
 *
 * Initializes the refcount in single atomic counter mode with a refcount of 1;
 * analogous to atomic_set(ref, 1).
 *
 * Note that @release must not sleep - it may potentially be called from RCU
 * callback context by percpu_ref_kill().
 */
int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release)
{
	atomic_set(&ref->count, 1 + PCPU_COUNT_BIAS);

	ref->pcpu_count = alloc_percpu(unsigned);
	if (!ref->pcpu_count)
		return -ENOMEM;

	ref->release = release;
	return 0;
}
EXPORT_SYMBOL_GPL(percpu_ref_init);

/**
 * percpu_ref_cancel_init - cancel percpu_ref_init()
 * @ref: percpu_ref to cancel init for
 *
 * Once a percpu_ref is initialized, its destruction is initiated by
 * percpu_ref_kill() and completes asynchronously, which can be painful to
 * do when destroying a half-constructed object in an init failure path.
 *
 * This function destroys @ref without invoking @ref->release, and the
 * memory area containing it can be freed immediately on return. To
 * prevent accidental misuse, it's required that @ref has finished
 * percpu_ref_init(), whether it succeeded or not, but has never been used.
 *
 * The weird name and usage restriction are to prevent people from using
 * this function by mistake for normal shutdown instead of
 * percpu_ref_kill().
 */
void percpu_ref_cancel_init(struct percpu_ref *ref)
{
	unsigned __percpu *pcpu_count = ref->pcpu_count;
	int cpu;

	WARN_ON_ONCE(atomic_read(&ref->count) != 1 + PCPU_COUNT_BIAS);

	if (pcpu_count) {
		for_each_possible_cpu(cpu)
			WARN_ON_ONCE(*per_cpu_ptr(pcpu_count, cpu));
		free_percpu(ref->pcpu_count);
	}
}
EXPORT_SYMBOL_GPL(percpu_ref_cancel_init);

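/*
 * Sketch of the init-failure pattern percpu_ref_cancel_init() is intended
 * for (foo, foo_release() and foo_setup_other_stuff() are hypothetical):
 *
 *	if (percpu_ref_init(&foo->ref, foo_release))
 *		goto err_free_foo;
 *
 *	if (foo_setup_other_stuff(foo)) {
 *		percpu_ref_cancel_init(&foo->ref);
 *		goto err_free_foo;
 *	}
 */
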
static void percpu_ref_kill_rcu(struct rcu_head *rcu)
{
	struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);
	unsigned __percpu *pcpu_count = ref->pcpu_count;
	unsigned count = 0;
	int cpu;

	/* Mask out PCPU_REF_DEAD */
	pcpu_count = (unsigned __percpu *)
		(((unsigned long) pcpu_count) & ~PCPU_STATUS_MASK);

	for_each_possible_cpu(cpu)
		count += *per_cpu_ptr(pcpu_count, cpu);

	free_percpu(pcpu_count);

	pr_debug("global %i pcpu %i", atomic_read(&ref->count), (int) count);

	/*
	 * It's crucial that we sum the percpu counters _before_ adding the sum
	 * to &ref->count; since gets could be happening on one cpu while puts
	 * happen on another, adding a single cpu's count could cause
	 * @ref->count to hit 0 before we've got a consistent value - but the
	 * sum of all the counts will be consistent and correct.
	 *
	 * Subtracting the bias value then has to happen _after_ adding count to
	 * &ref->count; we need the bias value to prevent &ref->count from
	 * reaching 0 before we add the percpu counts. But doing it at the same
	 * time is equivalent and saves us atomic operations:
	 */
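	/*
	 * (Worked example, with hypothetical numbers: percpu_ref_init() set
	 * &ref->count to 1 + PCPU_COUNT_BIAS.  Suppose the percpu counters
	 * sum to 2 - say, three gets and one put while in percpu mode - so
	 * the true refcount is 3: the initial ref plus two outstanding refs.
	 * Adding 2 - PCPU_COUNT_BIAS below leaves &ref->count at exactly 3.)
	 */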

	atomic_add((int) count - PCPU_COUNT_BIAS, &ref->count);

	WARN_ONCE(atomic_read(&ref->count) <= 0, "percpu ref <= 0 (%i)",
		  atomic_read(&ref->count));

	/* @ref is viewed as dead on all CPUs, send out kill confirmation */
	if (ref->confirm_kill)
		ref->confirm_kill(ref);

	/*
	 * Now we're in single atomic_t mode with a consistent refcount, so it's
	 * safe to drop our initial ref:
	 */
	percpu_ref_put(ref);
}

/**
 * percpu_ref_kill_and_confirm - drop the initial ref and schedule confirmation
 * @ref: percpu_ref to kill
 * @confirm_kill: optional confirmation callback
 *
 * Equivalent to percpu_ref_kill() but also schedules kill confirmation if
 * @confirm_kill is not NULL. @confirm_kill, which may not block, will be
 * called after @ref is seen as dead from all CPUs - all further
 * invocations of percpu_ref_tryget() will fail. See percpu_ref_tryget()
 * for more details.
 *
 * Due to the way percpu_ref is implemented, @confirm_kill will be called
 * after at least one full RCU grace period has passed but this is an
 * implementation detail and callers must not depend on it.
 */
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_kill)
{
	WARN_ONCE(REF_STATUS(ref->pcpu_count) == PCPU_REF_DEAD,
		  "percpu_ref_kill() called more than once!\n");

	ref->pcpu_count = (unsigned __percpu *)
		(((unsigned long) ref->pcpu_count)|PCPU_REF_DEAD);
	ref->confirm_kill = confirm_kill;

	call_rcu_sched(&ref->rcu, percpu_ref_kill_rcu);
}
EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
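
/*
 * Illustrative use of the confirmation callback (struct foo, foo_confirm_kill()
 * and foo->kill_done are hypothetical): a caller that must wait until
 * percpu_ref_tryget() is guaranteed to fail can pair @confirm_kill with a
 * completion; complete() doesn't block, so it's safe in the callback:
 *
 *	static void foo_confirm_kill(struct percpu_ref *ref)
 *	{
 *		struct foo *foo = container_of(ref, struct foo, ref);
 *
 *		complete(&foo->kill_done);
 *	}
 *
 *	percpu_ref_kill_and_confirm(&foo->ref, foo_confirm_kill);
 *	wait_for_completion(&foo->kill_done);
 */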