#define pr_fmt(fmt) "%s: " fmt "\n", __func__

#include <linux/kernel.h>
#include <linux/percpu-refcount.h>

/*
 * Initially, a percpu refcount is just a set of percpu counters, and we don't
 * try to detect the ref hitting 0 - which means that get/put can just
 * increment or decrement the local counter. Note that the counter on a
 * particular cpu can (and will) wrap - this is fine; when we go to shut down,
 * the percpu counters will all sum to the correct value.
 *
 * (More precisely: because modular arithmetic is commutative, the sum of all
 * the pcpu_count vars will be equal to what it would have been if all the gets
 * and puts were done to a single integer, even if some of the percpu integers
 * overflow or underflow.)
 *
 * The real trick to implementing percpu refcounts is shutdown. We can't detect
 * the ref hitting 0 on every put - this would require global synchronization
 * and defeat the whole purpose of using percpu refs.
 *
 * What we do is require the user to keep track of the initial refcount; we know
 * the ref can't hit 0 before the user drops the initial ref, so as long as we
 * convert to non-percpu mode before the initial ref is dropped everything
 * works.
 *
 * Converting to non-percpu mode is done with some RCUish stuff in
 * percpu_ref_kill(). Additionally, we need a bias value so that the atomic_t
 * can't hit 0 before we've added up all the percpu refs.
 */
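/*
 * A deliberately tiny worked example of the modular-arithmetic point above
 * (the real counters are 32-bit unsigned, not 8-bit): with 8-bit percpu
 * counters, 300 gets on cpu0 leave its counter at 300 % 256 = 44 and 280
 * puts on cpu1 leave its counter at -280 % 256 = 232.  The sum 44 + 232 =
 * 276 wraps to 20, which is exactly 300 - 280, the net count the users
 * really hold.
 */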

#define PCPU_COUNT_BIAS (1U << 31)

static unsigned __percpu *pcpu_count_ptr(struct percpu_ref *ref)
{
	return (unsigned __percpu *)(ref->pcpu_count_ptr & ~PCPU_REF_DEAD);
}

/**
 * percpu_ref_init - initialize a percpu refcount
 * @ref: percpu_ref to initialize
 * @release: function which will be called when refcount hits 0
 *
 * Initializes @ref in percpu mode with an overall refcount of 1; analogous
 * to atomic_set(ref, 1).
 *
 * Note that @release must not sleep - it may potentially be called from RCU
 * callback context by percpu_ref_kill().
 */
int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release)
{
	atomic_set(&ref->count, 1 + PCPU_COUNT_BIAS);

	ref->pcpu_count_ptr = (unsigned long)alloc_percpu(unsigned);
	if (!ref->pcpu_count_ptr)
		return -ENOMEM;

	ref->release = release;
	return 0;
}
EXPORT_SYMBOL_GPL(percpu_ref_init);
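/*
 * A minimal usage sketch.  struct foo, foo_release() and foo_alloc() are
 * hypothetical and only illustrate the calling convention:
 *
 *	struct foo {
 *		struct percpu_ref	ref;
 *	};
 *
 *	static void foo_release(struct percpu_ref *ref)
 *	{
 *		struct foo *foo = container_of(ref, struct foo, ref);
 *
 *		percpu_ref_exit(&foo->ref);
 *		kfree(foo);
 *	}
 *
 *	static struct foo *foo_alloc(void)
 *	{
 *		struct foo *foo = kzalloc(sizeof(*foo), GFP_KERNEL);
 *
 *		if (foo && percpu_ref_init(&foo->ref, foo_release)) {
 *			kfree(foo);
 *			foo = NULL;
 *		}
 *		return foo;
 *	}
 *
 * Afterwards percpu_ref_get()/percpu_ref_put() pairs are cheap percpu
 * operations, and foo_release() - which must not sleep, so kfree() is fine
 * but e.g. mutex_lock() is not - runs once percpu_ref_kill() has dropped
 * the initial ref and every other ref has been put.
 */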

/**
 * percpu_ref_reinit - re-initialize a percpu refcount
 * @ref: percpu_ref to re-initialize
 *
 * Re-initialize @ref so that it's in the same state as when it finished
 * percpu_ref_init(). @ref must have been initialized successfully, killed
 * and reached 0 but not exited.
 *
 * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
 * this function is in progress.
 */
void percpu_ref_reinit(struct percpu_ref *ref)
{
	unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
	int cpu;

	BUG_ON(!pcpu_count);
	WARN_ON(!percpu_ref_is_zero(ref));

	atomic_set(&ref->count, 1 + PCPU_COUNT_BIAS);

	/*
	 * Restore per-cpu operation. smp_store_release() is paired with
	 * smp_read_barrier_depends() in __pcpu_ref_alive() and guarantees
	 * that the zeroing is visible to all percpu accesses which can see
	 * the following PCPU_REF_DEAD clearing.
	 */
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(pcpu_count, cpu) = 0;

	smp_store_release(&ref->pcpu_count_ptr,
			  ref->pcpu_count_ptr & ~PCPU_REF_DEAD);
}
EXPORT_SYMBOL_GPL(percpu_ref_reinit);
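/*
 * A sketch of the kill/drain/reinit cycle this enables, assuming a
 * hypothetical foo->drain_wq waitqueue and a release callback that wakes
 * it instead of freeing the object:
 *
 *	percpu_ref_kill(&foo->ref);
 *	wait_event(foo->drain_wq, percpu_ref_is_zero(&foo->ref));
 *	... operate on the quiesced object ...
 *	percpu_ref_reinit(&foo->ref);
 *
 * Because the object survives hitting zero, it satisfies the "killed and
 * reached 0 but not exited" requirement above and can be re-armed.
 */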

/**
 * percpu_ref_exit - undo percpu_ref_init()
 * @ref: percpu_ref to exit
 *
 * This function exits @ref. The caller is responsible for ensuring that
 * @ref is no longer in active use. The usual places to invoke this
 * function from are the @ref->release() callback or the init failure path
 * where percpu_ref_init() succeeded but other parts of the initialization
 * of the embedding object failed.
 */
void percpu_ref_exit(struct percpu_ref *ref)
{
	unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);

	if (pcpu_count) {
		free_percpu(pcpu_count);
		ref->pcpu_count_ptr = PCPU_REF_DEAD;
	}
}
EXPORT_SYMBOL_GPL(percpu_ref_exit);
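/*
 * The other usual caller is the failure path of the embedding object's own
 * setup, where percpu_ref_init() succeeded but a later step did not
 * (bar_setup() is hypothetical):
 *
 *	ret = percpu_ref_init(&foo->ref, foo_release);
 *	if (ret)
 *		goto err_free_foo;
 *	ret = bar_setup(foo);
 *	if (ret)
 *		goto err_exit_ref;
 *	return 0;
 *
 * err_exit_ref:
 *	percpu_ref_exit(&foo->ref);
 * err_free_foo:
 *	kfree(foo);
 *	return ret;
 */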

static void percpu_ref_kill_rcu(struct rcu_head *rcu)
{
	struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);
	unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
	unsigned count = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		count += *per_cpu_ptr(pcpu_count, cpu);

	pr_debug("global %i pcpu %i", atomic_read(&ref->count), (int) count);

	/*
	 * It's crucial that we sum the percpu counters _before_ adding the sum
	 * to &ref->count; since gets could be happening on one cpu while puts
	 * happen on another, adding a single cpu's count could cause
	 * @ref->count to hit 0 before we've got a consistent value - but the
	 * sum of all the counts will be consistent and correct.
	 *
	 * Subtracting the bias value then has to happen _after_ adding count to
	 * &ref->count; we need the bias value to prevent &ref->count from
	 * reaching 0 before we add the percpu counts. But doing it at the same
	 * time is equivalent and saves us atomic operations:
	 */

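	/*
	 * Concretely: suppose the only ref besides the initial one was taken
	 * in percpu mode (so count above is 1) and its put races with this
	 * path in atomic mode.  Without the bias, &ref->count would start at
	 * 1 and the racing put would drop it to 0, firing ->release() while
	 * the initial ref is still held.  With the bias it goes
	 * PCPU_COUNT_BIAS + 1 -> PCPU_COUNT_BIAS -> (after the add below) 1,
	 * and only the final put of the initial ref can reach 0.
	 */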
	atomic_add((int) count - PCPU_COUNT_BIAS, &ref->count);

	WARN_ONCE(atomic_read(&ref->count) <= 0, "percpu ref <= 0 (%i)",
		  atomic_read(&ref->count));

	/* @ref is viewed as dead on all CPUs, send out kill confirmation */
	if (ref->confirm_kill)
		ref->confirm_kill(ref);

	/*
	 * Now we're in single atomic_t mode with a consistent refcount, so it's
	 * safe to drop our initial ref:
	 */
	percpu_ref_put(ref);
}

/**
 * percpu_ref_kill_and_confirm - drop the initial ref and schedule confirmation
 * @ref: percpu_ref to kill
 * @confirm_kill: optional confirmation callback
 *
 * Equivalent to percpu_ref_kill() but also schedules kill confirmation if
 * @confirm_kill is not NULL. @confirm_kill, which may not block, will be
 * called after @ref is seen as dead from all CPUs - all further
 * invocations of percpu_ref_tryget() will fail. See percpu_ref_tryget()
 * for more details.
 *
 * Due to the way percpu_ref is implemented, @confirm_kill will be called
 * after at least one full RCU grace period has passed but this is an
 * implementation detail and callers must not depend on it.
 */
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_kill)
{
	WARN_ONCE(ref->pcpu_count_ptr & PCPU_REF_DEAD,
		  "percpu_ref_kill() called more than once!\n");

	ref->pcpu_count_ptr |= PCPU_REF_DEAD;
	ref->confirm_kill = confirm_kill;

	call_rcu_sched(&ref->rcu, percpu_ref_kill_rcu);
}
EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
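/*
 * A sketch of how @confirm_kill is typically used; foo->confirm_done is a
 * hypothetical struct completion embedded next to the ref:
 *
 *	static void foo_confirm_kill(struct percpu_ref *ref)
 *	{
 *		struct foo *foo = container_of(ref, struct foo, ref);
 *
 *		complete(&foo->confirm_done);
 *	}
 *
 *	percpu_ref_kill_and_confirm(&foo->ref, foo_confirm_kill);
 *	wait_for_completion(&foo->confirm_done);
 *
 * Once the wait returns, every CPU sees the ref as dead and any further
 * percpu_ref_tryget() is guaranteed to fail, so the caller can rely on no
 * new references being taken.
 */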

/*
 * XXX: Temporary kludge to work around SCSI blk-mq stall. Used only by
 * block/blk-mq.c::blk_mq_freeze_queue(). Will be removed during the v3.18
 * devel cycle. Do not use anywhere else.
 */
void __percpu_ref_kill_expedited(struct percpu_ref *ref)
{
	WARN_ONCE(ref->pcpu_count_ptr & PCPU_REF_DEAD,
		  "percpu_ref_kill() called more than once on %pf!",
		  ref->release);

	ref->pcpu_count_ptr |= PCPU_REF_DEAD;
	synchronize_sched_expedited();
	percpu_ref_kill_rcu(&ref->rcu);
}