#define pr_fmt(fmt) "%s: " fmt "\n", __func__

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/percpu-refcount.h>

/*
 * A percpu refcount starts out as just a set of percpu counters. Initially, we
 * don't try to detect the ref hitting 0 - which means that get/put can just
 * increment or decrement the local counter. Note that the counter on a
 * particular cpu can (and will) wrap - this is fine; when we go to shut down,
 * the percpu counters will all sum to the correct value.
 *
 * (More precisely: because modular arithmetic is commutative, the sum of all
 * the percpu_count vars will be equal to what it would have been if all the
 * gets and puts were done to a single integer, even if some of the percpu
 * integers overflow or underflow).
 *
 * The real trick to implementing percpu refcounts is shutdown. We can't detect
 * the ref hitting 0 on every put - this would require global synchronization
 * and defeat the whole purpose of using percpu refs.
 *
 * What we do is require the user to keep track of the initial refcount; we know
 * the ref can't hit 0 before the user drops the initial ref, so as long as we
 * convert to non-percpu mode before the initial ref is dropped, everything
 * works.
 *
 * Converting to non-percpu mode is done with some RCUish stuff in
 * percpu_ref_kill. Additionally, we need a bias value so that the
 * atomic_long_t can't hit 0 before we've added up all the percpu refs.
 */

#define PERCPU_COUNT_BIAS	(1LU << (BITS_PER_LONG - 1))
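
/*
 * A worked example of the bias (illustrative numbers assuming a 64-bit
 * machine, i.e. PERCPU_COUNT_BIAS == 2^63): percpu_ref_init() starts a
 * percpu-mode ref with the atomic counter at BIAS + 1 (the bias plus the
 * initial ref) and all percpu counters at 0. Suppose CPU0 then does two
 * gets (its counter becomes 2) while CPU1 does one put (its counter
 * wraps to 2^64 - 1). When switching to atomic mode the percpu counters
 * sum to 1 modulo 2^64, and percpu_ref_switch_to_atomic_rcu() adds
 * (1 - BIAS) to the atomic counter: BIAS + 1 + 1 - BIAS = 2, exactly the
 * initial ref plus the one outstanding get. Until that addition happens,
 * the bias keeps the atomic counter from reaching 0.
 */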

static DECLARE_WAIT_QUEUE_HEAD(percpu_ref_switch_waitq);

static unsigned long __percpu *percpu_count_ptr(struct percpu_ref *ref)
{
	return (unsigned long __percpu *)
		(ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC_DEAD);
}

/**
 * percpu_ref_init - initialize a percpu refcount
 * @ref: percpu_ref to initialize
 * @release: function which will be called when refcount hits 0
 * @flags: PERCPU_REF_INIT_* flags
 * @gfp: allocation mask to use
 *
 * Initializes @ref. If @flags is zero, @ref starts in percpu mode with a
 * refcount of 1; analogous to atomic_long_set(ref, 1). See the
 * definitions of PERCPU_REF_INIT_* flags for flag behaviors.
 *
 * Note that @release must not sleep - it may potentially be called from RCU
 * callback context by percpu_ref_kill().
 */
int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
		    unsigned int flags, gfp_t gfp)
{
	size_t align = max_t(size_t, 1 << __PERCPU_REF_FLAG_BITS,
			     __alignof__(unsigned long));
	unsigned long start_count = 0;

	ref->percpu_count_ptr = (unsigned long)
		__alloc_percpu_gfp(sizeof(unsigned long), align, gfp);
	if (!ref->percpu_count_ptr)
		return -ENOMEM;

	ref->force_atomic = flags & PERCPU_REF_INIT_ATOMIC;

	if (flags & (PERCPU_REF_INIT_ATOMIC | PERCPU_REF_INIT_DEAD))
		ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
	else
		start_count += PERCPU_COUNT_BIAS;

	if (flags & PERCPU_REF_INIT_DEAD)
		ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
	else
		start_count++;

	atomic_long_set(&ref->count, start_count);

	ref->release = release;
	return 0;
}
EXPORT_SYMBOL_GPL(percpu_ref_init);
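
/*
 * Example usage (an illustrative sketch, not part of this file; struct
 * my_obj and the my_obj_* functions are made-up names): a refcounted
 * object embeds a percpu_ref, initializes it at creation time and tears
 * it down from the release callback.
 *
 *	struct my_obj {
 *		struct percpu_ref ref;
 *	};
 *
 *	static void my_obj_release(struct percpu_ref *ref)
 *	{
 *		struct my_obj *obj = container_of(ref, struct my_obj, ref);
 *
 *		percpu_ref_exit(&obj->ref);
 *		kfree(obj);
 *	}
 *
 *	static struct my_obj *my_obj_create(void)
 *	{
 *		struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *
 *		if (!obj)
 *			return NULL;
 *		if (percpu_ref_init(&obj->ref, my_obj_release, 0, GFP_KERNEL)) {
 *			kfree(obj);
 *			return NULL;
 *		}
 *		return obj;
 *	}
 *
 * The creator holds the initial ref; dropping it with percpu_ref_kill()
 * eventually triggers my_obj_release(), which is also the usual place to
 * call percpu_ref_exit().
 */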

/**
 * percpu_ref_exit - undo percpu_ref_init()
 * @ref: percpu_ref to exit
 *
 * This function exits @ref. The caller is responsible for ensuring that
 * @ref is no longer in active use. The usual places to invoke this
 * function are the @ref->release() callback and the init failure path
 * where percpu_ref_init() succeeded but other parts of the initialization
 * of the embedding object failed.
 */
void percpu_ref_exit(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);

	if (percpu_count) {
		free_percpu(percpu_count);
		ref->percpu_count_ptr = __PERCPU_REF_ATOMIC_DEAD;
	}
}
EXPORT_SYMBOL_GPL(percpu_ref_exit);

static void percpu_ref_call_confirm_rcu(struct rcu_head *rcu)
{
	struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);

	ref->confirm_switch(ref);
	ref->confirm_switch = NULL;
	wake_up_all(&percpu_ref_switch_waitq);

	/* drop ref from percpu_ref_switch_to_atomic() */
	percpu_ref_put(ref);
}

static void percpu_ref_switch_to_atomic_rcu(struct rcu_head *rcu)
{
	struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);
	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
	unsigned long count = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		count += *per_cpu_ptr(percpu_count, cpu);

	pr_debug("global %ld percpu %ld",
		 atomic_long_read(&ref->count), (long)count);

	/*
	 * It's crucial that we sum the percpu counters _before_ adding the sum
	 * to &ref->count; since gets could be happening on one cpu while puts
	 * happen on another, adding a single cpu's count could cause
	 * @ref->count to hit 0 before we've got a consistent value - but the
	 * sum of all the counts will be consistent and correct.
	 *
	 * Subtracting the bias value then has to happen _after_ adding count to
	 * &ref->count; we need the bias value to prevent &ref->count from
	 * reaching 0 before we add the percpu counts. But doing it at the same
	 * time is equivalent and saves us atomic operations:
	 */
	atomic_long_add((long)count - PERCPU_COUNT_BIAS, &ref->count);

	WARN_ONCE(atomic_long_read(&ref->count) <= 0,
		  "percpu ref (%pf) <= 0 (%ld) after switching to atomic",
		  ref->release, atomic_long_read(&ref->count));

	/* @ref is viewed as dead on all CPUs, send out switch confirmation */
	percpu_ref_call_confirm_rcu(rcu);
}

static void percpu_ref_noop_confirm_switch(struct percpu_ref *ref)
{
}

static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref,
					  percpu_ref_func_t *confirm_switch)
{
	/*
	 * If the previous ATOMIC switching hasn't finished yet, wait for
	 * its completion. If the caller ensures that ATOMIC switching
	 * isn't in progress, this function can be called from any context.
	 * Do an extra confirm_switch test to circumvent the unconditional
	 * might_sleep() in wait_event().
	 */
	if (ref->confirm_switch)
		wait_event(percpu_ref_switch_waitq, !ref->confirm_switch);

	if (ref->percpu_count_ptr & __PERCPU_REF_ATOMIC) {
		if (confirm_switch)
			confirm_switch(ref);
		return;
	}

	/* switching from percpu to atomic */
	ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;

	/*
	 * Non-NULL ->confirm_switch is used to indicate that switching is
	 * in progress. Use noop one if unspecified.
	 */
	ref->confirm_switch = confirm_switch ?: percpu_ref_noop_confirm_switch;

	percpu_ref_get(ref);	/* put after confirmation */
	call_rcu_sched(&ref->rcu, percpu_ref_switch_to_atomic_rcu);
}

static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
	int cpu;

	/*
	 * If the previous ATOMIC switching hasn't finished yet, wait for
	 * its completion. If the caller ensures that ATOMIC switching
	 * isn't in progress, this function can be called from any context.
	 * Do an extra confirm_switch test to circumvent the unconditional
	 * might_sleep() in wait_event().
	 */
	if (ref->confirm_switch)
		wait_event(percpu_ref_switch_waitq, !ref->confirm_switch);

	BUG_ON(!percpu_count);

	if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC))
		return;

	atomic_long_add(PERCPU_COUNT_BIAS, &ref->count);

	/*
	 * Restore per-cpu operation. smp_store_release() is paired with
	 * smp_read_barrier_depends() in __ref_is_percpu() and guarantees
	 * that the zeroing is visible to all percpu accesses which can see
	 * the following __PERCPU_REF_ATOMIC clearing.
	 */
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(percpu_count, cpu) = 0;

	smp_store_release(&ref->percpu_count_ptr,
			  ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC);
}

/**
 * percpu_ref_switch_to_atomic - switch a percpu_ref to atomic mode
 * @ref: percpu_ref to switch to atomic mode
 * @confirm_switch: optional confirmation callback
 *
 * There's no reason to use this function for the usual reference counting.
 * Use percpu_ref_kill[_and_confirm]().
 *
 * Schedule switching of @ref to atomic mode. All its percpu counts will
 * be collected to the main atomic counter. On completion, when all CPUs
 * are guaranteed to be in atomic mode, @confirm_switch, which may not
 * block, is invoked. This function may be invoked concurrently with all
 * the get/put operations and can safely be mixed with kill and reinit
 * operations. Note that @ref will stay in atomic mode across kill/reinit
 * cycles until percpu_ref_switch_to_percpu() is called.
 *
 * This function normally doesn't block and can be called from any context
 * but it may block if @confirm_switch is specified and @ref is already in
 * the process of switching to atomic mode. In such cases, @confirm_switch
 * will be invoked after the switching is complete.
 */
void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_switch)
{
	ref->force_atomic = true;
	__percpu_ref_switch_to_atomic(ref, confirm_switch);
}
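
/*
 * A hedged sketch of switching to atomic mode around some drain-style
 * operation (hypothetical names: struct my_obj and its atomic_done
 * completion are made up for this sketch, not part of any real API):
 *
 *	static void my_obj_confirm_atomic(struct percpu_ref *ref)
 *	{
 *		struct my_obj *obj = container_of(ref, struct my_obj, ref);
 *
 *		complete(&obj->atomic_done);
 *	}
 *
 *	reinit_completion(&obj->atomic_done);
 *	percpu_ref_switch_to_atomic(&obj->ref, my_obj_confirm_atomic);
 *	wait_for_completion(&obj->atomic_done);
 *
 * my_obj_confirm_atomic() runs from RCU callback context and thus may
 * not block; complete() is safe to call there.
 */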

/**
 * percpu_ref_switch_to_percpu - switch a percpu_ref to percpu mode
 * @ref: percpu_ref to switch to percpu mode
 *
 * There's no reason to use this function for the usual reference counting.
 * To re-use an expired ref, use percpu_ref_reinit().
 *
 * Switch @ref to percpu mode. This function may be invoked concurrently
 * with all the get/put operations and can safely be mixed with kill and
 * reinit operations. This function reverses the sticky atomic state set
 * by PERCPU_REF_INIT_ATOMIC or percpu_ref_switch_to_atomic(). If @ref is
 * dying or dead, the actual switching takes place on the following
 * percpu_ref_reinit().
 *
 * This function normally doesn't block and can be called from any context
 * but it may block if @ref is in the process of switching to atomic mode
 * by percpu_ref_switch_to_atomic().
 */
void percpu_ref_switch_to_percpu(struct percpu_ref *ref)
{
	ref->force_atomic = false;

	/* a dying or dead ref can't be switched to percpu mode w/o reinit */
	if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD))
		__percpu_ref_switch_to_percpu(ref);
}
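
/*
 * Continuing the sketch above percpu_ref_switch_to_atomic(): once the
 * atomic-mode work is done, a still-live ref can be flipped back to
 * percpu operation:
 *
 *	percpu_ref_switch_to_percpu(&obj->ref);
 *
 * If the ref has been killed in the meantime, the actual switch is
 * deferred until percpu_ref_reinit(), as noted above.
 */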

/**
 * percpu_ref_kill_and_confirm - drop the initial ref and schedule confirmation
 * @ref: percpu_ref to kill
 * @confirm_kill: optional confirmation callback
 *
 * Equivalent to percpu_ref_kill() but also schedules kill confirmation if
 * @confirm_kill is not NULL. @confirm_kill, which may not block, will be
 * called after @ref is seen as dead from all CPUs at which point all
 * further invocations of percpu_ref_tryget_live() will fail. See
 * percpu_ref_tryget_live() for details.
 *
 * This function normally doesn't block and can be called from any context
 * but it may block if @confirm_kill is specified and @ref is in the
 * process of switching to atomic mode by percpu_ref_switch_to_atomic().
 */
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_kill)
{
	WARN_ONCE(ref->percpu_count_ptr & __PERCPU_REF_DEAD,
		  "%s called more than once on %pf!", __func__, ref->release);

	ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
	__percpu_ref_switch_to_atomic(ref, confirm_kill);
	percpu_ref_put(ref);
}
EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
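
/*
 * A minimal sketch of a confirmed kill (hypothetical my_obj and
 * drain_work names again): @confirm_kill runs once no CPU can succeed in
 * a new percpu_ref_tryget_live(), which makes it a convenient point to
 * kick off draining the remaining refs. Like @release, it may be called
 * from RCU callback context and must not block.
 *
 *	static void my_obj_confirm_kill(struct percpu_ref *ref)
 *	{
 *		struct my_obj *obj = container_of(ref, struct my_obj, ref);
 *
 *		schedule_work(&obj->drain_work);
 *	}
 *
 *	percpu_ref_kill_and_confirm(&obj->ref, my_obj_confirm_kill);
 */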

/**
 * percpu_ref_reinit - re-initialize a percpu refcount
 * @ref: percpu_ref to re-initialize
 *
 * Re-initialize @ref so that it's in the same state as when it finished
 * percpu_ref_init() ignoring %PERCPU_REF_INIT_DEAD. @ref must have been
 * initialized successfully and reached 0 but not exited.
 *
 * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
 * this function is in progress.
 */
void percpu_ref_reinit(struct percpu_ref *ref)
{
	WARN_ON_ONCE(!percpu_ref_is_zero(ref));

	ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD;
	percpu_ref_get(ref);
	if (!ref->force_atomic)
		__percpu_ref_switch_to_percpu(ref);
}
EXPORT_SYMBOL_GPL(percpu_ref_reinit);
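
/*
 * Sketch of a kill/reinit cycle (hypothetical my_obj whose @release does
 * not free the object; its release_waitq, woken from @release, is made
 * up for this sketch): once the ref has been killed and every ref
 * including the initial one has been dropped, i.e. percpu_ref_is_zero()
 * is true, the ref can be resurrected:
 *
 *	percpu_ref_kill(&obj->ref);
 *	wait_event(obj->release_waitq, percpu_ref_is_zero(&obj->ref));
 *	percpu_ref_reinit(&obj->ref);
 *
 * Afterwards @ref is live again with a refcount of 1 and, unless it is
 * forced atomic by PERCPU_REF_INIT_ATOMIC or a prior
 * percpu_ref_switch_to_atomic(), back in percpu mode.
 */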