/*
 * Percpu refcounts:
 * (C) 2012 Google, Inc.
 * Author: Kent Overstreet <koverstreet@google.com>
 *
 * This implements a refcount with similar semantics to atomic_t - atomic_inc(),
 * atomic_dec_and_test() - but percpu.
 *
 * There's one important difference between percpu refs and normal atomic_t
 * refcounts; you have to keep track of your initial refcount, and then when you
 * start shutting down you call percpu_ref_kill() _before_ dropping the initial
 * refcount.
 *
 * The refcount will have a range of 0 to ((1U << 31) - 1), i.e. one bit less
 * than an atomic_t - this is because of the way shutdown works, see
 * percpu_ref_kill()/PCPU_COUNT_BIAS.
 *
 * Before you call percpu_ref_kill(), percpu_ref_put() does not check for the
 * refcount hitting 0 - it can't, if it was in percpu mode. percpu_ref_kill()
 * puts the ref back in single atomic_t mode, collecting the per cpu refs and
 * issuing the appropriate barriers, and then marks the ref as shutting down so
 * that percpu_ref_put() will check for the ref hitting 0. After it returns,
 * it's safe to drop the initial ref.
 *
 * USAGE:
 *
 * See fs/aio.c for some example usage; it's used there for struct kioctx, which
 * is created when userspace calls io_setup(), and destroyed when userspace
 * calls io_destroy() or the process exits.
 *
 * In the aio code, kill_ioctx() is called when we wish to destroy a kioctx; it
 * calls percpu_ref_kill(), then hlist_del_rcu() and synchronize_rcu() to remove
 * the kioctx from the process's list of kioctxs - after that, there can't be
 * any new users of the kioctx (from lookup_ioctx()) and it's then safe to drop
 * the initial ref with percpu_ref_put().
 *
 * Code that does a two stage shutdown like this often needs some kind of
 * explicit synchronization to ensure the initial refcount can only be dropped
 * once - percpu_ref_kill() itself must be called precisely once, so the caller
 * needs some other mechanism to synchronize teardown.
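 *
 * A rough sketch of the whole lifecycle, assuming a made-up struct foo that
 * embeds a percpu_ref and is freed by its release callback (an illustration,
 * not the aio code):
 *
 *	static void foo_release(struct percpu_ref *ref)
 *	{
 *		struct foo *foo = container_of(ref, struct foo, ref);
 *
 *		percpu_ref_exit(ref);
 *		kfree(foo);
 *	}
 *
 * Creation takes the initial ref:
 *
 *	if (percpu_ref_init(&foo->ref, foo_release))
 *		return -ENOMEM;
 *
 * Hot path users pair gets and puts just as they would with atomic_t:
 *
 *	percpu_ref_get(&foo->ref);
 *	percpu_ref_put(&foo->ref);
 *
 * Shutdown kills the ref first and then drops the initial ref; foo_release()
 * runs once every other ref has been put:
 *
 *	percpu_ref_kill(&foo->ref);
 *	percpu_ref_put(&foo->ref);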
 */

#ifndef _LINUX_PERCPU_REFCOUNT_H
#define _LINUX_PERCPU_REFCOUNT_H

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>

struct percpu_ref;
typedef void (percpu_ref_func_t)(struct percpu_ref *);

struct percpu_ref {
	atomic_t		count;
	/*
	 * The low bit of the pointer indicates whether the ref is in percpu
	 * mode; if set, then get/put will manipulate the atomic_t.
	 */
	unsigned long		pcpu_count_ptr;
	percpu_ref_func_t	*release;
	percpu_ref_func_t	*confirm_kill;
	struct rcu_head		rcu;
};

int __must_check percpu_ref_init(struct percpu_ref *ref,
				 percpu_ref_func_t *release);
void percpu_ref_reinit(struct percpu_ref *ref);
void percpu_ref_exit(struct percpu_ref *ref);
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_kill);

/**
 * percpu_ref_kill - drop the initial ref
 * @ref: percpu_ref to kill
 *
 * Must be used to drop the initial ref on a percpu refcount; must be called
 * precisely once before shutdown.
 *
 * Puts @ref in non percpu mode, then does a call_rcu() before gathering up the
 * percpu counters and dropping the initial ref.
 */
static inline void percpu_ref_kill(struct percpu_ref *ref)
{
	return percpu_ref_kill_and_confirm(ref, NULL);
}
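
/*
 * A hedged sketch of the confirm-kill side, reusing the made-up struct foo
 * from the comment at the top of this file and assuming it also embeds a
 * struct completion confirm_done initialized with init_completion().
 * percpu_ref_kill_and_confirm() invokes @confirm_kill once the switch to
 * atomic mode has completed, after which percpu_ref_tryget_live() is
 * guaranteed not to hand out new refs:
 *
 *	static void foo_confirm_kill(struct percpu_ref *ref)
 *	{
 *		struct foo *foo = container_of(ref, struct foo, ref);
 *
 *		complete(&foo->confirm_done);
 *	}
 *
 *	percpu_ref_kill_and_confirm(&foo->ref, foo_confirm_kill);
 *	wait_for_completion(&foo->confirm_done);
 *	percpu_ref_put(&foo->ref);
 */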

#define PCPU_REF_DEAD		1

/*
 * Internal helper. Don't use outside percpu-refcount proper. The
 * function doesn't return the pointer and let the caller test it for NULL
 * because doing so forces the compiler to generate two conditional
 * branches as it can't assume that @ref->pcpu_count is not NULL.
 */
static inline bool __pcpu_ref_alive(struct percpu_ref *ref,
				    unsigned __percpu **pcpu_countp)
{
	unsigned long pcpu_ptr = ACCESS_ONCE(ref->pcpu_count_ptr);

	/* paired with smp_store_release() in percpu_ref_reinit() */
	smp_read_barrier_depends();

	if (unlikely(pcpu_ptr & PCPU_REF_DEAD))
		return false;

	*pcpu_countp = (unsigned __percpu *)pcpu_ptr;
	return true;
}

/**
 * percpu_ref_get - increment a percpu refcount
 * @ref: percpu_ref to get
 *
 * Analogous to atomic_inc().
 */
static inline void percpu_ref_get(struct percpu_ref *ref)
{
	unsigned __percpu *pcpu_count;

	rcu_read_lock_sched();

	if (__pcpu_ref_alive(ref, &pcpu_count))
		this_cpu_inc(*pcpu_count);
	else
		atomic_inc(&ref->count);

	rcu_read_unlock_sched();
}

/**
 * percpu_ref_tryget - try to increment a percpu refcount
 * @ref: percpu_ref to try-get
 *
 * Increment a percpu refcount unless its count already reached zero.
 * Returns %true on success; %false on failure.
 *
 * The caller is responsible for ensuring that @ref stays accessible.
 */
static inline bool percpu_ref_tryget(struct percpu_ref *ref)
{
	unsigned __percpu *pcpu_count;
	int ret = false;

	rcu_read_lock_sched();

	if (__pcpu_ref_alive(ref, &pcpu_count)) {
		this_cpu_inc(*pcpu_count);
		ret = true;
	} else {
		ret = atomic_inc_not_zero(&ref->count);
	}

	rcu_read_unlock_sched();

	return ret;
}

/**
 * percpu_ref_tryget_live - try to increment a live percpu refcount
 * @ref: percpu_ref to try-get
 *
 * Increment a percpu refcount unless it has already been killed. Returns
 * %true on success; %false on failure.
 *
 * Completion of percpu_ref_kill() in itself doesn't guarantee that tryget
 * will fail. For such guarantee, percpu_ref_kill_and_confirm() should be
 * used. After the confirm_kill callback is invoked, it's guaranteed that
 * no new reference will be given out by percpu_ref_tryget_live().
 *
 * The caller is responsible for ensuring that @ref stays accessible.
 */
static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
{
	unsigned __percpu *pcpu_count;
	int ret = false;

	rcu_read_lock_sched();

	if (__pcpu_ref_alive(ref, &pcpu_count)) {
		this_cpu_inc(*pcpu_count);
		ret = true;
	}

	rcu_read_unlock_sched();

	return ret;
}
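
/*
 * A common lookup pattern, sketched with made-up names (foo_idr,
 * do_something_with()) rather than the aio code: find an object under
 * rcu_read_lock() and take a reference with percpu_ref_tryget_live() so that
 * objects which have already been killed are never handed out again. This
 * assumes removed objects are only freed after an RCU grace period, which is
 * what keeps @ref accessible during the tryget:
 *
 *	rcu_read_lock();
 *	foo = idr_find(&foo_idr, id);
 *	if (foo && !percpu_ref_tryget_live(&foo->ref))
 *		foo = NULL;
 *	rcu_read_unlock();
 *
 *	if (foo) {
 *		do_something_with(foo);
 *		percpu_ref_put(&foo->ref);
 *	}
 */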

/**
 * percpu_ref_put - decrement a percpu refcount
 * @ref: percpu_ref to put
 *
 * Decrement the refcount, and if it drops to 0, call the release function
 * (which was passed to percpu_ref_init()).
 */
static inline void percpu_ref_put(struct percpu_ref *ref)
{
	unsigned __percpu *pcpu_count;

	rcu_read_lock_sched();

	if (__pcpu_ref_alive(ref, &pcpu_count))
		this_cpu_dec(*pcpu_count);
	else if (unlikely(atomic_dec_and_test(&ref->count)))
		ref->release(ref);

	rcu_read_unlock_sched();
}

/**
 * percpu_ref_is_zero - test whether a percpu refcount reached zero
 * @ref: percpu_ref to test
 *
 * Returns %true if @ref reached zero.
 */
static inline bool percpu_ref_is_zero(struct percpu_ref *ref)
{
	unsigned __percpu *pcpu_count;

	if (__pcpu_ref_alive(ref, &pcpu_count))
		return false;
	return !atomic_read(&ref->count);
}

#endif