/*
 * Percpu refcounts:
 * (C) 2012 Google, Inc.
 * Author: Kent Overstreet <koverstreet@google.com>
 *
 * This implements a refcount with similar semantics to atomic_t - atomic_inc(),
 * atomic_dec_and_test() - but percpu.
 *
 * There's one important difference between percpu refs and normal atomic_t
 * refcounts; you have to keep track of your initial refcount, and then when you
 * start shutting down you call percpu_ref_kill() _before_ dropping the initial
 * refcount.
 *
 * The refcount will have a range of 0 to ((1U << 31) - 1), i.e. one bit less
 * than an atomic_t - this is because of the way shutdown works, see
 * percpu_ref_kill()/PCPU_COUNT_BIAS.
 *
 * Before you call percpu_ref_kill(), percpu_ref_put() does not check for the
 * refcount hitting 0 - it can't, if it was in percpu mode. percpu_ref_kill()
 * puts the ref back in single atomic_t mode, collecting the per cpu refs and
 * issuing the appropriate barriers, and then marks the ref as shutting down so
 * that percpu_ref_put() will check for the ref hitting 0. After it returns,
 * it's safe to drop the initial ref.
 *
 * USAGE:
 *
 * See fs/aio.c for some example usage; it's used there for struct kioctx, which
 * is created when userspace calls io_setup(), and destroyed when userspace
 * calls io_destroy() or the process exits.
 *
 * In the aio code, kill_ioctx() is called when we wish to destroy a kioctx; it
 * calls percpu_ref_kill(), then hlist_del_rcu() and synchronize_rcu() to remove
 * the kioctx from the process's list of kioctxs - after that, there can't be
 * any new users of the kioctx (from lookup_ioctx()) and it's then safe to drop
 * the initial ref with percpu_ref_put().
 *
 * Code that does a two-stage shutdown like this often needs some kind of
 * explicit synchronization to ensure the initial refcount can only be dropped
 * once - percpu_ref_kill() must be called exactly once, so the code needs
 * some other mechanism to synchronize teardown.
 */
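
/*
 * A minimal sketch of the above pattern, for illustration only.  struct foo
 * and foo_release() are hypothetical names, and error handling (e.g. checking
 * the return value of percpu_ref_init()) is elided:
 *
 *	struct foo {
 *		struct percpu_ref	ref;
 *	};
 *
 *	static void foo_release(struct percpu_ref *ref)
 *	{
 *		kfree(container_of(ref, struct foo, ref));
 *	}
 *
 *	Creation takes the initial ref:
 *
 *		percpu_ref_init(&foo->ref, foo_release);
 *
 *	Each user bumps and drops its own ref:
 *
 *		percpu_ref_get(&foo->ref);
 *		... use foo ...
 *		percpu_ref_put(&foo->ref);
 *
 *	Teardown, after foo has been made unreachable to new users:
 *
 *		percpu_ref_kill(&foo->ref);
 *		percpu_ref_put(&foo->ref);
 *
 *	foo_release() is called once the last ref is dropped.
 */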

#ifndef _LINUX_PERCPU_REFCOUNT_H
#define _LINUX_PERCPU_REFCOUNT_H

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>

struct percpu_ref;
typedef void (percpu_ref_func_t)(struct percpu_ref *);

struct percpu_ref {
	atomic_t		count;
	/*
	 * The low bit of the pointer indicates whether the ref is in percpu
	 * mode; if set, then get/put will manipulate the atomic_t (this is a
	 * hack because we need to keep the pointer around for
	 * percpu_ref_kill_rcu())
	 */
	unsigned __percpu	*pcpu_count;
	percpu_ref_func_t	*release;
	percpu_ref_func_t	*confirm_kill;
	struct rcu_head		rcu;
};

int __must_check percpu_ref_init(struct percpu_ref *ref,
				 percpu_ref_func_t *release);
void percpu_ref_cancel_init(struct percpu_ref *ref);
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_kill);

/**
 * percpu_ref_kill - drop the initial ref
 * @ref: percpu_ref to kill
 *
 * Must be used to drop the initial ref on a percpu refcount; must be called
 * precisely once before shutdown.
 *
 * Puts @ref in non percpu mode, then does a call_rcu() before gathering up the
 * percpu counters and dropping the initial ref.
 */
static inline void percpu_ref_kill(struct percpu_ref *ref)
{
	return percpu_ref_kill_and_confirm(ref, NULL);
}
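
/*
 * A sketch of percpu_ref_kill_and_confirm() usage, assuming a hypothetical
 * struct foo that embeds the ref and a struct completion:
 *
 *	static void foo_confirm_kill(struct percpu_ref *ref)
 *	{
 *		struct foo *foo = container_of(ref, struct foo, ref);
 *
 *		complete(&foo->confirm_done);
 *	}
 *
 *	percpu_ref_kill_and_confirm(&foo->ref, foo_confirm_kill);
 *	wait_for_completion(&foo->confirm_done);
 *
 *	Once the completion fires, percpu_ref_tryget_live() is guaranteed to
 *	fail (see below), so it's safe to drop the initial ref:
 *
 *	percpu_ref_put(&foo->ref);
 */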

#define PCPU_STATUS_BITS	2
#define PCPU_STATUS_MASK	((1 << PCPU_STATUS_BITS) - 1)
#define PCPU_REF_PTR		0
#define PCPU_REF_DEAD		1

#define REF_STATUS(count)	(((unsigned long) count) & PCPU_STATUS_MASK)

/**
 * percpu_ref_get - increment a percpu refcount
 * @ref: percpu_ref to get
 *
 * Analogous to atomic_inc().
 */
static inline void percpu_ref_get(struct percpu_ref *ref)
{
	unsigned __percpu *pcpu_count;

	rcu_read_lock_sched();

	pcpu_count = ACCESS_ONCE(ref->pcpu_count);

	if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR))
		this_cpu_inc(*pcpu_count);
	else
		atomic_inc(&ref->count);

	rcu_read_unlock_sched();
}

/**
 * percpu_ref_tryget - try to increment a percpu refcount
 * @ref: percpu_ref to try-get
 *
 * Increment a percpu refcount unless its count already reached zero.
 * Returns %true on success; %false on failure.
 *
 * The caller is responsible for ensuring that @ref stays accessible.
 */
static inline bool percpu_ref_tryget(struct percpu_ref *ref)
{
	unsigned __percpu *pcpu_count;
	bool ret = false;

	rcu_read_lock_sched();

	pcpu_count = ACCESS_ONCE(ref->pcpu_count);

	if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR)) {
		this_cpu_inc(*pcpu_count);
		ret = true;
	} else {
		ret = atomic_inc_not_zero(&ref->count);
	}

	rcu_read_unlock_sched();

	return ret;
}
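
/*
 * For illustration (hypothetical foo as above): percpu_ref_tryget() only
 * checks that the count hasn't reached zero, so it can still succeed after
 * percpu_ref_kill().  It's suitable when the caller merely needs to pin an
 * object it can already reach safely:
 *
 *	if (percpu_ref_tryget(&foo->ref)) {
 *		... foo is pinned until the matching put ...
 *		percpu_ref_put(&foo->ref);
 *	}
 */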

/**
 * percpu_ref_tryget_live - try to increment a live percpu refcount
 * @ref: percpu_ref to try-get
 *
 * Increment a percpu refcount unless it has already been killed. Returns
 * %true on success; %false on failure.
 *
 * Completion of percpu_ref_kill() in itself doesn't guarantee that tryget
 * will fail. For such a guarantee, percpu_ref_kill_and_confirm() should be
 * used. After the confirm_kill callback is invoked, it's guaranteed that
 * no new reference will be given out by percpu_ref_tryget_live().
 *
 * The caller is responsible for ensuring that @ref stays accessible.
 */
static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
{
	unsigned __percpu *pcpu_count;
	bool ret = false;

	rcu_read_lock_sched();

	pcpu_count = ACCESS_ONCE(ref->pcpu_count);

	if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR)) {
		this_cpu_inc(*pcpu_count);
		ret = true;
	}

	rcu_read_unlock_sched();

	return ret;
}
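
/*
 * A sketch of the lookup pattern percpu_ref_tryget_live() is intended for,
 * along the lines of lookup_ioctx() in fs/aio.c (the names and the lookup
 * structure here are hypothetical; the structure is assumed to be RCU
 * protected):
 *
 *	rcu_read_lock();
 *	foo = radix_tree_lookup(&foo_tree, id);
 *	if (foo && !percpu_ref_tryget_live(&foo->ref))
 *		foo = NULL;	shutdown has begun, refuse new users
 *	rcu_read_unlock();
 *
 *	if (foo) {
 *		... use foo ...
 *		percpu_ref_put(&foo->ref);
 *	}
 */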

/**
 * percpu_ref_put - decrement a percpu refcount
 * @ref: percpu_ref to put
 *
 * Decrement the refcount, and if it drops to 0, call the release function
 * (which was passed to percpu_ref_init()).
 */
static inline void percpu_ref_put(struct percpu_ref *ref)
{
	unsigned __percpu *pcpu_count;

	rcu_read_lock_sched();

	pcpu_count = ACCESS_ONCE(ref->pcpu_count);

	if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR))
		this_cpu_dec(*pcpu_count);
	else if (unlikely(atomic_dec_and_test(&ref->count)))
		ref->release(ref);

	rcu_read_unlock_sched();
}

#endif