#define pr_fmt(fmt) "%s: " fmt "\n", __func__

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/percpu-refcount.h>

/*
 * Initially, a percpu refcount is just a set of percpu counters, and we
 * don't try to detect the ref hitting 0 - which means that get/put can just
 * increment or decrement the local counter. Note that the counter on a
 * particular cpu can (and will) wrap - this is fine; when we go to shutdown,
 * the percpu counters will all sum to the correct value.
 *
 * (More precisely: because modular arithmetic is commutative, the sum of all
 * the percpu_count vars will be equal to what it would have been if all the
 * gets and puts were done to a single integer, even if some of the percpu
 * integers overflow or underflow.)
 *
 * The real trick to implementing percpu refcounts is shutdown. We can't detect
 * the ref hitting 0 on every put - this would require global synchronization
 * and defeat the whole purpose of using percpu refs.
 *
 * What we do is require the user to keep track of the initial refcount; we
 * know the ref can't hit 0 before the user drops the initial ref, so as long
 * as we convert to non-percpu mode before the initial ref is dropped,
 * everything works.
 *
 * Converting to non-percpu mode is done with some RCUish stuff in
 * percpu_ref_kill. Additionally, we need a bias value so that the
 * atomic_long_t can't hit 0 before we've added up all the percpu refs.
 */
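
/*
 * A minimal usage sketch (the struct, callback and object names below are
 * hypothetical, not part of this file):
 *
 *	struct my_obj {
 *		struct percpu_ref ref;
 *	};
 *
 *	static void my_obj_release(struct percpu_ref *ref)
 *	{
 *		struct my_obj *obj = container_of(ref, struct my_obj, ref);
 *
 *		percpu_ref_exit(ref);
 *		kfree(obj);
 *	}
 *
 *	percpu_ref_init(&obj->ref, my_obj_release, 0, GFP_KERNEL);
 *
 *	percpu_ref_get(&obj->ref);	(cheap percpu increment on hot paths)
 *	percpu_ref_put(&obj->ref);	(cheap percpu decrement)
 *
 *	percpu_ref_kill(&obj->ref);	(drop the initial ref; once the count
 *					 reaches 0, my_obj_release() runs)
 */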

#define PERCPU_COUNT_BIAS	(1LU << (BITS_PER_LONG - 1))

static DEFINE_SPINLOCK(percpu_ref_switch_lock);
static DECLARE_WAIT_QUEUE_HEAD(percpu_ref_switch_waitq);

static unsigned long __percpu *percpu_count_ptr(struct percpu_ref *ref)
{
	return (unsigned long __percpu *)
		(ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC_DEAD);
}

/**
 * percpu_ref_init - initialize a percpu refcount
 * @ref: percpu_ref to initialize
 * @release: function which will be called when refcount hits 0
 * @flags: PERCPU_REF_INIT_* flags
 * @gfp: allocation mask to use
 *
 * Initializes @ref.  If @flags is zero, @ref starts in percpu mode with a
 * refcount of 1; analogous to atomic_long_set(ref, 1).  See the
 * definitions of PERCPU_REF_INIT_* flags for flag behaviors.
 *
 * Note that @release must not sleep - it may potentially be called from RCU
 * callback context by percpu_ref_kill().
 */
int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
		    unsigned int flags, gfp_t gfp)
{
	size_t align = max_t(size_t, 1 << __PERCPU_REF_FLAG_BITS,
			     __alignof__(unsigned long));
	unsigned long start_count = 0;

	ref->percpu_count_ptr = (unsigned long)
		__alloc_percpu_gfp(sizeof(unsigned long), align, gfp);
	if (!ref->percpu_count_ptr)
		return -ENOMEM;

	ref->force_atomic = flags & PERCPU_REF_INIT_ATOMIC;

	if (flags & (PERCPU_REF_INIT_ATOMIC | PERCPU_REF_INIT_DEAD))
		ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
	else
		start_count += PERCPU_COUNT_BIAS;

	if (flags & PERCPU_REF_INIT_DEAD)
		ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
	else
		start_count++;

	atomic_long_set(&ref->count, start_count);

	ref->release = release;
	ref->confirm_switch = NULL;
	return 0;
}
EXPORT_SYMBOL_GPL(percpu_ref_init);
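
/*
 * For illustration, a hypothetical caller that wants to start in atomic
 * mode and handle allocation failure might do:
 *
 *	int ret = percpu_ref_init(&obj->ref, my_obj_release,
 *				  PERCPU_REF_INIT_ATOMIC, GFP_KERNEL);
 *	if (ret)
 *		return ret;	(-ENOMEM: the percpu counter allocation failed)
 */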

/**
 * percpu_ref_exit - undo percpu_ref_init()
 * @ref: percpu_ref to exit
 *
 * This function exits @ref.  The caller is responsible for ensuring that
 * @ref is no longer in active use.  The usual places to invoke this
 * function from are the @ref->release() callback or the init failure path
 * where percpu_ref_init() succeeded but other parts of the initialization
 * of the embedding object failed.
 */
void percpu_ref_exit(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);

	if (percpu_count) {
		/* non-NULL confirm_switch indicates switching in progress */
		WARN_ON_ONCE(ref->confirm_switch);
		free_percpu(percpu_count);
		ref->percpu_count_ptr = __PERCPU_REF_ATOMIC_DEAD;
	}
}
EXPORT_SYMBOL_GPL(percpu_ref_exit);
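
/*
 * Sketch of the init-failure path mentioned above (my_obj_setup() is a
 * hypothetical stand-in for the rest of the embedding object's init):
 *
 *	ret = percpu_ref_init(&obj->ref, my_obj_release, 0, GFP_KERNEL);
 *	if (ret)
 *		goto err_free_obj;
 *	ret = my_obj_setup(obj);
 *	if (ret) {
 *		percpu_ref_exit(&obj->ref);	(ref was never used)
 *		goto err_free_obj;
 *	}
 */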

static void percpu_ref_call_confirm_rcu(struct rcu_head *rcu)
{
	struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);

	ref->confirm_switch(ref);
	ref->confirm_switch = NULL;
	wake_up_all(&percpu_ref_switch_waitq);

	/* drop ref from percpu_ref_switch_to_atomic() */
	percpu_ref_put(ref);
}

static void percpu_ref_switch_to_atomic_rcu(struct rcu_head *rcu)
{
	struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);
	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
	unsigned long count = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		count += *per_cpu_ptr(percpu_count, cpu);

	pr_debug("global %ld percpu %ld",
		 atomic_long_read(&ref->count), (long)count);

	/*
	 * It's crucial that we sum the percpu counters _before_ adding the sum
	 * to &ref->count; since gets could be happening on one cpu while puts
	 * happen on another, adding a single cpu's count could cause
	 * @ref->count to hit 0 before we've got a consistent value - but the
	 * sum of all the counts will be consistent and correct.
	 *
	 * Subtracting the bias value then has to happen _after_ adding count to
	 * &ref->count; we need the bias value to prevent &ref->count from
	 * reaching 0 before we add the percpu counts. But doing it at the same
	 * time is equivalent and saves us atomic operations:
	 */
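	/*
	 * Worked example with illustrative numbers: on 64-bit,
	 * BIAS = 2^63.  Suppose the percpu counters sum to 3 and
	 * &ref->count holds BIAS + 1 (the initial ref).  Adding
	 * 3 - BIAS in one atomic op leaves BIAS + 1 + 3 - BIAS = 4,
	 * the true refcount, and &ref->count never passes through 0.
	 */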
	atomic_long_add((long)count - PERCPU_COUNT_BIAS, &ref->count);

	WARN_ONCE(atomic_long_read(&ref->count) <= 0,
		  "percpu ref (%pf) <= 0 (%ld) after switching to atomic",
		  ref->release, atomic_long_read(&ref->count));

	/* @ref is viewed as dead on all CPUs, send out switch confirmation */
	percpu_ref_call_confirm_rcu(rcu);
}

static void percpu_ref_noop_confirm_switch(struct percpu_ref *ref)
{
}

static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref,
					  percpu_ref_func_t *confirm_switch)
{
	if (ref->percpu_count_ptr & __PERCPU_REF_ATOMIC) {
		if (confirm_switch)
			confirm_switch(ref);
		return;
	}

	/* switching from percpu to atomic */
	ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;

	/*
	 * Non-NULL ->confirm_switch is used to indicate that switching is
	 * in progress.  Use noop one if unspecified.
	 */
	ref->confirm_switch = confirm_switch ?: percpu_ref_noop_confirm_switch;

	percpu_ref_get(ref);	/* put after confirmation */
	call_rcu_sched(&ref->rcu, percpu_ref_switch_to_atomic_rcu);
}

static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
	int cpu;

	BUG_ON(!percpu_count);

	if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC))
		return;

	atomic_long_add(PERCPU_COUNT_BIAS, &ref->count);

	/*
	 * Restore per-cpu operation.  smp_store_release() is paired with
	 * smp_read_barrier_depends() in __ref_is_percpu() and guarantees
	 * that the zeroing is visible to all percpu accesses which can see
	 * the following __PERCPU_REF_ATOMIC clearing.
	 */
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(percpu_count, cpu) = 0;

	smp_store_release(&ref->percpu_count_ptr,
			  ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC);
}

static void __percpu_ref_switch_mode(struct percpu_ref *ref,
				     percpu_ref_func_t *confirm_switch)
{
	lockdep_assert_held(&percpu_ref_switch_lock);

	/*
	 * If the previous ATOMIC switching hasn't finished yet, wait for
	 * its completion.  If the caller ensures that ATOMIC switching
	 * isn't in progress, this function can be called from any context.
	 */
	wait_event_lock_irq(percpu_ref_switch_waitq, !ref->confirm_switch,
			    percpu_ref_switch_lock);

	if (ref->force_atomic || (ref->percpu_count_ptr & __PERCPU_REF_DEAD))
		__percpu_ref_switch_to_atomic(ref, confirm_switch);
	else
		__percpu_ref_switch_to_percpu(ref);
}

/**
 * percpu_ref_switch_to_atomic - switch a percpu_ref to atomic mode
 * @ref: percpu_ref to switch to atomic mode
 * @confirm_switch: optional confirmation callback
 *
 * There's no reason to use this function for the usual reference counting.
 * Use percpu_ref_kill[_and_confirm]().
 *
 * Schedule switching of @ref to atomic mode.  All its percpu counts will
 * be collected to the main atomic counter.  On completion, when all CPUs
 * are guaranteed to be in atomic mode, @confirm_switch, which may not
 * block, is invoked.  This function may be invoked concurrently with all
 * the get/put operations and can safely be mixed with kill and reinit
 * operations.  Note that @ref will stay in atomic mode across kill/reinit
 * cycles until percpu_ref_switch_to_percpu() is called.
 *
 * This function may block if @ref is in the process of switching to atomic
 * mode.  If the caller ensures that @ref is not in the process of
 * switching to atomic mode, this function can be called from any context.
 */
void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_switch)
{
	unsigned long flags;

	spin_lock_irqsave(&percpu_ref_switch_lock, flags);

	ref->force_atomic = true;
	__percpu_ref_switch_mode(ref, confirm_switch);

	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
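
/*
 * For illustration (hypothetical callback and completion; @confirm_switch
 * runs from RCU callback context and must not block):
 *
 *	static void my_confirm_switch(struct percpu_ref *ref)
 *	{
 *		complete(&my_switch_done);
 *	}
 *
 *	percpu_ref_switch_to_atomic(&obj->ref, my_confirm_switch);
 *	wait_for_completion(&my_switch_done);
 *	(from here on, all CPUs operate on the single atomic counter)
 */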

/**
 * percpu_ref_switch_to_percpu - switch a percpu_ref to percpu mode
 * @ref: percpu_ref to switch to percpu mode
 *
 * There's no reason to use this function for the usual reference counting.
 * To re-use an expired ref, use percpu_ref_reinit().
 *
 * Switch @ref to percpu mode.  This function may be invoked concurrently
 * with all the get/put operations and can safely be mixed with kill and
 * reinit operations.  This function reverses the sticky atomic state set
 * by PERCPU_REF_INIT_ATOMIC or percpu_ref_switch_to_atomic().  If @ref is
 * dying or dead, the actual switching takes place on the following
 * percpu_ref_reinit().
 *
 * This function may block if @ref is in the process of switching to atomic
 * mode.  If the caller ensures that @ref is not in the process of
 * switching to atomic mode, this function can be called from any context.
 */
void percpu_ref_switch_to_percpu(struct percpu_ref *ref)
{
	unsigned long flags;

	spin_lock_irqsave(&percpu_ref_switch_lock, flags);

	ref->force_atomic = false;
	__percpu_ref_switch_mode(ref, NULL);

	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
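
/*
 * For illustration, a hypothetical temporary atomic phase might look like:
 *
 *	percpu_ref_switch_to_atomic(&obj->ref, NULL);
 *	(... some phase that wants the single atomic counter ...)
 *	percpu_ref_switch_to_percpu(&obj->ref);
 *	(gets/puts go back to cheap percpu counting)
 */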

/**
 * percpu_ref_kill_and_confirm - drop the initial ref and schedule confirmation
 * @ref: percpu_ref to kill
 * @confirm_kill: optional confirmation callback
 *
 * Equivalent to percpu_ref_kill() but also schedules kill confirmation if
 * @confirm_kill is not NULL.  @confirm_kill, which may not block, will be
 * called after @ref is seen as dead from all CPUs at which point all
 * further invocations of percpu_ref_tryget_live() will fail.  See
 * percpu_ref_tryget_live() for details.
 *
 * This function normally doesn't block and can be called from any context
 * but it may block if @confirm_kill is specified and @ref is in the
 * process of switching to atomic mode by percpu_ref_switch_to_atomic().
 */
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_kill)
{
	unsigned long flags;

	spin_lock_irqsave(&percpu_ref_switch_lock, flags);

	WARN_ONCE(ref->percpu_count_ptr & __PERCPU_REF_DEAD,
		  "%s called more than once on %pf!", __func__, ref->release);

	ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
	__percpu_ref_switch_mode(ref, confirm_kill);
	percpu_ref_put(ref);

	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
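
/*
 * For illustration (hypothetical names; @confirm_kill may not block, so
 * defer any heavy teardown to a workqueue):
 *
 *	static void my_confirm_kill(struct percpu_ref *ref)
 *	{
 *		struct my_obj *obj = container_of(ref, struct my_obj, ref);
 *
 *		schedule_work(&obj->teardown_work);
 *	}
 *
 *	percpu_ref_kill_and_confirm(&obj->ref, my_confirm_kill);
 *	(after my_confirm_kill() runs, percpu_ref_tryget_live() always fails)
 */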

/**
 * percpu_ref_reinit - re-initialize a percpu refcount
 * @ref: percpu_ref to re-initialize
 *
 * Re-initialize @ref so that it's in the same state as when it finished
 * percpu_ref_init() ignoring %PERCPU_REF_INIT_DEAD.  @ref must have been
 * initialized successfully and reached 0 but not exited.
 *
 * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
 * this function is in progress.
 */
void percpu_ref_reinit(struct percpu_ref *ref)
{
	unsigned long flags;

	spin_lock_irqsave(&percpu_ref_switch_lock, flags);

	WARN_ON_ONCE(!percpu_ref_is_zero(ref));

	ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD;
	percpu_ref_get(ref);
	__percpu_ref_switch_mode(ref, NULL);

	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_reinit);
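
/*
 * For illustration, a hypothetical recycle cycle (the release callback
 * here only signals completion instead of freeing, so the ref can be
 * reinitialized):
 *
 *	percpu_ref_kill(&obj->ref);
 *	wait_for_completion(&obj->released);	(count reached 0)
 *	percpu_ref_reinit(&obj->ref);		(back to percpu mode, count 1)
 */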