/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2006
 * Copyright (C) Fujitsu, 2012
 *
 * Author: Paul McKenney <paulmck@us.ibm.com>
 *	   Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU/ *.txt
 *
 */

#ifndef _LINUX_SRCU_H
#define _LINUX_SRCU_H

#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>

struct srcu_struct_array {
	unsigned long c[2];
	unsigned long seq[2];
};

struct rcu_batch {
	struct rcu_head *head, **tail;
};

#define RCU_BATCH_INIT(name) { NULL, &(name.head) }

| Paul E. McKenney | 621934e | 2006-10-04 02:17:02 -0700 | [diff] [blame] | 47 | struct srcu_struct { | 
| Paul E. McKenney | a5c198f | 2014-11-23 20:30:06 -0800 | [diff] [blame] | 48 | unsigned long completed; | 
| Tejun Heo | 43cf38e | 2010-02-02 14:38:57 +0900 | [diff] [blame] | 49 | struct srcu_struct_array __percpu *per_cpu_ref; | 
| Lai Jiangshan | 931ea9d | 2012-03-19 16:12:13 +0800 | [diff] [blame] | 50 | spinlock_t queue_lock; /* protect ->batch_queue, ->running */ | 
|  | 51 | bool running; | 
|  | 52 | /* callbacks just queued */ | 
|  | 53 | struct rcu_batch batch_queue; | 
|  | 54 | /* callbacks try to do the first check_zero */ | 
|  | 55 | struct rcu_batch batch_check0; | 
|  | 56 | /* callbacks done with the first check_zero and the flip */ | 
|  | 57 | struct rcu_batch batch_check1; | 
|  | 58 | struct rcu_batch batch_done; | 
|  | 59 | struct delayed_work work; | 
| Paul E. McKenney | 632ee20 | 2010-02-22 17:04:45 -0800 | [diff] [blame] | 60 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 
|  | 61 | struct lockdep_map dep_map; | 
|  | 62 | #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ | 
| Paul E. McKenney | 621934e | 2006-10-04 02:17:02 -0700 | [diff] [blame] | 63 | }; | 
|  | 64 |  | 
#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *sp, const char *name,
		       struct lock_class_key *key);

#define init_srcu_struct(sp) \
({ \
	static struct lock_class_key __srcu_key; \
\
	__init_srcu_struct((sp), #sp, &__srcu_key); \
})

#define __SRCU_DEP_MAP_INIT(srcu_name)	.dep_map = { .name = #srcu_name },
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

int init_srcu_struct(struct srcu_struct *sp);

#define __SRCU_DEP_MAP_INIT(srcu_name)
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

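/*
 * A minimal run-time initialization sketch, not part of this header:
 * "my_srcu", my_setup(), and my_teardown() are hypothetical names.
 *
 *	static struct srcu_struct my_srcu;
 *
 *	int my_setup(void)
 *	{
 *		return init_srcu_struct(&my_srcu);
 *	}
 *
 *	void my_teardown(void)
 *	{
 *		cleanup_srcu_struct(&my_srcu);
 *	}
 *
 * All callbacks must have been invoked before cleanup_srcu_struct(),
 * for example by first calling srcu_barrier() on my_srcu.
 */
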
void process_srcu(struct work_struct *work);

#define __SRCU_STRUCT_INIT(name)					\
	{								\
		.completed = -300,					\
		.per_cpu_ref = &name##_srcu_array,			\
		.queue_lock = __SPIN_LOCK_UNLOCKED(name.queue_lock),	\
		.running = false,					\
		.batch_queue = RCU_BATCH_INIT(name.batch_queue),	\
		.batch_check0 = RCU_BATCH_INIT(name.batch_check0),	\
		.batch_check1 = RCU_BATCH_INIT(name.batch_check1),	\
		.batch_done = RCU_BATCH_INIT(name.batch_done),		\
		.work = __DELAYED_WORK_INITIALIZER(name.work, process_srcu, 0),\
		__SRCU_DEP_MAP_INIT(name)				\
	}

/*
 * Define and initialize an srcu_struct at build time.
 * Don't call init_srcu_struct() or cleanup_srcu_struct() on it.
 */
#define __DEFINE_SRCU(name, is_static)					\
	static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
	is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name)
#define DEFINE_SRCU(name)		__DEFINE_SRCU(name, /* not static */)
#define DEFINE_STATIC_SRCU(name)	__DEFINE_SRCU(name, static)

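/*
 * For example (a sketch; "my_srcu" is a hypothetical name):
 *
 *	DEFINE_STATIC_SRCU(my_srcu);
 *
 * behaves like a file-scope static srcu_struct that is fully
 * initialized at build time, together with its per-CPU counter array.
 */
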
/**
 * call_srcu() - Queue a callback for invocation after an SRCU grace period
 * @sp: srcu_struct in which to queue the callback
 * @head: structure to be used for queueing the SRCU callback.
 * @func: function to be invoked after the SRCU grace period
 *
 * The callback function will be invoked some time after a full SRCU
 * grace period elapses, in other words after all pre-existing SRCU
 * read-side critical sections have completed.  However, the callback
 * function might well execute concurrently with other SRCU read-side
 * critical sections that started after call_srcu() was invoked.  SRCU
 * read-side critical sections are delimited by srcu_read_lock() and
 * srcu_read_unlock(), and may be nested.
 *
 * The callback will be invoked from process context, but must nevertheless
 * be fast and must not block.
 */
void call_srcu(struct srcu_struct *sp, struct rcu_head *head,
	       void (*func)(struct rcu_head *head));

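/*
 * Usage sketch, not part of this header ("struct my_obj", my_free_cb(),
 * and "my_srcu" are hypothetical names):
 *
 *	struct my_obj {
 *		struct rcu_head rh;
 *		...
 *	};
 *
 *	static void my_free_cb(struct rcu_head *head)
 *	{
 *		kfree(container_of(head, struct my_obj, rh));
 *	}
 *
 * Then, after unlinking "p" from all reader-visible structures:
 *
 *	call_srcu(&my_srcu, &p->rh, my_free_cb);
 */
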
void cleanup_srcu_struct(struct srcu_struct *sp);
int __srcu_read_lock(struct srcu_struct *sp) __acquires(sp);
void __srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp);
void synchronize_srcu(struct srcu_struct *sp);
void synchronize_srcu_expedited(struct srcu_struct *sp);
unsigned long srcu_batches_completed(struct srcu_struct *sp);
void srcu_barrier(struct srcu_struct *sp);

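/*
 * Typical update-side pattern (a sketch; "gp", "my_lock", and "my_srcu"
 * are hypothetical, and the update-side lock is assumed to be held):
 *
 *	old = rcu_dereference_protected(gp, lockdep_is_held(&my_lock));
 *	rcu_assign_pointer(gp, newp);
 *	synchronize_srcu(&my_srcu);  (waits for pre-existing readers)
 *	kfree(old);
 */
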
#ifdef CONFIG_DEBUG_LOCK_ALLOC

/**
 * srcu_read_lock_held - might we be in SRCU read-side critical section?
 * @sp: the srcu_struct whose read-side critical section might be held.
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an SRCU
 * read-side critical section.  In the absence of CONFIG_DEBUG_LOCK_ALLOC,
 * this assumes we are in an SRCU read-side critical section unless it can
 * prove otherwise.
 *
 * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that SRCU is based on its own state machine and does not rely on
 * normal RCU, so this may be called from a CPU that is, from an RCU point
 * of view, idle or even offline.
 */
static inline int srcu_read_lock_held(struct srcu_struct *sp)
{
	if (!debug_lockdep_rcu_enabled())
		return 1;
	return lock_is_held(&sp->dep_map);
}

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

static inline int srcu_read_lock_held(struct srcu_struct *sp)
{
	return 1;
}

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

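/*
 * Typical use is a debug assertion in code that must only run inside an
 * SRCU read-side critical section (a sketch; "my_srcu" is hypothetical):
 *
 *	WARN_ON_ONCE(!srcu_read_lock_held(&my_srcu));
 */
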
/**
 * srcu_dereference_check - fetch SRCU-protected pointer for later dereferencing
 * @p: the pointer to fetch and protect for later dereferencing
 * @sp: pointer to the srcu_struct, which is used to check that we
 *	really are in an SRCU read-side critical section.
 * @c: condition to check for update-side use
 *
 * If PROVE_RCU is enabled, invoking this outside of an SRCU read-side
 * critical section will result in an RCU-lockdep splat, unless @c evaluates
 * to 1.  The @c argument will normally be a logical expression containing
 * lockdep_is_held() calls.
 */
#define srcu_dereference_check(p, sp, c) \
	__rcu_dereference_check((p), (c) || srcu_read_lock_held(sp), __rcu)

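/*
 * For example, to permit access either within an SRCU read-side critical
 * section or with an update-side lock held (a sketch; "gp", "my_srcu",
 * and "my_lock" are hypothetical):
 *
 *	p = srcu_dereference_check(gp, &my_srcu,
 *				   lockdep_is_held(&my_lock));
 */
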
/**
 * srcu_dereference - fetch SRCU-protected pointer for later dereferencing
 * @p: the pointer to fetch and protect for later dereferencing
 * @sp: pointer to the srcu_struct, which is used to check that we
 *	really are in an SRCU read-side critical section.
 *
 * Makes srcu_dereference_check() do the dirty work.  If PROVE_RCU
 * is enabled, invoking this outside of an SRCU read-side critical
 * section will result in an RCU-lockdep splat.
 */
#define srcu_dereference(p, sp) srcu_dereference_check((p), (sp), 0)

/**
 * srcu_read_lock - register a new reader for an SRCU-protected structure.
 * @sp: srcu_struct in which to register the new reader.
 *
 * Enter an SRCU read-side critical section.  Note that SRCU read-side
 * critical sections may be nested.  However, it is illegal to
 * call anything that waits on an SRCU grace period for the same
 * srcu_struct, whether directly or indirectly.  Please note that
 * one way to indirectly wait on an SRCU grace period is to acquire
 * a mutex that is held elsewhere while calling synchronize_srcu() or
 * synchronize_srcu_expedited().
 *
 * Note that srcu_read_lock() and the matching srcu_read_unlock() must
 * occur in the same context, for example, it is illegal to invoke
 * srcu_read_unlock() in an irq handler if the matching srcu_read_lock()
 * was invoked in process context.
 */
static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
{
	int retval;

	preempt_disable();
	retval = __srcu_read_lock(sp);
	preempt_enable();
	rcu_lock_acquire(&(sp)->dep_map);
	return retval;
}

/**
 * srcu_read_unlock - unregister an old reader from an SRCU-protected structure.
 * @sp: srcu_struct in which to unregister the old reader.
 * @idx: return value from corresponding srcu_read_lock().
 *
 * Exit an SRCU read-side critical section.
 */
static inline void srcu_read_unlock(struct srcu_struct *sp, int idx)
	__releases(sp)
{
	rcu_lock_release(&(sp)->dep_map);
	__srcu_read_unlock(sp, idx);
}

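/*
 * Putting the read side together (a sketch; "my_srcu", "gp", and
 * "struct foo" are hypothetical):
 *
 *	int idx;
 *	struct foo *p;
 *
 *	idx = srcu_read_lock(&my_srcu);
 *	p = srcu_dereference(gp, &my_srcu);
 *	... read from p; the reader may block, but must not wait,
 *	    directly or indirectly, on a grace period of my_srcu ...
 *	srcu_read_unlock(&my_srcu, idx);
 */
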
/**
 * smp_mb__after_srcu_read_unlock - ensure full ordering after srcu_read_unlock
 *
 * Converts the preceding srcu_read_unlock into a two-way memory barrier.
 *
 * Call this after srcu_read_unlock, to guarantee that all memory operations
 * that occur after smp_mb__after_srcu_read_unlock will appear to happen after
 * the preceding srcu_read_unlock.
 */
static inline void smp_mb__after_srcu_read_unlock(void)
{
	/* __srcu_read_unlock has smp_mb() internally so nothing to do here. */
}

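/*
 * Ordering sketch (hypothetical names; the point is that the flag check
 * must not be reordered before the unlock):
 *
 *	srcu_read_unlock(&my_srcu, idx);
 *	smp_mb__after_srcu_read_unlock();
 *	if (READ_ONCE(other->wakeup_pending))
 *		wake_up_process(other->task);
 */
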
#endif