/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2001
 *
 * Author: Dipankar Sarma <dipankar@in.ibm.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	http://lse.sourceforge.net/locking/rcupdate.html
 *
 */

#ifndef __LINUX_RCUPDATE_H
#define __LINUX_RCUPDATE_H

#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>
#include <linux/lockdep.h>
#include <linux/completion.h>

#ifdef CONFIG_RCU_TORTURE_TEST
extern int rcutorture_runnable; /* for sysctl */
#endif /* #ifdef CONFIG_RCU_TORTURE_TEST */

/**
 * struct rcu_head - callback structure for use with RCU
 * @next: next update requests in a list
 * @func: actual update function to call after the grace period.
 */
struct rcu_head {
	struct rcu_head *next;
	void (*func)(struct rcu_head *head);
};
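
/*
 * Typical usage embeds an rcu_head in the RCU-protected structure and
 * supplies a callback that recovers the enclosing structure.  A minimal
 * sketch (struct foo and foo_reclaim() are illustrative only, not part
 * of this API):
 *
 *	struct foo {
 *		int a;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rp)
 *	{
 *		struct foo *fp = container_of(rp, struct foo, rcu);
 *
 *		kfree(fp);
 *	}
 */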

/* Exported common interfaces */
extern void synchronize_rcu_bh(void);
extern void synchronize_sched(void);
extern void rcu_barrier(void);
extern void rcu_barrier_bh(void);
extern void rcu_barrier_sched(void);
extern void synchronize_sched_expedited(void);
extern int sched_expedited_torture_stats(char *page);

/* Internal to kernel */
extern void rcu_init(void);
extern int rcu_scheduler_active;
extern void rcu_scheduler_starting(void);

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
#include <linux/rcutree.h>
#elif defined(CONFIG_TINY_RCU)
#include <linux/rcutiny.h>
#else
#error "Unknown RCU implementation specified to kernel configuration"
#endif

#define RCU_HEAD_INIT	{ .next = NULL, .func = NULL }
#define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT
#define INIT_RCU_HEAD(ptr) do { \
	(ptr)->next = NULL; (ptr)->func = NULL; \
} while (0)

#ifdef CONFIG_DEBUG_LOCK_ALLOC

extern struct lockdep_map rcu_lock_map;
# define rcu_read_acquire() \
		lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
# define rcu_read_release()	lock_release(&rcu_lock_map, 1, _THIS_IP_)

extern struct lockdep_map rcu_bh_lock_map;
# define rcu_read_acquire_bh() \
		lock_acquire(&rcu_bh_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
# define rcu_read_release_bh()	lock_release(&rcu_bh_lock_map, 1, _THIS_IP_)

extern struct lockdep_map rcu_sched_lock_map;
# define rcu_read_acquire_sched() \
		lock_acquire(&rcu_sched_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
# define rcu_read_release_sched() \
		lock_release(&rcu_sched_lock_map, 1, _THIS_IP_)

static inline int debug_lockdep_rcu_enabled(void)
{
	return likely(rcu_scheduler_active && debug_locks);
}

/**
 * rcu_read_lock_held - might we be in an RCU read-side critical section?
 *
 * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in
 * an RCU read-side critical section.  In the absence of
 * CONFIG_PROVE_LOCKING, this assumes we are in an RCU read-side critical
 * section unless it can prove otherwise.
 *
 * Check rcu_scheduler_active to prevent false positives during boot.
 */
static inline int rcu_read_lock_held(void)
{
	if (!debug_lockdep_rcu_enabled())
		return 1;
	return lock_is_held(&rcu_lock_map);
}
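
/*
 * A common use is to assert, in a function that requires its callers to
 * hold rcu_read_lock(), that the protection really is in place.  A
 * minimal sketch (do_something_rcu() is illustrative only):
 *
 *	static void do_something_rcu(void)
 *	{
 *		WARN_ON_ONCE(!rcu_read_lock_held());
 *		... access RCU-protected data ...
 *	}
 */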

/*
 * rcu_read_lock_bh_held() is defined out of line to avoid #include-file
 * hell.
 */
extern int rcu_read_lock_bh_held(void);

/**
 * rcu_read_lock_sched_held - might we be in an RCU-sched read-side critical section?
 *
 * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in an
 * RCU-sched read-side critical section.  In the absence of
 * CONFIG_PROVE_LOCKING, this assumes we are in an RCU-sched read-side
 * critical section unless it can prove otherwise.  Note that disabling
 * of preemption (including disabling irqs) counts as an RCU-sched
 * read-side critical section.
 *
 * Check rcu_scheduler_active to prevent false positives during boot.
 */
#ifdef CONFIG_PREEMPT
static inline int rcu_read_lock_sched_held(void)
{
	int lockdep_opinion = 0;

	if (!debug_lockdep_rcu_enabled())
		return 1;
	if (debug_locks)
		lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
	return lockdep_opinion || preempt_count() != 0 || irqs_disabled();
}
#else /* #ifdef CONFIG_PREEMPT */
static inline int rcu_read_lock_sched_held(void)
{
	return 1;
}
#endif /* #else #ifdef CONFIG_PREEMPT */

| 161 | #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ |
| 162 | |
| 163 | # define rcu_read_acquire() do { } while (0) |
| 164 | # define rcu_read_release() do { } while (0) |
| 165 | # define rcu_read_acquire_bh() do { } while (0) |
| 166 | # define rcu_read_release_bh() do { } while (0) |
| 167 | # define rcu_read_acquire_sched() do { } while (0) |
| 168 | # define rcu_read_release_sched() do { } while (0) |
| 169 | |
| 170 | static inline int rcu_read_lock_held(void) |
| 171 | { |
| 172 | return 1; |
| 173 | } |
| 174 | |
| 175 | static inline int rcu_read_lock_bh_held(void) |
| 176 | { |
| 177 | return 1; |
| 178 | } |
| 179 | |
Paul E. McKenney | e6033e3 | 2010-03-03 17:50:16 -0800 | [diff] [blame] | 180 | #ifdef CONFIG_PREEMPT |
Paul E. McKenney | 632ee20 | 2010-02-22 17:04:45 -0800 | [diff] [blame] | 181 | static inline int rcu_read_lock_sched_held(void) |
| 182 | { |
Lai Jiangshan | 0cff810 | 2010-03-18 12:25:33 -0700 | [diff] [blame] | 183 | return !rcu_scheduler_active || preempt_count() != 0 || irqs_disabled(); |
Paul E. McKenney | 632ee20 | 2010-02-22 17:04:45 -0800 | [diff] [blame] | 184 | } |
Paul E. McKenney | e6033e3 | 2010-03-03 17:50:16 -0800 | [diff] [blame] | 185 | #else /* #ifdef CONFIG_PREEMPT */ |
| 186 | static inline int rcu_read_lock_sched_held(void) |
| 187 | { |
| 188 | return 1; |
| 189 | } |
| 190 | #endif /* #else #ifdef CONFIG_PREEMPT */ |
Paul E. McKenney | 632ee20 | 2010-02-22 17:04:45 -0800 | [diff] [blame] | 191 | |
| 192 | #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ |
| 193 | |
#ifdef CONFIG_PROVE_RCU

/**
 * rcu_dereference_check - rcu_dereference with debug checking
 *
 * Do an rcu_dereference(), but check that the context is correct.
 * For example, use rcu_dereference_check(gp, rcu_read_lock_held()) to
 * ensure that the rcu_dereference_check() executes within an RCU
 * read-side critical section.  It is also possible to check for
 * locks being held, for example, by using lockdep_is_held().
 */
#define rcu_dereference_check(p, c) \
	({ \
		if (debug_lockdep_rcu_enabled() && !(c)) \
			lockdep_rcu_dereference(__FILE__, __LINE__); \
		rcu_dereference_raw(p); \
	})
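
/*
 * For example, a pointer that may be fetched either under rcu_read_lock()
 * or while holding an update-side lock can combine both conditions.  A
 * minimal sketch (gp and mylock are illustrative only):
 *
 *	p = rcu_dereference_check(gp,
 *				  rcu_read_lock_held() ||
 *				  lockdep_is_held(&mylock));
 */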

#else /* #ifdef CONFIG_PROVE_RCU */

#define rcu_dereference_check(p, c)	rcu_dereference_raw(p)

#endif /* #else #ifdef CONFIG_PROVE_RCU */

/**
 * rcu_read_lock - mark the beginning of an RCU read-side critical section.
 *
 * When synchronize_rcu() is invoked on one CPU while other CPUs
 * are within RCU read-side critical sections, then the
 * synchronize_rcu() is guaranteed to block until after all the other
 * CPUs exit their critical sections.  Similarly, if call_rcu() is invoked
 * on one CPU while other CPUs are within RCU read-side critical
 * sections, invocation of the corresponding RCU callback is deferred
 * until after all the other CPUs exit their critical sections.
 *
 * Note, however, that RCU callbacks are permitted to run concurrently
 * with RCU read-side critical sections.  One way that this can happen
 * is via the following sequence of events: (1) CPU 0 enters an RCU
 * read-side critical section, (2) CPU 1 invokes call_rcu() to register
 * an RCU callback, (3) CPU 0 exits the RCU read-side critical section,
 * (4) CPU 2 enters an RCU read-side critical section, (5) the RCU
 * callback is invoked.  This is legal, because the RCU read-side critical
 * section that was running concurrently with the call_rcu() (and which
 * therefore might be referencing something that the corresponding RCU
 * callback would free up) has completed before the corresponding
 * RCU callback is invoked.
 *
 * RCU read-side critical sections may be nested.  Any deferred actions
 * will be deferred until the outermost RCU read-side critical section
 * completes.
 *
 * It is illegal to block while in an RCU read-side critical section.
 */
static inline void rcu_read_lock(void)
{
	__rcu_read_lock();
	__acquire(RCU);
	rcu_read_acquire();
}
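
/*
 * A typical RCU reader brackets its accesses with rcu_read_lock() and
 * rcu_read_unlock(), fetching protected pointers via rcu_dereference().
 * A minimal sketch (gp and do_something_with() are illustrative only):
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	if (p != NULL)
 *		do_something_with(p->a);
 *	rcu_read_unlock();
 */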

/*
 * So where is rcu_write_lock()?  It does not exist, as there is no
 * way for writers to lock out RCU readers.  This is a feature, not
 * a bug -- this property is what provides RCU's performance benefits.
 * Of course, writers must coordinate with each other.  The normal
 * spinlock primitives work well for this, but any other technique may be
 * used as well.  RCU does not care how the writers keep out of each
 * others' way, as long as they do so.
 */

/**
 * rcu_read_unlock - marks the end of an RCU read-side critical section.
 *
 * See rcu_read_lock() for more information.
 */
static inline void rcu_read_unlock(void)
{
	rcu_read_release();
	__release(RCU);
	__rcu_read_unlock();
}

/**
 * rcu_read_lock_bh - mark the beginning of a softirq-only RCU critical section
 *
 * This is the equivalent of rcu_read_lock(), but to be used when updates
 * are being done using call_rcu_bh().  Since call_rcu_bh() callbacks
 * consider completion of a softirq handler to be a quiescent state,
 * a process in an RCU read-side critical section must be protected by
 * disabling softirqs.  Read-side critical sections in interrupt context
 * can use just rcu_read_lock().
 */
static inline void rcu_read_lock_bh(void)
{
	__rcu_read_lock_bh();
	__acquire(RCU_BH);
	rcu_read_acquire_bh();
}

/*
 * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section
 *
 * See rcu_read_lock_bh() for more information.
 */
static inline void rcu_read_unlock_bh(void)
{
	rcu_read_release_bh();
	__release(RCU_BH);
	__rcu_read_unlock_bh();
}
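
/*
 * Readers of this flavor pair rcu_read_lock_bh() with rcu_dereference_bh()
 * against updaters using call_rcu_bh().  A minimal sketch (gp and
 * do_something_with() are illustrative only):
 *
 *	rcu_read_lock_bh();
 *	p = rcu_dereference_bh(gp);
 *	if (p != NULL)
 *		do_something_with(p->a);
 *	rcu_read_unlock_bh();
 */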

/**
 * rcu_read_lock_sched - mark the beginning of an RCU-classic critical section
 *
 * Should be used with either
 * - synchronize_sched()
 * or
 * - call_rcu_sched() and rcu_barrier_sched()
 * on the write-side to ensure proper synchronization.
 */
static inline void rcu_read_lock_sched(void)
{
	preempt_disable();
	__acquire(RCU_SCHED);
	rcu_read_acquire_sched();
}

/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
static inline notrace void rcu_read_lock_sched_notrace(void)
{
	preempt_disable_notrace();
	__acquire(RCU_SCHED);
}

/*
 * rcu_read_unlock_sched - marks the end of an RCU-classic critical section
 *
 * See rcu_read_lock_sched() for more information.
 */
static inline void rcu_read_unlock_sched(void)
{
	rcu_read_release_sched();
	__release(RCU_SCHED);
	preempt_enable();
}
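
/*
 * Because rcu_read_lock_sched() simply disables preemption, any region
 * of code running with preemption or interrupts disabled is implicitly
 * an RCU-sched read-side critical section.  A minimal explicit reader
 * sketch (gp and do_something_with() are illustrative only):
 *
 *	rcu_read_lock_sched();
 *	p = rcu_dereference_sched(gp);
 *	if (p != NULL)
 *		do_something_with(p->a);
 *	rcu_read_unlock_sched();
 */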

/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
static inline notrace void rcu_read_unlock_sched_notrace(void)
{
	__release(RCU_SCHED);
	preempt_enable_notrace();
}


/**
 * rcu_dereference_raw - fetch an RCU-protected pointer
 *
 * The caller must be within some flavor of RCU read-side critical
 * section, or must be otherwise preventing the pointer from changing,
 * for example, by holding an appropriate lock.  This pointer may later
 * be safely dereferenced.  It is the caller's responsibility to have
 * done the right thing, as this primitive does no checking of any kind.
 *
 * Inserts memory barriers on architectures that require them
 * (currently only the Alpha), and, more importantly, documents
 * exactly which pointers are protected by RCU.
 */
#define rcu_dereference_raw(p)	({ \
				typeof(p) _________p1 = ACCESS_ONCE(p); \
				smp_read_barrier_depends(); \
				(_________p1); \
				})

/**
 * rcu_dereference - fetch an RCU-protected pointer, checking for RCU
 *
 * Makes rcu_dereference_check() do the dirty work.
 */
#define rcu_dereference(p) \
	rcu_dereference_check(p, rcu_read_lock_held())

/**
 * rcu_dereference_bh - fetch an RCU-protected pointer, checking for RCU-bh
 *
 * Makes rcu_dereference_check() do the dirty work.
 */
#define rcu_dereference_bh(p) \
	rcu_dereference_check(p, rcu_read_lock_bh_held())

/**
 * rcu_dereference_sched - fetch RCU-protected pointer, checking for RCU-sched
 *
 * Makes rcu_dereference_check() do the dirty work.
 */
#define rcu_dereference_sched(p) \
	rcu_dereference_check(p, rcu_read_lock_sched_held())

/**
 * rcu_assign_pointer - assign (publicize) a pointer to a newly
 * initialized structure that will be dereferenced by RCU read-side
 * critical sections.  Returns the value assigned.
 *
 * Inserts memory barriers on architectures that require them
 * (pretty much all of them other than x86), and also prevents
 * the compiler from reordering the code that initializes the
 * structure after the pointer assignment.  More importantly, this
 * call documents which pointers will be dereferenced by RCU read-side
 * code.
 */
#define rcu_assign_pointer(p, v) \
	({ \
		if (!__builtin_constant_p(v) || \
		    ((v) != NULL)) \
			smp_wmb(); \
		(p) = (v); \
	})
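
/*
 * A typical updater fully initializes a new structure and only then
 * publishes it with rcu_assign_pointer(), so that readers never observe
 * a partially initialized object.  A minimal sketch (struct foo and gp
 * are illustrative only):
 *
 *	struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL);
 *
 *	p->a = 1;
 *	rcu_assign_pointer(gp, p);
 */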

/* Infrastructure to implement the synchronize_() primitives. */

struct rcu_synchronize {
	struct rcu_head head;
	struct completion completion;
};

extern void wakeme_after_rcu(struct rcu_head *head);
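
/*
 * The synchronize_*() primitives are built from this pair: queue an
 * on-stack rcu_synchronize whose callback signals the completion, then
 * block until the grace period has elapsed.  Roughly (a sketch of the
 * out-of-line implementation):
 *
 *	struct rcu_synchronize rcu;
 *
 *	init_completion(&rcu.completion);
 *	call_rcu(&rcu.head, wakeme_after_rcu);
 *	wait_for_completion(&rcu.completion);
 */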

/**
 * call_rcu - Queue an RCU callback for invocation after a grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 *
 * The update function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 */
extern void call_rcu(struct rcu_head *head,
		     void (*func)(struct rcu_head *head));
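
/*
 * An updater typically unlinks an element under a lock, then uses
 * call_rcu() to defer freeing it until all pre-existing readers have
 * finished.  A minimal sketch (foo_lock, the list field, and the
 * foo_reclaim() callback sketched earlier are illustrative only):
 *
 *	spin_lock(&foo_lock);
 *	list_del_rcu(&p->list);
 *	spin_unlock(&foo_lock);
 *	call_rcu(&p->rcu, foo_reclaim);
 */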

/**
 * call_rcu_bh - Queue an RCU callback for invocation after a quicker grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 *
 * The update function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  call_rcu_bh() assumes
 * that the read-side critical sections end on completion of a softirq
 * handler.  This means that read-side critical sections in process
 * context must not be interrupted by softirqs.  This interface is to be
 * used when most of the read-side critical sections are in softirq context.
 * RCU read-side critical sections are delimited by:
 * - rcu_read_lock() and rcu_read_unlock(), if in interrupt context.
 * OR
 * - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
 * These may be nested.
 */
extern void call_rcu_bh(struct rcu_head *head,
			void (*func)(struct rcu_head *head));

#endif /* __LINUX_RCUPDATE_H */