/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2001
 *
 * Author: Dipankar Sarma <dipankar@in.ibm.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 * http://lse.sourceforge.net/locking/rcupdate.html
 *
 */

#ifndef __LINUX_RCUPDATE_H
#define __LINUX_RCUPDATE_H

#include <linux/types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>
#include <linux/lockdep.h>
#include <linux/completion.h>
#include <linux/debugobjects.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/ktime.h>

#include <asm/barrier.h>

#ifndef CONFIG_TINY_RCU
extern int rcu_expedited; /* for sysctl */
extern int rcu_normal;    /* also for sysctl */
#endif /* #ifndef CONFIG_TINY_RCU */

#ifdef CONFIG_TINY_RCU
/* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */
static inline bool rcu_gp_is_normal(void)  /* Internal RCU use. */
{
        return true;
}
static inline bool rcu_gp_is_expedited(void)  /* Internal RCU use. */
{
        return false;
}

static inline void rcu_expedite_gp(void)
{
}

static inline void rcu_unexpedite_gp(void)
{
}
#else /* #ifdef CONFIG_TINY_RCU */
bool rcu_gp_is_normal(void);     /* Internal RCU use. */
bool rcu_gp_is_expedited(void);  /* Internal RCU use. */
void rcu_expedite_gp(void);
void rcu_unexpedite_gp(void);
#endif /* #else #ifdef CONFIG_TINY_RCU */

enum rcutorture_type {
        RCU_FLAVOR,
        RCU_BH_FLAVOR,
        RCU_SCHED_FLAVOR,
        RCU_TASKS_FLAVOR,
        SRCU_FLAVOR,
        INVALID_RCU_FLAVOR
};

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
                            unsigned long *gpnum, unsigned long *completed);
void rcutorture_record_test_transition(void);
void rcutorture_record_progress(unsigned long vernum);
void do_trace_rcu_torture_read(const char *rcutorturename,
                               struct rcu_head *rhp,
                               unsigned long secs,
                               unsigned long c_old,
                               unsigned long c);
#else
static inline void rcutorture_get_gp_data(enum rcutorture_type test_type,
                                          int *flags,
                                          unsigned long *gpnum,
                                          unsigned long *completed)
{
        *flags = 0;
        *gpnum = 0;
        *completed = 0;
}
static inline void rcutorture_record_test_transition(void)
{
}
static inline void rcutorture_record_progress(unsigned long vernum)
{
}
#ifdef CONFIG_RCU_TRACE
void do_trace_rcu_torture_read(const char *rcutorturename,
                               struct rcu_head *rhp,
                               unsigned long secs,
                               unsigned long c_old,
                               unsigned long c);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
        do { } while (0)
#endif
#endif

#define UINT_CMP_GE(a, b)	(UINT_MAX / 2 >= (a) - (b))
#define UINT_CMP_LT(a, b)	(UINT_MAX / 2 < (a) - (b))
#define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))
#define ULONG_CMP_LT(a, b)	(ULONG_MAX / 2 < (a) - (b))
#define ulong2long(a)		(*(long *)(&(a)))

/* Exported common interfaces */

#ifdef CONFIG_PREEMPT_RCU

/**
 * call_rcu() - Queue an RCU callback for invocation after a grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all pre-existing RCU read-side
 * critical sections have completed.  However, the callback function
 * might well execute concurrently with RCU read-side critical sections
 * that started after call_rcu() was invoked.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 *
 * Note that all CPUs must agree that the grace period extended beyond
 * all pre-existing RCU read-side critical sections.  On systems with more
 * than one CPU, this means that when "func()" is invoked, each CPU is
 * guaranteed to have executed a full memory barrier since the end of its
 * last RCU read-side critical section whose beginning preceded the call
 * to call_rcu().  It also means that each CPU executing an RCU read-side
 * critical section that continues beyond the start of "func()" must have
 * executed a memory barrier after the call_rcu() but before the beginning
 * of that RCU read-side critical section.  Note that these guarantees
 * include CPUs that are offline, idle, or executing in user mode, as
 * well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
 * resulting RCU callback function "func()", then both CPU A and CPU B are
 * guaranteed to execute a full memory barrier during the time interval
 * between the call to call_rcu() and the invocation of "func()" -- even
 * if CPU A and CPU B are the same CPU (but again only if the system has
 * more than one CPU).
 */
void call_rcu(struct rcu_head *head,
              rcu_callback_t func);
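
/*
 * Illustrative sketch (hypothetical code, not part of this header):
 * a "struct foo" whose reclamation is deferred with call_rcu().  The
 * structure embeds an rcu_head, the removal path unlinks the element
 * and queues the callback, and the callback runs only after a full
 * grace period has elapsed.
 *
 *	struct foo {
 *		struct list_head list;
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rhp)
 *	{
 *		struct foo *fp = container_of(rhp, struct foo, rcu);
 *
 *		kfree(fp);
 *	}
 *
 *	static void foo_remove(struct foo *fp)
 *	{
 *		list_del_rcu(&fp->list);
 *		call_rcu(&fp->rcu, foo_reclaim);
 *	}
 *
 * Readers traversing the list under rcu_read_lock() either see the
 * element or miss it, but never see freed memory, because the kfree()
 * is deferred until all pre-existing readers have finished.
 */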

#else /* #ifdef CONFIG_PREEMPT_RCU */

/* In classic RCU, call_rcu() is just call_rcu_sched(). */
#define	call_rcu	call_rcu_sched

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/**
 * call_rcu_bh() - Queue an RCU callback for invocation after a quicker grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  call_rcu_bh() assumes
 * that the read-side critical sections end on completion of a softirq
 * handler.  This means that read-side critical sections in process
 * context must not be interrupted by softirqs.  This interface is to be
 * used when most of the read-side critical sections are in softirq context.
 * RCU read-side critical sections are delimited by:
 *  - rcu_read_lock() and rcu_read_unlock(), if in interrupt context.
 *  OR
 *  - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
 *  These may be nested.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_bh(struct rcu_head *head,
                 rcu_callback_t func);

/**
 * call_rcu_sched() - Queue an RCU callback for invocation after a sched grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  call_rcu_sched() assumes
 * that the read-side critical sections end on enabling of preemption
 * or on voluntary preemption.
 * RCU read-side critical sections are delimited by:
 *  - rcu_read_lock_sched() and rcu_read_unlock_sched(),
 *  OR
 *  anything that disables preemption.
 *  These may be nested.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_sched(struct rcu_head *head,
                    rcu_callback_t func);

void synchronize_sched(void);

/*
 * Structure allowing asynchronous waiting on RCU.
 */
struct rcu_synchronize {
        struct rcu_head head;
        struct completion completion;
};
void wakeme_after_rcu(struct rcu_head *head);

void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
                   struct rcu_synchronize *rs_array);

#define _wait_rcu_gp(checktiny, ...) \
do { \
        call_rcu_func_t __crcu_array[] = { __VA_ARGS__ }; \
        struct rcu_synchronize __rs_array[ARRAY_SIZE(__crcu_array)]; \
        __wait_rcu_gp(checktiny, ARRAY_SIZE(__crcu_array), \
                      __crcu_array, __rs_array); \
} while (0)

#define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__)

/**
 * synchronize_rcu_mult - Wait concurrently for multiple grace periods
 * @...: List of call_rcu() functions for the flavors to wait on.
 *
 * This macro waits concurrently for multiple flavors of RCU grace periods.
 * For example, synchronize_rcu_mult(call_rcu, call_rcu_bh) would wait
 * on concurrent RCU and RCU-bh grace periods.  Waiting on a given SRCU
 * domain requires you to write a wrapper function for that SRCU domain's
 * call_srcu() function, supplying the corresponding srcu_struct.
 *
 * If Tiny RCU, tell _wait_rcu_gp() not to bother waiting for RCU
 * or RCU-bh, given that anywhere synchronize_rcu_mult() can be called
 * is automatically a grace period.
 */
#define synchronize_rcu_mult(...) \
        _wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), __VA_ARGS__)
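
/*
 * Illustrative sketch (hypothetical SRCU domain, not part of this header):
 * waiting on an RCU-sched grace period and on a grace period for the
 * SRCU domain "my_srcu" at the same time.  The wrapper supplies the
 * srcu_struct that call_srcu() needs.
 *
 *	DEFINE_STATIC_SRCU(my_srcu);
 *
 *	static void call_my_srcu(struct rcu_head *head, rcu_callback_t func)
 *	{
 *		call_srcu(&my_srcu, head, func);
 *	}
 *
 *	synchronize_rcu_mult(call_rcu_sched, call_my_srcu);
 *
 * This returns only after both a sched grace period and a "my_srcu"
 * grace period have elapsed, with the two waits overlapping in time.
 */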

/**
 * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  call_rcu_tasks() assumes
 * that the read-side critical sections end at a voluntary context
 * switch (not a preemption!), entry into idle, or transition to usermode
 * execution.  As such, there are no read-side primitives analogous to
 * rcu_read_lock() and rcu_read_unlock() because this primitive is intended
 * to determine that all tasks have passed through a safe state, not so
 * much for data-structure synchronization.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
void synchronize_rcu_tasks(void);
void rcu_barrier_tasks(void);
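
/*
 * Illustrative sketch (hypothetical code, not part of this header):
 * RCU-tasks is typically used to free code or data that a preempted
 * task might still be executing or referencing, for example a
 * dynamically allocated trampoline.  Once every task has passed through
 * a voluntary context switch, idle, or usermode, the old trampoline can
 * safely be freed.
 *
 *	struct my_trampoline {
 *		void *insns;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void old_trampoline_free(struct rcu_head *rhp)
 *	{
 *		struct my_trampoline *tp =
 *			container_of(rhp, struct my_trampoline, rcu);
 *
 *		module_memfree(tp->insns);
 *		kfree(tp);
 *	}
 *
 *	static void retire_trampoline(struct my_trampoline *tp)
 *	{
 *		unpublish_trampoline(tp);
 *		call_rcu_tasks(&tp->rcu, old_trampoline_free);
 *	}
 *
 * struct my_trampoline, unpublish_trampoline(), and the use of
 * module_memfree() are stand-ins for whatever mechanism actually
 * produced the trampoline; the point is that call_rcu_tasks() defers
 * the free until no task can still be running on the old code.
 */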

#ifdef CONFIG_PREEMPT_RCU

void __rcu_read_lock(void);
void __rcu_read_unlock(void);
void rcu_read_unlock_special(struct task_struct *t);
void synchronize_rcu(void);

/*
 * Defined as a macro as it is a very low level header included from
 * areas that don't even know about current.  This gives the rcu_read_lock()
 * nesting depth, but makes sense only if CONFIG_PREEMPT_RCU -- in other
 * types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
 */
#define rcu_preempt_depth() (current->rcu_read_lock_nesting)

#else /* #ifdef CONFIG_PREEMPT_RCU */

static inline void __rcu_read_lock(void)
{
        if (IS_ENABLED(CONFIG_PREEMPT_COUNT))
                preempt_disable();
}

static inline void __rcu_read_unlock(void)
{
        if (IS_ENABLED(CONFIG_PREEMPT_COUNT))
                preempt_enable();
}

static inline void synchronize_rcu(void)
{
        synchronize_sched();
}

static inline int rcu_preempt_depth(void)
{
        return 0;
}

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/* Internal to kernel */
void rcu_init(void);
void rcu_sched_qs(void);
void rcu_bh_qs(void);
void rcu_check_callbacks(int user);
void rcu_report_dead(unsigned int cpu);

#ifndef CONFIG_TINY_RCU
void rcu_end_inkernel_boot(void);
#else /* #ifndef CONFIG_TINY_RCU */
static inline void rcu_end_inkernel_boot(void) { }
#endif /* #ifndef CONFIG_TINY_RCU */

#ifdef CONFIG_RCU_STALL_COMMON
void rcu_sysrq_start(void);
void rcu_sysrq_end(void);
#else /* #ifdef CONFIG_RCU_STALL_COMMON */
static inline void rcu_sysrq_start(void)
{
}
static inline void rcu_sysrq_end(void)
{
}
#endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */

#ifdef CONFIG_NO_HZ_FULL
void rcu_user_enter(void);
void rcu_user_exit(void);
#else
static inline void rcu_user_enter(void) { }
static inline void rcu_user_exit(void) { }
#endif /* CONFIG_NO_HZ_FULL */

#ifdef CONFIG_RCU_NOCB_CPU
void rcu_init_nohz(void);
#else /* #ifdef CONFIG_RCU_NOCB_CPU */
static inline void rcu_init_nohz(void)
{
}
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */

/**
 * RCU_NONIDLE - Indicate idle-loop code that needs RCU readers
 * @a: Code that RCU needs to pay attention to.
 *
 * RCU, RCU-bh, and RCU-sched read-side critical sections are forbidden
 * in the inner idle loop, that is, between the rcu_idle_enter() and
 * the rcu_idle_exit() -- RCU will happily ignore any such read-side
 * critical sections.  However, things like powertop need tracepoints
 * in the inner idle loop.
 *
 * This macro provides the way out:  RCU_NONIDLE(do_something_with_RCU())
 * will tell RCU that it needs to pay attention, invoke its argument
 * (in this example, calling the do_something_with_RCU() function),
 * and then tell RCU to go back to ignoring this CPU.  It is permissible
 * to nest RCU_NONIDLE() wrappers, but not indefinitely (but the limit is
 * on the order of a million or so, even on 32-bit systems).  It is
 * not legal to block within RCU_NONIDLE(), nor is it permissible to
 * transfer control either into or out of RCU_NONIDLE()'s statement.
 */
#define RCU_NONIDLE(a) \
        do { \
                rcu_irq_enter_irqson(); \
                do { a; } while (0); \
                rcu_irq_exit_irqson(); \
        } while (0)
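
/*
 * Illustrative sketch (hypothetical tracepoint, not part of this header):
 * an idle-entry path can still fire an RCU-using tracepoint by wrapping
 * it in RCU_NONIDLE(), which momentarily tells RCU to watch this CPU.
 *
 *	static void my_idle_enter(int state)
 *	{
 *		RCU_NONIDLE(trace_my_idle_event(state));
 *		arch_do_idle(state);
 *	}
 *
 * trace_my_idle_event() and arch_do_idle() are stand-ins; the only
 * requirement is that the wrapped statement neither blocks nor
 * transfers control out of the macro.
 */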

/*
 * Note a voluntary context switch for RCU-tasks benefit.  This is a
 * macro rather than an inline function to avoid #include hell.
 */
#ifdef CONFIG_TASKS_RCU
#define TASKS_RCU(x) x
extern struct srcu_struct tasks_rcu_exit_srcu;
#define rcu_note_voluntary_context_switch(t) \
        do { \
                rcu_all_qs(); \
                if (READ_ONCE((t)->rcu_tasks_holdout)) \
                        WRITE_ONCE((t)->rcu_tasks_holdout, false); \
        } while (0)
#else /* #ifdef CONFIG_TASKS_RCU */
#define TASKS_RCU(x) do { } while (0)
#define rcu_note_voluntary_context_switch(t) rcu_all_qs()
#endif /* #else #ifdef CONFIG_TASKS_RCU */

/**
 * cond_resched_rcu_qs - Report potential quiescent states to RCU
 *
 * This macro resembles cond_resched(), except that it is defined to
 * report potential quiescent states to RCU-tasks even if the cond_resched()
 * machinery were to be shut off, as some advocate for PREEMPT kernels.
 */
#define cond_resched_rcu_qs() \
do { \
        if (!cond_resched()) \
                rcu_note_voluntary_context_switch(current); \
} while (0)
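
/*
 * Illustrative sketch (hypothetical loop, not part of this header):
 * long-running kernel loops can use cond_resched_rcu_qs() so that each
 * pass both offers to reschedule and, when no reschedule happens, still
 * reports a quiescent state to RCU and RCU-tasks.
 *
 *	static void process_many_items(struct my_work *work)
 *	{
 *		int i;
 *
 *		for (i = 0; i < work->nr_items; i++) {
 *			process_one_item(&work->items[i]);
 *			cond_resched_rcu_qs();
 *		}
 *	}
 *
 * struct my_work and process_one_item() are stand-ins for whatever the
 * loop actually does.
 */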

#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP)
bool __rcu_is_watching(void);
#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */

/*
 * Infrastructure to implement the synchronize_() primitives in
 * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
 */

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
#include <linux/rcutree.h>
#elif defined(CONFIG_TINY_RCU)
#include <linux/rcutiny.h>
#else
#error "Unknown RCU implementation specified to kernel configuration"
#endif

/*
 * init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic
 * initialization and destruction of rcu_head on the stack.  rcu_head structures
 * allocated dynamically in the heap or defined statically don't need any
 * initialization.
 */
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
void init_rcu_head(struct rcu_head *head);
void destroy_rcu_head(struct rcu_head *head);
void init_rcu_head_on_stack(struct rcu_head *head);
void destroy_rcu_head_on_stack(struct rcu_head *head);
#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
static inline void init_rcu_head(struct rcu_head *head)
{
}

static inline void destroy_rcu_head(struct rcu_head *head)
{
}

static inline void init_rcu_head_on_stack(struct rcu_head *head)
{
}

static inline void destroy_rcu_head_on_stack(struct rcu_head *head)
{
}
#endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */

#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU)
bool rcu_lockdep_current_cpu_online(void);
#else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
static inline bool rcu_lockdep_current_cpu_online(void)
{
        return true;
}
#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */

#ifdef CONFIG_DEBUG_LOCK_ALLOC

static inline void rcu_lock_acquire(struct lockdep_map *map)
{
        lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_);
}

static inline void rcu_lock_release(struct lockdep_map *map)
{
        lock_release(map, 1, _THIS_IP_);
}

extern struct lockdep_map rcu_lock_map;
extern struct lockdep_map rcu_bh_lock_map;
extern struct lockdep_map rcu_sched_lock_map;
extern struct lockdep_map rcu_callback_map;
int debug_lockdep_rcu_enabled(void);

int rcu_read_lock_held(void);
int rcu_read_lock_bh_held(void);

/**
 * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
 * RCU-sched read-side critical section.  In absence of
 * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
 * critical section unless it can prove otherwise.
 */
int rcu_read_lock_sched_held(void);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

# define rcu_lock_acquire(a)		do { } while (0)
# define rcu_lock_release(a)		do { } while (0)

static inline int rcu_read_lock_held(void)
{
        return 1;
}

static inline int rcu_read_lock_bh_held(void)
{
        return 1;
}

static inline int rcu_read_lock_sched_held(void)
{
        return !preemptible();
}
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#ifdef CONFIG_PROVE_RCU

/**
 * RCU_LOCKDEP_WARN - emit lockdep splat if specified condition is met
 * @c: condition to check
 * @s: informative message
 */
#define RCU_LOCKDEP_WARN(c, s) \
        do { \
                static bool __section(.data.unlikely) __warned; \
                if (debug_lockdep_rcu_enabled() && !__warned && (c)) { \
                        __warned = true; \
                        lockdep_rcu_suspicious(__FILE__, __LINE__, s); \
                } \
        } while (0)
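
/*
 * Illustrative sketch (hypothetical accessor, not part of this header):
 * a subsystem can use RCU_LOCKDEP_WARN() to complain exactly once if one
 * of its functions is called without the required read-side protection.
 *
 *	static struct my_cfg *my_cfg_get(void)
 *	{
 *		RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
 *				 "my_cfg_get() needs rcu_read_lock()");
 *		return rcu_dereference(my_cfg_ptr);
 *	}
 *
 * struct my_cfg and my_cfg_ptr are stand-ins for the caller's own data.
 */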

#if defined(CONFIG_PROVE_RCU) && !defined(CONFIG_PREEMPT_RCU)
static inline void rcu_preempt_sleep_check(void)
{
        RCU_LOCKDEP_WARN(lock_is_held(&rcu_lock_map),
                         "Illegal context switch in RCU read-side critical section");
}
#else /* #ifdef CONFIG_PROVE_RCU */
static inline void rcu_preempt_sleep_check(void)
{
}
#endif /* #else #ifdef CONFIG_PROVE_RCU */

#define rcu_sleep_check() \
        do { \
                rcu_preempt_sleep_check(); \
                RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map), \
                                 "Illegal context switch in RCU-bh read-side critical section"); \
                RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map), \
                                 "Illegal context switch in RCU-sched read-side critical section"); \
        } while (0)

#else /* #ifdef CONFIG_PROVE_RCU */

#define RCU_LOCKDEP_WARN(c, s) do { } while (0)
#define rcu_sleep_check() do { } while (0)

#endif /* #else #ifdef CONFIG_PROVE_RCU */

/*
 * Helper functions for rcu_dereference_check(), rcu_dereference_protected()
 * and rcu_assign_pointer().  Some of these could be folded into their
 * callers, but they are left separate in order to ease introduction of
 * multiple flavors of pointers to match the multiple flavors of RCU
 * (e.g., __rcu_bh, __rcu_sched, and __srcu), should this make sense in
 * the future.
 */

#ifdef __CHECKER__
#define rcu_dereference_sparse(p, space) \
        ((void)(((typeof(*p) space *)p) == p))
#else /* #ifdef __CHECKER__ */
#define rcu_dereference_sparse(p, space)
#endif /* #else #ifdef __CHECKER__ */

#define __rcu_access_pointer(p, space) \
({ \
        typeof(*p) *_________p1 = (typeof(*p) *__force)READ_ONCE(p); \
        rcu_dereference_sparse(p, space); \
        ((typeof(*p) __force __kernel *)(_________p1)); \
})
#define __rcu_dereference_check(p, c, space) \
({ \
        /* Dependency order vs. p above. */ \
        typeof(*p) *________p1 = (typeof(*p) *__force)lockless_dereference(p); \
        RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_check() usage"); \
        rcu_dereference_sparse(p, space); \
        ((typeof(*p) __force __kernel *)(________p1)); \
})
#define __rcu_dereference_protected(p, c, space) \
({ \
        RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_protected() usage"); \
        rcu_dereference_sparse(p, space); \
        ((typeof(*p) __force __kernel *)(p)); \
})

/**
 * RCU_INITIALIZER() - statically initialize an RCU-protected global variable
 * @v: The value to statically initialize with.
 */
#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v)

/**
 * rcu_assign_pointer() - assign to RCU-protected pointer
 * @p: pointer to assign to
 * @v: value to assign (publish)
 *
 * Assigns the specified value to the specified RCU-protected
 * pointer, ensuring that any concurrent RCU readers will see
 * any prior initialization.
 *
 * Inserts memory barriers on architectures that require them
 * (which is most of them), and also prevents the compiler from
 * reordering the code that initializes the structure after the pointer
 * assignment.  More importantly, this call documents which pointers
 * will be dereferenced by RCU read-side code.
 *
 * In some special cases, you may use RCU_INIT_POINTER() instead
 * of rcu_assign_pointer().  RCU_INIT_POINTER() is a bit faster due
 * to the fact that it does not constrain either the CPU or the compiler.
 * That said, using RCU_INIT_POINTER() when you should have used
 * rcu_assign_pointer() is a very bad thing that results in
 * impossible-to-diagnose memory corruption.  So please be careful.
 * See the RCU_INIT_POINTER() comment header for details.
 *
 * Note that rcu_assign_pointer() evaluates each of its arguments only
 * once, appearances notwithstanding.  One of the "extra" evaluations
 * is in typeof() and the other visible only to sparse (__CHECKER__),
 * neither of which actually execute the argument.  As with most cpp
 * macros, this execute-arguments-only-once property is important, so
 * please be careful when making changes to rcu_assign_pointer() and the
 * other macros that it invokes.
 */
#define rcu_assign_pointer(p, v) \
({ \
        uintptr_t _r_a_p__v = (uintptr_t)(v); \
\
        if (__builtin_constant_p(v) && (_r_a_p__v) == (uintptr_t)NULL) \
                WRITE_ONCE((p), (typeof(p))(_r_a_p__v)); \
        else \
                smp_store_release(&p, RCU_INITIALIZER((typeof(p))_r_a_p__v)); \
        _r_a_p__v; \
})
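
/*
 * Illustrative sketch (hypothetical structure, not part of this header):
 * the classic publish pattern.  The structure is fully initialized
 * before rcu_assign_pointer() makes it visible, so a reader that sees
 * the new pointer is also guaranteed to see the initialized fields.
 *
 *	struct my_cfg {
 *		int threshold;
 *		int flags;
 *	};
 *	static struct my_cfg __rcu *my_cfg_ptr;
 *
 *	static int my_cfg_update(int threshold, int flags)
 *	{
 *		struct my_cfg *p = kmalloc(sizeof(*p), GFP_KERNEL);
 *
 *		if (!p)
 *			return -ENOMEM;
 *		p->threshold = threshold;
 *		p->flags = flags;
 *		rcu_assign_pointer(my_cfg_ptr, p);
 *		return 0;
 *	}
 *
 * Retiring the previous value (via call_rcu() or synchronize_rcu()
 * followed by kfree()) is omitted here for brevity.
 */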

/**
 * rcu_access_pointer() - fetch RCU pointer with no dereferencing
 * @p: The pointer to read
 *
 * Return the value of the specified RCU-protected pointer, but omit the
 * smp_read_barrier_depends() and keep the READ_ONCE().  This is useful
 * when the value of this pointer is accessed, but the pointer is not
 * dereferenced, for example, when testing an RCU-protected pointer against
 * NULL.  Although rcu_access_pointer() may also be used in cases where
 * update-side locks prevent the value of the pointer from changing, you
 * should instead use rcu_dereference_protected() for this use case.
 *
 * It is also permissible to use rcu_access_pointer() when read-side
 * access to the pointer was removed at least one grace period ago, as
 * is the case in the context of the RCU callback that is freeing up
 * the data, or after a synchronize_rcu() returns.  This can be useful
 * when tearing down multi-linked structures after a grace period
 * has elapsed.
 */
#define rcu_access_pointer(p) __rcu_access_pointer((p), __rcu)
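
/*
 * Illustrative sketch (reusing the hypothetical my_cfg_ptr from above):
 * rcu_access_pointer() is the right tool when only the pointer value
 * matters, for example a NULL check that never dereferences the result.
 *
 *	static bool my_cfg_is_present(void)
 *	{
 *		return rcu_access_pointer(my_cfg_ptr) != NULL;
 *	}
 *
 * No rcu_read_lock() is required here because the pointed-to data is
 * never touched; if the caller went on to dereference the pointer, it
 * would need rcu_dereference() under rcu_read_lock() instead.
 */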

/**
 * rcu_dereference_check() - rcu_dereference with debug checking
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * Do an rcu_dereference(), but check that the conditions under which the
 * dereference will take place are correct.  Typically the conditions
 * indicate the various locking conditions that should be held at that
 * point.  The check should return true if the conditions are satisfied.
 * An implicit check for being in an RCU read-side critical section
 * (rcu_read_lock()) is included.
 *
 * For example:
 *
 *	bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock));
 *
 * could be used to indicate to lockdep that foo->bar may only be dereferenced
 * if either rcu_read_lock() is held, or that the lock required to replace
 * the bar struct at foo->bar is held.
 *
 * Note that the list of conditions may also include indications of when a lock
 * need not be held, for example during initialisation or destruction of the
 * target struct:
 *
 *	bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock) ||
 *					      atomic_read(&foo->usage) == 0);
 *
 * Inserts memory barriers on architectures that require them
 * (currently only the Alpha), prevents the compiler from refetching
 * (and from merging fetches), and, more importantly, documents exactly
 * which pointers are protected by RCU and checks that the pointer is
 * annotated as __rcu.
 */
#define rcu_dereference_check(p, c) \
        __rcu_dereference_check((p), (c) || rcu_read_lock_held(), __rcu)

/**
 * rcu_dereference_bh_check() - rcu_dereference_bh with debug checking
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * This is the RCU-bh counterpart to rcu_dereference_check().
 */
#define rcu_dereference_bh_check(p, c) \
        __rcu_dereference_check((p), (c) || rcu_read_lock_bh_held(), __rcu)

/**
 * rcu_dereference_sched_check() - rcu_dereference_sched with debug checking
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * This is the RCU-sched counterpart to rcu_dereference_check().
 */
#define rcu_dereference_sched_check(p, c) \
        __rcu_dereference_check((p), (c) || rcu_read_lock_sched_held(), \
                                __rcu)

#define rcu_dereference_raw(p) rcu_dereference_check(p, 1) /*@@@ needed? @@@*/

/*
 * The tracing infrastructure traces RCU (we want that), but unfortunately
 * some of the RCU checks cause tracing to lock up the system.
 *
 * The no-tracing version of rcu_dereference_raw() must not call
 * rcu_read_lock_held().
 */
#define rcu_dereference_raw_notrace(p) __rcu_dereference_check((p), 1, __rcu)

/**
 * rcu_dereference_protected() - fetch RCU pointer when updates prevented
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * Return the value of the specified RCU-protected pointer, but omit
 * both the smp_read_barrier_depends() and the READ_ONCE().  This
 * is useful in cases where update-side locks prevent the value of the
 * pointer from changing.  Please note that this primitive does -not-
 * prevent the compiler from repeating this reference or combining it
 * with other references, so it should not be used without protection
 * of appropriate locks.
 *
 * This function is only for update-side use.  Using this function
 * when protected only by rcu_read_lock() will result in infrequent
 * but very ugly failures.
 */
#define rcu_dereference_protected(p, c) \
        __rcu_dereference_protected((p), (c), __rcu)


/**
 * rcu_dereference() - fetch RCU-protected pointer for dereferencing
 * @p: The pointer to read, prior to dereferencing
 *
 * This is a simple wrapper around rcu_dereference_check().
 */
#define rcu_dereference(p) rcu_dereference_check(p, 0)

/**
 * rcu_dereference_bh() - fetch an RCU-bh-protected pointer for dereferencing
 * @p: The pointer to read, prior to dereferencing
 *
 * Makes rcu_dereference_check() do the dirty work.
 */
#define rcu_dereference_bh(p) rcu_dereference_bh_check(p, 0)

/**
 * rcu_dereference_sched() - fetch RCU-sched-protected pointer for dereferencing
 * @p: The pointer to read, prior to dereferencing
 *
 * Makes rcu_dereference_check() do the dirty work.
 */
#define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0)

/**
 * rcu_pointer_handoff() - Hand off a pointer from RCU to other mechanism
 * @p: The pointer to hand off
 *
 * This is simply an identity function, but it documents where a pointer
 * is handed off from RCU to some other synchronization mechanism, for
 * example, reference counting or locking.  In C11, it would map to
 * kill_dependency().  It could be used as follows:
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	long_lived = is_long_lived(p);
 *	if (long_lived) {
 *		if (!atomic_inc_not_zero(p->refcnt))
 *			long_lived = false;
 *		else
 *			p = rcu_pointer_handoff(p);
 *	}
 *	rcu_read_unlock();
 */
#define rcu_pointer_handoff(p) (p)

| 820 | /** |
Paul E. McKenney | ca5ecdd | 2010-04-28 14:39:09 -0700 | [diff] [blame] | 821 | * rcu_read_lock() - mark the beginning of an RCU read-side critical section |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 822 | * |
Paul E. McKenney | 9b06e81 | 2005-05-01 08:59:04 -0700 | [diff] [blame] | 823 | * When synchronize_rcu() is invoked on one CPU while other CPUs |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 824 | * are within RCU read-side critical sections, then the |
Paul E. McKenney | 9b06e81 | 2005-05-01 08:59:04 -0700 | [diff] [blame] | 825 | * synchronize_rcu() is guaranteed to block until after all the other |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 826 | * CPUs exit their critical sections. Similarly, if call_rcu() is invoked |
| 827 | * on one CPU while other CPUs are within RCU read-side critical |
| 828 | * sections, invocation of the corresponding RCU callback is deferred |
| 829 | * until after the all the other CPUs exit their critical sections. |
| 830 | * |
| 831 | * Note, however, that RCU callbacks are permitted to run concurrently |
Paul E. McKenney | 77d8485 | 2010-07-08 17:38:59 -0700 | [diff] [blame] | 832 | * with new RCU read-side critical sections. One way that this can happen |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 833 | * is via the following sequence of events: (1) CPU 0 enters an RCU |
| 834 | * read-side critical section, (2) CPU 1 invokes call_rcu() to register |
| 835 | * an RCU callback, (3) CPU 0 exits the RCU read-side critical section, |
| 836 | * (4) CPU 2 enters a RCU read-side critical section, (5) the RCU |
| 837 | * callback is invoked. This is legal, because the RCU read-side critical |
| 838 | * section that was running concurrently with the call_rcu() (and which |
| 839 | * therefore might be referencing something that the corresponding RCU |
| 840 | * callback would free up) has completed before the corresponding |
| 841 | * RCU callback is invoked. |
| 842 | * |
| 843 | * RCU read-side critical sections may be nested. Any deferred actions |
| 844 | * will be deferred until the outermost RCU read-side critical section |
| 845 | * completes. |
| 846 | * |
Paul E. McKenney | 9079fd7 | 2010-08-07 21:59:54 -0700 | [diff] [blame] | 847 | * You can avoid reading and understanding the next paragraph by |
| 848 | * following this rule: don't put anything in an rcu_read_lock() RCU |
| 849 | * read-side critical section that would block in a !PREEMPT kernel. |
| 850 | * But if you want the full story, read on! |
| 851 | * |
Paul E. McKenney | ab74fdf | 2014-05-04 15:41:21 -0700 | [diff] [blame] | 852 | * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU), |
| 853 | * it is illegal to block while in an RCU read-side critical section. |
Pranith Kumar | 28f6569 | 2014-09-22 14:00:48 -0400 | [diff] [blame] | 854 | * In preemptible RCU implementations (PREEMPT_RCU) in CONFIG_PREEMPT |
Paul E. McKenney | ab74fdf | 2014-05-04 15:41:21 -0700 | [diff] [blame] | 855 | * kernel builds, RCU read-side critical sections may be preempted, |
| 856 | * but explicit blocking is illegal. Finally, in preemptible RCU |
| 857 | * implementations in real-time (with -rt patchset) kernel builds, RCU |
| 858 | * read-side critical sections may be preempted and they may also block, but |
| 859 | * only when acquiring spinlocks that are subject to priority inheritance. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 860 | */ |
Paul E. McKenney | bc33f24 | 2009-08-22 13:56:47 -0700 | [diff] [blame] | 861 | static inline void rcu_read_lock(void) |
| 862 | { |
| 863 | __rcu_read_lock(); |
| 864 | __acquire(RCU); |
Paul E. McKenney | d8ab29f | 2011-10-07 18:22:03 +0200 | [diff] [blame] | 865 | rcu_lock_acquire(&rcu_lock_map); |
Paul E. McKenney | f78f5b9 | 2015-06-18 15:50:02 -0700 | [diff] [blame] | 866 | RCU_LOCKDEP_WARN(!rcu_is_watching(), |
| 867 | "rcu_read_lock() used illegally while idle"); |
Paul E. McKenney | bc33f24 | 2009-08-22 13:56:47 -0700 | [diff] [blame] | 868 | } |
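
/*
 * A minimal reader-side sketch (illustrative only, not part of this
 * header): "struct foo", "gp", and do_something_with() are hypothetical.
 * The reader fetches the RCU-protected pointer via rcu_dereference() and
 * must not use that pointer after the matching rcu_read_unlock():
 *
 *	struct foo {
 *		int a;
 *	};
 *	struct foo __rcu *gp;
 *
 *	void reader(void)
 *	{
 *		struct foo *p;
 *
 *		rcu_read_lock();
 *		p = rcu_dereference(gp);
 *		if (p)
 *			do_something_with(p->a);
 *		rcu_read_unlock();
 *	}
 */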
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 869 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 870 | /* |
| 871 | * So where is rcu_write_lock()? It does not exist, as there is no |
| 872 | * way for writers to lock out RCU readers. This is a feature, not |
| 873 | * a bug -- this property is what provides RCU's performance benefits. |
| 874 | * Of course, writers must coordinate with each other. The normal |
| 875 | * spinlock primitives work well for this, but any other technique may be |
| 876 | * used as well. RCU does not care how the writers keep out of each |
| 877 | * other's way, as long as they do so. |
| 878 | */ |
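
/*
 * A typical update-side sketch (illustrative only): writers serialize
 * against each other with a spinlock, publish the new version with
 * rcu_assign_pointer(), and wait for pre-existing readers before freeing
 * the old version.  "struct foo", "gp", and "gp_lock" are hypothetical
 * (gp_lock would be defined with DEFINE_SPINLOCK()):
 *
 *	void update(int new_a)
 *	{
 *		struct foo *newp, *oldp;
 *
 *		newp = kmalloc(sizeof(*newp), GFP_KERNEL);
 *		if (!newp)
 *			return;
 *		newp->a = new_a;
 *		spin_lock(&gp_lock);
 *		oldp = rcu_dereference_protected(gp,
 *						 lockdep_is_held(&gp_lock));
 *		rcu_assign_pointer(gp, newp);
 *		spin_unlock(&gp_lock);
 *		synchronize_rcu();
 *		kfree(oldp);
 *	}
 */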
Paul E. McKenney | 3d76c08 | 2009-09-28 07:46:32 -0700 | [diff] [blame] | 879 | |
| 880 | /** |
Paul E. McKenney | ca5ecdd | 2010-04-28 14:39:09 -0700 | [diff] [blame] | 881 | * rcu_read_unlock() - marks the end of an RCU read-side critical section |
Paul E. McKenney | 3d76c08 | 2009-09-28 07:46:32 -0700 | [diff] [blame] | 882 | * |
Paul E. McKenney | f27bc48 | 2014-05-04 15:38:38 -0700 | [diff] [blame] | 883 | * In most situations, rcu_read_unlock() is immune from deadlock. |
| 884 | * However, in kernels built with CONFIG_RCU_BOOST, rcu_read_unlock() |
| 885 | * is responsible for deboosting, which it does via rt_mutex_unlock(). |
| 886 | * Unfortunately, this function acquires the scheduler's runqueue and |
| 887 | * priority-inheritance spinlocks. This means that deadlock could result |
| 888 | * if the caller of rcu_read_unlock() already holds one of these locks or |
Oleg Nesterov | ce36f2f | 2014-09-28 23:44:21 +0200 | [diff] [blame] | 889 | * any lock that is ever acquired while holding them; or any lock which |
| 890 | * can be taken from interrupt context because rcu_boost()->rt_mutex_lock() |
| 891 | * does not disable irqs while taking ->wait_lock. |
Paul E. McKenney | f27bc48 | 2014-05-04 15:38:38 -0700 | [diff] [blame] | 892 | * |
| 893 | * That said, RCU readers are never priority boosted unless they were |
| 894 | * preempted. Therefore, one way to avoid deadlock is to make sure |
| 895 | * that preemption never happens within any RCU read-side critical |
| 896 | * section whose outermost rcu_read_unlock() is called with one of |
| 897 | * rt_mutex_unlock()'s locks held. Such preemption can be avoided in |
| 898 | * a number of ways, for example, by invoking preempt_disable() before |
| 899 | * the critical section's outermost rcu_read_lock(). |
| 900 | * |
| 901 | * Given that the set of locks acquired by rt_mutex_unlock() might change |
| 902 | * at any time, a somewhat more future-proofed approach is to make sure |
| 903 | * that preemption never happens within any RCU read-side critical |
| 904 | * section whose outermost rcu_read_unlock() is called with irqs disabled. |
| 905 | * This approach relies on the fact that rt_mutex_unlock() currently only |
| 906 | * acquires irq-disabled locks. |
| 907 | * |
| 908 | * The second of these two approaches is best in most situations; |
| 909 | * however, the first approach can also be useful, at least to those |
| 910 | * developers willing to keep abreast of the set of locks acquired by |
| 911 | * rt_mutex_unlock(). |
| 912 | * |
Paul E. McKenney | 3d76c08 | 2009-09-28 07:46:32 -0700 | [diff] [blame] | 913 | * See rcu_read_lock() for more information. |
| 914 | */ |
Paul E. McKenney | bc33f24 | 2009-08-22 13:56:47 -0700 | [diff] [blame] | 915 | static inline void rcu_read_unlock(void) |
| 916 | { |
Paul E. McKenney | f78f5b9 | 2015-06-18 15:50:02 -0700 | [diff] [blame] | 917 | RCU_LOCKDEP_WARN(!rcu_is_watching(), |
| 918 | "rcu_read_unlock() used illegally while idle"); |
Paul E. McKenney | bc33f24 | 2009-08-22 13:56:47 -0700 | [diff] [blame] | 919 | __release(RCU); |
| 920 | __rcu_read_unlock(); |
Paul E. McKenney | d24209bb | 2015-01-21 15:26:03 -0800 | [diff] [blame] | 921 | rcu_lock_release(&rcu_lock_map); /* Keep acq info for rls diags. */ |
Paul E. McKenney | bc33f24 | 2009-08-22 13:56:47 -0700 | [diff] [blame] | 922 | } |
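
/*
 * Sketch of the preempt_disable() approach described above (illustrative
 * only; "gp" and update_stats() are hypothetical).  Because preemption is
 * disabled across the whole read-side critical section, the reader can
 * never be preempted and therefore never priority boosted, so this
 * rcu_read_unlock() never needs to acquire rt_mutex_unlock()'s locks:
 *
 *	preempt_disable();
 *	rcu_read_lock();
 *	update_stats(rcu_dereference(gp));
 *	rcu_read_unlock();
 *	preempt_enable();
 */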
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 923 | |
| 924 | /** |
Paul E. McKenney | ca5ecdd | 2010-04-28 14:39:09 -0700 | [diff] [blame] | 925 | * rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 926 | * |
| 927 | * This is the equivalent of rcu_read_lock(), but to be used when updates |
Paul E. McKenney | ca5ecdd | 2010-04-28 14:39:09 -0700 | [diff] [blame] | 928 | * are being done using call_rcu_bh() or synchronize_rcu_bh(). Since |
| 929 | * both call_rcu_bh() and synchronize_rcu_bh() consider completion of a |
| 930 | * softirq handler to be a quiescent state, a process in an RCU read-side |
| 931 | * critical section must be protected by disabling softirqs. Read-side |
| 932 | * critical sections in interrupt context can use just rcu_read_lock(), |
| 933 | * though this should at least be commented to avoid confusing people |
| 934 | * reading the code. |
Paul E. McKenney | 3842a08 | 2011-11-28 10:42:42 -0800 | [diff] [blame] | 935 | * |
| 936 | * Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh() |
| 937 | * must occur in the same context, for example, it is illegal to invoke |
| 938 | * rcu_read_unlock_bh() from one task if the matching rcu_read_lock_bh() |
| 939 | * was invoked from some other task. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 940 | */ |
Paul E. McKenney | bc33f24 | 2009-08-22 13:56:47 -0700 | [diff] [blame] | 941 | static inline void rcu_read_lock_bh(void) |
| 942 | { |
Paul E. McKenney | 6206ab9 | 2011-08-01 06:22:11 -0700 | [diff] [blame] | 943 | local_bh_disable(); |
Paul E. McKenney | bc33f24 | 2009-08-22 13:56:47 -0700 | [diff] [blame] | 944 | __acquire(RCU_BH); |
Paul E. McKenney | d8ab29f | 2011-10-07 18:22:03 +0200 | [diff] [blame] | 945 | rcu_lock_acquire(&rcu_bh_lock_map); |
Paul E. McKenney | f78f5b9 | 2015-06-18 15:50:02 -0700 | [diff] [blame] | 946 | RCU_LOCKDEP_WARN(!rcu_is_watching(), |
| 947 | "rcu_read_lock_bh() used illegally while idle"); |
Paul E. McKenney | bc33f24 | 2009-08-22 13:56:47 -0700 | [diff] [blame] | 948 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 949 | |
| 950 | /* |
| 951 | * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section |
| 952 | * |
| 953 | * See rcu_read_lock_bh() for more information. |
| 954 | */ |
Paul E. McKenney | bc33f24 | 2009-08-22 13:56:47 -0700 | [diff] [blame] | 955 | static inline void rcu_read_unlock_bh(void) |
| 956 | { |
Paul E. McKenney | f78f5b9 | 2015-06-18 15:50:02 -0700 | [diff] [blame] | 957 | RCU_LOCKDEP_WARN(!rcu_is_watching(), |
| 958 | "rcu_read_unlock_bh() used illegally while idle"); |
Paul E. McKenney | d8ab29f | 2011-10-07 18:22:03 +0200 | [diff] [blame] | 959 | rcu_lock_release(&rcu_bh_lock_map); |
Paul E. McKenney | bc33f24 | 2009-08-22 13:56:47 -0700 | [diff] [blame] | 960 | __release(RCU_BH); |
Paul E. McKenney | 6206ab9 | 2011-08-01 06:22:11 -0700 | [diff] [blame] | 961 | local_bh_enable(); |
Paul E. McKenney | bc33f24 | 2009-08-22 13:56:47 -0700 | [diff] [blame] | 962 | } |
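
/*
 * A minimal RCU-bh reader sketch (illustrative only; "gp", "p", and
 * do_something_with() are hypothetical).  The matching updater would use
 * call_rcu_bh() or synchronize_rcu_bh():
 *
 *	rcu_read_lock_bh();
 *	p = rcu_dereference_bh(gp);
 *	if (p)
 *		do_something_with(p->a);
 *	rcu_read_unlock_bh();
 */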
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 963 | |
| 964 | /** |
Paul E. McKenney | ca5ecdd | 2010-04-28 14:39:09 -0700 | [diff] [blame] | 965 | * rcu_read_lock_sched() - mark the beginning of an RCU-sched critical section |
Mathieu Desnoyers | 1c50b72 | 2008-09-29 11:06:46 -0400 | [diff] [blame] | 966 | * |
Paul E. McKenney | ca5ecdd | 2010-04-28 14:39:09 -0700 | [diff] [blame] | 967 | * This is the equivalent of rcu_read_lock(), but to be used when updates |
| 968 | * are being done using call_rcu_sched() or synchronize_rcu_sched(). |
| 969 | * Read-side critical sections can also be introduced by anything that |
| 970 | * disables preemption, including local_irq_disable() and friends. |
Paul E. McKenney | 3842a08 | 2011-11-28 10:42:42 -0800 | [diff] [blame] | 971 | * |
| 972 | * Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched() |
| 973 | * must occur in the same context, for example, it is illegal to invoke |
| 974 | * rcu_read_unlock_sched() from process context if the matching |
| 975 | * rcu_read_lock_sched() was invoked from an NMI handler. |
Mathieu Desnoyers | 1c50b72 | 2008-09-29 11:06:46 -0400 | [diff] [blame] | 976 | */ |
Paul E. McKenney | d6714c2 | 2009-08-22 13:56:46 -0700 | [diff] [blame] | 977 | static inline void rcu_read_lock_sched(void) |
| 978 | { |
| 979 | preempt_disable(); |
Paul E. McKenney | bc33f24 | 2009-08-22 13:56:47 -0700 | [diff] [blame] | 980 | __acquire(RCU_SCHED); |
Paul E. McKenney | d8ab29f | 2011-10-07 18:22:03 +0200 | [diff] [blame] | 981 | rcu_lock_acquire(&rcu_sched_lock_map); |
Paul E. McKenney | f78f5b9 | 2015-06-18 15:50:02 -0700 | [diff] [blame] | 982 | RCU_LOCKDEP_WARN(!rcu_is_watching(), |
| 983 | "rcu_read_lock_sched() used illegally while idle"); |
Paul E. McKenney | d6714c2 | 2009-08-22 13:56:46 -0700 | [diff] [blame] | 984 | } |
Paul E. McKenney | 1eba8f8 | 2009-09-23 09:50:42 -0700 | [diff] [blame] | 985 | |
| 986 | /* Used by tracing, cannot be traced, cannot invoke lockdep. */ |
Paul E. McKenney | 7c614d6 | 2009-08-24 09:42:00 -0700 | [diff] [blame] | 987 | static inline notrace void rcu_read_lock_sched_notrace(void) |
Paul E. McKenney | d6714c2 | 2009-08-22 13:56:46 -0700 | [diff] [blame] | 988 | { |
| 989 | preempt_disable_notrace(); |
Paul E. McKenney | bc33f24 | 2009-08-22 13:56:47 -0700 | [diff] [blame] | 990 | __acquire(RCU_SCHED); |
Paul E. McKenney | d6714c2 | 2009-08-22 13:56:46 -0700 | [diff] [blame] | 991 | } |
Mathieu Desnoyers | 1c50b72 | 2008-09-29 11:06:46 -0400 | [diff] [blame] | 992 | |
| 993 | /* |
| 994 | * rcu_read_unlock_sched - marks the end of an RCU-sched critical section |
| 995 | * |
| 996 | * See rcu_read_lock_sched() for more information. |
| 997 | */ |
Paul E. McKenney | d6714c2 | 2009-08-22 13:56:46 -0700 | [diff] [blame] | 998 | static inline void rcu_read_unlock_sched(void) |
| 999 | { |
Paul E. McKenney | f78f5b9 | 2015-06-18 15:50:02 -0700 | [diff] [blame] | 1000 | RCU_LOCKDEP_WARN(!rcu_is_watching(), |
| 1001 | "rcu_read_unlock_sched() used illegally while idle"); |
Paul E. McKenney | d8ab29f | 2011-10-07 18:22:03 +0200 | [diff] [blame] | 1002 | rcu_lock_release(&rcu_sched_lock_map); |
Paul E. McKenney | bc33f24 | 2009-08-22 13:56:47 -0700 | [diff] [blame] | 1003 | __release(RCU_SCHED); |
Paul E. McKenney | d6714c2 | 2009-08-22 13:56:46 -0700 | [diff] [blame] | 1004 | preempt_enable(); |
| 1005 | } |
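
/*
 * A minimal RCU-sched reader sketch (illustrative only; "gp", "p", and
 * do_something_with() are hypothetical), paired with an updater that
 * uses call_rcu_sched():
 *
 *	rcu_read_lock_sched();
 *	p = rcu_dereference_sched(gp);
 *	if (p)
 *		do_something_with(p->a);
 *	rcu_read_unlock_sched();
 */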
Paul E. McKenney | 1eba8f8 | 2009-09-23 09:50:42 -0700 | [diff] [blame] | 1006 | |
| 1007 | /* Used by tracing, cannot be traced, cannot invoke lockdep. */ |
Paul E. McKenney | 7c614d6 | 2009-08-24 09:42:00 -0700 | [diff] [blame] | 1008 | static inline notrace void rcu_read_unlock_sched_notrace(void) |
Paul E. McKenney | d6714c2 | 2009-08-22 13:56:46 -0700 | [diff] [blame] | 1009 | { |
Paul E. McKenney | bc33f24 | 2009-08-22 13:56:47 -0700 | [diff] [blame] | 1010 | __release(RCU_SCHED); |
Paul E. McKenney | d6714c2 | 2009-08-22 13:56:46 -0700 | [diff] [blame] | 1011 | preempt_enable_notrace(); |
| 1012 | } |
Mathieu Desnoyers | 1c50b72 | 2008-09-29 11:06:46 -0400 | [diff] [blame] | 1013 | |
Mathieu Desnoyers | 1c50b72 | 2008-09-29 11:06:46 -0400 | [diff] [blame] | 1014 | /** |
Paul E. McKenney | ca5ecdd | 2010-04-28 14:39:09 -0700 | [diff] [blame] | 1015 | * RCU_INIT_POINTER() - initialize an RCU protected pointer |
| 1016 | * |
Paul E. McKenney | 6846c0c | 2011-07-31 22:33:02 -0700 | [diff] [blame] | 1017 | * Initialize an RCU-protected pointer in special cases where readers |
| 1018 | * do not need ordering constraints on the CPU or the compiler. These |
| 1019 | * special cases are: |
| 1020 | * |
| 1021 | * 1. This use of RCU_INIT_POINTER() is NULLing out the pointer -or- |
| 1022 | * 2. The caller has taken whatever steps are required to prevent |
| 1023 | * RCU readers from concurrently accessing this pointer -or- |
| 1024 | * 3. The referenced data structure has already been exposed to |
| 1025 | * readers either at compile time or via rcu_assign_pointer() -and- |
| 1026 | * a. You have not made -any- reader-visible changes to |
| 1027 | * this structure since then -or- |
| 1028 | * b. It is OK for readers accessing this structure from its |
| 1029 | * new location to see the old state of the structure. (For |
| 1030 | * example, the changes were to statistical counters or to |
| 1031 | * other state where exact synchronization is not required.) |
| 1032 | * |
| 1033 | * Failure to follow these rules governing use of RCU_INIT_POINTER() will |
| 1034 | * result in impossible-to-diagnose memory corruption.  That is, the structures |
| 1035 | * will look fine in crash dumps, but any concurrent RCU readers might |
| 1036 | * see pre-initialization values of the referenced data structure.  So |
| 1037 | * please be very careful how you use RCU_INIT_POINTER()!!! |
| 1038 | * |
| 1039 | * If you are creating an RCU-protected linked structure that is accessed |
| 1040 | * by a single external-to-structure RCU-protected pointer, then you may |
| 1041 | * use RCU_INIT_POINTER() to initialize the internal RCU-protected |
| 1042 | * pointers, but you must use rcu_assign_pointer() to initialize the |
| 1043 | * external-to-structure pointer -after- you have completely initialized |
| 1044 | * the reader-accessible portions of the linked structure. |
Paul E. McKenney | 71a9b26 | 2014-03-31 13:13:02 -0700 | [diff] [blame] | 1045 | * |
| 1046 | * Note that unlike rcu_assign_pointer(), RCU_INIT_POINTER() provides no |
| 1047 | * ordering guarantees for either the CPU or the compiler. |
Paul E. McKenney | ca5ecdd | 2010-04-28 14:39:09 -0700 | [diff] [blame] | 1048 | */ |
| 1049 | #define RCU_INIT_POINTER(p, v) \ |
Paul E. McKenney | d1b88eb | 2012-05-16 15:42:30 -0700 | [diff] [blame] | 1050 | do { \ |
Pranith Kumar | 1a6c9b2 | 2014-09-25 14:03:34 -0400 | [diff] [blame] | 1051 | rcu_dereference_sparse(p, __rcu); \ |
Peter Zijlstra | 155d1d1 | 2015-06-02 17:26:48 +0200 | [diff] [blame] | 1052 | WRITE_ONCE(p, RCU_INITIALIZER(v)); \ |
Paul E. McKenney | d1b88eb | 2012-05-16 15:42:30 -0700 | [diff] [blame] | 1053 | } while (0) |
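
/*
 * Sketch of the linked-structure pattern described above (illustrative
 * only; "struct foo", "gp", and "p" are hypothetical).  The internal
 * pointer may use RCU_INIT_POINTER() because the new structure is not
 * yet reachable by readers; the final, reader-visible publication must
 * use rcu_assign_pointer() to provide the needed ordering:
 *
 *	struct foo {
 *		struct foo __rcu *next;
 *		int a;
 *	};
 *	struct foo __rcu *gp;
 *
 *	struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL);
 *
 *	if (p) {
 *		p->a = 1;
 *		RCU_INIT_POINTER(p->next, NULL);
 *		rcu_assign_pointer(gp, p);
 *	}
 */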
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1054 | |
Paul E. McKenney | 172708d | 2012-05-16 15:23:45 -0700 | [diff] [blame] | 1055 | /** |
| 1056 | * RCU_POINTER_INITIALIZER() - statically initialize an RCU protected pointer |
| 1057 | * |
| 1058 | * GCC-style initialization for an RCU-protected pointer in a structure field. |
| 1059 | */ |
| 1060 | #define RCU_POINTER_INITIALIZER(p, v) \ |
Paul E. McKenney | 462225ae | 2013-11-11 09:59:34 -0800 | [diff] [blame] | 1061 | .p = RCU_INITIALIZER(v) |
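
/*
 * Example use of RCU_POINTER_INITIALIZER() (illustrative only;
 * "struct foo", "struct foo_holder", and "default_foo" are hypothetical):
 *
 *	struct foo_holder {
 *		struct foo __rcu *cur;
 *	};
 *
 *	static struct foo default_foo;
 *	static struct foo_holder holder = {
 *		RCU_POINTER_INITIALIZER(cur, &default_foo),
 *	};
 */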
Lai Jiangshan | 9ab1544 | 2011-03-18 11:15:47 +0800 | [diff] [blame] | 1062 | |
Jan Engelhardt | d8169d4 | 2012-04-19 11:44:39 -0700 | [diff] [blame] | 1063 | /* |
| 1064 | * Does the specified offset indicate that the corresponding rcu_head |
| 1065 | * structure can be handled by kfree_rcu()? |
| 1066 | */ |
| 1067 | #define __is_kfree_rcu_offset(offset) ((offset) < 4096) |
| 1068 | |
| 1069 | /* |
| 1070 | * Helper macro for kfree_rcu() to prevent argument-expansion eyestrain. |
| 1071 | */ |
| 1072 | #define __kfree_rcu(head, offset) \ |
| 1073 | do { \ |
| 1074 | BUILD_BUG_ON(!__is_kfree_rcu_offset(offset)); \ |
Boqun Feng | b6a4ae7 | 2015-07-29 13:29:38 +0800 | [diff] [blame] | 1075 | kfree_call_rcu(head, (rcu_callback_t)(unsigned long)(offset)); \ |
Jan Engelhardt | d8169d4 | 2012-04-19 11:44:39 -0700 | [diff] [blame] | 1076 | } while (0) |
| 1077 | |
Lai Jiangshan | 9ab1544 | 2011-03-18 11:15:47 +0800 | [diff] [blame] | 1078 | /** |
| 1079 | * kfree_rcu() - kfree an object after a grace period. |
| 1080 | * @ptr: pointer to kfree |
| 1081 | * @rcu_head: the name of the struct rcu_head within the type of @ptr. |
| 1082 | * |
| 1083 | * Many RCU callback functions just call kfree() on the base structure. |
| 1084 | * These functions are trivial, but their size adds up, and furthermore |
| 1085 | * when they are used in a kernel module, that module must invoke the |
| 1086 | * high-latency rcu_barrier() function at module-unload time. |
| 1087 | * |
| 1088 | * The kfree_rcu() function handles this issue. Rather than encoding a |
| 1089 | * function address in the embedded rcu_head structure, kfree_rcu() instead |
| 1090 | * encodes the offset of the rcu_head structure within the base structure. |
| 1091 | * Because the functions are not allowed in the low-order 4096 bytes of |
| 1092 | * kernel virtual memory, offsets up to 4095 bytes can be accommodated. |
| 1093 | * If the offset is larger than 4095 bytes, a compile-time error will |
| 1094 | * be generated in __kfree_rcu(). If this error is triggered, you can |
| 1095 | * either fall back to use of call_rcu() or rearrange the structure to |
| 1096 | * position the rcu_head structure into the first 4096 bytes. |
| 1097 | * |
| 1098 | * Note that the allowable offset might decrease in the future, for example, |
| 1099 | * to allow something like kmem_cache_free_rcu(). |
Jan Engelhardt | d8169d4 | 2012-04-19 11:44:39 -0700 | [diff] [blame] | 1100 | * |
| 1101 | * The BUILD_BUG_ON check must not involve any function calls, hence the |
| 1102 | * checks are done in macros here. |
Lai Jiangshan | 9ab1544 | 2011-03-18 11:15:47 +0800 | [diff] [blame] | 1103 | */ |
| 1104 | #define kfree_rcu(ptr, rcu_head) \ |
| 1105 | __kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head)) |
| 1106 | |
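
/*
 * Example use of kfree_rcu() (illustrative only; "struct foo", "gp", and
 * "gp_lock" are hypothetical).  The rcu_head is embedded in the structure
 * being freed, and its field name is passed as the second argument:
 *
 *	struct foo {
 *		int a;
 *		struct rcu_head rcu;
 *	};
 *
 *	oldp = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
 *	rcu_assign_pointer(gp, newp);
 *	kfree_rcu(oldp, rcu);
 *
 * This has the same effect as passing &oldp->rcu to call_rcu() with a
 * callback that simply does kfree(), but without having to write that
 * callback.
 */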
Paul E. McKenney | 3382adb | 2015-03-04 15:41:24 -0800 | [diff] [blame] | 1107 | #ifdef CONFIG_TINY_RCU |
Thomas Gleixner | c1ad348 | 2015-04-14 21:08:58 +0000 | [diff] [blame] | 1108 | static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt) |
Paul E. McKenney | ffa83fb | 2013-11-17 19:27:16 -0800 | [diff] [blame] | 1109 | { |
Thomas Gleixner | c1ad348 | 2015-04-14 21:08:58 +0000 | [diff] [blame] | 1110 | *nextevt = KTIME_MAX; |
Paul E. McKenney | ffa83fb | 2013-11-17 19:27:16 -0800 | [diff] [blame] | 1111 | return 0; |
| 1112 | } |
Paul E. McKenney | 3382adb | 2015-03-04 15:41:24 -0800 | [diff] [blame] | 1113 | #endif /* #ifdef CONFIG_TINY_RCU */ |
Paul E. McKenney | ffa83fb | 2013-11-17 19:27:16 -0800 | [diff] [blame] | 1114 | |
Paul E. McKenney | 2f33b51 | 2013-11-17 18:25:48 -0800 | [diff] [blame] | 1115 | #if defined(CONFIG_RCU_NOCB_CPU_ALL) |
| 1116 | static inline bool rcu_is_nocb_cpu(int cpu) { return true; } |
| 1117 | #elif defined(CONFIG_RCU_NOCB_CPU) |
Teodora Baluta | 584dc4c | 2013-11-11 17:11:23 +0200 | [diff] [blame] | 1118 | bool rcu_is_nocb_cpu(int cpu); |
Frederic Weisbecker | d1e43fa | 2013-03-26 23:47:24 +0100 | [diff] [blame] | 1119 | #else |
| 1120 | static inline bool rcu_is_nocb_cpu(int cpu) { return false; } |
Paul E. McKenney | 2f33b51 | 2013-11-17 18:25:48 -0800 | [diff] [blame] | 1121 | #endif |
Frederic Weisbecker | d1e43fa | 2013-03-26 23:47:24 +0100 | [diff] [blame] | 1122 | |
| 1123 | |
Paul E. McKenney | 0edd1b1 | 2013-06-21 16:37:22 -0700 | [diff] [blame] | 1124 | /* Only for use by adaptive-ticks code. */ |
| 1125 | #ifdef CONFIG_NO_HZ_FULL_SYSIDLE |
Teodora Baluta | 584dc4c | 2013-11-11 17:11:23 +0200 | [diff] [blame] | 1126 | bool rcu_sys_is_idle(void); |
| 1127 | void rcu_sysidle_force_exit(void); |
Paul E. McKenney | 0edd1b1 | 2013-06-21 16:37:22 -0700 | [diff] [blame] | 1128 | #else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */ |
| 1129 | |
| 1130 | static inline bool rcu_sys_is_idle(void) |
| 1131 | { |
| 1132 | return false; |
| 1133 | } |
| 1134 | |
| 1135 | static inline void rcu_sysidle_force_exit(void) |
| 1136 | { |
| 1137 | } |
| 1138 | |
| 1139 | #endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */ |
| 1140 | |
| 1141 | |
Paul E. McKenney | 274529b | 2016-03-21 19:46:04 -0700 | [diff] [blame] | 1142 | /* |
| 1143 | * Dump the ftrace buffer, but only one time per callsite per boot. |
| 1144 | */ |
| 1145 | #define rcu_ftrace_dump(oops_dump_mode) \ |
| 1146 | do { \ |
| 1147 | static atomic_t ___rfd_beenhere = ATOMIC_INIT(0); \ |
| 1148 | \ |
| 1149 | if (!atomic_read(&___rfd_beenhere) && \ |
| 1150 | !atomic_xchg(&___rfd_beenhere, 1)) \ |
| 1151 | ftrace_dump(oops_dump_mode); \ |
| 1152 | } while (0) |
| 1153 | |
| 1154 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1155 | #endif /* __LINUX_RCUPDATE_H */ |