/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2001
 *
 * Author: Dipankar Sarma <dipankar@in.ibm.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For a detailed explanation of the Read-Copy Update mechanism see -
 *	http://lse.sourceforge.net/locking/rcupdate.html
 *
 */

#ifndef __LINUX_RCUPDATE_H
#define __LINUX_RCUPDATE_H

#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>
#include <linux/lockdep.h>
#include <linux/completion.h>

#ifdef CONFIG_RCU_TORTURE_TEST
extern int rcutorture_runnable; /* for sysctl */
#endif /* #ifdef CONFIG_RCU_TORTURE_TEST */

/**
 * struct rcu_head - callback structure for use with RCU
 * @next: next update request in a list
 * @func: actual update function to call after the grace period.
 */
struct rcu_head {
	struct rcu_head *next;
	void (*func)(struct rcu_head *head);
};
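
/*
 * Example (illustrative sketch, not from the original header; struct foo
 * and foo_reclaim() are hypothetical): an RCU-protected structure embeds
 * a struct rcu_head so that call_rcu() can defer its freeing until a
 * grace period has elapsed:
 *
 *	struct foo {
 *		int a;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *head)
 *	{
 *		struct foo *fp = container_of(head, struct foo, rcu);
 *
 *		kfree(fp);
 *	}
 *
 * The callback receives a pointer to the embedded rcu_head and uses
 * container_of() to recover the enclosing structure.
 */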

/* Exported common interfaces */
extern void synchronize_rcu_bh(void);
extern void synchronize_sched(void);
extern void rcu_barrier(void);
extern void rcu_barrier_bh(void);
extern void rcu_barrier_sched(void);
extern void synchronize_sched_expedited(void);
extern int sched_expedited_torture_stats(char *page);

/* Internal to kernel */
extern void rcu_init(void);
extern int rcu_scheduler_active;
extern void rcu_scheduler_starting(void);

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
#include <linux/rcutree.h>
#elif defined(CONFIG_TINY_RCU)
#include <linux/rcutiny.h>
#else
#error "Unknown RCU implementation specified to kernel configuration"
#endif

#define RCU_HEAD_INIT	{ .next = NULL, .func = NULL }
#define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT
#define INIT_RCU_HEAD(ptr) do { \
	(ptr)->next = NULL; (ptr)->func = NULL; \
} while (0)
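
/*
 * Example (illustrative sketch; my_static_head, my_head, and hp are
 * hypothetical): static and dynamic initialization of a struct rcu_head
 * using the helpers above:
 *
 *	static struct rcu_head my_static_head = RCU_HEAD_INIT;
 *	RCU_HEAD(my_head);
 *
 *	struct rcu_head *hp = kmalloc(sizeof(*hp), GFP_KERNEL);
 *
 *	if (hp)
 *		INIT_RCU_HEAD(hp);
 */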

#ifdef CONFIG_DEBUG_LOCK_ALLOC

extern struct lockdep_map rcu_lock_map;
# define rcu_read_acquire() \
		lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
# define rcu_read_release()	lock_release(&rcu_lock_map, 1, _THIS_IP_)

extern struct lockdep_map rcu_bh_lock_map;
# define rcu_read_acquire_bh() \
		lock_acquire(&rcu_bh_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
# define rcu_read_release_bh()	lock_release(&rcu_bh_lock_map, 1, _THIS_IP_)

extern struct lockdep_map rcu_sched_lock_map;
# define rcu_read_acquire_sched() \
		lock_acquire(&rcu_sched_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
# define rcu_read_release_sched() \
		lock_release(&rcu_sched_lock_map, 1, _THIS_IP_)

/**
 * rcu_read_lock_held - might we be in an RCU read-side critical section?
 *
 * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff we
 * are in an RCU read-side critical section.  In the absence of
 * CONFIG_PROVE_LOCKING, this assumes we are in an RCU read-side critical
 * section unless it can prove otherwise.
 */
static inline int rcu_read_lock_held(void)
{
	if (debug_locks)
		return lock_is_held(&rcu_lock_map);
	return 1;
}

/**
 * rcu_read_lock_bh_held - might we be in an RCU-bh read-side critical section?
 *
 * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff we
 * are in an RCU-bh read-side critical section.  In the absence of
 * CONFIG_PROVE_LOCKING, this assumes we are in an RCU-bh read-side critical
 * section unless it can prove otherwise.
 */
static inline int rcu_read_lock_bh_held(void)
{
	if (debug_locks)
		return lock_is_held(&rcu_bh_lock_map);
	return 1;
}

/**
 * rcu_read_lock_sched_held - might we be in an RCU-sched read-side critical section?
 *
 * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff we
 * are in an RCU-sched read-side critical section.  In the absence of
 * CONFIG_PROVE_LOCKING, this assumes we are in an RCU-sched read-side
 * critical section unless it can prove otherwise.  Note that disabling
 * of preemption (including disabling irqs) counts as an RCU-sched
 * read-side critical section.
 */
static inline int rcu_read_lock_sched_held(void)
{
	int lockdep_opinion = 0;

	if (debug_locks)
		lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
	return lockdep_opinion || preempt_count() != 0 || !rcu_scheduler_active;
}

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

# define rcu_read_acquire()		do { } while (0)
# define rcu_read_release()		do { } while (0)
# define rcu_read_acquire_bh()		do { } while (0)
# define rcu_read_release_bh()		do { } while (0)
# define rcu_read_acquire_sched()	do { } while (0)
# define rcu_read_release_sched()	do { } while (0)

static inline int rcu_read_lock_held(void)
{
	return 1;
}

static inline int rcu_read_lock_bh_held(void)
{
	return 1;
}

static inline int rcu_read_lock_sched_held(void)
{
	return preempt_count() != 0 || !rcu_scheduler_active;
}

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
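
/*
 * Example (illustrative sketch, not from the original header): the *_held()
 * predicates are heuristics intended for debug assertions, for instance:
 *
 *	WARN_ON_ONCE(!rcu_read_lock_held());
 *
 * Because they return 1 whenever lockdep is unavailable or disabled, a
 * zero return is strong evidence of a bug, while a nonzero return proves
 * nothing.
 */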

#ifdef CONFIG_PROVE_RCU

/**
 * rcu_dereference_check - rcu_dereference with debug checking
 *
 * Do an rcu_dereference(), but check that the context is correct.
 * For example, use rcu_dereference_check(gp, rcu_read_lock_held()) to
 * ensure that the rcu_dereference_check() executes within an RCU
 * read-side critical section.  It is also possible to check for
 * locks being held, for example, by using lockdep_is_held().
 */
#define rcu_dereference_check(p, c) \
	({ \
		if (debug_locks && !(c)) \
			lockdep_rcu_dereference(__FILE__, __LINE__); \
		rcu_dereference_raw(p); \
	})

#else /* #ifdef CONFIG_PROVE_RCU */

#define rcu_dereference_check(p, c)	rcu_dereference_raw(p)

#endif /* #else #ifdef CONFIG_PROVE_RCU */
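
/*
 * Example (illustrative sketch; gp and my_lock are hypothetical): a pointer
 * that may legitimately be dereferenced either under rcu_read_lock() or
 * with my_lock held can be fetched as:
 *
 *	p = rcu_dereference_check(gp,
 *				  rcu_read_lock_held() ||
 *				  lockdep_is_held(&my_lock));
 *
 * lockdep then complains only when neither condition can be shown to hold.
 */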

/**
 * rcu_read_lock - mark the beginning of an RCU read-side critical section.
 *
 * When synchronize_rcu() is invoked on one CPU while other CPUs
 * are within RCU read-side critical sections, then the
 * synchronize_rcu() is guaranteed to block until after all the other
 * CPUs exit their critical sections.  Similarly, if call_rcu() is invoked
 * on one CPU while other CPUs are within RCU read-side critical
 * sections, invocation of the corresponding RCU callback is deferred
 * until after all the other CPUs exit their critical sections.
 *
 * Note, however, that RCU callbacks are permitted to run concurrently
 * with RCU read-side critical sections.  One way that this can happen
 * is via the following sequence of events: (1) CPU 0 enters an RCU
 * read-side critical section, (2) CPU 1 invokes call_rcu() to register
 * an RCU callback, (3) CPU 0 exits the RCU read-side critical section,
 * (4) CPU 2 enters an RCU read-side critical section, (5) the RCU
 * callback is invoked.  This is legal, because the RCU read-side critical
 * section that was running concurrently with the call_rcu() (and which
 * therefore might be referencing something that the corresponding RCU
 * callback would free up) has completed before the corresponding
 * RCU callback is invoked.
 *
 * RCU read-side critical sections may be nested.  Any deferred actions
 * will be deferred until the outermost RCU read-side critical section
 * completes.
 *
 * It is illegal to block while in an RCU read-side critical section.
 */
static inline void rcu_read_lock(void)
{
	__rcu_read_lock();
	__acquire(RCU);
	rcu_read_acquire();
}

/*
 * So where is rcu_write_lock()?  It does not exist, as there is no
 * way for writers to lock out RCU readers.  This is a feature, not
 * a bug -- this property is what provides RCU's performance benefits.
 * Of course, writers must coordinate with each other.  The normal
 * spinlock primitives work well for this, but any other technique may be
 * used as well.  RCU does not care how the writers keep out of each
 * other's way, as long as they do so.
 */

/**
 * rcu_read_unlock - marks the end of an RCU read-side critical section.
 *
 * See rcu_read_lock() for more information.
 */
static inline void rcu_read_unlock(void)
{
	rcu_read_release();
	__release(RCU);
	__rcu_read_unlock();
}
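
/*
 * Example (illustrative sketch; gp, struct foo, and do_something_with()
 * are hypothetical): a minimal RCU reader:
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	if (p)
 *		do_something_with(p->a);
 *	rcu_read_unlock();
 *
 * The pointer returned by rcu_dereference() must not be used after
 * rcu_read_unlock(), because the grace period may end at any time
 * thereafter.
 */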

/**
 * rcu_read_lock_bh - mark the beginning of a softirq-only RCU critical section
 *
 * This is equivalent to rcu_read_lock(), but is to be used when updates
 * are being done using call_rcu_bh().  Since call_rcu_bh() callbacks
 * consider completion of a softirq handler to be a quiescent state,
 * a process-context RCU read-side critical section must be protected
 * by disabling softirqs.  Read-side critical sections in interrupt
 * context can use just rcu_read_lock().
 */
static inline void rcu_read_lock_bh(void)
{
	__rcu_read_lock_bh();
	__acquire(RCU_BH);
	rcu_read_acquire_bh();
}

/*
 * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section
 *
 * See rcu_read_lock_bh() for more information.
 */
static inline void rcu_read_unlock_bh(void)
{
	rcu_read_release_bh();
	__release(RCU_BH);
	__rcu_read_unlock_bh();
}
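
/*
 * Example (illustrative sketch; table, hash, and struct elem are
 * hypothetical): a process-context reader paired with call_rcu_bh()
 * updates uses the _bh variants:
 *
 *	rcu_read_lock_bh();
 *	e = rcu_dereference_bh(table[hash]);
 *	if (e)
 *		count = e->count;
 *	rcu_read_unlock_bh();
 */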

/**
 * rcu_read_lock_sched - mark the beginning of an RCU-classic critical section
 *
 * Should be used with either
 * - synchronize_sched()
 * or
 * - call_rcu_sched() and rcu_barrier_sched()
 * on the write-side to ensure proper synchronization.
 */
static inline void rcu_read_lock_sched(void)
{
	preempt_disable();
	__acquire(RCU_SCHED);
	rcu_read_acquire_sched();
}

/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
static inline notrace void rcu_read_lock_sched_notrace(void)
{
	preempt_disable_notrace();
	__acquire(RCU_SCHED);
}

/*
 * rcu_read_unlock_sched - marks the end of an RCU-classic critical section
 *
 * See rcu_read_lock_sched() for more information.
 */
static inline void rcu_read_unlock_sched(void)
{
	rcu_read_release_sched();
	__release(RCU_SCHED);
	preempt_enable();
}

/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
static inline notrace void rcu_read_unlock_sched_notrace(void)
{
	__release(RCU_SCHED);
	preempt_enable_notrace();
}
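
/*
 * Example (illustrative sketch; gp and use() are hypothetical):
 * rcu_read_lock_sched() amounts to disabling preemption around the
 * access and pairs with synchronize_sched() on the update side:
 *
 *	rcu_read_lock_sched();
 *	p = rcu_dereference_sched(gp);
 *	if (p)
 *		use(p);
 *	rcu_read_unlock_sched();
 *
 * The updater calls synchronize_sched() before freeing the old value.
 */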

/**
 * rcu_dereference_raw - fetch an RCU-protected pointer
 *
 * The caller must be within some flavor of RCU read-side critical
 * section, or must be otherwise preventing the pointer from changing,
 * for example, by holding an appropriate lock.  This pointer may later
 * be safely dereferenced.  It is the caller's responsibility to have
 * done the right thing, as this primitive does no checking of any kind.
 *
 * Inserts memory barriers on architectures that require them
 * (currently only the Alpha), and, more importantly, documents
 * exactly which pointers are protected by RCU.
 */
#define rcu_dereference_raw(p)	({ \
				typeof(p) _________p1 = ACCESS_ONCE(p); \
				smp_read_barrier_depends(); \
				(_________p1); \
				})
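
/*
 * Example (illustrative sketch; gp and my_lock are hypothetical):
 * rcu_dereference_raw() is for contexts where the caller guarantees
 * stability by other means, such as holding the update-side lock:
 *
 *	spin_lock(&my_lock);
 *	p = rcu_dereference_raw(gp);
 *	if (p)
 *		p->a++;
 *	spin_unlock(&my_lock);
 *
 * No lockdep checking is performed, so prefer rcu_dereference_check()
 * with an explicit condition where possible.
 */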

/**
 * rcu_dereference - fetch an RCU-protected pointer, checking for RCU
 *
 * Makes rcu_dereference_check() do the dirty work.
 */
#define rcu_dereference(p) \
	rcu_dereference_check(p, rcu_read_lock_held())

/**
 * rcu_dereference_bh - fetch an RCU-protected pointer, checking for RCU-bh
 *
 * Makes rcu_dereference_check() do the dirty work.
 */
#define rcu_dereference_bh(p) \
	rcu_dereference_check(p, rcu_read_lock_bh_held())

/**
 * rcu_dereference_sched - fetch RCU-protected pointer, checking for RCU-sched
 *
 * Makes rcu_dereference_check() do the dirty work.
 */
#define rcu_dereference_sched(p) \
	rcu_dereference_check(p, rcu_read_lock_sched_held())

/**
 * rcu_assign_pointer - assign (publicize) a pointer to a newly
 * initialized structure that will be dereferenced by RCU read-side
 * critical sections.  Returns the value assigned.
 *
 * Inserts memory barriers on architectures that require them
 * (pretty much all of them other than x86), and also prevents
 * the compiler from reordering the code that initializes the
 * structure after the pointer assignment.  More importantly, this
 * call documents which pointers will be dereferenced by RCU read-side
 * code.
 */
#define rcu_assign_pointer(p, v) \
	({ \
		if (!__builtin_constant_p(v) || \
		    ((v) != NULL)) \
			smp_wmb(); \
		(p) = (v); \
	})
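
/*
 * Example (illustrative sketch; gp and struct foo with fields a and b are
 * hypothetical): the canonical publish sequence initializes the structure
 * completely and only then makes it visible:
 *
 *	p = kmalloc(sizeof(*p), GFP_KERNEL);
 *	if (!p)
 *		return -ENOMEM;
 *	p->a = 1;
 *	p->b = 2;
 *	rcu_assign_pointer(gp, p);
 *
 * The embedded write barrier guarantees that readers who see the new gp
 * also see the initialized fields.
 */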

/* Infrastructure to implement the synchronize_*() primitives. */

struct rcu_synchronize {
	struct rcu_head head;
	struct completion completion;
};

extern void wakeme_after_rcu(struct rcu_head *head);
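
/*
 * Sketch of how a synchronize_*() primitive can be built from this
 * infrastructure (this mirrors the pattern used by the RCU
 * implementations; shown here for illustration only):
 *
 *	struct rcu_synchronize rcu;
 *
 *	init_completion(&rcu.completion);
 *	call_rcu(&rcu.head, wakeme_after_rcu);
 *	wait_for_completion(&rcu.completion);
 *
 * wakeme_after_rcu() completes the completion, so the caller blocks until
 * a full grace period has elapsed.
 */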

/**
 * call_rcu - Queue an RCU callback for invocation after a grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 *
 * The update function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 */
extern void call_rcu(struct rcu_head *head,
		     void (*func)(struct rcu_head *head));

/**
 * call_rcu_bh - Queue an RCU callback for invocation after a quicker grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 *
 * The update function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  call_rcu_bh() assumes
 * that the read-side critical sections end on completion of a softirq
 * handler.  This means that read-side critical sections in process
 * context must not be interrupted by softirqs.  This interface is to be
 * used when most of the read-side critical sections are in softirq context.
 * RCU read-side critical sections are delimited by:
 * - rcu_read_lock() and rcu_read_unlock(), if in interrupt context, OR
 * - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
 * These may be nested.
 */
extern void call_rcu_bh(struct rcu_head *head,
			void (*func)(struct rcu_head *head));
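
/*
 * Example (illustrative sketch; gp, my_lock, newp, and struct foo with an
 * embedded rcu_head named "rcu" are hypothetical): a typical update
 * unpublishes the old element, publishes its replacement, and defers
 * the free:
 *
 *	spin_lock(&my_lock);
 *	old = rcu_dereference_check(gp, lockdep_is_held(&my_lock));
 *	rcu_assign_pointer(gp, newp);
 *	spin_unlock(&my_lock);
 *	if (old)
 *		call_rcu(&old->rcu, foo_reclaim);
 *
 * where foo_reclaim() uses container_of() to recover and kfree() the
 * enclosing structure, as sketched near struct rcu_head above.
 */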

#endif /* __LINUX_RCUPDATE_H */