/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2001
 *
 * Author: Dipankar Sarma <dipankar@in.ibm.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		http://lse.sourceforge.net/locking/rcupdate.html
 *
 */

#ifndef __LINUX_RCUPDATE_H
#define __LINUX_RCUPDATE_H

#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>
#include <linux/lockdep.h>
#include <linux/completion.h>

#ifdef CONFIG_RCU_TORTURE_TEST
extern int rcutorture_runnable; /* for sysctl */
#endif /* #ifdef CONFIG_RCU_TORTURE_TEST */

/**
 * struct rcu_head - callback structure for use with RCU
 * @next: next update request in a list.
 * @func: actual update function to call after the grace period.
 */
struct rcu_head {
	struct rcu_head *next;
	void (*func)(struct rcu_head *head);
};

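/*
 * Example (illustrative sketch): an rcu_head is normally embedded in
 * the RCU-protected structure itself, so that the callback passed to
 * call_rcu() can recover the enclosing object with container_of().
 * The struct foo, its fields, and foo_reclaim() are hypothetical:
 *
 *	struct foo {
 *		int a;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rp)
 *	{
 *		struct foo *fp = container_of(rp, struct foo, rcu);
 *
 *		kfree(fp);
 *	}
 */
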
/* Exported common interfaces */
extern void synchronize_rcu_bh(void);
extern void synchronize_sched(void);
extern void rcu_barrier(void);
extern void rcu_barrier_bh(void);
extern void rcu_barrier_sched(void);
extern void synchronize_sched_expedited(void);
extern int sched_expedited_torture_stats(char *page);

/* Internal to kernel */
extern void rcu_init(void);
extern int rcu_scheduler_active;
extern void rcu_scheduler_starting(void);

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
#include <linux/rcutree.h>
#elif defined(CONFIG_TINY_RCU)
#include <linux/rcutiny.h>
#else
#error "Unknown RCU implementation specified to kernel configuration"
#endif

#define RCU_HEAD_INIT	{ .next = NULL, .func = NULL }
#define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT
#define INIT_RCU_HEAD(ptr) do { \
	(ptr)->next = NULL; (ptr)->func = NULL; \
} while (0)

#ifdef CONFIG_DEBUG_LOCK_ALLOC

extern struct lockdep_map rcu_lock_map;
# define rcu_read_acquire() \
		lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
# define rcu_read_release()	lock_release(&rcu_lock_map, 1, _THIS_IP_)

extern struct lockdep_map rcu_bh_lock_map;
# define rcu_read_acquire_bh() \
		lock_acquire(&rcu_bh_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
# define rcu_read_release_bh()	lock_release(&rcu_bh_lock_map, 1, _THIS_IP_)

extern struct lockdep_map rcu_sched_lock_map;
# define rcu_read_acquire_sched() \
		lock_acquire(&rcu_sched_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
# define rcu_read_release_sched() \
		lock_release(&rcu_sched_lock_map, 1, _THIS_IP_)

static inline int debug_lockdep_rcu_enabled(void)
{
	return likely(rcu_scheduler_active && debug_locks);
}

/**
 * rcu_read_lock_held - might we be in RCU read-side critical section?
 *
 * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in
 * an RCU read-side critical section.  In the absence of
 * CONFIG_PROVE_LOCKING, this assumes we are in an RCU read-side critical
 * section unless it can prove otherwise.
 *
 * Check rcu_scheduler_active to prevent false positives during boot.
 */
static inline int rcu_read_lock_held(void)
{
	if (!debug_lockdep_rcu_enabled())
		return 1;
	return lock_is_held(&rcu_lock_map);
}

/*
 * rcu_read_lock_bh_held() is defined out of line to avoid #include-file
 * hell.
 */
extern int rcu_read_lock_bh_held(void);

/**
 * rcu_read_lock_sched_held - might we be in RCU-sched read-side critical section?
 *
 * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in an
 * RCU-sched read-side critical section.  In the absence of
 * CONFIG_PROVE_LOCKING, this assumes we are in an RCU-sched read-side
 * critical section unless it can prove otherwise.  Note that disabling
 * of preemption (including disabling irqs) counts as an RCU-sched
 * read-side critical section.
 *
 * Check rcu_scheduler_active to prevent false positives during boot.
 */
#ifdef CONFIG_PREEMPT
static inline int rcu_read_lock_sched_held(void)
{
	int lockdep_opinion = 0;

	if (!debug_lockdep_rcu_enabled())
		return 1;
	if (debug_locks)
		lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
	return lockdep_opinion || preempt_count() != 0 || irqs_disabled();
}
#else /* #ifdef CONFIG_PREEMPT */
static inline int rcu_read_lock_sched_held(void)
{
	return 1;
}
#endif /* #else #ifdef CONFIG_PREEMPT */

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

# define rcu_read_acquire()		do { } while (0)
# define rcu_read_release()		do { } while (0)
# define rcu_read_acquire_bh()		do { } while (0)
# define rcu_read_release_bh()		do { } while (0)
# define rcu_read_acquire_sched()	do { } while (0)
# define rcu_read_release_sched()	do { } while (0)

static inline int rcu_read_lock_held(void)
{
	return 1;
}

static inline int rcu_read_lock_bh_held(void)
{
	return 1;
}

#ifdef CONFIG_PREEMPT
static inline int rcu_read_lock_sched_held(void)
{
	return !rcu_scheduler_active || preempt_count() != 0 || irqs_disabled();
}
#else /* #ifdef CONFIG_PREEMPT */
static inline int rcu_read_lock_sched_held(void)
{
	return 1;
}
#endif /* #else #ifdef CONFIG_PREEMPT */

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#ifdef CONFIG_PROVE_RCU

/**
 * rcu_dereference_check - rcu_dereference with debug checking
 *
 * Do an rcu_dereference(), but check that the context is correct.
 * For example, use rcu_dereference_check(gp, rcu_read_lock_held()) to
 * ensure that the rcu_dereference_check() executes within an RCU
 * read-side critical section.  It is also possible to check for
 * locks being held, for example, by using lockdep_is_held().
 */
#define rcu_dereference_check(p, c) \
	({ \
		if (debug_lockdep_rcu_enabled() && !(c)) \
			lockdep_rcu_dereference(__FILE__, __LINE__); \
		rcu_dereference_raw(p); \
	})

#else /* #ifdef CONFIG_PROVE_RCU */

#define rcu_dereference_check(p, c)	rcu_dereference_raw(p)

#endif /* #else #ifdef CONFIG_PROVE_RCU */

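/*
 * Example (illustrative sketch): a pointer that may legitimately be
 * fetched either from within an RCU read-side critical section or with
 * a hypothetical update-side spinlock "foo_lock" held can be accessed
 * as follows, keeping lockdep informed in both cases ("gp" is a
 * hypothetical global RCU-protected pointer):
 *
 *	p = rcu_dereference_check(gp, rcu_read_lock_held() ||
 *				      lockdep_is_held(&foo_lock));
 */
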
/**
 * rcu_read_lock - mark the beginning of an RCU read-side critical section.
 *
 * When synchronize_rcu() is invoked on one CPU while other CPUs
 * are within RCU read-side critical sections, then the
 * synchronize_rcu() is guaranteed to block until after all the other
 * CPUs exit their critical sections.  Similarly, if call_rcu() is invoked
 * on one CPU while other CPUs are within RCU read-side critical
 * sections, invocation of the corresponding RCU callback is deferred
 * until after all the other CPUs exit their critical sections.
 *
 * Note, however, that RCU callbacks are permitted to run concurrently
 * with RCU read-side critical sections.  One way that this can happen
 * is via the following sequence of events: (1) CPU 0 enters an RCU
 * read-side critical section, (2) CPU 1 invokes call_rcu() to register
 * an RCU callback, (3) CPU 0 exits the RCU read-side critical section,
 * (4) CPU 2 enters an RCU read-side critical section, (5) the RCU
 * callback is invoked.  This is legal, because the RCU read-side critical
 * section that was running concurrently with the call_rcu() (and which
 * therefore might be referencing something that the corresponding RCU
 * callback would free up) has completed before the corresponding
 * RCU callback is invoked.
 *
 * RCU read-side critical sections may be nested.  Any deferred actions
 * will be deferred until the outermost RCU read-side critical section
 * completes.
 *
 * It is illegal to block while in an RCU read-side critical section.
 */
static inline void rcu_read_lock(void)
{
	__rcu_read_lock();
	__acquire(RCU);
	rcu_read_acquire();
}

/*
 * So where is rcu_write_lock()?  It does not exist, as there is no
 * way for writers to lock out RCU readers.  This is a feature, not
 * a bug -- this property is what provides RCU's performance benefits.
 * Of course, writers must coordinate with each other.  The normal
 * spinlock primitives work well for this, but any other technique may be
 * used as well.  RCU does not care how the writers keep out of each
 * others' way, as long as they do so.
 */

/**
 * rcu_read_unlock - marks the end of an RCU read-side critical section.
 *
 * See rcu_read_lock() for more information.
 */
static inline void rcu_read_unlock(void)
{
	rcu_read_release();
	__release(RCU);
	__rcu_read_unlock();
}

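/*
 * Example of a reader (an illustrative sketch; "gp" is the hypothetical
 * RCU-protected global pointer from the rcu_dereference_check() example
 * above, and do_something_with() is likewise hypothetical):
 *
 *	struct foo *p;
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	if (p != NULL)
 *		do_something_with(p->a);
 *	rcu_read_unlock();
 *
 * The pointer "p" must not be dereferenced after rcu_read_unlock()
 * returns, because the structure it references might then be freed.
 */
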
/**
 * rcu_read_lock_bh - mark the beginning of a softirq-only RCU critical section
 *
 * This is the equivalent of rcu_read_lock(), but to be used when updates
 * are being done using call_rcu_bh().  Since call_rcu_bh() callbacks
 * consider completion of a softirq handler to be a quiescent state,
 * a process in an RCU read-side critical section must be protected by
 * disabling softirqs.  Read-side critical sections in interrupt context
 * can use just rcu_read_lock().
 */
static inline void rcu_read_lock_bh(void)
{
	__rcu_read_lock_bh();
	__acquire(RCU_BH);
	rcu_read_acquire_bh();
}

/*
 * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section
 *
 * See rcu_read_lock_bh() for more information.
 */
static inline void rcu_read_unlock_bh(void)
{
	rcu_read_release_bh();
	__release(RCU_BH);
	__rcu_read_unlock_bh();
}

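/*
 * Example (illustrative sketch): readers that run concurrently with
 * softirq handlers, as in much networking code, use the _bh variants
 * so that call_rcu_bh() grace periods are respected ("gp" and
 * do_something_with() are the hypothetical names used above):
 *
 *	rcu_read_lock_bh();
 *	p = rcu_dereference_bh(gp);
 *	if (p != NULL)
 *		do_something_with(p->a);
 *	rcu_read_unlock_bh();
 */
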
/**
 * rcu_read_lock_sched - mark the beginning of an RCU-classic critical section
 *
 * Should be used with either
 * - synchronize_sched()
 * or
 * - call_rcu_sched() and rcu_barrier_sched()
 * on the write-side to ensure proper synchronization.
 */
static inline void rcu_read_lock_sched(void)
{
	preempt_disable();
	__acquire(RCU_SCHED);
	rcu_read_acquire_sched();
}

/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
static inline notrace void rcu_read_lock_sched_notrace(void)
{
	preempt_disable_notrace();
	__acquire(RCU_SCHED);
}

/*
 * rcu_read_unlock_sched - marks the end of an RCU-classic critical section
 *
 * See rcu_read_lock_sched() for more information.
 */
static inline void rcu_read_unlock_sched(void)
{
	rcu_read_release_sched();
	__release(RCU_SCHED);
	preempt_enable();
}

/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
static inline notrace void rcu_read_unlock_sched_notrace(void)
{
	__release(RCU_SCHED);
	preempt_enable_notrace();
}

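/*
 * Example (illustrative sketch): a reader relying on preemption
 * disabling, paired with synchronize_sched() on the update side.
 * Again, "gp", "new_fp", "old_fp" and the update-side "foo_lock"
 * are hypothetical:
 *
 *	rcu_read_lock_sched();
 *	p = rcu_dereference_sched(gp);
 *	do_something_with(p->a);
 *	rcu_read_unlock_sched();
 *
 * and, on the update side:
 *
 *	spin_lock(&foo_lock);
 *	old_fp = gp;
 *	rcu_assign_pointer(gp, new_fp);
 *	spin_unlock(&foo_lock);
 *	synchronize_sched();
 *	kfree(old_fp);
 */
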
/**
 * rcu_dereference_raw - fetch an RCU-protected pointer
 *
 * The caller must be within some flavor of RCU read-side critical
 * section, or must be otherwise preventing the pointer from changing,
 * for example, by holding an appropriate lock.  This pointer may later
 * be safely dereferenced.  It is the caller's responsibility to have
 * done the right thing, as this primitive does no checking of any kind.
 *
 * Inserts memory barriers on architectures that require them
 * (currently only the Alpha), and, more importantly, documents
 * exactly which pointers are protected by RCU.
 */
#define rcu_dereference_raw(p)	({ \
				typeof(p) _________p1 = ACCESS_ONCE(p); \
				smp_read_barrier_depends(); \
				(_________p1); \
				})

/**
 * rcu_dereference - fetch an RCU-protected pointer, checking for RCU
 *
 * Makes rcu_dereference_check() do the dirty work.
 */
#define rcu_dereference(p) \
	rcu_dereference_check(p, rcu_read_lock_held())

/**
 * rcu_dereference_bh - fetch an RCU-protected pointer, checking for RCU-bh
 *
 * Makes rcu_dereference_check() do the dirty work.
 */
#define rcu_dereference_bh(p) \
	rcu_dereference_check(p, rcu_read_lock_bh_held())

/**
 * rcu_dereference_sched - fetch RCU-protected pointer, checking for RCU-sched
 *
 * Makes rcu_dereference_check() do the dirty work.
 */
#define rcu_dereference_sched(p) \
	rcu_dereference_check(p, rcu_read_lock_sched_held())

/**
 * rcu_assign_pointer - assign (publicize) a pointer to a newly
 * initialized structure that will be dereferenced by RCU read-side
 * critical sections.  Returns the value assigned.
 *
 * Inserts memory barriers on architectures that require them
 * (pretty much all of them other than x86), and also prevents
 * the compiler from reordering the code that initializes the
 * structure after the pointer assignment.  More importantly, this
 * call documents which pointers will be dereferenced by RCU read-side
 * code.
 */

#define rcu_assign_pointer(p, v) \
	({ \
		if (!__builtin_constant_p(v) || \
		    ((v) != NULL)) \
			smp_wmb(); \
		(p) = (v); \
	})

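/*
 * Example (illustrative sketch): initializing and publishing a new
 * structure through the hypothetical global pointer "gp".  All stores
 * that initialize the structure must precede the rcu_assign_pointer()
 * call, which supplies the required memory barrier:
 *
 *	struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL);
 *
 *	if (p != NULL) {
 *		p->a = 1;
 *		rcu_assign_pointer(gp, p);
 *	}
 */
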
/* Infrastructure to implement the synchronize_() primitives. */

struct rcu_synchronize {
	struct rcu_head head;
	struct completion completion;
};

extern void wakeme_after_rcu(struct rcu_head *head);

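/*
 * Sketch of how this infrastructure builds a synchronous grace-period
 * wait (this mirrors the way the kernel's synchronize_rcu() family is
 * implemented on top of call_rcu() and completions):
 *
 *	struct rcu_synchronize rcu;
 *
 *	init_completion(&rcu.completion);
 *	call_rcu(&rcu.head, wakeme_after_rcu);
 *	wait_for_completion(&rcu.completion);
 */
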
/**
 * call_rcu - Queue an RCU callback for invocation after a grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 *
 * The update function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 */
extern void call_rcu(struct rcu_head *head,
		     void (*func)(struct rcu_head *head));

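/*
 * Example (illustrative sketch): unpublishing an element and deferring
 * its reclamation until all pre-existing readers are done.  "gp",
 * "foo_lock", struct foo and foo_reclaim() are the hypothetical names
 * used in the earlier examples:
 *
 *	struct foo *old;
 *
 *	spin_lock(&foo_lock);
 *	old = gp;
 *	rcu_assign_pointer(gp, NULL);
 *	spin_unlock(&foo_lock);
 *	if (old != NULL)
 *		call_rcu(&old->rcu, foo_reclaim);
 */
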
/**
 * call_rcu_bh - Queue an RCU callback for invocation after a quicker grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 *
 * The update function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  call_rcu_bh() assumes
 * that the read-side critical sections end on completion of a softirq
 * handler.  This means that read-side critical sections in process
 * context must not be interrupted by softirqs.  This interface is to be
 * used when most of the read-side critical sections are in softirq context.
 * RCU read-side critical sections are delimited by:
 * - rcu_read_lock() and rcu_read_unlock(), if in interrupt context,
 * or
 * - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
 * These may be nested.
 */
extern void call_rcu_bh(struct rcu_head *head,
			void (*func)(struct rcu_head *head));

#endif /* __LINUX_RCUPDATE_H */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700457#endif /* __LINUX_RCUPDATE_H */