/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2001
 *
 * Author: Dipankar Sarma <dipankar@in.ibm.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		http://lse.sourceforge.net/locking/rcupdate.html
 *
 */

#ifndef __LINUX_RCUPDATE_H
#define __LINUX_RCUPDATE_H

#include <linux/types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>
#include <linux/lockdep.h>
#include <linux/completion.h>
#include <linux/debugobjects.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/ktime.h>

#include <asm/barrier.h>

extern int rcu_expedited; /* for sysctl */

#ifdef CONFIG_TINY_RCU
/* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */
static inline bool rcu_gp_is_expedited(void)  /* Internal RCU use. */
{
	return false;
}

static inline void rcu_expedite_gp(void)
{
}

static inline void rcu_unexpedite_gp(void)
{
}
#else /* #ifdef CONFIG_TINY_RCU */
bool rcu_gp_is_expedited(void);  /* Internal RCU use. */
void rcu_expedite_gp(void);
void rcu_unexpedite_gp(void);
#endif /* #else #ifdef CONFIG_TINY_RCU */

enum rcutorture_type {
	RCU_FLAVOR,
	RCU_BH_FLAVOR,
	RCU_SCHED_FLAVOR,
	RCU_TASKS_FLAVOR,
	SRCU_FLAVOR,
	INVALID_RCU_FLAVOR
};

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
			    unsigned long *gpnum, unsigned long *completed);
void rcutorture_record_test_transition(void);
void rcutorture_record_progress(unsigned long vernum);
void do_trace_rcu_torture_read(const char *rcutorturename,
			       struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old,
			       unsigned long c);
#else
static inline void rcutorture_get_gp_data(enum rcutorture_type test_type,
					  int *flags,
					  unsigned long *gpnum,
					  unsigned long *completed)
{
	*flags = 0;
	*gpnum = 0;
	*completed = 0;
}
static inline void rcutorture_record_test_transition(void)
{
}
static inline void rcutorture_record_progress(unsigned long vernum)
{
}
#ifdef CONFIG_RCU_TRACE
void do_trace_rcu_torture_read(const char *rcutorturename,
			       struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old,
			       unsigned long c);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
	do { } while (0)
#endif
#endif

#define UINT_CMP_GE(a, b)	(UINT_MAX / 2 >= (a) - (b))
#define UINT_CMP_LT(a, b)	(UINT_MAX / 2 < (a) - (b))
#define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))
#define ULONG_CMP_LT(a, b)	(ULONG_MAX / 2 < (a) - (b))
#define ulong2long(a)		(*(long *)(&(a)))
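/*
 * The above comparisons are wraparound-safe: they treat the counter
 * space as circular by checking the unsigned difference against half
 * the type's range.  For example, a sketch with illustrative values:
 *
 *	ULONG_CMP_LT(5, 9)		5 - 9 wraps to ULONG_MAX - 3,
 *					which exceeds ULONG_MAX / 2: true
 *	ULONG_CMP_LT(ULONG_MAX - 1, 2)	true: 2 is "after" the wrap
 *	ULONG_CMP_LT(9, 5)		9 - 5 = 4: false
 */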

/* Exported common interfaces */

#ifdef CONFIG_PREEMPT_RCU

/**
 * call_rcu() - Queue an RCU callback for invocation after a grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all pre-existing RCU read-side
 * critical sections have completed.  However, the callback function
 * might well execute concurrently with RCU read-side critical sections
 * that started after call_rcu() was invoked.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 *
 * Note that all CPUs must agree that the grace period extended beyond
 * all pre-existing RCU read-side critical sections.  On systems with more
 * than one CPU, this means that when "func()" is invoked, each CPU is
 * guaranteed to have executed a full memory barrier since the end of its
 * last RCU read-side critical section whose beginning preceded the call
 * to call_rcu().  It also means that each CPU executing an RCU read-side
 * critical section that continues beyond the start of "func()" must have
 * executed a memory barrier after the call_rcu() but before the beginning
 * of that RCU read-side critical section.  Note that these guarantees
 * include CPUs that are offline, idle, or executing in user mode, as
 * well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
 * resulting RCU callback function "func()", then both CPU A and CPU B are
 * guaranteed to execute a full memory barrier during the time interval
 * between the call to call_rcu() and the invocation of "func()" -- even
 * if CPU A and CPU B are the same CPU (but again only if the system has
 * more than one CPU).
 */
void call_rcu(struct rcu_head *head,
	      void (*func)(struct rcu_head *head));
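/*
 * For example, a minimal sketch of call_rcu() usage; "struct foo",
 * foo_reclaim(), and the gp_lock-protected global pointer "gp" are
 * illustrative assumptions, not part of this header:
 *
 *	struct foo {
 *		int a;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rhp)
 *	{
 *		struct foo *fp = container_of(rhp, struct foo, rcu);
 *
 *		kfree(fp);
 *	}
 *
 *	static void foo_update(struct foo *newp)
 *	{
 *		struct foo *oldp;
 *
 *		spin_lock(&gp_lock);
 *		oldp = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
 *		rcu_assign_pointer(gp, newp);
 *		spin_unlock(&gp_lock);
 *		call_rcu(&oldp->rcu, foo_reclaim);	(no blocking needed)
 *	}
 */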

#else /* #ifdef CONFIG_PREEMPT_RCU */

/* In classic RCU, call_rcu() is just call_rcu_sched(). */
#define call_rcu	call_rcu_sched

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/**
 * call_rcu_bh() - Queue an RCU callback for invocation after a quicker grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  call_rcu_bh() assumes
 * that the read-side critical sections end on completion of a softirq
 * handler.  This means that read-side critical sections in process
 * context must not be interrupted by softirqs.  This interface is to be
 * used when most of the read-side critical sections are in softirq context.
 * RCU read-side critical sections are delimited by:
 *  - rcu_read_lock() and rcu_read_unlock(), if in interrupt context, OR
 *  - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
 *  These may be nested.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_bh(struct rcu_head *head,
		 void (*func)(struct rcu_head *head));

/**
 * call_rcu_sched() - Queue an RCU callback for invocation after a sched grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  call_rcu_sched() assumes
 * that the read-side critical sections end on enabling of preemption
 * or on voluntary preemption.
 * RCU read-side critical sections are delimited by:
 *  - rcu_read_lock_sched() and rcu_read_unlock_sched(), OR
 *  - anything that disables preemption.
 *  These may be nested.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_sched(struct rcu_head *head,
		    void (*func)(struct rcu_head *rcu));

void synchronize_sched(void);

/*
 * Structure allowing asynchronous waiting on RCU.
 */
struct rcu_synchronize {
	struct rcu_head head;
	struct completion completion;
};
void wakeme_after_rcu(struct rcu_head *head);
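/*
 * A sketch of how a synchronous grace-period wait can be built from
 * rcu_synchronize plus wakeme_after_rcu(); this is roughly the pattern
 * used by wait_rcu_gp(), though the details there may differ:
 *
 *	struct rcu_synchronize rcu;
 *
 *	init_rcu_head_on_stack(&rcu.head);
 *	init_completion(&rcu.completion);
 *	call_rcu(&rcu.head, wakeme_after_rcu);
 *	wait_for_completion(&rcu.completion);	(a grace period has elapsed)
 *	destroy_rcu_head_on_stack(&rcu.head);
 */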

/**
 * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  call_rcu_tasks() assumes
 * that the read-side critical sections end at a voluntary context
 * switch (not a preemption!), entry into idle, or transition to usermode
 * execution.  As such, there are no read-side primitives analogous to
 * rcu_read_lock() and rcu_read_unlock() because this primitive is intended
 * to determine that all tasks have passed through a safe state, not so
 * much for data-structure synchronization.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_tasks(struct rcu_head *head, void (*func)(struct rcu_head *head));
void synchronize_rcu_tasks(void);
void rcu_barrier_tasks(void);

#ifdef CONFIG_PREEMPT_RCU

void __rcu_read_lock(void);
void __rcu_read_unlock(void);
void rcu_read_unlock_special(struct task_struct *t);
void synchronize_rcu(void);

/*
 * Defined as a macro as it is a very low level header included from
 * areas that don't even know about current.  This gives the rcu_read_lock()
 * nesting depth, but makes sense only if CONFIG_PREEMPT_RCU -- in other
 * types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
 */
#define rcu_preempt_depth() (current->rcu_read_lock_nesting)

#else /* #ifdef CONFIG_PREEMPT_RCU */

static inline void __rcu_read_lock(void)
{
	preempt_disable();
}

static inline void __rcu_read_unlock(void)
{
	preempt_enable();
}

static inline void synchronize_rcu(void)
{
	synchronize_sched();
}

static inline int rcu_preempt_depth(void)
{
	return 0;
}

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/* Internal to kernel */
void rcu_init(void);
void rcu_end_inkernel_boot(void);
void rcu_sched_qs(void);
void rcu_bh_qs(void);
void rcu_check_callbacks(int user);
struct notifier_block;
void rcu_idle_enter(void);
void rcu_idle_exit(void);
void rcu_irq_enter(void);
void rcu_irq_exit(void);
int rcu_cpu_notify(struct notifier_block *self,
		   unsigned long action, void *hcpu);

#ifdef CONFIG_RCU_STALL_COMMON
void rcu_sysrq_start(void);
void rcu_sysrq_end(void);
#else /* #ifdef CONFIG_RCU_STALL_COMMON */
static inline void rcu_sysrq_start(void)
{
}
static inline void rcu_sysrq_end(void)
{
}
#endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */

#ifdef CONFIG_RCU_USER_QS
void rcu_user_enter(void);
void rcu_user_exit(void);
#else
static inline void rcu_user_enter(void) { }
static inline void rcu_user_exit(void) { }
static inline void rcu_user_hooks_switch(struct task_struct *prev,
					 struct task_struct *next) { }
#endif /* CONFIG_RCU_USER_QS */

#ifdef CONFIG_RCU_NOCB_CPU
void rcu_init_nohz(void);
#else /* #ifdef CONFIG_RCU_NOCB_CPU */
static inline void rcu_init_nohz(void)
{
}
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */

/**
 * RCU_NONIDLE - Indicate idle-loop code that needs RCU readers
 * @a: Code that RCU needs to pay attention to.
 *
 * RCU, RCU-bh, and RCU-sched read-side critical sections are forbidden
 * in the inner idle loop, that is, between the rcu_idle_enter() and
 * the rcu_idle_exit() -- RCU will happily ignore any such read-side
 * critical sections.  However, things like powertop need tracepoints
 * in the inner idle loop.
 *
 * This macro provides the way out:  RCU_NONIDLE(do_something_with_RCU())
 * will tell RCU that it needs to pay attention, invoke its argument
 * (in this example, a call to the do_something_with_RCU() function),
 * and then tell RCU to go back to ignoring this CPU.  It is permissible
 * to nest RCU_NONIDLE() wrappers, but the nesting level is currently
 * quite limited.  If deeper nesting is required, it will be necessary
 * to adjust DYNTICK_TASK_NESTING_VALUE accordingly.
 */
#define RCU_NONIDLE(a) \
	do { \
		rcu_irq_enter(); \
		do { a; } while (0); \
		rcu_irq_exit(); \
	} while (0)
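/*
 * For example, a tracepoint in the inner idle loop might be wrapped as
 * follows, where trace_idle_event() stands in for whatever RCU-using
 * statement the caller needs (an illustrative assumption):
 *
 *	RCU_NONIDLE(trace_idle_event(cpu));
 *
 * This expands to rcu_irq_enter(), the statement, then rcu_irq_exit().
 */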

/*
 * Note a voluntary context switch for RCU-tasks benefit.  This is a
 * macro rather than an inline function to avoid #include hell.
 */
#ifdef CONFIG_TASKS_RCU
#define TASKS_RCU(x) x
extern struct srcu_struct tasks_rcu_exit_srcu;
#define rcu_note_voluntary_context_switch(t) \
	do { \
		rcu_all_qs(); \
		if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \
			ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \
	} while (0)
#else /* #ifdef CONFIG_TASKS_RCU */
#define TASKS_RCU(x) do { } while (0)
#define rcu_note_voluntary_context_switch(t) rcu_all_qs()
#endif /* #else #ifdef CONFIG_TASKS_RCU */

/**
 * cond_resched_rcu_qs - Report potential quiescent states to RCU
 *
 * This macro resembles cond_resched(), except that it is defined to
 * report potential quiescent states to RCU-tasks even if the cond_resched()
 * machinery were to be shut off, as some advocate for PREEMPT kernels.
 */
#define cond_resched_rcu_qs() \
do { \
	if (!cond_resched()) \
		rcu_note_voluntary_context_switch(current); \
} while (0)
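/*
 * A minimal sketch of the intended use: a long-running kernel loop that
 * must not stall grace periods, including RCU-tasks grace periods.
 * get_next_item() and process_item() are illustrative assumptions:
 *
 *	while ((item = get_next_item()) != NULL) {
 *		process_item(item);
 *		cond_resched_rcu_qs();	(maybe reschedule, always note QS)
 *	}
 */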

#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP)
bool __rcu_is_watching(void);
#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */

/*
 * Infrastructure to implement the synchronize_() primitives in
 * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
 */

typedef void call_rcu_func_t(struct rcu_head *head,
			     void (*func)(struct rcu_head *head));
void wait_rcu_gp(call_rcu_func_t crf);

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
#include <linux/rcutree.h>
#elif defined(CONFIG_TINY_RCU)
#include <linux/rcutiny.h>
#else
#error "Unknown RCU implementation specified to kernel configuration"
#endif

/*
 * init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic
 * initialization and destruction of rcu_head on the stack.  rcu_head structures
 * allocated dynamically in the heap or defined statically don't need any
 * initialization.
 */
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
void init_rcu_head(struct rcu_head *head);
void destroy_rcu_head(struct rcu_head *head);
void init_rcu_head_on_stack(struct rcu_head *head);
void destroy_rcu_head_on_stack(struct rcu_head *head);
#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
static inline void init_rcu_head(struct rcu_head *head)
{
}

static inline void destroy_rcu_head(struct rcu_head *head)
{
}

static inline void init_rcu_head_on_stack(struct rcu_head *head)
{
}

static inline void destroy_rcu_head_on_stack(struct rcu_head *head)
{
}
#endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */

#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU)
bool rcu_lockdep_current_cpu_online(void);
#else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
static inline bool rcu_lockdep_current_cpu_online(void)
{
	return true;
}
#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */

#ifdef CONFIG_DEBUG_LOCK_ALLOC

static inline void rcu_lock_acquire(struct lockdep_map *map)
{
	lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_);
}

static inline void rcu_lock_release(struct lockdep_map *map)
{
	lock_release(map, 1, _THIS_IP_);
}

extern struct lockdep_map rcu_lock_map;
extern struct lockdep_map rcu_bh_lock_map;
extern struct lockdep_map rcu_sched_lock_map;
extern struct lockdep_map rcu_callback_map;
int debug_lockdep_rcu_enabled(void);

int rcu_read_lock_held(void);
int rcu_read_lock_bh_held(void);

/**
 * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
 * RCU-sched read-side critical section.  In absence of
 * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
 * critical section unless it can prove otherwise.  Note that disabling
 * of preemption (including disabling irqs) counts as an RCU-sched
 * read-side critical section.  This is useful for debug checks in functions
 * that require that they be called within an RCU-sched read-side
 * critical section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that if the CPU is in the idle loop from an RCU point of
 * view (ie: that we are in the section between rcu_idle_enter() and
 * rcu_idle_exit()) then rcu_read_lock_held() returns false even if the CPU
 * did an rcu_read_lock().  The reason for this is that RCU ignores CPUs
 * that are in such a section, considering these as in extended quiescent
 * state, so such a CPU is effectively never in an RCU read-side critical
 * section regardless of what RCU primitives it invokes.  This state of
 * affairs is required --- we need to keep an RCU-free window in idle
 * where the CPU may possibly enter into low power mode.  This way we can
 * notice an extended quiescent state to other CPUs that started a grace
 * period.  Otherwise we would delay any grace period as long as we run in
 * the idle task.
 *
 * Similarly, we avoid claiming an SRCU read lock held if the current
 * CPU is offline.
 */
#ifdef CONFIG_PREEMPT_COUNT
static inline int rcu_read_lock_sched_held(void)
{
	int lockdep_opinion = 0;

	if (!debug_lockdep_rcu_enabled())
		return 1;
	if (!rcu_is_watching())
		return 0;
	if (!rcu_lockdep_current_cpu_online())
		return 0;
	if (debug_locks)
		lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
	return lockdep_opinion || preempt_count() != 0 || irqs_disabled();
}
#else /* #ifdef CONFIG_PREEMPT_COUNT */
static inline int rcu_read_lock_sched_held(void)
{
	return 1;
}
#endif /* #else #ifdef CONFIG_PREEMPT_COUNT */

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

# define rcu_lock_acquire(a)		do { } while (0)
# define rcu_lock_release(a)		do { } while (0)

static inline int rcu_read_lock_held(void)
{
	return 1;
}

static inline int rcu_read_lock_bh_held(void)
{
	return 1;
}

#ifdef CONFIG_PREEMPT_COUNT
static inline int rcu_read_lock_sched_held(void)
{
	return preempt_count() != 0 || irqs_disabled();
}
#else /* #ifdef CONFIG_PREEMPT_COUNT */
static inline int rcu_read_lock_sched_held(void)
{
	return 1;
}
#endif /* #else #ifdef CONFIG_PREEMPT_COUNT */

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#ifdef CONFIG_PROVE_RCU

/**
 * rcu_lockdep_assert - emit lockdep splat if specified condition not met
 * @c: condition to check
 * @s: informative message
 */
#define rcu_lockdep_assert(c, s)					\
	do {								\
		static bool __section(.data.unlikely) __warned;		\
		if (debug_lockdep_rcu_enabled() && !__warned && !(c)) {	\
			__warned = true;				\
			lockdep_rcu_suspicious(__FILE__, __LINE__, s);	\
		}							\
	} while (0)

#if defined(CONFIG_PROVE_RCU) && !defined(CONFIG_PREEMPT_RCU)
static inline void rcu_preempt_sleep_check(void)
{
	rcu_lockdep_assert(!lock_is_held(&rcu_lock_map),
			   "Illegal context switch in RCU read-side critical section");
}
#else /* #ifdef CONFIG_PROVE_RCU */
static inline void rcu_preempt_sleep_check(void)
{
}
#endif /* #else #ifdef CONFIG_PROVE_RCU */

#define rcu_sleep_check()						\
	do {								\
		rcu_preempt_sleep_check();				\
		rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map),	\
				   "Illegal context switch in RCU-bh read-side critical section"); \
		rcu_lockdep_assert(!lock_is_held(&rcu_sched_lock_map),	\
				   "Illegal context switch in RCU-sched read-side critical section"); \
	} while (0)

#else /* #ifdef CONFIG_PROVE_RCU */

#define rcu_lockdep_assert(c, s) do { } while (0)
#define rcu_sleep_check() do { } while (0)

#endif /* #else #ifdef CONFIG_PROVE_RCU */

/*
 * Helper functions for rcu_dereference_check(), rcu_dereference_protected()
 * and rcu_assign_pointer().  Some of these could be folded into their
 * callers, but they are left separate in order to ease introduction of
 * multiple flavors of pointers to match the multiple flavors of RCU
 * (e.g., __rcu_bh, __rcu_sched, and __srcu), should this make sense in
 * the future.
 */

#ifdef __CHECKER__
#define rcu_dereference_sparse(p, space) \
	((void)(((typeof(*p) space *)p) == p))
#else /* #ifdef __CHECKER__ */
#define rcu_dereference_sparse(p, space)
#endif /* #else #ifdef __CHECKER__ */

#define __rcu_access_pointer(p, space) \
({ \
	typeof(*p) *_________p1 = (typeof(*p) *__force)ACCESS_ONCE(p); \
	rcu_dereference_sparse(p, space); \
	((typeof(*p) __force __kernel *)(_________p1)); \
})
#define __rcu_dereference_check(p, c, space) \
({ \
	/* Dependency order vs. p above. */ \
	typeof(*p) *________p1 = (typeof(*p) *__force)lockless_dereference(p); \
	rcu_lockdep_assert(c, "suspicious rcu_dereference_check() usage"); \
	rcu_dereference_sparse(p, space); \
	((typeof(*p) __force __kernel *)(________p1)); \
})
#define __rcu_dereference_protected(p, c, space) \
({ \
	rcu_lockdep_assert(c, "suspicious rcu_dereference_protected() usage"); \
	rcu_dereference_sparse(p, space); \
	((typeof(*p) __force __kernel *)(p)); \
})

#define __rcu_access_index(p, space) \
({ \
	typeof(p) _________p1 = ACCESS_ONCE(p); \
	rcu_dereference_sparse(p, space); \
	(_________p1); \
})
#define __rcu_dereference_index_check(p, c) \
({ \
	/* Dependency order vs. p above. */ \
	typeof(p) _________p1 = lockless_dereference(p); \
	rcu_lockdep_assert(c, \
			   "suspicious rcu_dereference_index_check() usage"); \
	(_________p1); \
})

/**
 * RCU_INITIALIZER() - statically initialize an RCU-protected global variable
 * @v: The value to statically initialize with.
 */
#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v)

/**
 * lockless_dereference() - safely load a pointer for later dereference
 * @p: The pointer to load
 *
 * Similar to rcu_dereference(), but for situations where the pointed-to
 * object's lifetime is managed by something other than RCU.  That
 * "something other" might be reference counting or simple immortality.
 */
#define lockless_dereference(p) \
({ \
	typeof(p) _________p1 = ACCESS_ONCE(p); \
	smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
	(_________p1); \
})
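/*
 * For example, a sketch for a pointer whose possible targets are
 * statically allocated and thus never freed, so dependency ordering
 * rather than a grace period is all that is needed.  "struct mode",
 * mode_a/mode_b, cur_mode, and apply_setting() are illustrative
 * assumptions:
 *
 *	static struct mode mode_a, mode_b;	(immortal)
 *	static struct mode *cur_mode = &mode_a;
 *
 *	struct mode *m = lockless_dereference(cur_mode);
 *	apply_setting(m->setting);	(sees m's fields as initialized)
 */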

/**
 * rcu_assign_pointer() - assign to RCU-protected pointer
 * @p: pointer to assign to
 * @v: value to assign (publish)
 *
 * Assigns the specified value to the specified RCU-protected
 * pointer, ensuring that any concurrent RCU readers will see
 * any prior initialization.
 *
 * Inserts memory barriers on architectures that require them
 * (which is most of them), and also prevents the compiler from
 * reordering the code that initializes the structure after the pointer
 * assignment.  More importantly, this call documents which pointers
 * will be dereferenced by RCU read-side code.
 *
 * In some special cases, you may use RCU_INIT_POINTER() instead
 * of rcu_assign_pointer().  RCU_INIT_POINTER() is a bit faster due
 * to the fact that it does not constrain either the CPU or the compiler.
 * That said, using RCU_INIT_POINTER() when you should have used
 * rcu_assign_pointer() is a very bad thing that results in
 * impossible-to-diagnose memory corruption.  So please be careful.
 * See the RCU_INIT_POINTER() comment header for details.
 *
 * Note that rcu_assign_pointer() evaluates each of its arguments only
 * once, appearances notwithstanding.  One of the "extra" evaluations
 * is in typeof() and the other visible only to sparse (__CHECKER__),
 * neither of which actually execute the argument.  As with most cpp
 * macros, this execute-arguments-only-once property is important, so
 * please be careful when making changes to rcu_assign_pointer() and the
 * other macros that it invokes.
 */
#define rcu_assign_pointer(p, v) smp_store_release(&p, RCU_INITIALIZER(v))
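/*
 * For example, a sketch of the initialize-then-publish ordering that
 * rcu_assign_pointer() enforces ("gp" and "struct foo" are illustrative
 * assumptions; updater-side mutual exclusion is assumed elsewhere):
 *
 *	struct foo *newp = kmalloc(sizeof(*newp), GFP_KERNEL);
 *
 *	newp->a = 1;
 *	newp->b = 2;			(all initialization first)
 *	rcu_assign_pointer(gp, newp);	(publication makes it reader-visible)
 */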

/**
 * rcu_access_pointer() - fetch RCU pointer with no dereferencing
 * @p: The pointer to read
 *
 * Return the value of the specified RCU-protected pointer, but omit the
 * smp_read_barrier_depends() and keep the ACCESS_ONCE().  This is useful
 * when the value of this pointer is accessed, but the pointer is not
 * dereferenced, for example, when testing an RCU-protected pointer against
 * NULL.  Although rcu_access_pointer() may also be used in cases where
 * update-side locks prevent the value of the pointer from changing, you
 * should instead use rcu_dereference_protected() for this use case.
 *
 * It is also permissible to use rcu_access_pointer() when read-side
 * access to the pointer was removed at least one grace period ago, as
 * is the case in the context of the RCU callback that is freeing up
 * the data, or after a synchronize_rcu() returns.  This can be useful
 * when tearing down multi-linked structures after a grace period
 * has elapsed.
 */
#define rcu_access_pointer(p) __rcu_access_pointer((p), __rcu)
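/*
 * For example, a NULL check that neither dereferences the pointer nor
 * requires rcu_read_lock() ("gp" is an illustrative assumption):
 *
 *	if (!rcu_access_pointer(gp))
 *		return -ENOENT;		(value tested, never dereferenced)
 */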

/**
 * rcu_dereference_check() - rcu_dereference with debug checking
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * Do an rcu_dereference(), but check that the conditions under which the
 * dereference will take place are correct.  Typically the conditions
 * indicate the various locking conditions that should be held at that
 * point.  The check should return true if the conditions are satisfied.
 * An implicit check for being in an RCU read-side critical section
 * (rcu_read_lock()) is included.
 *
 * For example:
 *
 *	bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock));
 *
 * could be used to indicate to lockdep that foo->bar may only be dereferenced
 * if either rcu_read_lock() is held, or that the lock required to replace
 * the bar struct at foo->bar is held.
 *
 * Note that the list of conditions may also include indications of when a lock
 * need not be held, for example during initialisation or destruction of the
 * target struct:
 *
 *	bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock) ||
 *					      atomic_read(&foo->usage) == 0);
 *
 * Inserts memory barriers on architectures that require them
 * (currently only the Alpha), prevents the compiler from refetching
 * (and from merging fetches), and, more importantly, documents exactly
 * which pointers are protected by RCU and checks that the pointer is
 * annotated as __rcu.
 */
#define rcu_dereference_check(p, c) \
	__rcu_dereference_check((p), (c) || rcu_read_lock_held(), __rcu)

/**
 * rcu_dereference_bh_check() - rcu_dereference_bh with debug checking
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * This is the RCU-bh counterpart to rcu_dereference_check().
 */
#define rcu_dereference_bh_check(p, c) \
	__rcu_dereference_check((p), (c) || rcu_read_lock_bh_held(), __rcu)

/**
 * rcu_dereference_sched_check() - rcu_dereference_sched with debug checking
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * This is the RCU-sched counterpart to rcu_dereference_check().
 */
#define rcu_dereference_sched_check(p, c) \
	__rcu_dereference_check((p), (c) || rcu_read_lock_sched_held(), \
				__rcu)

#define rcu_dereference_raw(p) rcu_dereference_check(p, 1) /*@@@ needed? @@@*/

/*
 * The tracing infrastructure traces RCU (we want that), but unfortunately
 * some of the RCU checks cause tracing to lock up the system.
 *
 * The tracing version of rcu_dereference_raw() must not call
 * rcu_read_lock_held().
 */
#define rcu_dereference_raw_notrace(p) __rcu_dereference_check((p), 1, __rcu)

/**
 * rcu_access_index() - fetch RCU index with no dereferencing
 * @p: The index to read
 *
 * Return the value of the specified RCU-protected index, but omit the
 * smp_read_barrier_depends() and keep the ACCESS_ONCE().  This is useful
 * when the value of this index is accessed, but the index is not
 * dereferenced, for example, when testing an RCU-protected index against
 * -1.  Although rcu_access_index() may also be used in cases where
 * update-side locks prevent the value of the index from changing, you
 * should instead use rcu_dereference_index_protected() for this use case.
 */
#define rcu_access_index(p) __rcu_access_index((p), __rcu)

/**
 * rcu_dereference_index_check() - rcu_dereference for indices with debug checking
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * Similar to rcu_dereference_check(), but omits the sparse checking.
 * This allows rcu_dereference_index_check() to be used on integers,
 * which can then be used as array indices.  Attempting to use
 * rcu_dereference_check() on an integer will give compiler warnings
 * because the sparse address-space mechanism relies on dereferencing
 * the RCU-protected pointer.  Dereferencing integers is not something
 * that even gcc will put up with.
 *
 * Note that this function does not implicitly check for RCU read-side
 * critical sections.  If this function gains lots of uses, it might
 * make sense to provide versions for each flavor of RCU, but it does
 * not make sense as of early 2010.
 */
#define rcu_dereference_index_check(p, c) \
	__rcu_dereference_index_check((p), (c))

/**
 * rcu_dereference_protected() - fetch RCU pointer when updates prevented
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * Return the value of the specified RCU-protected pointer, but omit
 * both the smp_read_barrier_depends() and the ACCESS_ONCE().  This
 * is useful in cases where update-side locks prevent the value of the
 * pointer from changing.  Please note that this primitive does -not-
 * prevent the compiler from repeating this reference or combining it
 * with other references, so it should not be used without protection
 * of appropriate locks.
 *
 * This function is only for update-side use.  Using this function
 * when protected only by rcu_read_lock() will result in infrequent
 * but very ugly failures.
 */
#define rcu_dereference_protected(p, c) \
	__rcu_dereference_protected((p), (c), __rcu)


/**
 * rcu_dereference() - fetch RCU-protected pointer for dereferencing
 * @p: The pointer to read, prior to dereferencing
 *
 * This is a simple wrapper around rcu_dereference_check().
 */
#define rcu_dereference(p) rcu_dereference_check(p, 0)
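/*
 * For example, a sketch of the canonical read side for an RCU-protected
 * pointer ("gp", "struct foo", and do_something() are illustrative
 * assumptions):
 *
 *	struct foo *p;
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	if (p)
 *		do_something(p->a);	(p must not be used after...)
 *	rcu_read_unlock();		(...the critical section ends)
 */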

/**
 * rcu_dereference_bh() - fetch an RCU-bh-protected pointer for dereferencing
 * @p: The pointer to read, prior to dereferencing
 *
 * Makes rcu_dereference_check() do the dirty work.
 */
#define rcu_dereference_bh(p) rcu_dereference_bh_check(p, 0)

/**
 * rcu_dereference_sched() - fetch RCU-sched-protected pointer for dereferencing
 * @p: The pointer to read, prior to dereferencing
 *
 * Makes rcu_dereference_check() do the dirty work.
 */
#define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0)

/**
 * rcu_read_lock() - mark the beginning of an RCU read-side critical section
 *
 * When synchronize_rcu() is invoked on one CPU while other CPUs
 * are within RCU read-side critical sections, then the
 * synchronize_rcu() is guaranteed to block until after all the other
 * CPUs exit their critical sections.  Similarly, if call_rcu() is invoked
 * on one CPU while other CPUs are within RCU read-side critical
 * sections, invocation of the corresponding RCU callback is deferred
 * until after all the other CPUs exit their critical sections.
 *
 * Note, however, that RCU callbacks are permitted to run concurrently
 * with new RCU read-side critical sections.  One way that this can happen
 * is via the following sequence of events: (1) CPU 0 enters an RCU
 * read-side critical section, (2) CPU 1 invokes call_rcu() to register
 * an RCU callback, (3) CPU 0 exits the RCU read-side critical section,
 * (4) CPU 2 enters a RCU read-side critical section, (5) the RCU
 * callback is invoked.  This is legal, because the RCU read-side critical
 * section that was running concurrently with the call_rcu() (and which
 * therefore might be referencing something that the corresponding RCU
 * callback would free up) has completed before the corresponding
 * RCU callback is invoked.
 *
 * RCU read-side critical sections may be nested.  Any deferred actions
 * will be deferred until the outermost RCU read-side critical section
 * completes.
 *
 * You can avoid reading and understanding the next paragraph by
 * following this rule: don't put anything in an rcu_read_lock() RCU
 * read-side critical section that would block in a !PREEMPT kernel.
 * But if you want the full story, read on!
 *
 * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU),
 * it is illegal to block while in an RCU read-side critical section.
 * In preemptible RCU implementations (PREEMPT_RCU) in CONFIG_PREEMPT
 * kernel builds, RCU read-side critical sections may be preempted,
 * but explicit blocking is illegal.  Finally, in preemptible RCU
 * implementations in real-time (with -rt patchset) kernel builds, RCU
 * read-side critical sections may be preempted and they may also block, but
 * only when acquiring spinlocks that are subject to priority inheritance.
 */
static inline void rcu_read_lock(void)
{
	__rcu_read_lock();
	__acquire(RCU);
	rcu_lock_acquire(&rcu_lock_map);
	rcu_lockdep_assert(rcu_is_watching(),
			   "rcu_read_lock() used illegally while idle");
}

/*
 * So where is rcu_write_lock()?  It does not exist, as there is no
 * way for writers to lock out RCU readers.  This is a feature, not
 * a bug -- this property is what provides RCU's performance benefits.
 * Of course, writers must coordinate with each other.  The normal
 * spinlock primitives work well for this, but any other technique may be
 * used as well.  RCU does not care how the writers keep out of each
 * others' way, as long as they do so.
 */

/**
 * rcu_read_unlock() - marks the end of an RCU read-side critical section.
 *
 * In most situations, rcu_read_unlock() is immune from deadlock.
 * However, in kernels built with CONFIG_RCU_BOOST, rcu_read_unlock()
 * is responsible for deboosting, which it does via rt_mutex_unlock().
 * Unfortunately, this function acquires the scheduler's runqueue and
 * priority-inheritance spinlocks.  This means that deadlock could result
 * if the caller of rcu_read_unlock() already holds one of these locks or
 * any lock that is ever acquired while holding them; or any lock which
 * can be taken from interrupt context because rcu_boost()->rt_mutex_lock()
 * does not disable irqs while taking ->wait_lock.
 *
 * That said, RCU readers are never priority boosted unless they were
 * preempted.  Therefore, one way to avoid deadlock is to make sure
 * that preemption never happens within any RCU read-side critical
 * section whose outermost rcu_read_unlock() is called with one of
 * rt_mutex_unlock()'s locks held.  Such preemption can be avoided in
 * a number of ways, for example, by invoking preempt_disable() before
 * critical section's outermost rcu_read_lock().
 *
 * Given that the set of locks acquired by rt_mutex_unlock() might change
 * at any time, a somewhat more future-proofed approach is to make sure
 * that preemption never happens within any RCU read-side critical
 * section whose outermost rcu_read_unlock() is called with irqs disabled.
 * This approach relies on the fact that rt_mutex_unlock() currently only
 * acquires irq-disabled locks.
 *
 * The second of these two approaches is best in most situations,
 * however, the first approach can also be useful, at least to those
 * developers willing to keep abreast of the set of locks acquired by
 * rt_mutex_unlock().
 *
 * See rcu_read_lock() for more information.
 */
static inline void rcu_read_unlock(void)
{
	rcu_lockdep_assert(rcu_is_watching(),
			   "rcu_read_unlock() used illegally while idle");
	__release(RCU);
	__rcu_read_unlock();
	rcu_lock_release(&rcu_lock_map); /* Keep acq info for rls diags. */
}

/**
 * rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section
 *
 * This is equivalent to rcu_read_lock(), but to be used when updates
 * are being done using call_rcu_bh() or synchronize_rcu_bh().  Since
 * both call_rcu_bh() and synchronize_rcu_bh() consider completion of a
 * softirq handler to be a quiescent state, a process in RCU read-side
 * critical section must be protected by disabling softirqs.  Read-side
 * critical sections in interrupt context can use just rcu_read_lock(),
 * though this should at least be commented to avoid confusing people
 * reading the code.
 *
 * Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh()
 * must occur in the same context, for example, it is illegal to invoke
 * rcu_read_unlock_bh() from one task if the matching rcu_read_lock_bh()
 * was invoked from some other task.
 */
static inline void rcu_read_lock_bh(void)
{
	local_bh_disable();
	__acquire(RCU_BH);
	rcu_lock_acquire(&rcu_bh_lock_map);
	rcu_lockdep_assert(rcu_is_watching(),
			   "rcu_read_lock_bh() used illegally while idle");
}

/*
 * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section
 *
 * See rcu_read_lock_bh() for more information.
 */
static inline void rcu_read_unlock_bh(void)
{
	rcu_lockdep_assert(rcu_is_watching(),
			   "rcu_read_unlock_bh() used illegally while idle");
	rcu_lock_release(&rcu_bh_lock_map);
	__release(RCU_BH);
	local_bh_enable();
}
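/*
 * For example, a sketch of an RCU-bh read side, as might appear in a
 * softirq-heavy path ("dev_map" and count_packet() are illustrative
 * assumptions):
 *
 *	rcu_read_lock_bh();
 *	d = rcu_dereference_bh(dev_map);
 *	if (d)
 *		count_packet(d);
 *	rcu_read_unlock_bh();
 */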

/**
 * rcu_read_lock_sched() - mark the beginning of a RCU-sched critical section
 *
 * This is equivalent to rcu_read_lock(), but to be used when updates
 * are being done using call_rcu_sched() or synchronize_rcu_sched().
 * Read-side critical sections can also be introduced by anything that
 * disables preemption, including local_irq_disable() and friends.
 *
 * Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched()
 * must occur in the same context, for example, it is illegal to invoke
 * rcu_read_unlock_sched() from process context if the matching
 * rcu_read_lock_sched() was invoked from an NMI handler.
 */
static inline void rcu_read_lock_sched(void)
{
	preempt_disable();
	__acquire(RCU_SCHED);
	rcu_lock_acquire(&rcu_sched_lock_map);
	rcu_lockdep_assert(rcu_is_watching(),
			   "rcu_read_lock_sched() used illegally while idle");
}

/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
static inline notrace void rcu_read_lock_sched_notrace(void)
{
	preempt_disable_notrace();
	__acquire(RCU_SCHED);
}
Mathieu Desnoyers1c50b722008-09-29 11:06:46 -04001043
/**
 * rcu_read_unlock_sched() - marks the end of an RCU-sched critical section
 *
 * See rcu_read_lock_sched() for more information.
 */
static inline void rcu_read_unlock_sched(void)
{
	rcu_lockdep_assert(rcu_is_watching(),
			   "rcu_read_unlock_sched() used illegally while idle");
	rcu_lock_release(&rcu_sched_lock_map);
	__release(RCU_SCHED);
	preempt_enable();
}

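/*
 * Usage sketch (hypothetical; "gp" and its type are not from this file):
 * an RCU-sched reader relies only on preemption being disabled, so the
 * matching updater must use synchronize_sched() or call_rcu_sched():
 *
 *	struct gp_data *p;
 *
 *	rcu_read_lock_sched();
 *	p = rcu_dereference_sched(gp);
 *	if (p)
 *		do_something_with(p->field);
 *	rcu_read_unlock_sched();
 *
 * Plain preempt_disable()/preempt_enable() would also mark the read-side
 * critical section, but the explicit calls above keep lockdep informed.
 */
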
/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
static inline notrace void rcu_read_unlock_sched_notrace(void)
{
	__release(RCU_SCHED);
	preempt_enable_notrace();
}

/**
 * RCU_INIT_POINTER() - initialize an RCU-protected pointer
 *
 * Initialize an RCU-protected pointer in special cases where readers
 * do not need ordering constraints on the CPU or the compiler.  These
 * special cases are:
 *
 * 1.	This use of RCU_INIT_POINTER() is NULLing out the pointer -or-
 * 2.	The caller has taken whatever steps are required to prevent
 *	RCU readers from concurrently accessing this pointer -or-
 * 3.	The referenced data structure has already been exposed to
 *	readers either at compile time or via rcu_assign_pointer() -and-
 *	a.	You have not made -any- reader-visible changes to
 *		this structure since then -or-
 *	b.	It is OK for readers accessing this structure from its
 *		new location to see the old state of the structure.  (For
 *		example, the changes were to statistical counters or to
 *		other state where exact synchronization is not required.)
 *
 * Failure to follow these rules governing use of RCU_INIT_POINTER() will
 * result in impossible-to-diagnose memory corruption.  That is, the
 * structures will look OK in crash dumps, but any concurrent RCU readers
 * might see pre-initialized values of the referenced data structure.  So
 * please be very careful how you use RCU_INIT_POINTER()!!!
 *
 * If you are creating an RCU-protected linked structure that is accessed
 * by a single external-to-structure RCU-protected pointer, then you may
 * use RCU_INIT_POINTER() to initialize the internal RCU-protected
 * pointers, but you must use rcu_assign_pointer() to initialize the
 * external-to-structure pointer -after- you have completely initialized
 * the reader-accessible portions of the linked structure.
 *
 * Note that unlike rcu_assign_pointer(), RCU_INIT_POINTER() provides no
 * ordering guarantees for either the CPU or the compiler.
 */
#define RCU_INIT_POINTER(p, v) \
	do { \
		rcu_dereference_sparse(p, __rcu); \
		p = RCU_INITIALIZER(v); \
	} while (0)

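/*
 * Usage sketch (hypothetical names): NULLing out a pointer, or
 * initializing one before any reader can see the enclosing structure,
 * needs no ordering and thus qualifies for RCU_INIT_POINTER():
 *
 *	struct foo {
 *		struct bar __rcu *bar_ptr;
 *	};
 *
 *	void foo_clear(struct foo *fp)
 *	{
 *		RCU_INIT_POINTER(fp->bar_ptr, NULL);
 *	}
 *
 * This is case 1 in the list above.  In contrast, publishing a newly
 * initialized structure to concurrent readers requires
 * rcu_assign_pointer().
 */
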
/**
 * RCU_POINTER_INITIALIZER() - statically initialize an RCU-protected pointer
 *
 * GCC-style initialization for an RCU-protected pointer in a structure field.
 */
#define RCU_POINTER_INITIALIZER(p, v) \
		.p = RCU_INITIALIZER(v)

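/*
 * Usage sketch (hypothetical names): statically initializing a structure
 * that contains an RCU-protected pointer field:
 *
 *	struct foo {
 *		struct bar __rcu *bar_ptr;
 *	};
 *
 *	static struct bar default_bar;
 *	static struct foo default_foo = {
 *		RCU_POINTER_INITIALIZER(bar_ptr, &default_bar),
 *	};
 */
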
/*
 * Does the specified offset indicate that the corresponding rcu_head
 * structure can be handled by kfree_rcu()?
 */
#define __is_kfree_rcu_offset(offset) ((offset) < 4096)

/*
 * Helper macro for kfree_rcu() to prevent argument-expansion eyestrain.
 */
#define __kfree_rcu(head, offset) \
	do { \
		BUILD_BUG_ON(!__is_kfree_rcu_offset(offset)); \
		kfree_call_rcu(head, (void (*)(struct rcu_head *))(unsigned long)(offset)); \
	} while (0)

/**
 * kfree_rcu() - kfree an object after a grace period.
 * @ptr: pointer to kfree
 * @rcu_head: the name of the struct rcu_head within the type of @ptr.
 *
 * Many RCU callback functions just call kfree() on the base structure.
 * These functions are trivial, but their size adds up, and furthermore
 * when they are used in a kernel module, that module must invoke the
 * high-latency rcu_barrier() function at module-unload time.
 *
 * The kfree_rcu() function handles this issue.  Rather than encoding a
 * function address in the embedded rcu_head structure, kfree_rcu() instead
 * encodes the offset of the rcu_head structure within the base structure.
 * Because the functions are not allowed in the low-order 4096 bytes of
 * kernel virtual memory, offsets up to 4095 bytes can be accommodated.
 * If the offset is larger than 4095 bytes, a compile-time error will
 * be generated in __kfree_rcu().  If this error is triggered, you can
 * either fall back to use of call_rcu() or rearrange the structure to
 * position the rcu_head structure into the first 4096 bytes.
 *
 * Note that the allowable offset might decrease in the future, for example,
 * to allow something like kmem_cache_free_rcu().
 *
 * The BUILD_BUG_ON check must not involve any function calls, hence the
 * checks are done in macros here.
 */
#define kfree_rcu(ptr, rcu_head) \
	__kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head))

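/*
 * Usage sketch (hypothetical names): given a structure with an embedded
 * rcu_head, a single call frees the object after a grace period:
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	kfree_rcu(old_fp, rcu);
 *
 * This is equivalent to call_rcu(&old_fp->rcu, foo_reclaim) with a
 * foo_reclaim() callback that simply invokes kfree() on the enclosing
 * structure, but without the need to write that callback or to invoke
 * rcu_barrier() at module-unload time.
 */
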
#if defined(CONFIG_TINY_RCU) || defined(CONFIG_RCU_NOCB_CPU_ALL)
static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt)
{
	*nextevt = KTIME_MAX;
	return 0;
}
#endif /* #if defined(CONFIG_TINY_RCU) || defined(CONFIG_RCU_NOCB_CPU_ALL) */

#if defined(CONFIG_RCU_NOCB_CPU_ALL)
static inline bool rcu_is_nocb_cpu(int cpu) { return true; }
#elif defined(CONFIG_RCU_NOCB_CPU)
bool rcu_is_nocb_cpu(int cpu);
#else
static inline bool rcu_is_nocb_cpu(int cpu) { return false; }
#endif

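/*
 * Usage sketch (hypothetical): code that adapts its behavior to callback
 * offloading can test a CPU's no-CBs status, with the stubs above making
 * the answer trivially false (or true) when the relevant Kconfig options
 * are absent:
 *
 *	if (rcu_is_nocb_cpu(smp_processor_id()))
 *		account_offloaded_callback();
 *	else
 *		account_local_callback();
 */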

/* Only for use by adaptive-ticks code. */
#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
bool rcu_sys_is_idle(void);
void rcu_sysidle_force_exit(void);
#else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */

static inline bool rcu_sys_is_idle(void)
{
	return false;
}

static inline void rcu_sysidle_force_exit(void)
{
}

#endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */


#endif /* __LINUX_RCUPDATE_H */