/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>

/*
 * Define shape of hierarchy based on NR_CPUS and CONFIG_RCU_FANOUT.
 * In theory, it should be possible to add more levels straightforwardly.
 * In practice, this has not been tested, so there is probably some
 * bug somewhere.
 */
#define MAX_RCU_LVLS		4
#define RCU_FANOUT		(CONFIG_RCU_FANOUT)
#define RCU_FANOUT_SQ		(RCU_FANOUT * RCU_FANOUT)
#define RCU_FANOUT_CUBE		(RCU_FANOUT_SQ * RCU_FANOUT)
#define RCU_FANOUT_FOURTH	(RCU_FANOUT_CUBE * RCU_FANOUT)

#if NR_CPUS <= RCU_FANOUT
# define NUM_RCU_LVLS	1
# define NUM_RCU_LVL_0	1
# define NUM_RCU_LVL_1	(NR_CPUS)
# define NUM_RCU_LVL_2	0
# define NUM_RCU_LVL_3	0
# define NUM_RCU_LVL_4	0
#elif NR_CPUS <= RCU_FANOUT_SQ
# define NUM_RCU_LVLS	2
# define NUM_RCU_LVL_0	1
# define NUM_RCU_LVL_1	DIV_ROUND_UP(NR_CPUS, RCU_FANOUT)
# define NUM_RCU_LVL_2	(NR_CPUS)
# define NUM_RCU_LVL_3	0
# define NUM_RCU_LVL_4	0
#elif NR_CPUS <= RCU_FANOUT_CUBE
# define NUM_RCU_LVLS	3
# define NUM_RCU_LVL_0	1
# define NUM_RCU_LVL_1	DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_SQ)
# define NUM_RCU_LVL_2	DIV_ROUND_UP(NR_CPUS, RCU_FANOUT)
# define NUM_RCU_LVL_3	NR_CPUS
# define NUM_RCU_LVL_4	0
#elif NR_CPUS <= RCU_FANOUT_FOURTH
# define NUM_RCU_LVLS	4
# define NUM_RCU_LVL_0	1
# define NUM_RCU_LVL_1	DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_CUBE)
# define NUM_RCU_LVL_2	DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_SQ)
# define NUM_RCU_LVL_3	DIV_ROUND_UP(NR_CPUS, RCU_FANOUT)
# define NUM_RCU_LVL_4	NR_CPUS
#else
# error "CONFIG_RCU_FANOUT insufficient for NR_CPUS"
#endif /* #if (NR_CPUS) <= RCU_FANOUT */

#define RCU_SUM (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3 + NUM_RCU_LVL_4)
#define NUM_RCU_NODES (RCU_SUM - NR_CPUS)

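/*
 * Worked example (added for exposition; values are illustrative only).
 * Assuming CONFIG_RCU_FANOUT=64 and NR_CPUS=4096, NR_CPUS <= RCU_FANOUT_SQ
 * (4096 <= 64*64), so the two-level branch above applies:
 *
 *	NUM_RCU_LVLS  = 2
 *	NUM_RCU_LVL_0 = 1			(one root rcu_node)
 *	NUM_RCU_LVL_1 = DIV_ROUND_UP(4096, 64) = 64 leaf rcu_nodes
 *	NUM_RCU_LVL_2 = 4096			(rcu_data, one per CPU)
 *
 *	RCU_SUM       = 1 + 64 + 4096 = 4161
 *	NUM_RCU_NODES = 4161 - 4096   = 65 rcu_node structures in all.
 */
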
/*
 * Dynticks per-CPU state.
 */
struct rcu_dynticks {
	int dynticks_nesting;	/* Track nesting level, sort of. */
	int dynticks;		/* Even value for dynticks-idle, else odd. */
	int dynticks_nmi;	/* Even value for either dynticks-idle or */
				/*  not in nmi handler, else odd.  So this */
				/*  remains even for nmi from irq handler. */
};

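/*
 * Illustrative helper (added for exposition; the name is hypothetical,
 * and the real check lives in rcu_implicit_dynticks_qs() in rcutree.c).
 * Because ->dynticks is incremented on every transition into and out of
 * dynticks-idle mode, two samples suffice to infer a quiescent state:
 * if the counter changed, or if it is currently even, the CPU has been
 * (or still is) idle and so cannot be in an RCU read-side critical
 * section.
 */
static inline int rcu_dynticks_implies_qs_sketch(int snap, int curr)
{
	return curr != snap || (curr & 0x1) == 0;
}
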
/*
 * Definition for node within the RCU grace-period-detection hierarchy.
 */
struct rcu_node {
	spinlock_t lock;	/* Root rcu_node's lock protects some */
				/*  rcu_state fields as well as following. */
	long	gpnum;		/* Current grace period for this node. */
				/*  This will either be equal to or one */
				/*  behind the root rcu_node's gpnum. */
	long	completed;	/* Last grace period completed for this node. */
				/*  This will either be equal to or one */
				/*  behind the root rcu_node's completed. */
	unsigned long qsmask;	/* CPUs or groups that need to switch in */
				/*  order for current grace period to proceed.*/
				/*  In leaf rcu_node, each bit corresponds to */
				/*  an rcu_data structure, otherwise, each */
				/*  bit corresponds to a child rcu_node */
				/*  structure. */
	unsigned long expmask;	/* Groups that have ->blocked_tasks[] */
				/*  elements that need to drain to allow the */
				/*  current expedited grace period to */
				/*  complete (only for TREE_PREEMPT_RCU). */
	unsigned long qsmaskinit;
				/* Per-GP initial value for qsmask & expmask. */
	unsigned long grpmask;	/* Mask to apply to parent qsmask. */
				/*  Only one bit will be set in this mask. */
	int	grplo;		/* lowest-numbered CPU or group here. */
	int	grphi;		/* highest-numbered CPU or group here. */
	u8	grpnum;		/* CPU/group number for next level up. */
	u8	level;		/* root is at level 0. */
	struct rcu_node *parent;
	struct list_head blocked_tasks[4];
				/* Tasks blocked in RCU read-side critsect. */
				/*  Grace period number x (->gpnum) is blocked */
				/*  by tasks on the (x & 0x1) element of the */
				/*  blocked_tasks[] array. */
} ____cacheline_internodealigned_in_smp;

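/*
 * Propagation sketch (added for exposition; the name is hypothetical,
 * and the real work, including the required locking and iteration up
 * the tree, is done by rcu_report_qs_rnp() in rcutree.c).  When the
 * last CPU or group covered by an rcu_node reports a quiescent state,
 * that node clears its own bit in its parent's ->qsmask via ->grpmask:
 */
static inline int rcu_clear_bit_in_parent_sketch(struct rcu_node *rnp)
{
	struct rcu_node *rnp_p = rnp->parent;

	if (rnp_p == NULL)
		return 1;		/* Root node: grace period can end. */
	rnp_p->qsmask &= ~rnp->grpmask;	/* Real code holds rnp_p->lock here. */
	return rnp_p->qsmask == 0;	/* Is the parent now done as well? */
}
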
/*
 * Do a full breadth-first scan of the rcu_node structures for the
 * specified rcu_state structure.
 */
#define rcu_for_each_node_breadth_first(rsp, rnp) \
	for ((rnp) = &(rsp)->node[0]; \
	     (rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++)

/*
 * Do a breadth-first scan of the non-leaf rcu_node structures for the
 * specified rcu_state structure.  Note that if there is a singleton
 * rcu_node tree with but one rcu_node structure, this loop is a no-op.
 */
#define rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) \
	for ((rnp) = &(rsp)->node[0]; \
	     (rnp) < (rsp)->level[NUM_RCU_LVLS - 1]; (rnp)++)

/*
 * Scan the leaves of the rcu_node hierarchy for the specified rcu_state
 * structure.  Note that if there is a singleton rcu_node tree with but
 * one rcu_node structure, this loop -will- visit the rcu_node structure.
 * It is still a leaf node, even if it is also the root node.
 */
#define rcu_for_each_leaf_node(rsp, rnp) \
	for ((rnp) = (rsp)->level[NUM_RCU_LVLS - 1]; \
	     (rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++)

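/*
 * Usage sketch (added for exposition; shown as a comment because
 * struct rcu_state is not defined until later in this file).  These
 * iterators rely on the breadth-first "heap" layout of ->node[]: the
 * leaves form the contiguous tail of the array, beginning at
 * ->level[NUM_RCU_LVLS - 1].  Counting the CPUs covered by all of the
 * leaves might therefore look like:
 *
 *	struct rcu_node *rnp;
 *	int n = 0;
 *
 *	rcu_for_each_leaf_node(rsp, rnp)
 *		n += rnp->grphi - rnp->grplo + 1;
 */
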
/* Index values for nxttail array in struct rcu_data. */
#define RCU_DONE_TAIL		0	/* Also RCU_WAIT head. */
#define RCU_WAIT_TAIL		1	/* Also RCU_NEXT_READY head. */
#define RCU_NEXT_READY_TAIL	2	/* Also RCU_NEXT head. */
#define RCU_NEXT_TAIL		3
#define RCU_NEXT_SIZE		4

/* Per-CPU data for read-copy update. */
struct rcu_data {
	/* 1) quiescent-state and grace-period handling : */
	long		completed;	/* Track rsp->completed gp number */
					/*  in order to detect GP end. */
	long		gpnum;		/* Highest gp number that this CPU */
					/*  is aware of having started. */
	long		passed_quiesc_completed;
					/* Value of completed at time of qs. */
	bool		passed_quiesc;	/* User-mode/idle loop etc. */
	bool		qs_pending;	/* Core waits for quiesc state. */
	bool		beenonline;	/* CPU online at least once. */
	bool		preemptable;	/* Preemptable RCU? */
	struct rcu_node *mynode;	/* This CPU's leaf of hierarchy */
	unsigned long	grpmask;	/* Mask to apply to leaf qsmask. */

	/* 2) batch handling */
	/*
	 * If nxtlist is not NULL, it is partitioned as follows.
	 * Any of the partitions might be empty, in which case the
	 * pointer to that partition will be equal to the pointer for
	 * the following partition.  When the list is empty, all of
	 * the nxttail elements point to the ->nxtlist pointer itself,
	 * which in that case is NULL.
	 *
	 * [nxtlist, *nxttail[RCU_DONE_TAIL]):
	 *	Entries whose batch # <= ->completed.
	 *	The grace period for these entries has completed, and
	 *	the other grace-period-completed entries may be moved
	 *	here temporarily in rcu_process_callbacks().
	 * [*nxttail[RCU_DONE_TAIL], *nxttail[RCU_WAIT_TAIL]):
	 *	Entries whose batch # <= ->completed - 1: waiting for current GP.
	 * [*nxttail[RCU_WAIT_TAIL], *nxttail[RCU_NEXT_READY_TAIL]):
	 *	Entries known to have arrived before current GP ended.
	 * [*nxttail[RCU_NEXT_READY_TAIL], *nxttail[RCU_NEXT_TAIL]):
	 *	Entries that might have arrived after current GP ended.
	 *	Note that the value of *nxttail[RCU_NEXT_TAIL] will
	 *	always be NULL, as this is the end of the list.
	 */
	struct rcu_head *nxtlist;
	struct rcu_head **nxttail[RCU_NEXT_SIZE];
	long		qlen;		/* # of queued callbacks */
	long		qlen_last_fqs_check;
					/* qlen at last check for QS forcing */
	unsigned long	n_force_qs_snap;
					/* did other CPU force QS recently? */
	long		blimit;		/* Upper limit on a processed batch */

#ifdef CONFIG_NO_HZ
	/* 3) dynticks interface. */
	struct rcu_dynticks *dynticks;	/* Shared per-CPU dynticks state. */
	int		dynticks_snap;	/* Per-GP tracking for dynticks. */
	int		dynticks_nmi_snap;
					/* Per-GP tracking for dynticks_nmi. */
#endif /* #ifdef CONFIG_NO_HZ */

	/* 4) reasons this CPU needed to be kicked by force_quiescent_state */
#ifdef CONFIG_NO_HZ
	unsigned long	dynticks_fqs;	/* Kicked due to dynticks idle. */
#endif /* #ifdef CONFIG_NO_HZ */
	unsigned long	offline_fqs;	/* Kicked due to being offline. */
	unsigned long	resched_ipi;	/* Sent a resched IPI. */

	/* 5) __rcu_pending() statistics. */
	long		n_rcu_pending;	/* rcu_pending() calls since boot. */
	long		n_rp_qs_pending;
	long		n_rp_cb_ready;
	long		n_rp_cpu_needs_gp;
	long		n_rp_gp_completed;
	long		n_rp_gp_started;
	long		n_rp_need_fqs;
	long		n_rp_need_nothing;

	int		cpu;
};

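/*
 * Enqueue sketch (added for exposition; the name is hypothetical, and
 * the real enqueue is done by __call_rcu() in rcutree.c with interrupts
 * disabled).  New callbacks always go at the very end of ->nxtlist,
 * through the RCU_NEXT_TAIL tail pointer:
 */
static inline void rcu_enqueue_callback_sketch(struct rcu_data *rdp,
					       struct rcu_head *head)
{
	head->next = NULL;
	*rdp->nxttail[RCU_NEXT_TAIL] = head;	   /* Append at end of list. */
	rdp->nxttail[RCU_NEXT_TAIL] = &head->next; /* Tail now past new entry. */
}
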
/* Values for signaled field in struct rcu_state. */
#define RCU_GP_IDLE		0	/* No grace period in progress. */
#define RCU_GP_INIT		1	/* Grace period being initialized. */
#define RCU_SAVE_DYNTICK	2	/* Need to scan dyntick state. */
#define RCU_FORCE_QS		3	/* Need to force quiescent state. */
#ifdef CONFIG_NO_HZ
#define RCU_SIGNAL_INIT		RCU_SAVE_DYNTICK
#else /* #ifdef CONFIG_NO_HZ */
#define RCU_SIGNAL_INIT		RCU_FORCE_QS
#endif /* #else #ifdef CONFIG_NO_HZ */

#define RCU_JIFFIES_TILL_FORCE_QS	 3	/* for rsp->jiffies_force_qs */
#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
#define RCU_SECONDS_TILL_STALL_CHECK	(10 * HZ)  /* for rsp->jiffies_stall */
#define RCU_SECONDS_TILL_STALL_RECHECK	(30 * HZ)  /* for rsp->jiffies_stall */
#define RCU_STALL_RAT_DELAY		2	/* Allow other CPUs time */
						/*  to take at least one */
						/*  scheduling clock irq */
						/*  before ratting on them. */

#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */

/*
 * RCU global state, including node hierarchy.  This hierarchy is
 * represented in "heap" form in a dense array.  The root (first level)
 * of the hierarchy is in ->node[0] (referenced by ->level[0]), the second
 * level in ->node[1] through ->node[m] (->node[1] referenced by ->level[1]),
 * and the third level in ->node[m+1] and following (->node[m+1] referenced
 * by ->level[2]).  The number of levels is determined by the number of
 * CPUs and by CONFIG_RCU_FANOUT.  Small systems will have a "hierarchy"
 * consisting of a single rcu_node.
 */
struct rcu_state {
	struct rcu_node node[NUM_RCU_NODES];	/* Hierarchy. */
	struct rcu_node *level[NUM_RCU_LVLS];	/* Hierarchy levels. */
	u32 levelcnt[MAX_RCU_LVLS + 1];		/* # nodes in each level. */
	u8 levelspread[NUM_RCU_LVLS];		/* kids/node in each level. */
	struct rcu_data *rda[NR_CPUS];		/* array of rdp pointers. */

	/* The following fields are guarded by the root rcu_node's lock. */

	u8	signaled ____cacheline_internodealigned_in_smp;
						/* Force QS state. */
	u8	fqs_active;			/* force_quiescent_state() */
						/*  is running. */
	u8	fqs_need_gp;			/* A CPU was prevented from */
						/*  starting a new grace */
						/*  period because */
						/*  force_quiescent_state() */
						/*  was running. */
	long	gpnum;				/* Current gp number. */
	long	completed;			/* # of last completed gp. */

	/* End of fields guarded by root rcu_node's lock. */

	spinlock_t onofflock;			/* exclude on/offline and */
						/*  starting new GP.  Also */
						/*  protects the following */
						/*  orphan_cbs fields. */
	struct rcu_head *orphan_cbs_list;	/* list of rcu_head structs */
						/*  orphaned by all CPUs in */
						/*  a given leaf rcu_node */
						/*  going offline. */
	struct rcu_head **orphan_cbs_tail;	/* And tail pointer. */
	long	orphan_qlen;			/* Number of orphaned cbs. */
	spinlock_t fqslock;			/* Only one task forcing */
						/*  quiescent states. */
	unsigned long jiffies_force_qs;		/* Time at which to invoke */
						/*  force_quiescent_state(). */
	unsigned long n_force_qs;		/* Number of calls to */
						/*  force_quiescent_state(). */
	unsigned long n_force_qs_lh;		/* ~Number of calls leaving */
						/*  due to lock unavailable. */
	unsigned long n_force_qs_ngp;		/* Number of calls leaving */
						/*  due to no GP active. */
#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
	unsigned long gp_start;			/* Time at which GP started, */
						/*  but in jiffies. */
	unsigned long jiffies_stall;		/* Time at which to check */
						/*  for CPU stalls. */
#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
};

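/*
 * Illustrative accessor (added for exposition; mirrors what
 * rcu_get_root() in rcutree.c does).  Because the hierarchy is stored
 * breadth-first in a dense array, the root rcu_node is always
 * ->node[0], which is also what ->level[0] points to:
 */
static inline struct rcu_node *rcu_get_root_sketch(struct rcu_state *rsp)
{
	return &rsp->node[0];
}
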
/* Return values for rcu_preempt_offline_tasks(). */

#define RCU_OFL_TASKS_NORM_GP	0x1	/* Tasks blocking normal */
					/*  GP were moved to root. */
#define RCU_OFL_TASKS_EXP_GP	0x2	/* Tasks blocking expedited */
					/*  GP were moved to root. */

/*
 * RCU implementation internal declarations:
 */
extern struct rcu_state rcu_sched_state;
DECLARE_PER_CPU(struct rcu_data, rcu_sched_data);

extern struct rcu_state rcu_bh_state;
DECLARE_PER_CPU(struct rcu_data, rcu_bh_data);

#ifdef CONFIG_TREE_PREEMPT_RCU
extern struct rcu_state rcu_preempt_state;
DECLARE_PER_CPU(struct rcu_data, rcu_preempt_data);
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */

#ifndef RCU_TREE_NONCORE

/* Forward declarations for rcutree_plugin.h */
static void rcu_bootup_announce(void);
long rcu_batches_completed(void);
static void rcu_preempt_note_context_switch(int cpu);
static int rcu_preempted_readers(struct rcu_node *rnp);
#ifdef CONFIG_HOTPLUG_CPU
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp,
				      unsigned long flags);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
static void rcu_print_task_stall(struct rcu_node *rnp);
#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
#ifdef CONFIG_HOTPLUG_CPU
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
				     struct rcu_node *rnp,
				     struct rcu_data *rdp);
static void rcu_preempt_offline_cpu(int cpu);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
static void rcu_preempt_check_callbacks(int cpu);
static void rcu_preempt_process_callbacks(void);
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU)
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp);
#endif /* #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU) */
static int rcu_preempt_pending(int cpu);
static int rcu_preempt_needs_cpu(int cpu);
static void __cpuinit rcu_preempt_init_percpu_data(int cpu);
static void rcu_preempt_send_cbs_to_orphanage(void);
static void __init __rcu_init_preempt(void);

#endif /* #ifndef RCU_TREE_NONCORE */