/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions that provide either classic
 * or preemptable semantics.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright Red Hat, 2009
 * Copyright IBM Corporation, 2009
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *         Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/delay.h>

#ifdef CONFIG_TREE_PREEMPT_RCU

struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state);
DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);

static int rcu_preempted_readers_exp(struct rcu_node *rnp);

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
        printk(KERN_INFO
               "Experimental preemptable hierarchical RCU implementation.\n");
}

/*
 * Return the number of RCU-preempt batches processed thus far
 * for debug and statistics.
 */
long rcu_batches_completed_preempt(void)
{
        return rcu_preempt_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt);

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
        return rcu_batches_completed_preempt();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Record a preemptable-RCU quiescent state for the specified CPU.  Note
 * that this just means that the task currently running on the CPU is
 * not in an RCU read-side critical section.  There might be any number
 * of tasks blocked while in an RCU read-side critical section.
 */
static void rcu_preempt_qs(int cpu)
{
        struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);
        rdp->passed_quiesc_completed = rdp->gpnum - 1;
        barrier();
        rdp->passed_quiesc = 1;
}
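
/*
 * Ordering note (added for exposition, not part of the original file):
 * the barrier() above is a compiler barrier.  Because ->passed_quiesc
 * is examined from this CPU's own RCU core processing, it suffices to
 * keep the compiler from reordering the store to ->passed_quiesc ahead
 * of the store to ->passed_quiesc_completed, so that once
 * ->passed_quiesc is seen set, the grace-period number it applies to
 * has already been recorded.
 */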

/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from.  If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the appropriate entry
 * of the blocked_tasks[] array.  The task will dequeue itself when
 * it exits the outermost enclosing RCU read-side critical section.
 * Therefore, the current grace period cannot be permitted to complete
 * until the blocked_tasks[] entry indexed by the low-order bit of
 * rnp->gpnum empties.
 *
 * Caller must disable preemption.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
        struct task_struct *t = current;
        unsigned long flags;
        int phase;
        struct rcu_data *rdp;
        struct rcu_node *rnp;

        if (t->rcu_read_lock_nesting &&
            (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {

                /* Possibly blocking in an RCU read-side critical section. */
                rdp = rcu_preempt_state.rda[cpu];
                rnp = rdp->mynode;
                spin_lock_irqsave(&rnp->lock, flags);
                t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
                t->rcu_blocked_node = rnp;

                /*
                 * If this CPU has already checked in, then this task
                 * will hold up the next grace period rather than the
                 * current grace period.  Queue the task accordingly.
                 * If the task is queued for the current grace period
                 * (i.e., this CPU has not yet passed through a quiescent
                 * state for the current grace period), then as long
                 * as that task remains queued, the current grace period
                 * cannot end.
                 *
                 * But first, note that the current CPU must still be
                 * on line!
                 */
                WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0);
                WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
                phase = (rnp->gpnum + !(rnp->qsmask & rdp->grpmask)) & 0x1;
                list_add(&t->rcu_node_entry, &rnp->blocked_tasks[phase]);
                spin_unlock_irqrestore(&rnp->lock, flags);
        }

        /*
         * Either we were not in an RCU read-side critical section to
         * begin with, or we have now recorded that critical section
         * globally.  Either way, we can now note a quiescent state
         * for this CPU.  Again, if we were in an RCU read-side critical
         * section, and if that critical section was blocking the current
         * grace period, then the fact that the task has been enqueued
         * means that we continue to block the current grace period.
         */
        rcu_preempt_qs(cpu);
        local_irq_save(flags);
        t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
        local_irq_restore(flags);
}

/*
 * Tree-preemptable RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting, shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
        ACCESS_ONCE(current->rcu_read_lock_nesting)++;
        barrier();  /* needed if we ever invoke rcu_read_lock in rcutree.c */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

/*
 * Check for preempted RCU readers blocking the current grace period
 * for the specified rcu_node structure.  If the caller needs a reliable
 * answer, it must hold the rcu_node's ->lock.
 */
static int rcu_preempted_readers(struct rcu_node *rnp)
{
        int phase = rnp->gpnum & 0x1;

        return !list_empty(&rnp->blocked_tasks[phase]) ||
               !list_empty(&rnp->blocked_tasks[phase + 2]);
}

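/*
 * Illustrative note (added for exposition, not part of the original
 * file): how the four ->blocked_tasks[] lists are indexed.  Lists [0]
 * and [1] hold tasks blocking normal grace periods, selected by the
 * low-order bit of the grace-period number; lists [2] and [3] hold the
 * same tasks once they have been snapshotted for an expedited grace
 * period:
 *
 *	idx_norm = rnp->gpnum & 0x1;	current normal GP
 *	idx_exp  = idx_norm + 2;	expedited snapshot of same tasks
 *
 * sync_rcu_preempt_exp_init() below splices [0] and [1] onto [2] and
 * [3].  Tasks on the expedited lists still block the normal grace
 * period, which is why rcu_preempted_readers() above must check both
 * [phase] and [phase + 2].
 */
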
/*
 * Record a quiescent state for all tasks that were previously queued
 * on the specified rcu_node structure and that were blocking the current
 * RCU grace period.  The caller must hold the specified rnp->lock with
 * irqs disabled, and this lock is released upon return, but irqs remain
 * disabled.
 */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
        __releases(rnp->lock)
{
        unsigned long mask;
        struct rcu_node *rnp_p;

        if (rnp->qsmask != 0 || rcu_preempted_readers(rnp)) {
                spin_unlock_irqrestore(&rnp->lock, flags);
                return;  /* Still need more quiescent states! */
        }

        rnp_p = rnp->parent;
        if (rnp_p == NULL) {
                /*
                 * Either there is only one rcu_node in the tree,
                 * or tasks were kicked up to root rcu_node due to
                 * CPUs going offline.
                 */
                rcu_report_qs_rsp(&rcu_preempt_state, flags);
                return;
        }

        /* Report up the rest of the hierarchy. */
        mask = rnp->grpmask;
        spin_unlock(&rnp->lock);   /* irqs remain disabled. */
        spin_lock(&rnp_p->lock);   /* irqs already disabled. */
        rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);
}

/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or task having blocked during the RCU
 * read-side critical section.
 */
static void rcu_read_unlock_special(struct task_struct *t)
{
        int empty;
        int empty_exp;
        unsigned long flags;
        struct rcu_node *rnp;
        int special;

        /* NMI handlers cannot block and cannot safely manipulate state. */
        if (in_nmi())
                return;

        local_irq_save(flags);

        /*
         * If RCU core is waiting for this CPU to exit critical section,
         * let it know that we have done so.
         */
        special = t->rcu_read_unlock_special;
        if (special & RCU_READ_UNLOCK_NEED_QS) {
                t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
                rcu_preempt_qs(smp_processor_id());
        }

        /* Hardware IRQ handlers cannot block. */
        if (in_irq()) {
                local_irq_restore(flags);
                return;
        }

        /* Clean up if blocked during RCU read-side critical section. */
        if (special & RCU_READ_UNLOCK_BLOCKED) {
                t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;

                /*
                 * Remove this task from the list it blocked on.  The
                 * task can migrate while we acquire the lock, but at
                 * most one time.  So at most two passes through loop.
                 */
                for (;;) {
                        rnp = t->rcu_blocked_node;
                        spin_lock(&rnp->lock);  /* irqs already disabled. */
                        if (rnp == t->rcu_blocked_node)
                                break;
                        spin_unlock(&rnp->lock);  /* irqs remain disabled. */
                }
                empty = !rcu_preempted_readers(rnp);
                empty_exp = !rcu_preempted_readers_exp(rnp);
                smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
                list_del_init(&t->rcu_node_entry);
                t->rcu_blocked_node = NULL;

                /*
                 * If this was the last task on the current list, and if
                 * we aren't waiting on any CPUs, report the quiescent state.
                 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock.
                 */
                if (empty)
                        spin_unlock_irqrestore(&rnp->lock, flags);
                else
                        rcu_report_unblock_qs_rnp(rnp, flags);

                /*
                 * If this was the last task on the expedited lists,
                 * then we need to report up the rcu_node hierarchy.
                 */
                if (!empty_exp && !rcu_preempted_readers_exp(rnp))
                        rcu_report_exp_rnp(&rcu_preempt_state, rnp);
        } else {
                local_irq_restore(flags);
        }
}

/*
 * Tree-preemptable RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
        struct task_struct *t = current;

        barrier();  /* needed if we ever invoke rcu_read_unlock in rcutree.c */
        if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
            unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
                rcu_read_unlock_special(t);
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);
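
/*
 * Illustrative sketch (added for exposition; "gp", "struct foo", and
 * do_something_with() are hypothetical): a typical read-side critical
 * section built on the two primitives above.  Under preemptable RCU,
 * the reader may be preempted within the critical section without
 * indefinitely blocking the grace period, because the blocked task is
 * queued on blocked_tasks[] as described earlier:
 *
 *	struct foo *p;
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	if (p != NULL)
 *		do_something_with(p);
 *	rcu_read_unlock();
 */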

#ifdef CONFIG_RCU_CPU_STALL_DETECTOR

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each.
 */
static void rcu_print_task_stall(struct rcu_node *rnp)
{
        unsigned long flags;
        struct list_head *lp;
        int phase;
        struct task_struct *t;

        if (rcu_preempted_readers(rnp)) {
                spin_lock_irqsave(&rnp->lock, flags);
                phase = rnp->gpnum & 0x1;
                lp = &rnp->blocked_tasks[phase];
                list_for_each_entry(t, lp, rcu_node_entry)
                        printk(" P%d", t->pid);
                spin_unlock_irqrestore(&rnp->lock, flags);
        }
}

#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */

/*
 * Check that the list of blocked tasks for the newly completed grace
 * period is in fact empty.  It is a serious bug to complete a grace
 * period that still has RCU readers blocked!  This function must be
 * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
 * must be held by the caller.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
        WARN_ON_ONCE(rcu_preempted_readers(rnp));
        WARN_ON_ONCE(rnp->qsmask);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Handle tasklist migration for the case in which all CPUs covered by
 * the specified rcu_node have gone offline.  Move them up to the root
 * rcu_node.  The reason for not just moving them to the immediate
 * parent is to remove the need for rcu_read_unlock_special() to
 * make more than two attempts to acquire the target rcu_node's lock.
 *
 * Returns a mask with RCU_OFL_TASKS_NORM_GP and/or RCU_OFL_TASKS_EXP_GP
 * set if tasks on the specified rcu_node structure were blocking the
 * current normal and/or expedited RCU grace period, or zero if there
 * were no such tasks.
 *
 * The caller must hold rnp->lock with irqs disabled.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
                                     struct rcu_node *rnp,
                                     struct rcu_data *rdp)
{
        int i;
        struct list_head *lp;
        struct list_head *lp_root;
        int retval = 0;
        struct rcu_node *rnp_root = rcu_get_root(rsp);
        struct task_struct *tp;

        if (rnp == rnp_root) {
                WARN_ONCE(1, "Last CPU thought to be offlined?");
                return 0;  /* Shouldn't happen: at least one CPU online. */
        }
        WARN_ON_ONCE(rnp != rdp->mynode &&
                     (!list_empty(&rnp->blocked_tasks[0]) ||
                      !list_empty(&rnp->blocked_tasks[1]) ||
                      !list_empty(&rnp->blocked_tasks[2]) ||
                      !list_empty(&rnp->blocked_tasks[3])));

        /*
         * Move tasks up to root rcu_node.  Rely on the fact that the
         * root rcu_node can be at most one ahead of the rest of the
         * rcu_nodes in terms of gpnum value.  This fact allows us to
         * move the blocked_tasks[] array directly, element by element.
         */
        if (rcu_preempted_readers(rnp))
                retval |= RCU_OFL_TASKS_NORM_GP;
        if (rcu_preempted_readers_exp(rnp))
                retval |= RCU_OFL_TASKS_EXP_GP;
        for (i = 0; i < 4; i++) {
                lp = &rnp->blocked_tasks[i];
                lp_root = &rnp_root->blocked_tasks[i];
                while (!list_empty(lp)) {
                        tp = list_entry(lp->next, typeof(*tp), rcu_node_entry);
                        spin_lock(&rnp_root->lock); /* irqs already disabled */
                        list_del(&tp->rcu_node_entry);
                        tp->rcu_blocked_node = rnp_root;
                        list_add(&tp->rcu_node_entry, lp_root);
                        spin_unlock(&rnp_root->lock); /* irqs remain disabled */
                }
        }
        return retval;
}

/*
 * Do CPU-offline processing for preemptable RCU.
 */
static void rcu_preempt_offline_cpu(int cpu)
{
        __rcu_offline_cpu(cpu, &rcu_preempt_state);
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Check for a quiescent state from the current CPU.  When a task blocks,
 * the task is recorded in the corresponding CPU's rcu_node structure,
 * which is checked elsewhere.
 *
 * Caller must disable hard irqs.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
        struct task_struct *t = current;

        if (t->rcu_read_lock_nesting == 0) {
                t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
                rcu_preempt_qs(cpu);
                return;
        }
        if (per_cpu(rcu_preempt_data, cpu).qs_pending)
                t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
}

/*
 * Process callbacks for preemptable RCU.
 */
static void rcu_preempt_process_callbacks(void)
{
        __rcu_process_callbacks(&rcu_preempt_state,
                                &__get_cpu_var(rcu_preempt_data));
}

/*
 * Queue a preemptable-RCU callback for invocation after a grace period.
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
        __call_rcu(head, func, &rcu_preempt_state);
}
EXPORT_SYMBOL_GPL(call_rcu);
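
/*
 * Illustrative sketch (added for exposition; "struct foo" and its
 * fields are hypothetical): the usual way to free an RCU-protected
 * structure from a callback queued by call_rcu() above, using
 * container_of() to recover the enclosing structure from the embedded
 * rcu_head:
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *head)
 *	{
 *		kfree(container_of(head, struct foo, rcu));
 *	}
 *
 *	call_rcu(&old_p->rcu, foo_reclaim);
 */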

/**
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 */
void synchronize_rcu(void)
{
        struct rcu_synchronize rcu;

        if (!rcu_scheduler_active)
                return;

        init_completion(&rcu.completion);
        /* Will wake me after RCU finished. */
        call_rcu(&rcu.head, wakeme_after_rcu);
        /* Wait for it. */
        wait_for_completion(&rcu.completion);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
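
/*
 * Illustrative sketch (added for exposition; "gp", "gp_lock", and the
 * pointers are hypothetical): the classic update pattern built on
 * synchronize_rcu(), replacing an RCU-protected pointer and freeing
 * the old version only after all pre-existing readers have finished:
 *
 *	spin_lock(&gp_lock);
 *	old_p = gp;
 *	rcu_assign_pointer(gp, new_p);
 *	spin_unlock(&gp_lock);
 *	synchronize_rcu();	wait for pre-existing readers
 *	kfree(old_p);
 */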

static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
static long sync_rcu_preempt_exp_count;
static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);

/*
 * Return non-zero if there are any tasks in RCU read-side critical
 * sections blocking the current preemptible-RCU expedited grace period.
 * If there is no preemptible-RCU expedited grace period currently in
 * progress, returns zero unconditionally.
 */
static int rcu_preempted_readers_exp(struct rcu_node *rnp)
{
        return !list_empty(&rnp->blocked_tasks[2]) ||
               !list_empty(&rnp->blocked_tasks[3]);
}

/*
 * Return non-zero if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.  Works only for preemptible
 * RCU -- other RCU implementations use other means.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
{
        return !rcu_preempted_readers_exp(rnp) &&
               ACCESS_ONCE(rnp->expmask) == 0;
}

/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree.  (Calm down, calm down, we do the recursion
 * iteratively!)
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
{
        unsigned long flags;
        unsigned long mask;

        spin_lock_irqsave(&rnp->lock, flags);
        for (;;) {
                if (!sync_rcu_preempt_exp_done(rnp))
                        break;
                if (rnp->parent == NULL) {
                        wake_up(&sync_rcu_preempt_exp_wq);
                        break;
                }
                mask = rnp->grpmask;
                spin_unlock(&rnp->lock); /* irqs remain disabled */
                rnp = rnp->parent;
                spin_lock(&rnp->lock); /* irqs already disabled */
                rnp->expmask &= ~mask;
        }
        spin_unlock_irqrestore(&rnp->lock, flags);
}

/*
 * Snapshot the tasks blocking the newly started preemptible-RCU expedited
 * grace period for the specified rcu_node structure.  If there are no such
 * tasks, report it up the rcu_node hierarchy.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex and rsp->onofflock.
 */
static void
sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
{
        int must_wait;

        spin_lock(&rnp->lock); /* irqs already disabled */
        list_splice_init(&rnp->blocked_tasks[0], &rnp->blocked_tasks[2]);
        list_splice_init(&rnp->blocked_tasks[1], &rnp->blocked_tasks[3]);
        must_wait = rcu_preempted_readers_exp(rnp);
        spin_unlock(&rnp->lock); /* irqs remain disabled */
        if (!must_wait)
                rcu_report_exp_rnp(rsp, rnp);
}

/*
 * Wait for an rcu-preempt grace period, but expedite it.  The basic idea
 * is to invoke synchronize_sched_expedited() to push all the tasks to
 * the ->blocked_tasks[] lists, move all entries from the first set of
 * ->blocked_tasks[] lists to the second set, and finally wait for this
 * second set to drain.
 */
void synchronize_rcu_expedited(void)
{
        unsigned long flags;
        struct rcu_node *rnp;
        struct rcu_state *rsp = &rcu_preempt_state;
        long snap;
        int trycount = 0;

        smp_mb(); /* Caller's modifications seen first by other CPUs. */
        snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1;
        smp_mb(); /* Above access cannot bleed into critical section. */

        /*
         * Acquire lock, falling back to synchronize_rcu() if too many
         * lock-acquisition failures.  Of course, if someone does the
         * expedited grace period for us, just leave.
         */
        while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
                if (trycount++ < 10)
                        udelay(trycount * num_online_cpus());
                else {
                        synchronize_rcu();
                        return;
                }
                if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
                        goto mb_ret; /* Others did our work for us. */
        }
        if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
                goto unlock_mb_ret; /* Others did our work for us. */

        /* force all RCU readers onto blocked_tasks[]. */
        synchronize_sched_expedited();

        spin_lock_irqsave(&rsp->onofflock, flags);

        /* Initialize ->expmask for all non-leaf rcu_node structures. */
        rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
                spin_lock(&rnp->lock); /* irqs already disabled. */
                rnp->expmask = rnp->qsmaskinit;
                spin_unlock(&rnp->lock); /* irqs remain disabled. */
        }

        /* Snapshot current state of ->blocked_tasks[] lists. */
        rcu_for_each_leaf_node(rsp, rnp)
                sync_rcu_preempt_exp_init(rsp, rnp);
        if (NUM_RCU_NODES > 1)
                sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp));

        spin_unlock_irqrestore(&rsp->onofflock, flags);

        /* Wait for snapshotted ->blocked_tasks[] lists to drain. */
        rnp = rcu_get_root(rsp);
        wait_event(sync_rcu_preempt_exp_wq,
                   sync_rcu_preempt_exp_done(rnp));

        /* Clean up and exit. */
        smp_mb(); /* ensure expedited GP seen before counter increment. */
        ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
unlock_mb_ret:
        mutex_unlock(&sync_rcu_preempt_exp_mutex);
mb_ret:
        smp_mb(); /* ensure subsequent action seen after grace period. */
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
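
/*
 * Usage note (added for exposition): synchronize_rcu_expedited() trades
 * CPU cycles and system-wide disruption -- synchronize_sched_expedited()
 * forces every CPU through a context switch -- for much lower
 * grace-period latency.  It is therefore best reserved for
 * latency-sensitive slow paths; bulk reclamation should stick with
 * plain synchronize_rcu().
 */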

/*
 * Check to see if there is any immediate preemptable-RCU-related work
 * to be done.
 */
static int rcu_preempt_pending(int cpu)
{
        return __rcu_pending(&rcu_preempt_state,
                             &per_cpu(rcu_preempt_data, cpu));
}

/*
 * Does preemptable RCU need the CPU to stay out of dynticks mode?
 */
static int rcu_preempt_needs_cpu(int cpu)
{
        return !!per_cpu(rcu_preempt_data, cpu).nxtlist;
}

/**
 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
 */
void rcu_barrier(void)
{
        _rcu_barrier(&rcu_preempt_state, call_rcu);
}
EXPORT_SYMBOL_GPL(rcu_barrier);
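
/*
 * Illustrative sketch (added for exposition; the foo_*() names are
 * hypothetical): a module that queues callbacks with call_rcu() must
 * wait for all of them to be invoked before its text can safely be
 * unloaded, which is exactly what rcu_barrier() provides:
 *
 *	static void foo_cleanup_module(void)
 *	{
 *		foo_stop_queueing_callbacks();
 *		rcu_barrier();		all queued callbacks have now run
 *		foo_free_remaining_state();
 *	}
 */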

/*
 * Initialize preemptable RCU's per-CPU data.
 */
static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
{
        rcu_init_percpu_data(cpu, &rcu_preempt_state, 1);
}

/*
 * Move preemptable RCU's callbacks to ->orphan_cbs_list.
 */
static void rcu_preempt_send_cbs_to_orphanage(void)
{
        rcu_send_cbs_to_orphanage(&rcu_preempt_state);
}

/*
 * Initialize preemptable RCU's state structures.
 */
static void __init __rcu_init_preempt(void)
{
        RCU_INIT_FLAVOR(&rcu_preempt_state, rcu_preempt_data);
}

/*
 * Check for a task exiting while in a preemptable-RCU read-side
 * critical section, clean up if so.  No need to issue warnings,
 * as debug_check_no_locks_held() already does this if lockdep
 * is enabled.
 */
void exit_rcu(void)
{
        struct task_struct *t = current;

        if (t->rcu_read_lock_nesting == 0)
                return;
        t->rcu_read_lock_nesting = 1;
        rcu_read_unlock();
}

#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
        printk(KERN_INFO "Hierarchical RCU implementation.\n");
}

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
        return rcu_batches_completed_sched();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Because preemptable RCU does not exist, we never have to check for
 * CPUs being in quiescent states.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
}

/*
 * Because preemptable RCU does not exist, there are never any preempted
 * RCU readers.
 */
static int rcu_preempted_readers(struct rcu_node *rnp)
{
        return 0;
}

#ifdef CONFIG_HOTPLUG_CPU

/* Because preemptible RCU does not exist, no quieting of tasks. */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
{
        spin_unlock_irqrestore(&rnp->lock, flags);
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

#ifdef CONFIG_RCU_CPU_STALL_DETECTOR

/*
 * Because preemptable RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_task_stall(struct rcu_node *rnp)
{
}

#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */

/*
 * Because there is no preemptable RCU, there can be no readers blocked,
 * so there is no need to check for blocked tasks.  Check only for
 * bogus qsmask values.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
        WARN_ON_ONCE(rnp->qsmask);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptable RCU does not exist, it never needs to migrate
 * tasks that were blocked within RCU read-side critical sections, and
 * such non-existent tasks cannot possibly have been blocking the current
 * grace period.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
                                     struct rcu_node *rnp,
                                     struct rcu_data *rdp)
{
        return 0;
}

/*
 * Because preemptable RCU does not exist, it never needs CPU-offline
 * processing.
 */
static void rcu_preempt_offline_cpu(int cpu)
{
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptable RCU does not exist, it never has any callbacks
 * to check.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
}

/*
 * Because preemptable RCU does not exist, it never has any callbacks
 * to process.
 */
static void rcu_preempt_process_callbacks(void)
{
}

/*
 * In classic RCU, call_rcu() is just call_rcu_sched().
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
        call_rcu_sched(head, func);
}
EXPORT_SYMBOL_GPL(call_rcu);

/*
 * Wait for an rcu-preempt grace period, but make it happen quickly.
 * Because preemptable RCU does not exist, map to rcu-sched.
 */
void synchronize_rcu_expedited(void)
{
        synchronize_sched_expedited();
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptable RCU does not exist, there is never any need to
 * report on tasks preempted in RCU read-side critical sections during
 * expedited RCU grace periods.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
{
        return;
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptable RCU does not exist, it never has any work to do.
 */
static int rcu_preempt_pending(int cpu)
{
        return 0;
}

/*
 * Because preemptable RCU does not exist, it never needs any CPU.
 */
static int rcu_preempt_needs_cpu(int cpu)
{
        return 0;
}

/*
 * Because preemptable RCU does not exist, rcu_barrier() is just
 * another name for rcu_barrier_sched().
 */
void rcu_barrier(void)
{
        rcu_barrier_sched();
}
EXPORT_SYMBOL_GPL(rcu_barrier);

/*
 * Because preemptable RCU does not exist, there is no per-CPU
 * data to initialize.
 */
static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
{
}

/*
 * Because there is no preemptable RCU, there are no callbacks to move.
 */
static void rcu_preempt_send_cbs_to_orphanage(void)
{
}

/*
 * Because preemptable RCU does not exist, it need not be initialized.
 */
static void __init __rcu_init_preempt(void)
{
}

#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */