/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright Red Hat, 2009
 * Copyright IBM Corporation, 2009
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/delay.h>
#include <linux/stop_machine.h>

#define RCU_KTHREAD_PRIO 1

#ifdef CONFIG_RCU_BOOST
#define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO
#else
#define RCU_BOOST_PRIO RCU_KTHREAD_PRIO
#endif

/*
 * Check the RCU kernel configuration parameters and print informative
 * messages about anything out of the ordinary.  If you like #ifdef, you
 * will love this function.
 */
static void __init rcu_bootup_announce_oddness(void)
{
#ifdef CONFIG_RCU_TRACE
	printk(KERN_INFO "\tRCU debugfs-based tracing is enabled.\n");
#endif
#if (defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) || (!defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32)
	printk(KERN_INFO "\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
	       CONFIG_RCU_FANOUT);
#endif
#ifdef CONFIG_RCU_FANOUT_EXACT
	printk(KERN_INFO "\tHierarchical RCU autobalancing is disabled.\n");
#endif
#ifdef CONFIG_RCU_FAST_NO_HZ
	printk(KERN_INFO
	       "\tRCU dyntick-idle grace-period acceleration is enabled.\n");
#endif
#ifdef CONFIG_PROVE_RCU
	printk(KERN_INFO "\tRCU lockdep checking is enabled.\n");
#endif
#ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE
	printk(KERN_INFO "\tRCU torture testing starts during boot.\n");
#endif
#if defined(CONFIG_TREE_PREEMPT_RCU) && !defined(CONFIG_RCU_CPU_STALL_VERBOSE)
	printk(KERN_INFO "\tVerbose stalled-CPUs detection is disabled.\n");
#endif
#if NUM_RCU_LVL_4 != 0
	printk(KERN_INFO "\tExperimental four-level hierarchy is enabled.\n");
#endif
}

#ifdef CONFIG_TREE_PREEMPT_RCU

struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt);
DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
static struct rcu_state *rcu_state = &rcu_preempt_state;

static void rcu_read_unlock_special(struct task_struct *t);
static int rcu_preempted_readers_exp(struct rcu_node *rnp);

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	printk(KERN_INFO "Preemptible hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/*
 * Return the number of RCU-preempt batches processed thus far
 * for debug and statistics.
 */
long rcu_batches_completed_preempt(void)
{
	return rcu_preempt_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt);

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
	return rcu_batches_completed_preempt();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Force a quiescent state for preemptible RCU.
 */
void rcu_force_quiescent_state(void)
{
	force_quiescent_state(&rcu_preempt_state, 0);
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

/*
 * Record a preemptible-RCU quiescent state for the specified CPU.  Note
 * that this just means that the task currently running on the CPU is
 * not in an RCU read-side critical section.  There might be any number
 * of tasks blocked while in an RCU read-side critical section.
 *
 * Unlike the other rcu_*_qs() functions, callers to this function
 * must disable irqs in order to protect the assignment to
 * ->rcu_read_unlock_special.
 */
static void rcu_preempt_qs(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);

	rdp->passed_quiesce_gpnum = rdp->gpnum;
	barrier();
	if (rdp->passed_quiesce == 0)
		trace_rcu_grace_period("rcu_preempt", rdp->gpnum, "cpuqs");
	rdp->passed_quiesce = 1;
	current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
}

/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from.  If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the blkd_tasks list.
 * The task will dequeue itself when it exits the outermost enclosing
 * RCU read-side critical section.  Therefore, the current grace period
 * cannot be permitted to complete until the blkd_tasks list entries
 * predating the current grace period drain, in other words, until
 * rnp->gp_tasks becomes NULL.
 *
 * Caller must disable preemption.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
	struct task_struct *t = current;
	unsigned long flags;
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	if (t->rcu_read_lock_nesting > 0 &&
	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {

		/* Possibly blocking in an RCU read-side critical section. */
		rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
		rnp = rdp->mynode;
		raw_spin_lock_irqsave(&rnp->lock, flags);
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
		t->rcu_blocked_node = rnp;

		/*
		 * If this CPU has already checked in, then this task
		 * will hold up the next grace period rather than the
		 * current grace period.  Queue the task accordingly.
		 * If the task is queued for the current grace period
		 * (i.e., this CPU has not yet passed through a quiescent
		 * state for the current grace period), then as long
		 * as that task remains queued, the current grace period
		 * cannot end.  Note that there is some uncertainty as
		 * to exactly when the current grace period started.
		 * We take a conservative approach, which can result
		 * in unnecessarily waiting on tasks that started very
		 * slightly after the current grace period began.  C'est
		 * la vie!!!
		 *
		 * But first, note that the current CPU must still be
		 * on line!
		 */
		WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0);
		WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
		if ((rnp->qsmask & rdp->grpmask) && rnp->gp_tasks != NULL) {
			list_add(&t->rcu_node_entry, rnp->gp_tasks->prev);
			rnp->gp_tasks = &t->rcu_node_entry;
#ifdef CONFIG_RCU_BOOST
			if (rnp->boost_tasks != NULL)
				rnp->boost_tasks = rnp->gp_tasks;
#endif /* #ifdef CONFIG_RCU_BOOST */
		} else {
			list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
			if (rnp->qsmask & rdp->grpmask)
				rnp->gp_tasks = &t->rcu_node_entry;
		}
		trace_rcu_preempt_task(rdp->rsp->name,
				       t->pid,
				       (rnp->qsmask & rdp->grpmask)
				       ? rnp->gpnum
				       : rnp->gpnum + 1);
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	} else if (t->rcu_read_lock_nesting < 0 &&
		   t->rcu_read_unlock_special) {

		/*
		 * Complete exit from RCU read-side critical section on
		 * behalf of preempted instance of __rcu_read_unlock().
		 */
		rcu_read_unlock_special(t);
	}

	/*
	 * Either we were not in an RCU read-side critical section to
	 * begin with, or we have now recorded that critical section
	 * globally.  Either way, we can now note a quiescent state
	 * for this CPU.  Again, if we were in an RCU read-side critical
	 * section, and if that critical section was blocking the current
	 * grace period, then the fact that the task has been enqueued
	 * means that we continue to block the current grace period.
	 */
	local_irq_save(flags);
	rcu_preempt_qs(cpu);
	local_irq_restore(flags);
}

/*
 * Tree-preemptible RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting; shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
	current->rcu_read_lock_nesting++;
	barrier();  /* needed if we ever invoke rcu_read_lock in rcutree.c */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);
244
Paul E. McKenneyfc2219d2009-09-23 09:50:41 -0700245/*
246 * Check for preempted RCU readers blocking the current grace period
247 * for the specified rcu_node structure. If the caller needs a reliable
248 * answer, it must hold the rcu_node's ->lock.
249 */
Paul E. McKenney27f4d282011-02-07 12:47:15 -0800250static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
Paul E. McKenneyfc2219d2009-09-23 09:50:41 -0700251{
Paul E. McKenney12f5f522010-11-29 21:56:39 -0800252 return rnp->gp_tasks != NULL;
Paul E. McKenneyfc2219d2009-09-23 09:50:41 -0700253}
254
Paul E. McKenneyb668c9c2009-11-22 08:53:48 -0800255/*
256 * Record a quiescent state for all tasks that were previously queued
257 * on the specified rcu_node structure and that were blocking the current
258 * RCU grace period. The caller must hold the specified rnp->lock with
259 * irqs disabled, and this lock is released upon return, but irqs remain
260 * disabled.
261 */
Paul E. McKenneyd3f6bad2009-12-02 12:10:13 -0800262static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
Paul E. McKenneyb668c9c2009-11-22 08:53:48 -0800263 __releases(rnp->lock)
264{
265 unsigned long mask;
266 struct rcu_node *rnp_p;
267
Paul E. McKenney27f4d282011-02-07 12:47:15 -0800268 if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
Paul E. McKenney1304afb2010-02-22 17:05:02 -0800269 raw_spin_unlock_irqrestore(&rnp->lock, flags);
Paul E. McKenneyb668c9c2009-11-22 08:53:48 -0800270 return; /* Still need more quiescent states! */
271 }
272
273 rnp_p = rnp->parent;
274 if (rnp_p == NULL) {
275 /*
276 * Either there is only one rcu_node in the tree,
277 * or tasks were kicked up to root rcu_node due to
278 * CPUs going offline.
279 */
Paul E. McKenneyd3f6bad2009-12-02 12:10:13 -0800280 rcu_report_qs_rsp(&rcu_preempt_state, flags);
Paul E. McKenneyb668c9c2009-11-22 08:53:48 -0800281 return;
282 }
283
284 /* Report up the rest of the hierarchy. */
285 mask = rnp->grpmask;
Paul E. McKenney1304afb2010-02-22 17:05:02 -0800286 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
287 raw_spin_lock(&rnp_p->lock); /* irqs already disabled. */
Paul E. McKenneyd3f6bad2009-12-02 12:10:13 -0800288 rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);
Paul E. McKenneyb668c9c2009-11-22 08:53:48 -0800289}

/*
 * Advance a ->blkd_tasks-list pointer to the next entry, returning
 * NULL instead if this is the last entry on the list.
 */
static struct list_head *rcu_next_node_entry(struct task_struct *t,
					     struct rcu_node *rnp)
{
	struct list_head *np;

	np = t->rcu_node_entry.next;
	if (np == &rnp->blkd_tasks)
		np = NULL;
	return np;
}

/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or task having blocked during the RCU
 * read-side critical section.
 */
static noinline void rcu_read_unlock_special(struct task_struct *t)
{
	int empty;
	int empty_exp;
	int empty_exp_now;
	unsigned long flags;
	struct list_head *np;
#ifdef CONFIG_RCU_BOOST
	struct rt_mutex *rbmp = NULL;
#endif /* #ifdef CONFIG_RCU_BOOST */
	struct rcu_node *rnp;
	int special;

	/* NMI handlers cannot block and cannot safely manipulate state. */
	if (in_nmi())
		return;

	local_irq_save(flags);

	/*
	 * If RCU core is waiting for this CPU to exit critical section,
	 * let it know that we have done so.
	 */
	special = t->rcu_read_unlock_special;
	if (special & RCU_READ_UNLOCK_NEED_QS) {
		rcu_preempt_qs(smp_processor_id());
	}

	/* Hardware IRQ handlers cannot block. */
	if (in_irq() || in_serving_softirq()) {
		local_irq_restore(flags);
		return;
	}

	/* Clean up if blocked during RCU read-side critical section. */
	if (special & RCU_READ_UNLOCK_BLOCKED) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;

		/*
		 * Remove this task from the list it blocked on.  The
		 * task can migrate while we acquire the lock, but at
		 * most one time.  So at most two passes through loop.
		 */
		for (;;) {
			rnp = t->rcu_blocked_node;
			raw_spin_lock(&rnp->lock);  /* irqs already disabled. */
			if (rnp == t->rcu_blocked_node)
				break;
			raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
		}
		empty = !rcu_preempt_blocked_readers_cgp(rnp);
		empty_exp = !rcu_preempted_readers_exp(rnp);
		smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
		np = rcu_next_node_entry(t, rnp);
		list_del_init(&t->rcu_node_entry);
		t->rcu_blocked_node = NULL;
		trace_rcu_unlock_preempted_task("rcu_preempt",
						rnp->gpnum, t->pid);
		if (&t->rcu_node_entry == rnp->gp_tasks)
			rnp->gp_tasks = np;
		if (&t->rcu_node_entry == rnp->exp_tasks)
			rnp->exp_tasks = np;
#ifdef CONFIG_RCU_BOOST
		if (&t->rcu_node_entry == rnp->boost_tasks)
			rnp->boost_tasks = np;
		/* Snapshot/clear ->rcu_boost_mutex with rcu_node lock held. */
		if (t->rcu_boost_mutex) {
			rbmp = t->rcu_boost_mutex;
			t->rcu_boost_mutex = NULL;
		}
#endif /* #ifdef CONFIG_RCU_BOOST */

		/*
		 * If this was the last task on the current list, and if
		 * we aren't waiting on any CPUs, report the quiescent state.
		 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
		 * so we must take a snapshot of the expedited state.
		 */
		empty_exp_now = !rcu_preempted_readers_exp(rnp);
		if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) {
			trace_rcu_quiescent_state_report("preempt_rcu",
							 rnp->gpnum,
							 0, rnp->qsmask,
							 rnp->level,
							 rnp->grplo,
							 rnp->grphi,
							 !!rnp->gp_tasks);
			rcu_report_unblock_qs_rnp(rnp, flags);
		} else
			raw_spin_unlock_irqrestore(&rnp->lock, flags);

#ifdef CONFIG_RCU_BOOST
		/* Unboost if we were boosted. */
		if (rbmp)
			rt_mutex_unlock(rbmp);
#endif /* #ifdef CONFIG_RCU_BOOST */

		/*
		 * If this was the last task on the expedited lists,
		 * then we need to report up the rcu_node hierarchy.
		 */
		if (!empty_exp && empty_exp_now)
			rcu_report_exp_rnp(&rcu_preempt_state, rnp, true);
	} else {
		local_irq_restore(flags);
	}
}

/*
 * Tree-preemptible RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting != 1)
		--t->rcu_read_lock_nesting;
	else {
		barrier();  /* critical section before exit code. */
		t->rcu_read_lock_nesting = INT_MIN;
		barrier();  /* assign before ->rcu_read_unlock_special load */
		if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
			rcu_read_unlock_special(t);
		barrier();  /* ->rcu_read_unlock_special load before assign */
		t->rcu_read_lock_nesting = 0;
	}
#ifdef CONFIG_PROVE_LOCKING
	{
		int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);

		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
	}
#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);
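
/*
 * Editorial sketch, not part of this file: how a hypothetical reader
 * exercises the two functions above.  The INT_MIN value that
 * __rcu_read_unlock() briefly stores in ->rcu_read_lock_nesting lets a
 * preemption-time observer distinguish "outermost unlock in flight"
 * (large negative) from "reader active" (positive) and "no reader"
 * (zero).  "gp" and "do_something_with()" are hypothetical names.
 *
 *	rcu_read_lock();			// nesting: 0 -> 1
 *	p = rcu_dereference(gp);		// fetch protected pointer
 *	if (p)
 *		do_something_with(p);		// hypothetical helper
 *	rcu_read_unlock();			// nesting: 1 -> INT_MIN -> 0
 */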

#ifdef CONFIG_RCU_CPU_STALL_VERBOSE

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period on the specified rcu_node structure.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;

	if (!rcu_preempt_blocked_readers_cgp(rnp))
		return;
	raw_spin_lock_irqsave(&rnp->lock, flags);
	t = list_entry(rnp->gp_tasks,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
		sched_show_task(t);
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
	struct rcu_node *rnp = rcu_get_root(rsp);

	rcu_print_detail_task_stall_rnp(rnp);
	rcu_for_each_leaf_node(rsp, rnp)
		rcu_print_detail_task_stall_rnp(rnp);
}

#else /* #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */

static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	struct task_struct *t;
	int ndetected = 0;

	if (!rcu_preempt_blocked_readers_cgp(rnp))
		return 0;
	t = list_entry(rnp->gp_tasks,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		printk(" P%d", t->pid);
		ndetected++;
	}
	return ndetected;
}

/*
 * Suppress preemptible RCU's CPU stall warnings by pushing the
 * time of the next stall-warning message comfortably far into the
 * future.
 */
static void rcu_preempt_stall_reset(void)
{
	rcu_preempt_state.jiffies_stall = jiffies + ULONG_MAX / 2;
}

/*
 * Check that the list of blocked tasks for the newly completed grace
 * period is in fact empty.  It is a serious bug to complete a grace
 * period that still has RCU readers blocked!  This function must be
 * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
 * must be held by the caller.
 *
 * Also, if there are blocked tasks on the list, they automatically
 * block the newly created grace period, so set up ->gp_tasks accordingly.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
	if (!list_empty(&rnp->blkd_tasks))
		rnp->gp_tasks = rnp->blkd_tasks.next;
	WARN_ON_ONCE(rnp->qsmask);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Handle tasklist migration for case in which all CPUs covered by the
 * specified rcu_node have gone offline.  Move them up to the root
 * rcu_node.  The reason for not just moving them to the immediate
 * parent is to remove the need for rcu_read_unlock_special() to
 * make more than two attempts to acquire the target rcu_node's lock.
 *
 * Returns 1 if there was previously a task blocking the current grace
 * period on the specified rcu_node structure.
 *
 * The caller must hold rnp->lock with irqs disabled.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
				     struct rcu_node *rnp,
				     struct rcu_data *rdp)
{
	struct list_head *lp;
	struct list_head *lp_root;
	int retval = 0;
	struct rcu_node *rnp_root = rcu_get_root(rsp);
	struct task_struct *t;

	if (rnp == rnp_root) {
		WARN_ONCE(1, "Last CPU thought to be offlined?");
		return 0;  /* Shouldn't happen: at least one CPU online. */
	}

	/* If we are on an internal node, complain bitterly. */
	WARN_ON_ONCE(rnp != rdp->mynode);

	/*
	 * Move tasks up to root rcu_node.  Don't try to get fancy for
	 * this corner-case operation -- just put this node's tasks
	 * at the head of the root node's list, and update the root node's
	 * ->gp_tasks and ->exp_tasks pointers to those of this node's,
	 * if non-NULL.  This might result in waiting for more tasks than
	 * absolutely necessary, but this is a good performance/complexity
	 * tradeoff.
	 */
	if (rcu_preempt_blocked_readers_cgp(rnp))
		retval |= RCU_OFL_TASKS_NORM_GP;
	if (rcu_preempted_readers_exp(rnp))
		retval |= RCU_OFL_TASKS_EXP_GP;
	lp = &rnp->blkd_tasks;
	lp_root = &rnp_root->blkd_tasks;
	while (!list_empty(lp)) {
		t = list_entry(lp->next, typeof(*t), rcu_node_entry);
		raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
		list_del(&t->rcu_node_entry);
		t->rcu_blocked_node = rnp_root;
		list_add(&t->rcu_node_entry, lp_root);
		if (&t->rcu_node_entry == rnp->gp_tasks)
			rnp_root->gp_tasks = rnp->gp_tasks;
		if (&t->rcu_node_entry == rnp->exp_tasks)
			rnp_root->exp_tasks = rnp->exp_tasks;
#ifdef CONFIG_RCU_BOOST
		if (&t->rcu_node_entry == rnp->boost_tasks)
			rnp_root->boost_tasks = rnp->boost_tasks;
#endif /* #ifdef CONFIG_RCU_BOOST */
		raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
	}

#ifdef CONFIG_RCU_BOOST
	/* In case root is being boosted and leaf is not. */
	raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
	if (rnp_root->boost_tasks != NULL &&
	    rnp_root->boost_tasks != rnp_root->gp_tasks)
		rnp_root->boost_tasks = rnp_root->gp_tasks;
	raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
#endif /* #ifdef CONFIG_RCU_BOOST */

	rnp->gp_tasks = NULL;
	rnp->exp_tasks = NULL;
	return retval;
}

/*
 * Do CPU-offline processing for preemptible RCU.
 */
static void rcu_preempt_offline_cpu(int cpu)
{
	__rcu_offline_cpu(cpu, &rcu_preempt_state);
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Check for a quiescent state from the current CPU.  When a task blocks,
 * the task is recorded in the corresponding CPU's rcu_node structure,
 * which is checked elsewhere.
 *
 * Caller must disable hard irqs.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting == 0) {
		rcu_preempt_qs(cpu);
		return;
	}
	if (t->rcu_read_lock_nesting > 0 &&
	    per_cpu(rcu_preempt_data, cpu).qs_pending)
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
}

/*
 * Process callbacks for preemptible RCU.
 */
static void rcu_preempt_process_callbacks(void)
{
	__rcu_process_callbacks(&rcu_preempt_state,
				&__get_cpu_var(rcu_preempt_data));
}

#ifdef CONFIG_RCU_BOOST

static void rcu_preempt_do_callbacks(void)
{
	rcu_do_batch(&rcu_preempt_state, &__get_cpu_var(rcu_preempt_data));
}

#endif /* #ifdef CONFIG_RCU_BOOST */

/*
 * Queue a preemptible-RCU callback for invocation after a grace period.
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_preempt_state);
}
EXPORT_SYMBOL_GPL(call_rcu);
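
/*
 * Editorial sketch, not part of this file: a typical call_rcu() user
 * embeds an rcu_head in its own structure and frees the enclosing
 * object from the callback.  "struct foo", "foo_rcu_free()", and
 * "release_foo()" are hypothetical names.
 *
 *	struct foo {
 *		struct list_head list;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_rcu_free(struct rcu_head *head)
 *	{
 *		kfree(container_of(head, struct foo, rcu));
 *	}
 *
 *	static void release_foo(struct foo *fp)
 *	{
 *		list_del_rcu(&fp->list);		// unpublish
 *		call_rcu(&fp->rcu, foo_rcu_free);	// free after a GP
 *	}
 */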

/**
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  Note, however, that
 * upon return from synchronize_rcu(), the caller might well be executing
 * concurrently with new RCU read-side critical sections that began while
 * synchronize_rcu() was waiting.  RCU read-side critical sections are
 * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
 */
void synchronize_rcu(void)
{
	if (!rcu_scheduler_active)
		return;
	wait_rcu_gp(call_rcu);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
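
/*
 * Editorial sketch, not part of this file: the classic updater pattern
 * that synchronize_rcu() supports.  "gp", "newp", and "struct foo" are
 * hypothetical names.
 *
 *	struct foo *old = gp;
 *
 *	rcu_assign_pointer(gp, newp);	// publish the replacement
 *	synchronize_rcu();		// wait out pre-existing readers
 *	kfree(old);			// no reader can still hold "old"
 */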

static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
static long sync_rcu_preempt_exp_count;
static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);

/*
 * Return non-zero if there are any tasks in RCU read-side critical
 * sections blocking the current preemptible-RCU expedited grace period.
 * If there is no preemptible-RCU expedited grace period currently in
 * progress, returns zero unconditionally.
 */
static int rcu_preempted_readers_exp(struct rcu_node *rnp)
{
	return rnp->exp_tasks != NULL;
}

/*
 * Return non-zero if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.  Works only for preemptible
 * RCU -- other RCU implementations use other means.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
{
	return !rcu_preempted_readers_exp(rnp) &&
	       ACCESS_ONCE(rnp->expmask) == 0;
}

/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree.  (Calm down, calm down, we do the recursion
 * iteratively!)
 *
 * Most callers will set the "wake" flag, but the task initiating the
 * expedited grace period need not wake itself.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
			       bool wake)
{
	unsigned long flags;
	unsigned long mask;

	raw_spin_lock_irqsave(&rnp->lock, flags);
	for (;;) {
		if (!sync_rcu_preempt_exp_done(rnp)) {
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
			break;
		}
		if (rnp->parent == NULL) {
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
			if (wake)
				wake_up(&sync_rcu_preempt_exp_wq);
			break;
		}
		mask = rnp->grpmask;
		raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
		rnp = rnp->parent;
		raw_spin_lock(&rnp->lock); /* irqs already disabled */
		rnp->expmask &= ~mask;
	}
}

/*
 * Snapshot the tasks blocking the newly started preemptible-RCU expedited
 * grace period for the specified rcu_node structure.  If there are no such
 * tasks, report it up the rcu_node hierarchy.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex and rsp->onofflock.
 */
static void
sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
{
	unsigned long flags;
	int must_wait = 0;

	raw_spin_lock_irqsave(&rnp->lock, flags);
	if (list_empty(&rnp->blkd_tasks))
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	else {
		rnp->exp_tasks = rnp->blkd_tasks.next;
		rcu_initiate_boost(rnp, flags);  /* releases rnp->lock */
		must_wait = 1;
	}
	if (!must_wait)
		rcu_report_exp_rnp(rsp, rnp, false); /* Don't wake self. */
}

/*
 * Wait for an rcu-preempt grace period, but expedite it.  The basic idea
 * is to invoke synchronize_sched_expedited() to push all the tasks to
 * the ->blkd_tasks lists and wait for these lists to drain.
 */
void synchronize_rcu_expedited(void)
{
	unsigned long flags;
	struct rcu_node *rnp;
	struct rcu_state *rsp = &rcu_preempt_state;
	long snap;
	int trycount = 0;

	smp_mb(); /* Caller's modifications seen first by other CPUs. */
	snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1;
	smp_mb(); /* Above access cannot bleed into critical section. */

	/*
	 * Acquire lock, falling back to synchronize_rcu() if too many
	 * lock-acquisition failures.  Of course, if someone does the
	 * expedited grace period for us, just leave.
	 */
	while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
		if (trycount++ < 10)
			udelay(trycount * num_online_cpus());
		else {
			synchronize_rcu();
			return;
		}
		if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
			goto mb_ret; /* Others did our work for us. */
	}
	if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
		goto unlock_mb_ret; /* Others did our work for us. */

	/* Force all RCU readers onto ->blkd_tasks lists. */
	synchronize_sched_expedited();

	raw_spin_lock_irqsave(&rsp->onofflock, flags);

	/* Initialize ->expmask for all non-leaf rcu_node structures. */
	rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
		raw_spin_lock(&rnp->lock); /* irqs already disabled. */
		rnp->expmask = rnp->qsmaskinit;
		raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
	}

	/* Snapshot current state of ->blkd_tasks lists. */
	rcu_for_each_leaf_node(rsp, rnp)
		sync_rcu_preempt_exp_init(rsp, rnp);
	if (NUM_RCU_NODES > 1)
		sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp));

	raw_spin_unlock_irqrestore(&rsp->onofflock, flags);

	/* Wait for snapshotted ->blkd_tasks lists to drain. */
	rnp = rcu_get_root(rsp);
	wait_event(sync_rcu_preempt_exp_wq,
		   sync_rcu_preempt_exp_done(rnp));

	/* Clean up and exit. */
	smp_mb(); /* ensure expedited GP seen before counter increment. */
	ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
unlock_mb_ret:
	mutex_unlock(&sync_rcu_preempt_exp_mutex);
mb_ret:
	smp_mb(); /* ensure subsequent action seen after grace period. */
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
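
/*
 * Editorial note on the idiom above: the snapshot
 *
 *	snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1;
 *
 * together with the later "- snap > 0" comparisons lets a caller that
 * loses the mutex race piggyback on someone else's expedited grace
 * period: any expedited grace period that completed after the snapshot
 * was taken must have waited for all readers this caller cares about.
 */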

/*
 * Check to see if there is any immediate preemptible-RCU-related work
 * to be done.
 */
static int rcu_preempt_pending(int cpu)
{
	return __rcu_pending(&rcu_preempt_state,
			     &per_cpu(rcu_preempt_data, cpu));
}

/*
 * Does preemptible RCU need the CPU to stay out of dynticks mode?
 */
static int rcu_preempt_needs_cpu(int cpu)
{
	return !!per_cpu(rcu_preempt_data, cpu).nxtlist;
}

/**
 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
 */
void rcu_barrier(void)
{
	_rcu_barrier(&rcu_preempt_state, call_rcu);
}
EXPORT_SYMBOL_GPL(rcu_barrier);
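
/*
 * Editorial sketch, not part of this file: a module that posts
 * call_rcu() callbacks must drain them before its text is unloaded.
 * "foo_exit()" and "unregister_foo_hooks()" are hypothetical names.
 *
 *	static void __exit foo_exit(void)
 *	{
 *		unregister_foo_hooks();	// stop posting new callbacks
 *		rcu_barrier();		// wait for in-flight callbacks
 *	}
 */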

/*
 * Initialize preemptible RCU's per-CPU data.
 */
static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
{
	rcu_init_percpu_data(cpu, &rcu_preempt_state, 1);
}

/*
 * Move preemptible RCU's callbacks from dying CPU to other online CPU.
 */
static void rcu_preempt_send_cbs_to_online(void)
{
	rcu_send_cbs_to_online(&rcu_preempt_state);
}

/*
 * Initialize preemptible RCU's state structures.
 */
static void __init __rcu_init_preempt(void)
{
	rcu_init_one(&rcu_preempt_state, &rcu_preempt_data);
}

/*
 * Check for a task exiting while in a preemptible-RCU read-side
 * critical section; clean up if so.  No need to issue warnings,
 * as debug_check_no_locks_held() already does this if lockdep
 * is enabled.
 */
void exit_rcu(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting == 0)
		return;
	t->rcu_read_lock_nesting = 1;
	__rcu_read_unlock();
}

#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */

static struct rcu_state *rcu_state = &rcu_sched_state;

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	printk(KERN_INFO "Hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
	return rcu_batches_completed_sched();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Force a quiescent state for RCU, which, because there is no preemptible
 * RCU, becomes the same as rcu-sched.
 */
void rcu_force_quiescent_state(void)
{
	rcu_sched_force_quiescent_state();
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

/*
 * Because preemptible RCU does not exist, we never have to check for
 * CPUs being in quiescent states.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
}

/*
 * Because preemptible RCU does not exist, there are never any preempted
 * RCU readers.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU

/* Because preemptible RCU does not exist, no quieting of tasks. */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	return 0;
}

/*
 * Because preemptible RCU does not exist, there is no need to suppress
 * its CPU stall warnings.
 */
static void rcu_preempt_stall_reset(void)
{
}

/*
 * Because there is no preemptible RCU, there can be no readers blocked,
 * so there is no need to check for blocked tasks; check only for
 * bogus qsmask values.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rnp->qsmask);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptible RCU does not exist, it never needs to migrate
 * tasks that were blocked within RCU read-side critical sections, and
 * such non-existent tasks cannot possibly have been blocking the current
 * grace period.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
				     struct rcu_node *rnp,
				     struct rcu_data *rdp)
{
	return 0;
}

/*
 * Because preemptible RCU does not exist, it never needs CPU-offline
 * processing.
 */
static void rcu_preempt_offline_cpu(int cpu)
{
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to check.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
}

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to process.
 */
static void rcu_preempt_process_callbacks(void)
{
}

/*
 * Wait for an rcu-preempt grace period, but make it happen quickly.
 * Because preemptible RCU does not exist, map to rcu-sched.
 */
void synchronize_rcu_expedited(void)
{
	synchronize_sched_expedited();
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptible RCU does not exist, there is never any need to
 * report on tasks preempted in RCU read-side critical sections during
 * expedited RCU grace periods.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
			       bool wake)
{
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, it never has any work to do.
 */
static int rcu_preempt_pending(int cpu)
{
	return 0;
}

/*
 * Because preemptible RCU does not exist, it never needs any CPU.
 */
static int rcu_preempt_needs_cpu(int cpu)
{
	return 0;
}

/*
 * Because preemptible RCU does not exist, rcu_barrier() is just
 * another name for rcu_barrier_sched().
 */
void rcu_barrier(void)
{
	rcu_barrier_sched();
}
EXPORT_SYMBOL_GPL(rcu_barrier);

/*
 * Because preemptible RCU does not exist, there is no per-CPU
 * data to initialize.
 */
static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
{
}

/*
 * Because there is no preemptible RCU, there are no callbacks to move.
 */
static void rcu_preempt_send_cbs_to_online(void)
{
}

/*
 * Because preemptible RCU does not exist, it need not be initialized.
 */
static void __init __rcu_init_preempt(void)
{
}

#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */

#ifdef CONFIG_RCU_BOOST

#include "rtmutex_common.h"

#ifdef CONFIG_RCU_TRACE

static void rcu_initiate_boost_trace(struct rcu_node *rnp)
{
	if (list_empty(&rnp->blkd_tasks))
		rnp->n_balk_blkd_tasks++;
	else if (rnp->exp_tasks == NULL && rnp->gp_tasks == NULL)
		rnp->n_balk_exp_gp_tasks++;
	else if (rnp->gp_tasks != NULL && rnp->boost_tasks != NULL)
		rnp->n_balk_boost_tasks++;
	else if (rnp->gp_tasks != NULL && rnp->qsmask != 0)
		rnp->n_balk_notblocked++;
	else if (rnp->gp_tasks != NULL &&
		 ULONG_CMP_LT(jiffies, rnp->boost_time))
		rnp->n_balk_notyet++;
	else
		rnp->n_balk_nos++;
}

#else /* #ifdef CONFIG_RCU_TRACE */

static void rcu_initiate_boost_trace(struct rcu_node *rnp)
{
}

#endif /* #else #ifdef CONFIG_RCU_TRACE */
1167
Paul E. McKenney27f4d282011-02-07 12:47:15 -08001168/*
1169 * Carry out RCU priority boosting on the task indicated by ->exp_tasks
1170 * or ->boost_tasks, advancing the pointer to the next task in the
1171 * ->blkd_tasks list.
1172 *
1173 * Note that irqs must be enabled: boosting the task can block.
1174 * Returns 1 if there are more tasks needing to be boosted.
1175 */
1176static int rcu_boost(struct rcu_node *rnp)
1177{
1178 unsigned long flags;
1179 struct rt_mutex mtx;
1180 struct task_struct *t;
1181 struct list_head *tb;
1182
1183 if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL)
1184 return 0; /* Nothing left to boost. */
1185
1186 raw_spin_lock_irqsave(&rnp->lock, flags);
1187
1188 /*
1189 * Recheck under the lock: all tasks in need of boosting
1190 * might exit their RCU read-side critical sections on their own.
1191 */
1192 if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
1193 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1194 return 0;
1195 }
1196
1197 /*
1198 * Preferentially boost tasks blocking expedited grace periods.
1199 * This cannot starve the normal grace periods because a second
1200 * expedited grace period must boost all blocked tasks, including
1201 * those blocking the pre-existing normal grace period.
1202 */
Paul E. McKenney0ea1f2e2011-02-22 13:42:43 -08001203 if (rnp->exp_tasks != NULL) {
Paul E. McKenney27f4d282011-02-07 12:47:15 -08001204 tb = rnp->exp_tasks;
Paul E. McKenney0ea1f2e2011-02-22 13:42:43 -08001205 rnp->n_exp_boosts++;
1206 } else {
Paul E. McKenney27f4d282011-02-07 12:47:15 -08001207 tb = rnp->boost_tasks;
Paul E. McKenney0ea1f2e2011-02-22 13:42:43 -08001208 rnp->n_normal_boosts++;
1209 }
1210 rnp->n_tasks_boosted++;
Paul E. McKenney27f4d282011-02-07 12:47:15 -08001211
1212 /*
1213 * We boost task t by manufacturing an rt_mutex that appears to
1214 * be held by task t. We leave a pointer to that rt_mutex where
1215 * task t can find it, and task t will release the mutex when it
1216 * exits its outermost RCU read-side critical section. Then
1217 * simply acquiring this artificial rt_mutex will boost task
1218 * t's priority. (Thanks to tglx for suggesting this approach!)
1219 *
1220 * Note that task t must acquire rnp->lock to remove itself from
1221 * the ->blkd_tasks list, which it will do from exit() if from
1222 * nowhere else. We therefore are guaranteed that task t will
1223 * stay around at least until we drop rnp->lock. Note that
1224 * rnp->lock also resolves races between our priority boosting
1225 * and task t's exiting its outermost RCU read-side critical
1226 * section.
1227 */
1228 t = container_of(tb, struct task_struct, rcu_node_entry);
1229 rt_mutex_init_proxy_locked(&mtx, t);
1230 t->rcu_boost_mutex = &mtx;
Paul E. McKenney27f4d282011-02-07 12:47:15 -08001231 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1232 rt_mutex_lock(&mtx); /* Side effect: boosts task t's priority. */
1233 rt_mutex_unlock(&mtx); /* Keep lockdep happy. */
1234
Paul E. McKenney4f89b332011-12-09 14:43:47 -08001235 return ACCESS_ONCE(rnp->exp_tasks) != NULL ||
1236 ACCESS_ONCE(rnp->boost_tasks) != NULL;
Paul E. McKenney27f4d282011-02-07 12:47:15 -08001237}
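
/*
 * Illustrative sketch (compiled out): the proxy-lock trick above in
 * isolation.  The function name boost_via_proxy_lock() and the "victim"
 * parameter are hypothetical; as in rcu_boost(), the victim must be
 * given a pointer to the mutex so that it can release it when it leaves
 * its outermost RCU read-side critical section.
 */
#if 0
static void boost_via_proxy_lock(struct task_struct *victim)
{
	struct rt_mutex mtx;

	/* Manufacture an rt_mutex that the victim appears to hold. */
	rt_mutex_init_proxy_locked(&mtx, victim);
	victim->rcu_boost_mutex = &mtx;  /* Victim unlocks when done. */
	/* Blocks until the victim unlocks; PI boosts it meanwhile. */
	rt_mutex_lock(&mtx);
	rt_mutex_unlock(&mtx);  /* Keep lockdep happy. */
}
#endif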

/*
 * Timer handler to initiate waking up of boost kthreads that
 * have yielded the CPU due to excessive numbers of tasks to
 * boost.  We wake up the per-rcu_node kthread, which in turn
 * will wake up the booster kthread.
 */
static void rcu_boost_kthread_timer(unsigned long arg)
{
	invoke_rcu_node_kthread((struct rcu_node *)arg);
}

/*
 * Priority-boosting kthread.  One per leaf rcu_node and one for the
 * root rcu_node.
 */
static int rcu_boost_kthread(void *arg)
{
	struct rcu_node *rnp = (struct rcu_node *)arg;
	int spincnt = 0;
	int more2boost;

	trace_rcu_utilization("Start boost kthread@init");
	for (;;) {
		rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
		trace_rcu_utilization("End boost kthread@rcu_wait");
		rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
		trace_rcu_utilization("Start boost kthread@rcu_wait");
		rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
		more2boost = rcu_boost(rnp);
		if (more2boost)
			spincnt++;
		else
			spincnt = 0;
		if (spincnt > 10) {
			trace_rcu_utilization("End boost kthread@rcu_yield");
			rcu_yield(rcu_boost_kthread_timer, (unsigned long)rnp);
			trace_rcu_utilization("Start boost kthread@rcu_yield");
			spincnt = 0;
		}
	}
	/* NOTREACHED */
	trace_rcu_utilization("End boost kthread@notreached");
	return 0;
}
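
/*
 * Aside for readers of this excerpt: rcu_wait() is a helper macro from
 * rcutree.h, not shown here.  Roughly (a sketch from memory, so treat
 * the exact body as an assumption), it is an open-coded wait loop:
 *
 *	for (;;) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		if (cond)
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *
 * so the kthreads in this file sleep until their wakeup condition
 * holds, with wake_up_process() rather than a waitqueue providing
 * the wakeups.
 */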

/*
 * Check to see if it is time to start boosting RCU readers that are
 * blocking the current grace period, and, if so, tell the per-rcu_node
 * kthread to start boosting them.  If there is an expedited grace
 * period in progress, it is always time to boost.
 *
 * The caller must hold rnp->lock, which this function releases,
 * but irqs remain disabled.  The ->boost_kthread_task is immortal,
 * so we don't need to worry about it going away.
 */
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
{
	struct task_struct *t;

	if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
		rnp->n_balk_exp_gp_tasks++;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;
	}
	if (rnp->exp_tasks != NULL ||
	    (rnp->gp_tasks != NULL &&
	     rnp->boost_tasks == NULL &&
	     rnp->qsmask == 0 &&
	     ULONG_CMP_GE(jiffies, rnp->boost_time))) {
		if (rnp->exp_tasks == NULL)
			rnp->boost_tasks = rnp->gp_tasks;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		t = rnp->boost_kthread_task;
		if (t != NULL)
			wake_up_process(t);
	} else {
		rcu_initiate_boost_trace(rnp);
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	}
}

/*
 * Wake up the per-CPU kthread to invoke RCU callbacks.
 */
static void invoke_rcu_callbacks_kthread(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__this_cpu_write(rcu_cpu_has_work, 1);
	if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
	    current != __this_cpu_read(rcu_cpu_kthread_task))
		wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
	local_irq_restore(flags);
}

/*
 * Is the current CPU running the RCU-callbacks kthread?
 * Caller must have preemption disabled.
 */
static bool rcu_is_callbacks_kthread(void)
{
	return __get_cpu_var(rcu_cpu_kthread_task) == current;
}

/*
 * Set the affinity of the boost kthread.  The CPU-hotplug locks are
 * held, so no one should be messing with the existence of the boost
 * kthread.
 */
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
					  cpumask_var_t cm)
{
	struct task_struct *t;

	t = rnp->boost_kthread_task;
	if (t != NULL)
		set_cpus_allowed_ptr(rnp->boost_kthread_task, cm);
}

#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)
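
/*
 * Worked example of the conversion above (values assumed for
 * illustration only): with CONFIG_RCU_BOOST_DELAY=500 milliseconds
 * and HZ=250, RCU_BOOST_DELAY_JIFFIES is DIV_ROUND_UP(500 * 250, 1000)
 * = 125 jiffies, so boosting begins half a second into the grace period.
 */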

/*
 * Do priority-boost accounting for the start of a new grace period.
 */
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
	rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
}
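
/*
 * Aside: ULONG_CMP_GE() and ULONG_CMP_LT() (from rcupdate.h) are
 * wraparound-safe, time_after()-style comparisons.  A worked example
 * with values chosen for illustration: if ->boost_time was set to
 * ULONG_MAX - 1 and jiffies has since wrapped around to 3, then
 * jiffies - ->boost_time is 5 (modulo ULONG_MAX + 1), which is well
 * under ULONG_MAX / 2, so ULONG_CMP_GE(jiffies, rnp->boost_time)
 * correctly reports that the boost delay has expired.
 */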

/*
 * Create an RCU-boost kthread for the specified node if one does not
 * already exist.  We only create this kthread for preemptible RCU.
 * Returns zero if all is well, a negated errno otherwise.
 */
static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
						 struct rcu_node *rnp,
						 int rnp_index)
{
	unsigned long flags;
	struct sched_param sp;
	struct task_struct *t;

	if (&rcu_preempt_state != rsp)
		return 0;
	rsp->boost = 1;
	if (rnp->boost_kthread_task != NULL)
		return 0;
	t = kthread_create(rcu_boost_kthread, (void *)rnp,
			   "rcub/%d", rnp_index);
	if (IS_ERR(t))
		return PTR_ERR(t);
	raw_spin_lock_irqsave(&rnp->lock, flags);
	rnp->boost_kthread_task = t;
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
	sp.sched_priority = RCU_BOOST_PRIO;
	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
	wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Stop RCU's per-CPU kthread when its CPU goes offline.
 */
static void rcu_stop_cpu_kthread(int cpu)
{
	struct task_struct *t;

	/* Stop the CPU's kthread. */
	t = per_cpu(rcu_cpu_kthread_task, cpu);
	if (t != NULL) {
		per_cpu(rcu_cpu_kthread_task, cpu) = NULL;
		kthread_stop(t);
	}
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

static void rcu_kthread_do_work(void)
{
	rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data));
	rcu_do_batch(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
	rcu_preempt_do_callbacks();
}

/*
 * Wake up the specified per-rcu_node-structure kthread.
 * Because the per-rcu_node kthreads are immortal, we don't need
 * to do anything to keep them alive.
 */
static void invoke_rcu_node_kthread(struct rcu_node *rnp)
{
	struct task_struct *t;

	t = rnp->node_kthread_task;
	if (t != NULL)
		wake_up_process(t);
}

/*
 * Set the specified CPU's kthread to run RT or not, as specified by
 * the to_rt argument.  The CPU-hotplug locks are held, so the task
 * is not going away.
 */
static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
{
	int policy;
	struct sched_param sp;
	struct task_struct *t;

	t = per_cpu(rcu_cpu_kthread_task, cpu);
	if (t == NULL)
		return;
	if (to_rt) {
		policy = SCHED_FIFO;
		sp.sched_priority = RCU_KTHREAD_PRIO;
	} else {
		policy = SCHED_NORMAL;
		sp.sched_priority = 0;
	}
	sched_setscheduler_nocheck(t, policy, &sp);
}

/*
 * Timer handler to initiate the waking up of per-CPU kthreads that
 * have yielded the CPU due to excess numbers of RCU callbacks.
 * We wake up the per-rcu_node kthread, which in turn will wake up
 * the booster kthread.
 */
static void rcu_cpu_kthread_timer(unsigned long arg)
{
	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg);
	struct rcu_node *rnp = rdp->mynode;

	atomic_or(rdp->grpmask, &rnp->wakemask);
	invoke_rcu_node_kthread(rnp);
}

/*
 * Drop to non-real-time priority and yield, but only after posting a
 * timer that will cause us to regain our real-time priority if we
 * remain preempted.  Either way, we restore our real-time priority
 * before returning.
 */
static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
{
	struct sched_param sp;
	struct timer_list yield_timer;
	int prio = current->rt_priority;

	setup_timer_on_stack(&yield_timer, f, arg);
	mod_timer(&yield_timer, jiffies + 2);
	sp.sched_priority = 0;
	sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp);
	set_user_nice(current, 19);
	schedule();
	set_user_nice(current, 0);
	sp.sched_priority = prio;
	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
	del_timer(&yield_timer);
}

/*
 * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU.
 * This can happen while the corresponding CPU is either coming online
 * or going offline.  We cannot wait until the CPU is fully online
 * before starting the kthread, because the various notifier functions
 * can wait for RCU grace periods.  So we park rcu_cpu_kthread() until
 * the corresponding CPU is online.
 *
 * Return 1 if the kthread needs to stop, 0 otherwise.
 *
 * Caller must disable bh.  This function can momentarily enable it.
 */
static int rcu_cpu_kthread_should_stop(int cpu)
{
	while (cpu_is_offline(cpu) ||
	       !cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)) ||
	       smp_processor_id() != cpu) {
		if (kthread_should_stop())
			return 1;
		per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
		per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id();
		local_bh_enable();
		schedule_timeout_uninterruptible(1);
		if (!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)))
			set_cpus_allowed_ptr(current, cpumask_of(cpu));
		local_bh_disable();
	}
	per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
	return 0;
}

/*
 * Per-CPU kernel thread that invokes RCU callbacks.  This replaces the
 * RCU softirq used in flavors and configurations of RCU that do not
 * support RCU priority boosting.
 */
static int rcu_cpu_kthread(void *arg)
{
	int cpu = (int)(long)arg;
	unsigned long flags;
	int spincnt = 0;
	unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu);
	char work;
	char *workp = &per_cpu(rcu_cpu_has_work, cpu);

	trace_rcu_utilization("Start CPU kthread@init");
	for (;;) {
		*statusp = RCU_KTHREAD_WAITING;
		trace_rcu_utilization("End CPU kthread@rcu_wait");
		rcu_wait(*workp != 0 || kthread_should_stop());
		trace_rcu_utilization("Start CPU kthread@rcu_wait");
		local_bh_disable();
		if (rcu_cpu_kthread_should_stop(cpu)) {
			local_bh_enable();
			break;
		}
		*statusp = RCU_KTHREAD_RUNNING;
		per_cpu(rcu_cpu_kthread_loops, cpu)++;
		local_irq_save(flags);
		work = *workp;
		*workp = 0;
		local_irq_restore(flags);
		if (work)
			rcu_kthread_do_work();
		local_bh_enable();
		if (*workp != 0)
			spincnt++;
		else
			spincnt = 0;
		if (spincnt > 10) {
			*statusp = RCU_KTHREAD_YIELDING;
			trace_rcu_utilization("End CPU kthread@rcu_yield");
			rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu);
			trace_rcu_utilization("Start CPU kthread@rcu_yield");
			spincnt = 0;
		}
	}
	*statusp = RCU_KTHREAD_STOPPED;
	trace_rcu_utilization("End CPU kthread@term");
	return 0;
}

/*
 * Spawn a per-CPU kthread, setting up affinity and priority.
 * Because the CPU hotplug lock is held, no other CPU will be attempting
 * to manipulate rcu_cpu_kthread_task.  There might be another CPU
 * attempting to access it during boot, but the locking in kthread_bind()
 * will enforce sufficient ordering.
 *
 * Please note that we cannot simply refuse to wake up the per-CPU
 * kthread because kthreads are created in TASK_UNINTERRUPTIBLE state,
 * which can result in softlockup complaints if the task ends up being
 * idle for more than a couple of minutes.
 *
 * However, please note also that we cannot bind the per-CPU kthread to its
 * CPU until that CPU is fully online.  We also cannot wait until the
 * CPU is fully online before we create its per-CPU kthread, as this would
 * deadlock the system when CPU notifiers tried waiting for grace
 * periods.  So we bind the per-CPU kthread to its CPU only if the CPU
 * is online.  If its CPU is not yet fully online, then the code in
 * rcu_cpu_kthread() will wait until it is fully online, and then do
 * the binding.
 */
static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
{
	struct sched_param sp;
	struct task_struct *t;

	if (!rcu_scheduler_fully_active ||
	    per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
		return 0;
	t = kthread_create_on_node(rcu_cpu_kthread,
				   (void *)(long)cpu,
				   cpu_to_node(cpu),
				   "rcuc/%d", cpu);
	if (IS_ERR(t))
		return PTR_ERR(t);
	if (cpu_online(cpu))
		kthread_bind(t, cpu);
	per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
	WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
	sp.sched_priority = RCU_KTHREAD_PRIO;
	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
	per_cpu(rcu_cpu_kthread_task, cpu) = t;
	wake_up_process(t); /* Get to TASK_INTERRUPTIBLE quickly. */
	return 0;
}

/*
 * Per-rcu_node kthread, which is in charge of waking up the per-CPU
 * kthreads when needed.  We ignore requests to wake up kthreads
 * for offline CPUs, which is OK because force_quiescent_state()
 * takes care of this case.
 */
static int rcu_node_kthread(void *arg)
{
	int cpu;
	unsigned long flags;
	unsigned long mask;
	struct rcu_node *rnp = (struct rcu_node *)arg;
	struct sched_param sp;
	struct task_struct *t;

	for (;;) {
		rnp->node_kthread_status = RCU_KTHREAD_WAITING;
		rcu_wait(atomic_read(&rnp->wakemask) != 0);
		rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
		raw_spin_lock_irqsave(&rnp->lock, flags);
		mask = atomic_xchg(&rnp->wakemask, 0);
		rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
		for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) {
			if ((mask & 0x1) == 0)
				continue;
			preempt_disable();
			t = per_cpu(rcu_cpu_kthread_task, cpu);
			if (!cpu_online(cpu) || t == NULL) {
				preempt_enable();
				continue;
			}
			per_cpu(rcu_cpu_has_work, cpu) = 1;
			sp.sched_priority = RCU_KTHREAD_PRIO;
			sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
			preempt_enable();
		}
	}
	/* NOTREACHED */
	rnp->node_kthread_status = RCU_KTHREAD_STOPPED;
	return 0;
}

/*
 * Set the per-rcu_node kthread's affinity to cover all CPUs that are
 * served by the rcu_node in question.  The CPU hotplug lock is still
 * held, so the value of rnp->qsmaskinit will be stable.
 *
 * We don't include outgoingcpu in the affinity set; callers pass -1 if
 * there is no outgoing CPU.  If there are no CPUs left in the affinity
 * set, this function allows the kthread to execute on any CPU.
 */
static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
	cpumask_var_t cm;
	int cpu;
	unsigned long mask = rnp->qsmaskinit;

	if (rnp->node_kthread_task == NULL)
		return;
	if (!alloc_cpumask_var(&cm, GFP_KERNEL))
		return;
	cpumask_clear(cm);
	for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
		if ((mask & 0x1) && cpu != outgoingcpu)
			cpumask_set_cpu(cpu, cm);
	if (cpumask_weight(cm) == 0) {
		cpumask_setall(cm);
		for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
			cpumask_clear_cpu(cpu, cm);
		WARN_ON_ONCE(cpumask_weight(cm) == 0);
	}
	set_cpus_allowed_ptr(rnp->node_kthread_task, cm);
	rcu_boost_kthread_setaffinity(rnp, cm);
	free_cpumask_var(cm);
}

/*
 * Spawn a per-rcu_node kthread, setting priority and affinity.
 * Called during boot before online/offline can happen, or, if
 * during runtime, with the main CPU-hotplug locks held.  So only
 * one of these can be executing at a time.
 */
static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
						struct rcu_node *rnp)
{
	unsigned long flags;
	int rnp_index = rnp - &rsp->node[0];
	struct sched_param sp;
	struct task_struct *t;

	if (!rcu_scheduler_fully_active ||
	    rnp->qsmaskinit == 0)
		return 0;
	if (rnp->node_kthread_task == NULL) {
		t = kthread_create(rcu_node_kthread, (void *)rnp,
				   "rcun/%d", rnp_index);
		if (IS_ERR(t))
			return PTR_ERR(t);
		raw_spin_lock_irqsave(&rnp->lock, flags);
		rnp->node_kthread_task = t;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		sp.sched_priority = 99;
		sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
		wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
	}
	return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index);
}

/*
 * Spawn all kthreads -- called as soon as the scheduler is running.
 */
static int __init rcu_spawn_kthreads(void)
{
	int cpu;
	struct rcu_node *rnp;

	rcu_scheduler_fully_active = 1;
	for_each_possible_cpu(cpu) {
		per_cpu(rcu_cpu_has_work, cpu) = 0;
		if (cpu_online(cpu))
			(void)rcu_spawn_one_cpu_kthread(cpu);
	}
	rnp = rcu_get_root(rcu_state);
	(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
	if (NUM_RCU_NODES > 1) {
		rcu_for_each_leaf_node(rcu_state, rnp)
			(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
	}
	return 0;
}
early_initcall(rcu_spawn_kthreads);

static void __cpuinit rcu_prepare_kthreads(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
	struct rcu_node *rnp = rdp->mynode;

	/* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
	if (rcu_scheduler_fully_active) {
		(void)rcu_spawn_one_cpu_kthread(cpu);
		if (rnp->node_kthread_task == NULL)
			(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
	}
}

#else /* #ifdef CONFIG_RCU_BOOST */

static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

static void invoke_rcu_callbacks_kthread(void)
{
	WARN_ON_ONCE(1);
}

static bool rcu_is_callbacks_kthread(void)
{
	return false;
}

static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
}

#ifdef CONFIG_HOTPLUG_CPU

static void rcu_stop_cpu_kthread(int cpu)
{
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
}

static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
{
}

static int __init rcu_scheduler_really_started(void)
{
	rcu_scheduler_fully_active = 1;
	return 0;
}
early_initcall(rcu_scheduler_really_started);

static void __cpuinit rcu_prepare_kthreads(int cpu)
{
}

#endif /* #else #ifdef CONFIG_RCU_BOOST */

#ifndef CONFIG_SMP

void synchronize_sched_expedited(void)
{
	cond_resched();
}
EXPORT_SYMBOL_GPL(synchronize_sched_expedited);

#else /* #ifndef CONFIG_SMP */

static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);

static int synchronize_sched_expedited_cpu_stop(void *data)
{
	/*
	 * There must be a full memory barrier on each affected CPU
	 * between the time that try_stop_cpus() is called and the
	 * time that it returns.
	 *
	 * In the current initial implementation of cpu_stop, the
	 * above condition is already met when the control reaches
	 * this point and the following smp_mb() is not strictly
	 * necessary.  Do smp_mb() anyway for documentation and
	 * robustness against future implementation changes.
	 */
	smp_mb(); /* See above comment block. */
	return 0;
}

/*
 * Wait for an rcu-sched grace period to elapse, but use a "big hammer"
 * approach to force the grace period to end quickly.  This consumes
 * significant time on all CPUs, and is thus not recommended for
 * any sort of common-case code.
 *
 * Note that it is illegal to call this function while holding any
 * lock that is acquired by a CPU-hotplug notifier.  Failing to
 * observe this restriction will result in deadlock.
 *
 * This implementation can be thought of as an application of ticket
 * locking to RCU, with sync_sched_expedited_started and
 * sync_sched_expedited_done taking on the roles of the halves
 * of the ticket-lock word.  Each task atomically increments
 * sync_sched_expedited_started upon entry, snapshotting the old value,
 * then attempts to stop all the CPUs.  If this succeeds, then each
 * CPU will have executed a context switch, resulting in an RCU-sched
 * grace period.  We are then done, so we use atomic_cmpxchg() to
 * update sync_sched_expedited_done to match our snapshot -- but
 * only if someone else has not already advanced past our snapshot.
 *
 * On the other hand, if try_stop_cpus() fails, we check the value
 * of sync_sched_expedited_done.  If it has advanced past our
 * initial snapshot, then someone else must have forced a grace period
 * some time after we took our snapshot.  In this case, our work is
 * done for us, and we can simply return.  Otherwise, we try again,
 * but keep our initial snapshot for purposes of checking for someone
 * doing our work for us.
 *
 * If we fail too many times in a row, we fall back to synchronize_sched().
 */
void synchronize_sched_expedited(void)
{
	int firstsnap, s, snap, trycount = 0;

	/* Note that atomic_inc_return() implies full memory barrier. */
	firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
	get_online_cpus();

	/*
	 * Each pass through the following loop attempts to force a
	 * context switch on each CPU.
	 */
	while (try_stop_cpus(cpu_online_mask,
			     synchronize_sched_expedited_cpu_stop,
			     NULL) == -EAGAIN) {
		put_online_cpus();

		/* No joy, try again later.  Or just synchronize_sched(). */
		if (trycount++ < 10)
			udelay(trycount * num_online_cpus());
		else {
			synchronize_sched();
			return;
		}

		/* Check to see if someone else did our work for us. */
		s = atomic_read(&sync_sched_expedited_done);
		if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
			smp_mb(); /* ensure test happens before caller kfree */
			return;
		}

		/*
		 * Refetching sync_sched_expedited_started allows later
		 * callers to piggyback on our grace period.  We retry
		 * after they started, so our grace period works for them,
		 * and they started after our first try, so their grace
		 * period works for us.
		 */
		get_online_cpus();
		snap = atomic_read(&sync_sched_expedited_started);
		smp_mb(); /* ensure read is before try_stop_cpus(). */
	}

	/*
	 * Everyone up to our most recent fetch is covered by our grace
	 * period.  Update the counter, but only if our work is still
	 * relevant -- which it won't be if someone who started later
	 * than we did beat us to the punch.
	 */
	do {
		s = atomic_read(&sync_sched_expedited_done);
		if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
			smp_mb(); /* ensure test happens before caller kfree */
			break;
		}
	} while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);

	put_online_cpus();
}
EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
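
/*
 * Illustrative update-side usage (compiled out): how a hypothetical
 * caller might pair synchronize_sched_expedited() with RCU-sched
 * readers.  "struct foo", "gp", "gp_lock", and update_foo() are all
 * made-up names for this sketch.  Note also that the started/done
 * counters above are compared with UINT_CMP_GE(), which remains
 * correct across counter wraparound.
 */
#if 0
struct foo { int a; };
struct foo *gp;			/* Updates protected by gp_lock. */
DEFINE_SPINLOCK(gp_lock);

static void update_foo(struct foo *newp)
{
	struct foo *oldp;

	spin_lock(&gp_lock);
	oldp = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
	rcu_assign_pointer(gp, newp);
	spin_unlock(&gp_lock);
	synchronize_sched_expedited();	/* Wait out pre-existing readers. */
	kfree(oldp);			/* Now safe to free the old version. */
}
#endif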

#endif /* #else #ifndef CONFIG_SMP */

#if !defined(CONFIG_RCU_FAST_NO_HZ)

/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so.  This function is part of the RCU implementation; it is -not-
 * an exported member of the RCU API.
 *
 * Because we do not have RCU_FAST_NO_HZ, just check whether this CPU
 * needs any flavor of RCU.
 */
int rcu_needs_cpu(int cpu)
{
	return rcu_cpu_has_callbacks(cpu);
}

/*
 * Because we do not have RCU_FAST_NO_HZ, don't bother initializing for it.
 */
static void rcu_prepare_for_idle_init(int cpu)
{
}

/*
 * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
 * after it.
 */
static void rcu_cleanup_after_idle(int cpu)
{
}

/*
 * Do the idle-entry grace-period work, which, because
 * CONFIG_RCU_FAST_NO_HZ=n here, is nothing.
 */
static void rcu_prepare_for_idle(int cpu)
{
}

#else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */

/*
 * This code is invoked when a CPU goes idle, at which point we want
 * to have the CPU do everything required for RCU so that it can enter
 * the energy-efficient dyntick-idle mode.  This is handled by a
 * state machine implemented by rcu_prepare_for_idle() below.
 *
 * The following three preprocessor symbols control this state machine:
 *
 * RCU_IDLE_FLUSHES gives the maximum number of times that we will attempt
 *	to satisfy RCU.  Beyond this point, it is better to incur a periodic
 *	scheduling-clock interrupt than to loop through the state machine
 *	at full power.
 * RCU_IDLE_OPT_FLUSHES gives the number of RCU_IDLE_FLUSHES that are
 *	optional if RCU does not need anything immediately from this
 *	CPU, even if this CPU still has RCU callbacks queued.  The first
 *	times through the state machine are mandatory: we need to give
 *	the state machine a chance to communicate a quiescent state
 *	to the RCU core.
 * RCU_IDLE_GP_DELAY gives the number of jiffies that a CPU is permitted
 *	to sleep in dyntick-idle mode with RCU callbacks pending.  This
 *	is sized to be roughly one RCU grace period.  Those energy-efficiency
 *	benchmarkers who might otherwise be tempted to set this to a large
 *	number, be warned: Setting RCU_IDLE_GP_DELAY too high can hang your
 *	system.  And if you are -that- concerned about energy efficiency,
 *	just power the system down and be done with it!
 *
 * The values below work well in practice.  If future workloads require
 * adjustment, they can be converted into kernel config parameters, though
 * making the state machine smarter might be a better option.
 */
#define RCU_IDLE_FLUSHES 5		/* Number of dyntick-idle tries. */
#define RCU_IDLE_OPT_FLUSHES 3		/* Optional dyntick-idle tries. */
#define RCU_IDLE_GP_DELAY 6		/* Roughly one grace period. */
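
/*
 * Worked example of the delay above (HZ assumed for illustration):
 * with HZ=1000, RCU_IDLE_GP_DELAY of 6 jiffies is jiffies_to_usecs(6)
 * = 6000 microseconds, so rcu_idle_gp_wait below ends up as a
 * 6-millisecond ktime for the dyntick-idle hrtimer.
 */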

static DEFINE_PER_CPU(int, rcu_dyntick_drain);
static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
static DEFINE_PER_CPU(struct hrtimer, rcu_idle_gp_timer);
static ktime_t rcu_idle_gp_wait;

/*
 * Allow the CPU to enter dyntick-idle mode if any of the following holds:
 * (1) there are no callbacks on this CPU, (2) this CPU has not yet
 * attempted to enter dyntick-idle mode, or (3) this CPU is in the
 * process of attempting to enter dyntick-idle mode.  Otherwise, if we
 * have recently tried and failed to enter dyntick-idle mode, we refuse
 * to try to enter it.  After all, it is better to incur scheduling-clock
 * interrupts than to spin continuously for the same time duration!
 */
int rcu_needs_cpu(int cpu)
{
	/* If no callbacks, RCU doesn't need the CPU. */
	if (!rcu_cpu_has_callbacks(cpu))
		return 0;
	/* Otherwise, RCU needs the CPU only if it recently tried and failed. */
	return per_cpu(rcu_dyntick_holdoff, cpu) == jiffies;
}

/*
 * Timer handler used to force the CPU to start pushing its remaining RCU
 * callbacks in the case where it entered dyntick-idle mode with callbacks
 * pending.  The handler doesn't really need to do anything because the
 * real work is done upon re-entry to idle, or by the next scheduling-clock
 * interrupt should idle not be re-entered.
 */
static enum hrtimer_restart rcu_idle_gp_timer_func(struct hrtimer *hrtp)
{
	trace_rcu_prep_idle("Timer");
	return HRTIMER_NORESTART;
}

/*
 * Initialize the timer used to pull CPUs out of dyntick-idle mode.
 */
static void rcu_prepare_for_idle_init(int cpu)
{
	static int firsttime = 1;
	struct hrtimer *hrtp = &per_cpu(rcu_idle_gp_timer, cpu);

	hrtimer_init(hrtp, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtp->function = rcu_idle_gp_timer_func;
	if (firsttime) {
		unsigned int upj = jiffies_to_usecs(RCU_IDLE_GP_DELAY);

		rcu_idle_gp_wait = ns_to_ktime(upj * (u64)1000);
		firsttime = 0;
	}
}

/*
 * Clean up for exit from idle.  Because we are exiting from idle, there
 * is no longer any point to rcu_idle_gp_timer, so cancel it.  This will
 * do nothing if this timer is not active, so just cancel it unconditionally.
 */
static void rcu_cleanup_after_idle(int cpu)
{
	hrtimer_cancel(&per_cpu(rcu_idle_gp_timer, cpu));
}

/*
 * Check to see if any RCU-related work can be done by the current CPU,
 * and if so, schedule a softirq to get it done.  This function is part
 * of the RCU implementation; it is -not- an exported member of the RCU API.
 *
 * The idea is for the current CPU to clear out all work required by the
 * RCU core for the current grace period, so that this CPU can be permitted
 * to enter dyntick-idle mode.  In some cases, it will need to be awakened
 * at the end of the grace period by whatever CPU ends the grace period.
 * This allows CPUs to go dyntick-idle more quickly, and to reduce the
 * number of wakeups by a modest integer factor.
 *
 * Because it is not legal to invoke rcu_process_callbacks() with irqs
 * disabled, we do one pass of force_quiescent_state(), then do an
 * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked
 * later.  The per-CPU rcu_dyntick_drain variable controls the sequencing.
 *
 * The caller must have disabled interrupts.
 */
static void rcu_prepare_for_idle(int cpu)
{
	unsigned long flags;

	local_irq_save(flags);

	/*
	 * If there are no callbacks on this CPU, enter dyntick-idle mode.
	 * Also reset state to avoid prejudicing later attempts.
	 */
	if (!rcu_cpu_has_callbacks(cpu)) {
		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
		per_cpu(rcu_dyntick_drain, cpu) = 0;
		local_irq_restore(flags);
		trace_rcu_prep_idle("No callbacks");
		return;
	}

	/*
	 * If in holdoff mode, just return.  We will presumably have
	 * refrained from disabling the scheduling-clock tick.
	 */
	if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies) {
		local_irq_restore(flags);
		trace_rcu_prep_idle("In holdoff");
		return;
	}

	/* Check and update the rcu_dyntick_drain sequencing. */
	if (per_cpu(rcu_dyntick_drain, cpu) <= 0) {
		/* First time through, initialize the counter. */
		per_cpu(rcu_dyntick_drain, cpu) = RCU_IDLE_FLUSHES;
	} else if (per_cpu(rcu_dyntick_drain, cpu) <= RCU_IDLE_OPT_FLUSHES &&
		   !rcu_pending(cpu)) {
		/* Can we go dyntick-idle despite still having callbacks? */
		trace_rcu_prep_idle("Dyntick with callbacks");
		per_cpu(rcu_dyntick_drain, cpu) = 0;
		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
		hrtimer_start(&per_cpu(rcu_idle_gp_timer, cpu),
			      rcu_idle_gp_wait, HRTIMER_MODE_REL);
		local_irq_restore(flags);
		return; /* Nothing more to do immediately. */
	} else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) {
		/* We have hit the limit, so time to give up. */
		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
		local_irq_restore(flags);
		trace_rcu_prep_idle("Begin holdoff");
		invoke_rcu_core(); /* Force the CPU out of dyntick-idle. */
		return;
	}

	/*
	 * Do one step of pushing the remaining RCU callbacks through
	 * the RCU core state machine.
	 */
#ifdef CONFIG_TREE_PREEMPT_RCU
	if (per_cpu(rcu_preempt_data, cpu).nxtlist) {
		local_irq_restore(flags);
		rcu_preempt_qs(cpu);
		force_quiescent_state(&rcu_preempt_state, 0);
		local_irq_save(flags);
	}
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
	if (per_cpu(rcu_sched_data, cpu).nxtlist) {
		local_irq_restore(flags);
		rcu_sched_qs(cpu);
		force_quiescent_state(&rcu_sched_state, 0);
		local_irq_save(flags);
	}
	if (per_cpu(rcu_bh_data, cpu).nxtlist) {
		local_irq_restore(flags);
		rcu_bh_qs(cpu);
		force_quiescent_state(&rcu_bh_state, 0);
		local_irq_save(flags);
	}

	/*
	 * If RCU callbacks are still pending, RCU still needs this CPU.
	 * So try forcing the callbacks through the grace period.
	 */
	if (rcu_cpu_has_callbacks(cpu)) {
		local_irq_restore(flags);
		trace_rcu_prep_idle("More callbacks");
		invoke_rcu_core();
	} else {
		local_irq_restore(flags);
		trace_rcu_prep_idle("Callbacks drained");
	}
}

#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */