/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2010 Linaro
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

/* Global control variables for rcupdate callback mechanism. */
struct rcu_ctrlblk {
	struct rcu_head *rcucblist;	/* List of pending callbacks (CBs). */
	struct rcu_head **donetail;	/* ->next pointer of last "done" CB. */
	struct rcu_head **curtail;	/* ->next pointer of last CB. */
	RCU_TRACE(long qlen);		/* Number of pending CBs. */
	RCU_TRACE(char *name);		/* Name of RCU type. */
};

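/*
 * A sketch (added for illustration, not from the original file) of how
 * the two tail pointers partition the singly-linked ->rcucblist.
 * Callbacks from the head of the list up through *->donetail have
 * already waited out a grace period and may be invoked; the remainder,
 * up through *->curtail, are still waiting:
 *
 *	rcucblist --> CB1 --> CB2 --> CB3 --> NULL
 *	                       ^               ^
 *	          donetail == &CB2->next   curtail == &CB3->next
 *
 * When the list is empty, both tail pointers reference ->rcucblist
 * itself, as in the initializers below.
 */
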
/* Definition for rcupdate control block. */
static struct rcu_ctrlblk rcu_sched_ctrlblk = {
	.donetail	= &rcu_sched_ctrlblk.rcucblist,
	.curtail	= &rcu_sched_ctrlblk.rcucblist,
	RCU_TRACE(.name = "rcu_sched")
};

static struct rcu_ctrlblk rcu_bh_ctrlblk = {
	.donetail	= &rcu_bh_ctrlblk.rcucblist,
	.curtail	= &rcu_bh_ctrlblk.rcucblist,
	RCU_TRACE(.name = "rcu_bh")
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
int rcu_scheduler_active __read_mostly;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#ifdef CONFIG_TINY_PREEMPT_RCU

#include <linux/delay.h>

/* Global control variables for preemptible RCU. */
struct rcu_preempt_ctrlblk {
	struct rcu_ctrlblk rcb;	/* curtail: ->next ptr of last CB for GP. */
	struct rcu_head **nexttail;
				/* Tasks blocked in a preemptible RCU */
				/*  read-side critical section while a */
				/*  preemptible-RCU grace period is in */
				/*  progress must wait for a later grace */
				/*  period.  This pointer points to the */
				/*  ->next pointer of the last task that */
				/*  must wait for a later grace period, or */
				/*  to &->rcb.rcucblist if there is no */
				/*  such task. */
	struct list_head blkd_tasks;
				/* Tasks blocked in RCU read-side critical */
				/*  section.  Tasks are placed at the head */
				/*  of this list and age towards the tail. */
	struct list_head *gp_tasks;
				/* Pointer to the first task blocking the */
				/*  current grace period, or NULL if there */
				/*  is no such task. */
	struct list_head *exp_tasks;
				/* Pointer to first task blocking the */
				/*  current expedited grace period, or NULL */
				/*  if there is no such task.  If there */
				/*  is no current expedited grace period, */
				/*  then there cannot be any such task. */
#ifdef CONFIG_RCU_BOOST
	struct list_head *boost_tasks;
				/* Pointer to first task that needs to be */
				/*  priority-boosted, or NULL if no priority */
				/*  boosting is needed.  If there is no */
				/*  current or expedited grace period, there */
				/*  can be no such task. */
#endif /* #ifdef CONFIG_RCU_BOOST */
	u8 gpnum;		/* Current grace period. */
	u8 gpcpu;		/* Last grace period blocked by the CPU. */
	u8 completed;		/* Last grace period completed. */
				/* If all three are equal, RCU is idle. */
#ifdef CONFIG_RCU_BOOST
	unsigned long boost_time; /* When to start boosting (jiffies). */
#endif /* #ifdef CONFIG_RCU_BOOST */
#ifdef CONFIG_RCU_TRACE
	unsigned long n_grace_periods;
#ifdef CONFIG_RCU_BOOST
	unsigned long n_tasks_boosted;
				/* Total number of tasks boosted. */
	unsigned long n_exp_boosts;
				/* Number of tasks boosted for expedited GP. */
	unsigned long n_normal_boosts;
				/* Number of tasks boosted for normal GP. */
	unsigned long n_balk_blkd_tasks;
				/* Refused to boost: no blocked tasks. */
	unsigned long n_balk_exp_gp_tasks;
				/* Refused to boost: nothing blocking GP. */
	unsigned long n_balk_boost_tasks;
				/* Refused to boost: already boosting. */
	unsigned long n_balk_notyet;
				/* Refused to boost: not yet time. */
	unsigned long n_balk_nos;
				/* Refused to boost: not sure why, though. */
				/*  This can happen due to race conditions. */
#endif /* #ifdef CONFIG_RCU_BOOST */
#endif /* #ifdef CONFIG_RCU_TRACE */
};

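/*
 * Illustrative progression of the three grace-period counters above
 * (a sketch added for clarity, not from the original file), assuming
 * that no reader blocks the grace period:
 *
 *	gpnum == gpcpu == completed	RCU idle
 *	gpnum++				grace period starts
 *	gpcpu = gpnum			CPU passes through a quiescent state
 *	completed = gpnum		grace period ends; RCU idle again
 */
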
static struct rcu_preempt_ctrlblk rcu_preempt_ctrlblk = {
	.rcb.donetail = &rcu_preempt_ctrlblk.rcb.rcucblist,
	.rcb.curtail = &rcu_preempt_ctrlblk.rcb.rcucblist,
	.nexttail = &rcu_preempt_ctrlblk.rcb.rcucblist,
	.blkd_tasks = LIST_HEAD_INIT(rcu_preempt_ctrlblk.blkd_tasks),
	RCU_TRACE(.rcb.name = "rcu_preempt")
};

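/*
 * Sketch (added for illustration, not from the original file) of the
 * ->blkd_tasks list and the pointers into it.  Newly blocked tasks
 * enter at the head and age towards the tail, so the tasks blocking
 * the current grace period always form a suffix of the list:
 *
 *	blkd_tasks: head -> T4 -> T3 -> T2 -> T1 -> (back to head)
 *	                          ^            ^
 *	                      gp_tasks     exp_tasks
 *
 * Here T3, T2, and T1 block the current grace period (T4 blocked only
 * after it started), while T1 alone blocks an expedited grace period
 * that was initiated back when T1 was the sole blocked task.
 */
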
static void rcu_read_unlock_special(struct task_struct *t);
static int rcu_preempted_readers_exp(void);
static void rcu_report_exp_done(void);

/*
 * Return true if the CPU has not yet responded to the current grace period.
 */
static int rcu_cpu_blocking_cur_gp(void)
{
	return rcu_preempt_ctrlblk.gpcpu != rcu_preempt_ctrlblk.gpnum;
}

/*
 * Check for a running RCU reader.  Because there is only one CPU,
 * there can be but one running RCU reader at a time.  ;-)
 *
 * Returns zero if there are no running readers.  Returns a positive
 * number if there is at least one reader within its RCU read-side
 * critical section.  Returns a negative number if an outermost reader
 * is in the midst of exiting from its RCU read-side critical section.
 */
static int rcu_preempt_running_reader(void)
{
	return current->rcu_read_lock_nesting;
}

/*
 * Check for preempted RCU readers blocking any grace period.
 * If the caller needs a reliable answer, it must disable hard irqs.
 */
static int rcu_preempt_blocked_readers_any(void)
{
	return !list_empty(&rcu_preempt_ctrlblk.blkd_tasks);
}

/*
 * Check for preempted RCU readers blocking the current grace period.
 * If the caller needs a reliable answer, it must disable hard irqs.
 */
static int rcu_preempt_blocked_readers_cgp(void)
{
	return rcu_preempt_ctrlblk.gp_tasks != NULL;
}

/*
 * Return true if another preemptible-RCU grace period is needed.
 */
static int rcu_preempt_needs_another_gp(void)
{
	return *rcu_preempt_ctrlblk.rcb.curtail != NULL;
}

/*
 * Return true if a preemptible-RCU grace period is in progress.
 * The caller must disable hardirqs.
 */
static int rcu_preempt_gp_in_progress(void)
{
	return rcu_preempt_ctrlblk.completed != rcu_preempt_ctrlblk.gpnum;
}

/*
 * Advance a ->blkd_tasks-list pointer to the next entry, returning
 * NULL instead if at the end of the list.
 */
static struct list_head *rcu_next_node_entry(struct task_struct *t)
{
	struct list_head *np;

	np = t->rcu_node_entry.next;
	if (np == &rcu_preempt_ctrlblk.blkd_tasks)
		np = NULL;
	return np;
}

#ifdef CONFIG_RCU_TRACE

#ifdef CONFIG_RCU_BOOST
static void rcu_initiate_boost_trace(void);
#endif /* #ifdef CONFIG_RCU_BOOST */

/*
 * Dump additional statistics for TINY_PREEMPT_RCU.
 */
static void show_tiny_preempt_stats(struct seq_file *m)
{
	seq_printf(m, "rcu_preempt: qlen=%ld gp=%lu g%u/p%u/c%u tasks=%c%c%c\n",
		   rcu_preempt_ctrlblk.rcb.qlen,
		   rcu_preempt_ctrlblk.n_grace_periods,
		   rcu_preempt_ctrlblk.gpnum,
		   rcu_preempt_ctrlblk.gpcpu,
		   rcu_preempt_ctrlblk.completed,
		   "T."[list_empty(&rcu_preempt_ctrlblk.blkd_tasks)],
		   "N."[!rcu_preempt_ctrlblk.gp_tasks],
		   "E."[!rcu_preempt_ctrlblk.exp_tasks]);
#ifdef CONFIG_RCU_BOOST
	seq_printf(m, "%sttb=%c ntb=%lu neb=%lu nnb=%lu j=%04x bt=%04x\n",
		   "             ",
		   "B."[!rcu_preempt_ctrlblk.boost_tasks],
		   rcu_preempt_ctrlblk.n_tasks_boosted,
		   rcu_preempt_ctrlblk.n_exp_boosts,
		   rcu_preempt_ctrlblk.n_normal_boosts,
		   (int)(jiffies & 0xffff),
		   (int)(rcu_preempt_ctrlblk.boost_time & 0xffff));
	seq_printf(m, "%s: nt=%lu egt=%lu bt=%lu ny=%lu nos=%lu\n",
		   "             balk",
		   rcu_preempt_ctrlblk.n_balk_blkd_tasks,
		   rcu_preempt_ctrlblk.n_balk_exp_gp_tasks,
		   rcu_preempt_ctrlblk.n_balk_boost_tasks,
		   rcu_preempt_ctrlblk.n_balk_notyet,
		   rcu_preempt_ctrlblk.n_balk_nos);
#endif /* #ifdef CONFIG_RCU_BOOST */
}

#endif /* #ifdef CONFIG_RCU_TRACE */

#ifdef CONFIG_RCU_BOOST

#include "rtmutex_common.h"

#define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO

/* Controls for rcu_kthread() kthread. */
static struct task_struct *rcu_kthread_task;
static DECLARE_WAIT_QUEUE_HEAD(rcu_kthread_wq);
static unsigned long have_rcu_kthread_work;

/*
 * Carry out RCU priority boosting on the task indicated by ->boost_tasks,
 * and advance ->boost_tasks to the next task in the ->blkd_tasks list.
 */
static int rcu_boost(void)
{
	unsigned long flags;
	struct rt_mutex mtx;
	struct task_struct *t;
	struct list_head *tb;

	if (rcu_preempt_ctrlblk.boost_tasks == NULL &&
	    rcu_preempt_ctrlblk.exp_tasks == NULL)
		return 0;  /* Nothing to boost. */

	raw_local_irq_save(flags);

	/*
	 * Recheck with irqs disabled: all tasks in need of boosting
	 * might exit their RCU read-side critical sections on their own
	 * if we are preempted just before disabling irqs.
	 */
	if (rcu_preempt_ctrlblk.boost_tasks == NULL &&
	    rcu_preempt_ctrlblk.exp_tasks == NULL) {
		raw_local_irq_restore(flags);
		return 0;
	}

	/*
	 * Preferentially boost tasks blocking expedited grace periods.
	 * This cannot starve the normal grace periods because a second
	 * expedited grace period must boost all blocked tasks, including
	 * those blocking the pre-existing normal grace period.
	 */
	if (rcu_preempt_ctrlblk.exp_tasks != NULL) {
		tb = rcu_preempt_ctrlblk.exp_tasks;
		RCU_TRACE(rcu_preempt_ctrlblk.n_exp_boosts++);
	} else {
		tb = rcu_preempt_ctrlblk.boost_tasks;
		RCU_TRACE(rcu_preempt_ctrlblk.n_normal_boosts++);
	}
	RCU_TRACE(rcu_preempt_ctrlblk.n_tasks_boosted++);

	/*
	 * We boost task t by manufacturing an rt_mutex that appears to
	 * be held by task t.  We leave a pointer to that rt_mutex where
	 * task t can find it, and task t will release the mutex when it
	 * exits its outermost RCU read-side critical section.  Then
	 * simply acquiring this artificial rt_mutex will boost task
	 * t's priority.  (Thanks to tglx for suggesting this approach!)
	 */
	t = container_of(tb, struct task_struct, rcu_node_entry);
	rt_mutex_init_proxy_locked(&mtx, t);
	t->rcu_boost_mutex = &mtx;
	raw_local_irq_restore(flags);
	rt_mutex_lock(&mtx);
	rt_mutex_unlock(&mtx);  /* Keep lockdep happy. */

	return ACCESS_ONCE(rcu_preempt_ctrlblk.boost_tasks) != NULL ||
	       ACCESS_ONCE(rcu_preempt_ctrlblk.exp_tasks) != NULL;
}

/*
 * Check to see if it is now time to start boosting RCU readers blocking
 * the current grace period, and, if so, tell the rcu_kthread_task to
 * start boosting them.  If there is an expedited boost in progress,
 * we wait for it to complete.
 *
 * If there are no blocked readers blocking the current grace period,
 * return 0 to let the caller know, otherwise return 1.  Note that this
 * return value is independent of whether or not boosting was done.
 */
static int rcu_initiate_boost(void)
{
	if (!rcu_preempt_blocked_readers_cgp() &&
	    rcu_preempt_ctrlblk.exp_tasks == NULL) {
		RCU_TRACE(rcu_preempt_ctrlblk.n_balk_exp_gp_tasks++);
		return 0;
	}
	if (rcu_preempt_ctrlblk.exp_tasks != NULL ||
	    (rcu_preempt_ctrlblk.gp_tasks != NULL &&
	     rcu_preempt_ctrlblk.boost_tasks == NULL &&
	     ULONG_CMP_GE(jiffies, rcu_preempt_ctrlblk.boost_time))) {
		if (rcu_preempt_ctrlblk.exp_tasks == NULL)
			rcu_preempt_ctrlblk.boost_tasks =
				rcu_preempt_ctrlblk.gp_tasks;
		invoke_rcu_callbacks();
	} else
		RCU_TRACE(rcu_initiate_boost_trace());
	return 1;
}

#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)

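/*
 * Worked example of the conversion above, using assumed (not actual)
 * config values: CONFIG_RCU_BOOST_DELAY=500 milliseconds and HZ=100
 * give DIV_ROUND_UP(500 * 100, 1000) = 50 jiffies of delay before
 * boosting begins.
 */
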
/*
 * Do priority-boost accounting for the start of a new grace period.
 */
static void rcu_preempt_boost_start_gp(void)
{
	rcu_preempt_ctrlblk.boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
}

#else /* #ifdef CONFIG_RCU_BOOST */

/*
 * If there is no RCU priority boosting, we don't initiate boosting,
 * but we do indicate whether there are blocked readers blocking the
 * current grace period.
 */
static int rcu_initiate_boost(void)
{
	return rcu_preempt_blocked_readers_cgp();
}

/*
 * If there is no RCU priority boosting, nothing to do at grace-period start.
 */
static void rcu_preempt_boost_start_gp(void)
{
}

#endif /* #else #ifdef CONFIG_RCU_BOOST */

/*
 * Record a preemptible-RCU quiescent state for the specified CPU.  Note
 * that this just means that the task currently running on the CPU is
 * in a quiescent state.  There might be any number of tasks blocked
 * while in an RCU read-side critical section.
 *
 * Unlike the other rcu_*_qs() functions, callers to this function
 * must disable irqs in order to protect the assignment to
 * ->rcu_read_unlock_special.
 *
 * Because this is a single-CPU implementation, the only way a grace
 * period can end is if the CPU is in a quiescent state.  The reason is
 * that a blocked preemptible-RCU reader can exit its critical section
 * only if the CPU is running it at the time.  Therefore, when the
 * last task blocking the current grace period exits its RCU read-side
 * critical section, neither the CPU nor blocked tasks will be stopping
 * the current grace period.  (In contrast, SMP implementations
 * might have CPUs running in RCU read-side critical sections that
 * block later grace periods -- but this is not possible given only
 * one CPU.)
 */
static void rcu_preempt_cpu_qs(void)
{
	/* Record both CPU and task as having responded to current GP. */
	rcu_preempt_ctrlblk.gpcpu = rcu_preempt_ctrlblk.gpnum;
	current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;

	/* If there is no GP then there is nothing more to do.  */
	if (!rcu_preempt_gp_in_progress())
		return;
	/*
	 * Check up on boosting.  If there are readers blocking the
	 * current grace period, leave.
	 */
	if (rcu_initiate_boost())
		return;

	/* Advance callbacks. */
	rcu_preempt_ctrlblk.completed = rcu_preempt_ctrlblk.gpnum;
	rcu_preempt_ctrlblk.rcb.donetail = rcu_preempt_ctrlblk.rcb.curtail;
	rcu_preempt_ctrlblk.rcb.curtail = rcu_preempt_ctrlblk.nexttail;

	/* If there are no blocked readers, next GP is done instantly. */
	if (!rcu_preempt_blocked_readers_any())
		rcu_preempt_ctrlblk.rcb.donetail = rcu_preempt_ctrlblk.nexttail;

	/* If there are done callbacks, cause them to be invoked. */
	if (*rcu_preempt_ctrlblk.rcb.donetail != NULL)
		invoke_rcu_callbacks();
}

/*
 * Start a new RCU grace period if warranted.  Hard irqs must be disabled.
 */
static void rcu_preempt_start_gp(void)
{
	if (!rcu_preempt_gp_in_progress() && rcu_preempt_needs_another_gp()) {

		/* Official start of GP. */
		rcu_preempt_ctrlblk.gpnum++;
		RCU_TRACE(rcu_preempt_ctrlblk.n_grace_periods++);

		/* Any blocked RCU readers block new GP. */
		if (rcu_preempt_blocked_readers_any())
			rcu_preempt_ctrlblk.gp_tasks =
				rcu_preempt_ctrlblk.blkd_tasks.next;

		/* Set up for RCU priority boosting. */
		rcu_preempt_boost_start_gp();

		/* If there is no running reader, CPU is done with GP. */
		if (!rcu_preempt_running_reader())
			rcu_preempt_cpu_qs();
	}
}

/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from.  If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the blkd_tasks list.
 * If the task started after the current grace period began, as recorded
 * by ->gpcpu, we enqueue at the beginning of the list.  Otherwise,
 * we enqueue before the element referenced by ->gp_tasks (or at the
 * tail if ->gp_tasks is NULL) and point ->gp_tasks at the newly added
 * element.  The task will dequeue itself when it exits the outermost
 * enclosing RCU read-side critical section.  Therefore, the current
 * grace period cannot be permitted to complete until the ->gp_tasks
 * pointer becomes NULL.
 *
 * Caller must disable preemption.
 */
void rcu_preempt_note_context_switch(void)
{
	struct task_struct *t = current;
	unsigned long flags;

	local_irq_save(flags); /* must exclude scheduler_tick(). */
	if (rcu_preempt_running_reader() > 0 &&
	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {

		/* Possibly blocking in an RCU read-side critical section. */
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;

		/*
		 * If this CPU has already checked in, then this task
		 * will hold up the next grace period rather than the
		 * current grace period.  Queue the task accordingly.
		 * If the task is queued for the current grace period
		 * (i.e., this CPU has not yet passed through a quiescent
		 * state for the current grace period), then as long
		 * as that task remains queued, the current grace period
		 * cannot end.
		 */
		list_add(&t->rcu_node_entry, &rcu_preempt_ctrlblk.blkd_tasks);
		if (rcu_cpu_blocking_cur_gp())
			rcu_preempt_ctrlblk.gp_tasks = &t->rcu_node_entry;
	} else if (rcu_preempt_running_reader() < 0 &&
		   t->rcu_read_unlock_special) {
		/*
		 * Complete exit from RCU read-side critical section on
		 * behalf of preempted instance of __rcu_read_unlock().
		 */
		rcu_read_unlock_special(t);
	}

	/*
	 * Either we were not in an RCU read-side critical section to
	 * begin with, or we have now recorded that critical section
	 * globally.  Either way, we can now note a quiescent state
	 * for this CPU.  Again, if we were in an RCU read-side critical
	 * section, and if that critical section was blocking the current
	 * grace period, then the fact that the task has been enqueued
	 * means that current grace period continues to be blocked.
	 */
	rcu_preempt_cpu_qs();
	local_irq_restore(flags);
}

/*
 * Tiny-preemptible RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting; shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
	current->rcu_read_lock_nesting++;
	barrier(); /* needed if we ever invoke rcu_read_lock in rcutiny.c */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

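/*
 * Reader-side usage sketch (hypothetical RCU-protected pointer "gp"
 * and helper "do_something_with()", not part of this file).  Each
 * rcu_read_lock() maps to one increment of ->rcu_read_lock_nesting:
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	if (p != NULL)
 *		do_something_with(p);
 *	rcu_read_unlock();
 */
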
/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or the task having blocked during the
 * RCU read-side critical section.
 */
static noinline void rcu_read_unlock_special(struct task_struct *t)
{
	int empty;
	int empty_exp;
	unsigned long flags;
	struct list_head *np;
#ifdef CONFIG_RCU_BOOST
	struct rt_mutex *rbmp = NULL;
#endif /* #ifdef CONFIG_RCU_BOOST */
	int special;

	/*
	 * NMI handlers cannot block and cannot safely manipulate state.
	 * They therefore cannot possibly be special, so just leave.
	 */
	if (in_nmi())
		return;

	local_irq_save(flags);

	/*
	 * If RCU core is waiting for this CPU to exit critical section,
	 * let it know that we have done so.
	 */
	special = t->rcu_read_unlock_special;
	if (special & RCU_READ_UNLOCK_NEED_QS)
		rcu_preempt_cpu_qs();

	/* Hardware IRQ handlers cannot block. */
	if (in_irq() || in_serving_softirq()) {
		local_irq_restore(flags);
		return;
	}

	/* Clean up if blocked during RCU read-side critical section. */
	if (special & RCU_READ_UNLOCK_BLOCKED) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;

		/*
		 * Remove this task from the ->blkd_tasks list and adjust
		 * any pointers that might have been referencing it.
		 */
		empty = !rcu_preempt_blocked_readers_cgp();
		empty_exp = rcu_preempt_ctrlblk.exp_tasks == NULL;
		np = rcu_next_node_entry(t);
		list_del_init(&t->rcu_node_entry);
		if (&t->rcu_node_entry == rcu_preempt_ctrlblk.gp_tasks)
			rcu_preempt_ctrlblk.gp_tasks = np;
		if (&t->rcu_node_entry == rcu_preempt_ctrlblk.exp_tasks)
			rcu_preempt_ctrlblk.exp_tasks = np;
#ifdef CONFIG_RCU_BOOST
		if (&t->rcu_node_entry == rcu_preempt_ctrlblk.boost_tasks)
			rcu_preempt_ctrlblk.boost_tasks = np;
#endif /* #ifdef CONFIG_RCU_BOOST */

		/*
		 * If this was the last task on the current list, and if
		 * we aren't waiting on the CPU, report the quiescent state
		 * and start a new grace period if needed.
		 */
		if (!empty && !rcu_preempt_blocked_readers_cgp()) {
			rcu_preempt_cpu_qs();
			rcu_preempt_start_gp();
		}

		/*
		 * If this was the last task on the expedited lists,
		 * then we need to wake up the waiting task.
		 */
		if (!empty_exp && rcu_preempt_ctrlblk.exp_tasks == NULL)
			rcu_report_exp_done();
	}
#ifdef CONFIG_RCU_BOOST
	/* Unboost self if was boosted. */
	if (t->rcu_boost_mutex != NULL) {
		rbmp = t->rcu_boost_mutex;
		t->rcu_boost_mutex = NULL;
		rt_mutex_unlock(rbmp);
	}
#endif /* #ifdef CONFIG_RCU_BOOST */
	local_irq_restore(flags);
}

/*
 * Tiny-preemptible RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
	struct task_struct *t = current;

	barrier(); /* needed if we ever invoke rcu_read_unlock in rcutiny.c */
	if (t->rcu_read_lock_nesting != 1)
		--t->rcu_read_lock_nesting;
	else {
		t->rcu_read_lock_nesting = INT_MIN;
		barrier();  /* assign before ->rcu_read_unlock_special load */
		if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
			rcu_read_unlock_special(t);
		barrier();  /* ->rcu_read_unlock_special load before assign */
		t->rcu_read_lock_nesting = 0;
	}
#ifdef CONFIG_PROVE_LOCKING
	{
		int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);

		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
	}
#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);

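/*
 * Illustrative trace (a sketch, not from the original file) of
 * ->rcu_read_lock_nesting for a doubly nested reader:
 *
 *	rcu_read_lock();	nesting: 0 -> 1
 *	rcu_read_lock();	nesting: 1 -> 2
 *	rcu_read_unlock();	nesting: 2 -> 1
 *	rcu_read_unlock();	nesting: 1 -> INT_MIN -> 0
 *
 * The transient INT_MIN marks the outermost rcu_read_unlock() as in
 * progress, so rcu_preempt_running_reader() goes negative while
 * rcu_read_unlock_special() runs, as checked for in
 * rcu_preempt_note_context_switch() above.
 */
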
/*
 * Check for a quiescent state from the current CPU.  When a task blocks,
 * the task is recorded in the rcu_preempt_ctrlblk structure, which is
 * checked elsewhere.  This is called from the scheduling-clock interrupt.
 *
 * Caller must disable hard irqs.
 */
static void rcu_preempt_check_callbacks(void)
{
	struct task_struct *t = current;

	if (rcu_preempt_gp_in_progress() &&
	    (!rcu_preempt_running_reader() ||
	     !rcu_cpu_blocking_cur_gp()))
		rcu_preempt_cpu_qs();
	if (&rcu_preempt_ctrlblk.rcb.rcucblist !=
	    rcu_preempt_ctrlblk.rcb.donetail)
		invoke_rcu_callbacks();
	if (rcu_preempt_gp_in_progress() &&
	    rcu_cpu_blocking_cur_gp() &&
	    rcu_preempt_running_reader() > 0)
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
}

/*
 * TINY_PREEMPT_RCU has an extra callback-list tail pointer to
 * update, so this is invoked from rcu_process_callbacks() to
 * handle that case.  Of course, it is invoked for all flavors of
 * RCU, but RCU callbacks can appear only on one of the lists, and
 * neither ->nexttail nor ->donetail can possibly be NULL, so there
 * is no need for an explicit check.
 */
static void rcu_preempt_remove_callbacks(struct rcu_ctrlblk *rcp)
{
	if (rcu_preempt_ctrlblk.nexttail == rcp->donetail)
		rcu_preempt_ctrlblk.nexttail = &rcp->rcucblist;
}

/*
 * Process callbacks for preemptible RCU.
 */
static void rcu_preempt_process_callbacks(void)
{
	__rcu_process_callbacks(&rcu_preempt_ctrlblk.rcb);
}

/*
 * Queue a preemptible-RCU callback for invocation after a grace period.
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	unsigned long flags;

	debug_rcu_head_queue(head);
	head->func = func;
	head->next = NULL;

	local_irq_save(flags);
	*rcu_preempt_ctrlblk.nexttail = head;
	rcu_preempt_ctrlblk.nexttail = &head->next;
	RCU_TRACE(rcu_preempt_ctrlblk.rcb.qlen++);
	rcu_preempt_start_gp();  /* checks to see if GP needed. */
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(call_rcu);

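/*
 * Typical caller-side sketch (hypothetical structure "foo" with an
 * embedded rcu_head, not part of this file): remove the element from
 * its RCU-protected list, then hand reclamation to call_rcu():
 *
 *	static void foo_reclaim(struct rcu_head *rh)
 *	{
 *		struct foo *fp = container_of(rh, struct foo, rcu);
 *
 *		kfree(fp);
 *	}
 *
 *	list_del_rcu(&fp->list);
 *	call_rcu(&fp->rcu, foo_reclaim);
 */
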
/*
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 */
void synchronize_rcu(void)
{
	rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
			   !lock_is_held(&rcu_lock_map) &&
			   !lock_is_held(&rcu_sched_lock_map),
			   "Illegal synchronize_rcu() in RCU read-side critical section");

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (!rcu_scheduler_active)
		return;
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

	WARN_ON_ONCE(rcu_preempt_running_reader());
	if (!rcu_preempt_blocked_readers_any())
		return;

	/* Once we get past the fastpath checks, same code as rcu_barrier(). */
	rcu_barrier();
}
EXPORT_SYMBOL_GPL(synchronize_rcu);

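/*
 * Matching updater-side sketch (same hypothetical "foo" element as
 * above, not part of this file), trading the asynchronous call_rcu()
 * for a synchronous wait:
 *
 *	list_del_rcu(&fp->list);
 *	synchronize_rcu();	(waits out all pre-existing readers)
 *	kfree(fp);
 */
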
static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
static unsigned long sync_rcu_preempt_exp_count;
static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);

/*
 * Return non-zero if there are any tasks in RCU read-side critical
 * sections blocking the current preemptible-RCU expedited grace period.
 * If there is no preemptible-RCU expedited grace period currently in
 * progress, returns zero unconditionally.
 */
static int rcu_preempted_readers_exp(void)
{
	return rcu_preempt_ctrlblk.exp_tasks != NULL;
}

/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.
 */
static void rcu_report_exp_done(void)
{
	wake_up(&sync_rcu_preempt_exp_wq);
}

/*
 * Wait for an rcu-preempt grace period, but expedite it.  The basic idea
 * is to rely on the fact that there is but one CPU, and that it is
 * illegal for a task to invoke synchronize_rcu_expedited() while in a
 * preemptible-RCU read-side critical section.  Therefore, any such
 * critical sections must correspond to blocked tasks, which must therefore
 * be on the ->blkd_tasks list.  So just record the current head of the
 * list in the ->exp_tasks pointer, and wait for all tasks including and
 * after the task pointed to by ->exp_tasks to drain.
 */
void synchronize_rcu_expedited(void)
{
	unsigned long flags;
	struct rcu_preempt_ctrlblk *rpcp = &rcu_preempt_ctrlblk;
	unsigned long snap;

	barrier(); /* ensure prior action seen before grace period. */

	WARN_ON_ONCE(rcu_preempt_running_reader());

	/*
	 * Acquire lock so that there is only one preemptible RCU grace
	 * period in flight.  Of course, if someone does the expedited
	 * grace period for us while we are acquiring the lock, just leave.
	 */
	snap = sync_rcu_preempt_exp_count + 1;
	mutex_lock(&sync_rcu_preempt_exp_mutex);
	if (ULONG_CMP_LT(snap, sync_rcu_preempt_exp_count))
		goto unlock_mb_ret; /* Others did our work for us. */

	local_irq_save(flags);

	/*
	 * All RCU readers have to already be on blkd_tasks because
	 * we cannot legally be executing in an RCU read-side critical
	 * section.
	 */

	/* Snapshot current head of ->blkd_tasks list. */
	rpcp->exp_tasks = rpcp->blkd_tasks.next;
	if (rpcp->exp_tasks == &rpcp->blkd_tasks)
		rpcp->exp_tasks = NULL;

	/* Wait for tail of ->blkd_tasks list to drain. */
	if (!rcu_preempted_readers_exp())
		local_irq_restore(flags);
	else {
		rcu_initiate_boost();
		local_irq_restore(flags);
		wait_event(sync_rcu_preempt_exp_wq,
			   !rcu_preempted_readers_exp());
	}

	/* Clean up and exit. */
	barrier(); /* ensure expedited GP seen before counter increment. */
	sync_rcu_preempt_exp_count++;
unlock_mb_ret:
	mutex_unlock(&sync_rcu_preempt_exp_mutex);
	barrier(); /* ensure subsequent action seen after grace period. */
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

/*
 * Does preemptible RCU need the CPU to stay out of dynticks mode?
 */
int rcu_preempt_needs_cpu(void)
{
	if (!rcu_preempt_running_reader())
		rcu_preempt_cpu_qs();
	return rcu_preempt_ctrlblk.rcb.rcucblist != NULL;
}

/*
 * Check for a task exiting while in a preemptible-RCU read-side
 * critical section; clean up if so.  No need to issue warnings,
 * as debug_check_no_locks_held() already does this if lockdep
 * is enabled.
 */
void exit_rcu(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting == 0)
		return;
	t->rcu_read_lock_nesting = 1;
	__rcu_read_unlock();
}

#else /* #ifdef CONFIG_TINY_PREEMPT_RCU */

#ifdef CONFIG_RCU_TRACE

/*
 * Because preemptible RCU does not exist, it is not necessary to
 * dump out its statistics.
 */
static void show_tiny_preempt_stats(struct seq_file *m)
{
}

#endif /* #ifdef CONFIG_RCU_TRACE */

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to check.
 */
static void rcu_preempt_check_callbacks(void)
{
}

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to remove.
 */
static void rcu_preempt_remove_callbacks(struct rcu_ctrlblk *rcp)
{
}

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to process.
 */
static void rcu_preempt_process_callbacks(void)
{
}

#endif /* #else #ifdef CONFIG_TINY_PREEMPT_RCU */

#ifdef CONFIG_RCU_BOOST

/*
 * Wake up rcu_kthread() to process callbacks now eligible for invocation
 * or to boost readers.
 */
static void invoke_rcu_callbacks(void)
{
	have_rcu_kthread_work = 1;
	if (rcu_kthread_task != NULL)
		wake_up(&rcu_kthread_wq);
}

#ifdef CONFIG_RCU_TRACE

/*
 * Is the current CPU running the RCU-callbacks kthread?
 * Caller must have preemption disabled.
 */
static bool rcu_is_callbacks_kthread(void)
{
	return rcu_kthread_task == current;
}

#endif /* #ifdef CONFIG_RCU_TRACE */

/*
 * This kthread invokes RCU callbacks whose grace periods have
 * elapsed.  It is awakened as needed, and takes the place of the
 * RCU_SOFTIRQ that is used for this purpose when boosting is disabled.
 * This is a kthread, but it is never stopped, at least not until
 * the system goes down.
 */
static int rcu_kthread(void *arg)
{
	unsigned long work;
	unsigned long morework;
	unsigned long flags;

	for (;;) {
		wait_event_interruptible(rcu_kthread_wq,
					 have_rcu_kthread_work != 0);
		morework = rcu_boost();
		local_irq_save(flags);
		work = have_rcu_kthread_work;
		have_rcu_kthread_work = morework;
		local_irq_restore(flags);
		if (work)
			rcu_process_callbacks(NULL);
		schedule_timeout_interruptible(1); /* Leave CPU for others. */
	}

	return 0;  /* Not reached, but needed to shut gcc up. */
}

/*
 * Spawn the kthread that invokes RCU callbacks.
 */
static int __init rcu_spawn_kthreads(void)
{
	struct sched_param sp;

	rcu_kthread_task = kthread_run(rcu_kthread, NULL, "rcu_kthread");
	sp.sched_priority = RCU_BOOST_PRIO;
	sched_setscheduler_nocheck(rcu_kthread_task, SCHED_FIFO, &sp);
	return 0;
}
early_initcall(rcu_spawn_kthreads);

#else /* #ifdef CONFIG_RCU_BOOST */

/* Hold off callback invocation until early_initcall() time. */
static int rcu_scheduler_fully_active __read_mostly;

/*
 * Start up softirq processing of callbacks.
 */
void invoke_rcu_callbacks(void)
{
	if (rcu_scheduler_fully_active)
		raise_softirq(RCU_SOFTIRQ);
}

#ifdef CONFIG_RCU_TRACE

/*
 * There is no callback kthread, so this thread is never it.
 */
static bool rcu_is_callbacks_kthread(void)
{
	return false;
}

#endif /* #ifdef CONFIG_RCU_TRACE */

static int __init rcu_scheduler_really_started(void)
{
	rcu_scheduler_fully_active = 1;
	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
	raise_softirq(RCU_SOFTIRQ);  /* Invoke any callbacks from early boot. */
	return 0;
}
early_initcall(rcu_scheduler_really_started);

#endif /* #else #ifdef CONFIG_RCU_BOOST */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#include <linux/kernel_stat.h>

/*
 * During boot, we forgive RCU lockdep issues.  After this function is
 * invoked, we start taking RCU lockdep issues seriously.
 */
void __init rcu_scheduler_starting(void)
{
	WARN_ON(nr_context_switches() > 0);
	rcu_scheduler_active = 1;
}

#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#ifdef CONFIG_RCU_TRACE

#ifdef CONFIG_RCU_BOOST

static void rcu_initiate_boost_trace(void)
{
	if (list_empty(&rcu_preempt_ctrlblk.blkd_tasks))
		rcu_preempt_ctrlblk.n_balk_blkd_tasks++;
	else if (rcu_preempt_ctrlblk.gp_tasks == NULL &&
		 rcu_preempt_ctrlblk.exp_tasks == NULL)
		rcu_preempt_ctrlblk.n_balk_exp_gp_tasks++;
	else if (rcu_preempt_ctrlblk.boost_tasks != NULL)
		rcu_preempt_ctrlblk.n_balk_boost_tasks++;
	else if (!ULONG_CMP_GE(jiffies, rcu_preempt_ctrlblk.boost_time))
		rcu_preempt_ctrlblk.n_balk_notyet++;
	else
		rcu_preempt_ctrlblk.n_balk_nos++;
}

#endif /* #ifdef CONFIG_RCU_BOOST */

static void rcu_trace_sub_qlen(struct rcu_ctrlblk *rcp, int n)
{
	unsigned long flags;

	raw_local_irq_save(flags);
	rcp->qlen -= n;
	raw_local_irq_restore(flags);
}

/*
 * Dump statistics for TINY_RCU, such as they are.
 */
static int show_tiny_stats(struct seq_file *m, void *unused)
{
	show_tiny_preempt_stats(m);
	seq_printf(m, "rcu_sched: qlen: %ld\n", rcu_sched_ctrlblk.qlen);
	seq_printf(m, "rcu_bh: qlen: %ld\n", rcu_bh_ctrlblk.qlen);
	return 0;
}

static int show_tiny_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, show_tiny_stats, NULL);
}

static const struct file_operations show_tiny_stats_fops = {
	.owner = THIS_MODULE,
	.open = show_tiny_stats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static struct dentry *rcudir;

static int __init rcutiny_trace_init(void)
{
	struct dentry *retval;

	rcudir = debugfs_create_dir("rcu", NULL);
	if (!rcudir)
		goto free_out;
	retval = debugfs_create_file("rcudata", 0444, rcudir,
				     NULL, &show_tiny_stats_fops);
	if (!retval)
		goto free_out;
	return 0;
free_out:
	debugfs_remove_recursive(rcudir);
	return 1;
}

static void __exit rcutiny_trace_cleanup(void)
{
	debugfs_remove_recursive(rcudir);
}

module_init(rcutiny_trace_init);
module_exit(rcutiny_trace_cleanup);

MODULE_AUTHOR("Paul E. McKenney");
MODULE_DESCRIPTION("Read-Copy Update tracing for tiny implementation");
MODULE_LICENSE("GPL");

#endif /* #ifdef CONFIG_RCU_TRACE */