/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU
 */
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rcupdate.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/cpu.h>
#include <linux/prefetch.h>
#include <linux/trace_events.h>

#include "rcu.h"

/* Forward declarations for tiny_plugin.h. */
struct rcu_ctrlblk;
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
static void rcu_process_callbacks(struct softirq_action *unused);
static void __call_rcu(struct rcu_head *head,
		       rcu_callback_t func,
		       struct rcu_ctrlblk *rcp);

#include "tiny_plugin.h"

#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE)

/*
 * Test whether RCU thinks that the current CPU is idle.
 */
bool notrace __rcu_is_watching(void)
{
	return true;
}
EXPORT_SYMBOL(__rcu_is_watching);

#endif /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */

/*
 * Helper function for rcu_sched_qs() and rcu_bh_qs().
 * Also irqs are disabled to avoid confusion due to interrupt handlers
 * invoking call_rcu().
 */
static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
{
	RCU_TRACE(reset_cpu_stall_ticks(rcp));
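	/*
	 * A quiescent state makes every callback queued so far ready to
	 * invoke: advance ->donetail, the end of the done sublist, up to
	 * ->curtail, the end of the whole list.
	 */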
	if (rcp->donetail != rcp->curtail) {
		rcp->donetail = rcp->curtail;
		return 1;
	}

	return 0;
}

/*
 * Record an rcu quiescent state.  And an rcu_bh quiescent state while we
 * are at it, given that any rcu quiescent state is also an rcu_bh
 * quiescent state.  Use "+" instead of "||" to defeat short circuiting.
 */
void rcu_sched_qs(void)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_qsctr_help(&rcu_sched_ctrlblk) +
	    rcu_qsctr_help(&rcu_bh_ctrlblk))
		raise_softirq(RCU_SOFTIRQ);
	local_irq_restore(flags);
}

/*
 * Record an rcu_bh quiescent state.
 */
void rcu_bh_qs(void)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_qsctr_help(&rcu_bh_ctrlblk))
		raise_softirq(RCU_SOFTIRQ);
	local_irq_restore(flags);
}

/*
 * Check to see if the scheduling-clock interrupt came from an extended
 * quiescent state, and, if so, tell RCU about it.  This function must
 * be called from hardirq context.  It is normally called from the
 * scheduling-clock interrupt.
 */
void rcu_check_callbacks(int user)
{
	RCU_TRACE(check_cpu_stalls());
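	/*
	 * An interrupt taken from user mode means the CPU cannot be inside
	 * an RCU-sched read-side critical section, so this is a quiescent
	 * state for both rcu_sched and rcu_bh.  Otherwise, interrupting
	 * anything other than softirq (or BH-disabled) code still implies
	 * an rcu_bh quiescent state.
	 */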
	if (user)
		rcu_sched_qs();
	else if (!in_softirq())
		rcu_bh_qs();
	if (user)
		rcu_note_voluntary_context_switch(current);
}

/*
 * Invoke the RCU callbacks on the specified rcu_ctrlblk structure
 * whose grace period has elapsed.
 */
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
{
	const char *rn = NULL;
	struct rcu_head *next, *list;
	unsigned long flags;
	RCU_TRACE(int cb_count = 0);

	/* Move the ready-to-invoke callbacks to a local list. */
	local_irq_save(flags);
	if (rcp->donetail == &rcp->rcucblist) {
		/* No callbacks ready, so just leave. */
		local_irq_restore(flags);
		return;
	}
	RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, rcp->qlen, -1));
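	/*
	 * Snip the done callbacks off the head of the list into "list",
	 * leave any not-yet-done callbacks (those after ->donetail) on
	 * ->rcucblist, and reset the tail pointers so that neither one
	 * references the detached sublist.
	 */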
	list = rcp->rcucblist;
	rcp->rcucblist = *rcp->donetail;
	*rcp->donetail = NULL;
	if (rcp->curtail == rcp->donetail)
		rcp->curtail = &rcp->rcucblist;
	rcp->donetail = &rcp->rcucblist;
	local_irq_restore(flags);

	/* Invoke the callbacks on the local list. */
	RCU_TRACE(rn = rcp->name);
	while (list) {
		next = list->next;
		prefetch(next);
		debug_rcu_head_unqueue(list);
		local_bh_disable();
		__rcu_reclaim(rn, list);
		local_bh_enable();
		list = next;
		RCU_TRACE(cb_count++);
	}
	RCU_TRACE(rcu_trace_sub_qlen(rcp, cb_count));
	RCU_TRACE(trace_rcu_batch_end(rcp->name,
				      cb_count, 0, need_resched(),
				      is_idle_task(current),
				      false));
}

static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
{
	__rcu_process_callbacks(&rcu_sched_ctrlblk);
	__rcu_process_callbacks(&rcu_bh_ctrlblk);
}

/*
 * Wait for a grace period to elapse.  But it is illegal to invoke
 * synchronize_sched() from within an RCU read-side critical section.
 * Therefore, any legal call to synchronize_sched() is a quiescent
 * state, and so on a UP system, synchronize_sched() need do nothing.
 * Ditto for synchronize_rcu_bh().  (But Lai Jiangshan points out the
 * benefits of doing might_sleep() to reduce latency.)
 *
 * Cool, huh?  (Due to Josh Triplett.)
 */
void synchronize_sched(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_sched() in RCU read-side critical section");
}
EXPORT_SYMBOL_GPL(synchronize_sched);
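
/*
 * A minimal usage sketch (illustrative only): an updater publishes a new
 * version of a structure, waits for any pre-existing RCU-sched readers
 * (rcu_read_lock_sched() sections), and only then frees the old version.
 * The names gp, gp_lock, oldp, and newp are hypothetical.
 *
 *	spin_lock(&gp_lock);
 *	oldp = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
 *	rcu_assign_pointer(gp, newp);
 *	spin_unlock(&gp_lock);
 *	synchronize_sched();	-- wait for pre-existing readers
 *	kfree(oldp);
 */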

/*
 * Helper function for call_rcu() and call_rcu_bh().
 */
static void __call_rcu(struct rcu_head *head,
		       rcu_callback_t func,
		       struct rcu_ctrlblk *rcp)
{
	unsigned long flags;

	debug_rcu_head_queue(head);
	head->func = func;
	head->next = NULL;

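	/* Append the new callback at the tail of ->rcucblist. */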
	local_irq_save(flags);
	*rcp->curtail = head;
	rcp->curtail = &head->next;
	RCU_TRACE(rcp->qlen++);
	local_irq_restore(flags);

	if (unlikely(is_idle_task(current))) {
		/* force scheduling for rcu_sched_qs() */
		resched_cpu(0);
	}
}

/*
 * Post an RCU callback to be invoked after the end of an RCU-sched grace
 * period.  But since we have but one CPU, that would be after any
 * quiescent state.
 */
void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
{
	__call_rcu(head, func, &rcu_sched_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_sched);
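
/*
 * A minimal asynchronous-usage sketch (illustrative only): embed an
 * rcu_head in the protected structure and let the callback free it once
 * a grace period has elapsed.  struct foo, foo_rcu_free(), and the rh
 * field are hypothetical.
 *
 *	static void foo_rcu_free(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct foo, rh));
 *	}
 *
 *	call_rcu_sched(&p->rh, foo_rcu_free);
 */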

/*
 * Post an RCU bottom-half callback to be invoked after any subsequent
 * quiescent state.
 */
void call_rcu_bh(struct rcu_head *head, rcu_callback_t func)
{
	__call_rcu(head, func, &rcu_bh_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);

void __init rcu_init(void)
{
	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
	RCU_TRACE(reset_cpu_stall_ticks(&rcu_sched_ctrlblk));
	RCU_TRACE(reset_cpu_stall_ticks(&rcu_bh_ctrlblk));

	rcu_early_boot_tests();
}