/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2001
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *	    Manfred Spraul <manfred@colorfullife.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		http://lse.sourceforge.net/locking/rcupdate.html
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/hardirq.h>

#define CREATE_TRACE_POINTS
#include <trace/events/rcu.h>

#include "rcu.h"

#ifdef CONFIG_PREEMPT_RCU

/*
 * Preemptible RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting; shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
	current->rcu_read_lock_nesting++;
	barrier();  /* critical section after entry code. */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

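/*
 * Example (a sketch, not part of this file; "gp" and "do_something()" are
 * hypothetical names): a reader brackets its accesses with rcu_read_lock()
 * and rcu_read_unlock(), fetching the protected pointer via
 * rcu_dereference():
 *
 *	struct foo *p;
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	if (p)
 *		do_something(p->a);
 *	rcu_read_unlock();
 */
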
/*
 * Preemptible RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting != 1) {
		--t->rcu_read_lock_nesting;
	} else {
		barrier();  /* critical section before exit code. */
		t->rcu_read_lock_nesting = INT_MIN;
		barrier();  /* assign before ->rcu_read_unlock_special load */
		if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
			rcu_read_unlock_special(t);
		barrier();  /* ->rcu_read_unlock_special load before assign */
		t->rcu_read_lock_nesting = 0;
	}
#ifdef CONFIG_PROVE_LOCKING
	{
		int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);

		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
	}
#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);

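/*
 * Example (a sketch): read-side critical sections nest, and only the
 * outermost rcu_read_unlock() performs the special-case processing above:
 *
 *	rcu_read_lock();	->rcu_read_lock_nesting: 0 -> 1
 *	rcu_read_lock();	->rcu_read_lock_nesting: 1 -> 2
 *	...
 *	rcu_read_unlock();	->rcu_read_lock_nesting: 2 -> 1 (fast path)
 *	rcu_read_unlock();	outermost: special cases handled if needed
 */
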
/*
 * Check for a task exiting while in a preemptible-RCU read-side
 * critical section, and clean up if so.  No need to issue warnings,
 * as debug_check_no_locks_held() already does this if lockdep
 * is enabled.
 */
void exit_rcu(void)
{
	struct task_struct *t = current;

	if (likely(list_empty(&current->rcu_node_entry)))
		return;
	t->rcu_read_lock_nesting = 1;
	barrier();
	t->rcu_read_unlock_special = RCU_READ_UNLOCK_BLOCKED;
	__rcu_read_unlock();
}

#else /* #ifdef CONFIG_PREEMPT_RCU */

void exit_rcu(void)
{
}

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
EXPORT_SYMBOL_GPL(rcu_lock_map);

static struct lock_class_key rcu_bh_lock_key;
struct lockdep_map rcu_bh_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_bh", &rcu_bh_lock_key);
EXPORT_SYMBOL_GPL(rcu_bh_lock_map);

static struct lock_class_key rcu_sched_lock_key;
struct lockdep_map rcu_sched_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_sched", &rcu_sched_lock_key);
EXPORT_SYMBOL_GPL(rcu_sched_lock_map);
#endif

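/*
 * Example (a sketch, not part of this file): these lockdep maps are
 * acquired and released by the rcu_read_lock() family via small helpers
 * in include/linux/rcupdate.h, along the lines of:
 *
 *	static inline void rcu_lock_acquire(struct lockdep_map *map)
 *	{
 *		lock_acquire(map, 0, 0, 2, 1, NULL, _THIS_IP_);
 *	}
 *
 * so that lockdep can flag RCU usage errors such as an rcu_dereference()
 * made outside of any read-side critical section.
 */
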
#ifdef CONFIG_DEBUG_LOCK_ALLOC

int debug_lockdep_rcu_enabled(void)
{
	return rcu_scheduler_active && debug_locks &&
	       current->lockdep_recursion == 0;
}
EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);

/**
 * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
 *
 * Check for bottom half being disabled, which covers both the
 * CONFIG_PROVE_RCU and the !CONFIG_PROVE_RCU cases.  Note that if someone
 * uses rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled)
 * will show the situation.  This is useful for debug checks in functions
 * that require that they be called within an RCU read-side critical
 * section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
 *
 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_bh_held(void)
{
	if (!debug_lockdep_rcu_enabled())
		return 1;
	if (rcu_is_cpu_idle())
		return 0;
	if (!rcu_lockdep_current_cpu_online())
		return 0;
	return in_softirq() || irqs_disabled();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);

#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

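/*
 * Example (a sketch; "gp" and "use_foo()" are hypothetical): readers of an
 * RCU-bh-protected pointer typically rely on rcu_read_lock_bh_held() via
 * the lockdep check buried in rcu_dereference_bh():
 *
 *	rcu_read_lock_bh();
 *	p = rcu_dereference_bh(gp);
 *	if (p)
 *		use_foo(p);
 *	rcu_read_unlock_bh();
 */
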
struct rcu_synchronize {
	struct rcu_head head;
	struct completion completion;
};

/*
 * Awaken the corresponding synchronize_rcu() instance now that a
 * grace period has elapsed.
 */
static void wakeme_after_rcu(struct rcu_head *head)
{
	struct rcu_synchronize *rcu;

	rcu = container_of(head, struct rcu_synchronize, head);
	complete(&rcu->completion);
}

void wait_rcu_gp(call_rcu_func_t crf)
{
	struct rcu_synchronize rcu;

	init_rcu_head_on_stack(&rcu.head);
	init_completion(&rcu.completion);
	/* Will wake me after RCU finished. */
	crf(&rcu.head, wakeme_after_rcu);
	/* Wait for it. */
	wait_for_completion(&rcu.completion);
	destroy_rcu_head_on_stack(&rcu.head);
}
EXPORT_SYMBOL_GPL(wait_rcu_gp);

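/*
 * Example (a sketch): the synchronize_*() primitives are thin wrappers
 * around wait_rcu_gp(), each passing in its flavor's call_rcu() variant,
 * along the lines of:
 *
 *	void synchronize_rcu(void)
 *	{
 *		wait_rcu_gp(call_rcu);
 *	}
 *
 * The caller blocks until a full grace period has elapsed, that is, until
 * wakeme_after_rcu() has run as the callback and fired the completion.
 */
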
#ifdef CONFIG_PROVE_RCU
/*
 * Wrapper function to avoid #include problems.
 */
int rcu_my_thread_group_empty(void)
{
	return thread_group_empty(current);
}
EXPORT_SYMBOL_GPL(rcu_my_thread_group_empty);
#endif /* #ifdef CONFIG_PROVE_RCU */

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
static inline void debug_init_rcu_head(struct rcu_head *head)
{
	debug_object_init(head, &rcuhead_debug_descr);
}

static inline void debug_rcu_head_free(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int rcuhead_fixup_init(void *addr, enum debug_obj_state state)
{
	struct rcu_head *head = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		/*
		 * Ensure that queued callbacks are all executed.
		 * If we detect that we are nested in an RCU read-side critical
		 * section, we should simply fail, otherwise we would deadlock.
		 * In !PREEMPT configurations, there is no way to tell if we are
		 * in an RCU read-side critical section or not, so we never
		 * attempt any fixup and just print a warning.
		 */
#ifndef CONFIG_PREEMPT
		WARN_ON_ONCE(1);
		return 0;
#endif
		if (rcu_preempt_depth() != 0 || preempt_count() != 0 ||
		    irqs_disabled()) {
			WARN_ON_ONCE(1);
			return 0;
		}
		rcu_barrier();
		rcu_barrier_sched();
		rcu_barrier_bh();
		debug_object_init(head, &rcuhead_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 * Activation is performed internally by call_rcu().
 */
static int rcuhead_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct rcu_head *head = addr;

	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup.  We just make sure that it is
		 * tracked in the object tracker.
		 */
		debug_object_init(head, &rcuhead_debug_descr);
		debug_object_activate(head, &rcuhead_debug_descr);
		return 0;

	case ODEBUG_STATE_ACTIVE:
		/*
		 * Ensure that queued callbacks are all executed.
		 * If we detect that we are nested in an RCU read-side critical
		 * section, we should simply fail, otherwise we would deadlock.
		 * In !PREEMPT configurations, there is no way to tell if we are
		 * in an RCU read-side critical section or not, so we never
		 * attempt any fixup and just print a warning.
		 */
#ifndef CONFIG_PREEMPT
		WARN_ON_ONCE(1);
		return 0;
#endif
		if (rcu_preempt_depth() != 0 || preempt_count() != 0 ||
		    irqs_disabled()) {
			WARN_ON_ONCE(1);
			return 0;
		}
		rcu_barrier();
		rcu_barrier_sched();
		rcu_barrier_bh();
		debug_object_activate(head, &rcuhead_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int rcuhead_fixup_free(void *addr, enum debug_obj_state state)
{
	struct rcu_head *head = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		/*
		 * Ensure that queued callbacks are all executed.
		 * If we detect that we are nested in an RCU read-side critical
		 * section, we should simply fail, otherwise we would deadlock.
		 * In !PREEMPT configurations, there is no way to tell if we are
		 * in an RCU read-side critical section or not, so we never
		 * attempt any fixup and just print a warning.
		 */
#ifndef CONFIG_PREEMPT
		WARN_ON_ONCE(1);
		return 0;
#endif
		if (rcu_preempt_depth() != 0 || preempt_count() != 0 ||
		    irqs_disabled()) {
			WARN_ON_ONCE(1);
			return 0;
		}
		rcu_barrier();
		rcu_barrier_sched();
		rcu_barrier_bh();
		debug_object_free(head, &rcuhead_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/**
 * init_rcu_head_on_stack() - initialize on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure to be initialized
 *
 * This function informs debugobjects of a new rcu_head structure that
 * has been allocated as an auto variable on the stack.  This function
 * is not required for rcu_head structures that are statically defined or
 * that are dynamically allocated on the heap.  This function has no
 * effect for !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void init_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_init_on_stack(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(init_rcu_head_on_stack);

/**
 * destroy_rcu_head_on_stack() - destroy on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure that is going out of scope
 *
 * This function informs debugobjects that an on-stack rcu_head structure
 * is about to go out of scope.  As with init_rcu_head_on_stack(), this
 * function is not required for rcu_head structures that are statically
 * defined or that are dynamically allocated on the heap.  Also as with
 * init_rcu_head_on_stack(), this function has no effect for
 * !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void destroy_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack);

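/*
 * Example (a sketch; "my_cb" is hypothetical): code using an on-stack
 * rcu_head pairs these two calls around the head's lifetime, and must
 * wait for the callback before leaving the stack frame, as wait_rcu_gp()
 * does above:
 *
 *	struct rcu_head rh;
 *
 *	init_rcu_head_on_stack(&rh);
 *	call_rcu(&rh, my_cb);
 *	... wait for my_cb() to be invoked ...
 *	destroy_rcu_head_on_stack(&rh);
 */
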
struct debug_obj_descr rcuhead_debug_descr = {
	.name = "rcu_head",
	.fixup_init = rcuhead_fixup_init,
	.fixup_activate = rcuhead_fixup_activate,
	.fixup_free = rcuhead_fixup_free,
};
EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) || defined(CONFIG_RCU_TRACE)
void do_trace_rcu_torture_read(char *rcutorturename, struct rcu_head *rhp)
{
	trace_rcu_torture_read(rcutorturename, rhp);
}
EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp) do { } while (0)
#endif