/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2001
 *
 * Author: Dipankar Sarma <dipankar@in.ibm.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 * http://lse.sourceforge.net/locking/rcupdate.html
 *
 */

#ifndef __LINUX_RCUPDATE_H
#define __LINUX_RCUPDATE_H

#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>
#include <linux/lockdep.h>
#include <linux/completion.h>

/**
 * struct rcu_head - callback structure for use with RCU
 * @next: next update requests in a list
 * @func: actual update function to call after the grace period.
 */
struct rcu_head {
	struct rcu_head *next;
	void (*func)(struct rcu_head *head);
};

#ifdef CONFIG_CLASSIC_RCU
#include <linux/rcuclassic.h>
#else /* #ifdef CONFIG_CLASSIC_RCU */
#include <linux/rcupreempt.h>
#endif /* #else #ifdef CONFIG_CLASSIC_RCU */

#define RCU_HEAD_INIT	{ .next = NULL, .func = NULL }
#define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT
#define INIT_RCU_HEAD(ptr) do { \
	(ptr)->next = NULL; (ptr)->func = NULL; \
} while (0)

/**
 * rcu_read_lock - mark the beginning of an RCU read-side critical section.
 *
 * When synchronize_rcu() is invoked on one CPU while other CPUs
 * are within RCU read-side critical sections, then the
 * synchronize_rcu() is guaranteed to block until after all the other
 * CPUs exit their critical sections. Similarly, if call_rcu() is invoked
 * on one CPU while other CPUs are within RCU read-side critical
 * sections, invocation of the corresponding RCU callback is deferred
 * until after all the other CPUs exit their critical sections.
 *
 * Note, however, that RCU callbacks are permitted to run concurrently
 * with RCU read-side critical sections. One way that this can happen
 * is via the following sequence of events: (1) CPU 0 enters an RCU
 * read-side critical section, (2) CPU 1 invokes call_rcu() to register
 * an RCU callback, (3) CPU 0 exits the RCU read-side critical section,
 * (4) CPU 2 enters an RCU read-side critical section, (5) the RCU
 * callback is invoked. This is legal, because the RCU read-side critical
 * section that was running concurrently with the call_rcu() (and which
 * therefore might be referencing something that the corresponding RCU
 * callback would free up) has completed before the corresponding
 * RCU callback is invoked.
 *
 * RCU read-side critical sections may be nested. Any deferred actions
 * will be deferred until the outermost RCU read-side critical section
 * completes.
 *
 * It is illegal to block while in an RCU read-side critical section.
 */
#define rcu_read_lock() __rcu_read_lock()

/**
 * rcu_read_unlock - marks the end of an RCU read-side critical section.
 *
 * See rcu_read_lock() for more information.
 */

/*
 * So where is rcu_write_lock()? It does not exist, as there is no
 * way for writers to lock out RCU readers. This is a feature, not
 * a bug -- this property is what provides RCU's performance benefits.
 * Of course, writers must coordinate with each other. The normal
 * spinlock primitives work well for this, but any other technique may be
 * used as well. RCU does not care how the writers keep out of each
 * others' way, as long as they do so.
 */
#define rcu_read_unlock() __rcu_read_unlock()

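/*
 * Example (illustrative sketch, not from the original header): a reader
 * accessing an RCU-protected global pointer. The names "struct foo" and
 * "gbl_foo" are hypothetical, and rcu_dereference() is defined later in
 * this file.
 *
 *	struct foo {
 *		int a;
 *		struct rcu_head rcu;
 *	};
 *	struct foo *gbl_foo;
 *
 *	int read_foo_a(void)
 *	{
 *		struct foo *p;
 *		int retval = -1;
 *
 *		rcu_read_lock();
 *		p = rcu_dereference(gbl_foo);
 *		if (p != NULL)
 *			retval = p->a;
 *		rcu_read_unlock();
 *		return retval;
 *	}
 *
 * As long as the accesses to *p occur between rcu_read_lock() and
 * rcu_read_unlock(), the structure cannot be freed out from under the
 * reader by an updater using synchronize_rcu() or call_rcu().
 */
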
/**
 * rcu_read_lock_bh - mark the beginning of a softirq-only RCU critical section
 *
 * This is the equivalent of rcu_read_lock(), but to be used when updates
 * are being done using call_rcu_bh(). Since call_rcu_bh() callbacks
 * consider completion of a softirq handler to be a quiescent state,
 * a process in an RCU read-side critical section must be protected by
 * disabling softirqs. Read-side critical sections in interrupt context
 * can use just rcu_read_lock().
 */
#define rcu_read_lock_bh() __rcu_read_lock_bh()

/*
 * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section
 *
 * See rcu_read_lock_bh() for more information.
 */
#define rcu_read_unlock_bh() __rcu_read_unlock_bh()

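/*
 * Example (illustrative sketch, not from the original header): a
 * process-context reader protecting itself against call_rcu_bh() grace
 * periods. The names "struct bh_data" and "bh_ptr" are hypothetical.
 *
 *	struct bh_data {
 *		int value;
 *		struct rcu_head rcu;
 *	};
 *	struct bh_data *bh_ptr;
 *
 *	int read_bh_value(void)
 *	{
 *		struct bh_data *p;
 *		int val = -1;
 *
 *		rcu_read_lock_bh();
 *		p = rcu_dereference(bh_ptr);
 *		if (p != NULL)
 *			val = p->value;
 *		rcu_read_unlock_bh();
 *		return val;
 *	}
 *
 * Because rcu_read_lock_bh() disables softirqs, a call_rcu_bh() grace
 * period cannot complete while read_bh_value() is still using *p.
 */
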
/**
 * rcu_read_lock_sched - mark the beginning of an RCU-classic critical section
 *
 * Should be used with either
 * - synchronize_sched()
 * or
 * - call_rcu_sched() and rcu_barrier_sched()
 * on the write-side to ensure proper synchronization.
 */
#define rcu_read_lock_sched() preempt_disable()

/*
 * rcu_read_unlock_sched - marks the end of an RCU-classic critical section
 *
 * See rcu_read_lock_sched for more information.
 */
#define rcu_read_unlock_sched() preempt_enable()

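/*
 * Example (illustrative sketch, not from the original header): pairing
 * rcu_read_lock_sched() readers with a synchronize_sched() updater. The
 * names "struct sched_data" and "sched_ptr" are hypothetical, and
 * concurrent updaters would additionally need to serialize with each
 * other, for example with a spinlock.
 *
 *	struct sched_data {
 *		int val;
 *	};
 *	struct sched_data *sched_ptr;
 *
 *	int sched_reader(void)
 *	{
 *		struct sched_data *p;
 *		int val = -1;
 *
 *		rcu_read_lock_sched();
 *		p = rcu_dereference(sched_ptr);
 *		if (p != NULL)
 *			val = p->val;
 *		rcu_read_unlock_sched();
 *		return val;
 *	}
 *
 *	void sched_updater(struct sched_data *new)
 *	{
 *		struct sched_data *old = sched_ptr;
 *
 *		rcu_assign_pointer(sched_ptr, new);
 *		synchronize_sched();
 *		kfree(old);
 *	}
 *
 * synchronize_sched() returns only after every preempt_disable() region
 * (and thus every rcu_read_lock_sched() reader) in flight at the time of
 * the call has completed, so freeing "old" is then safe.
 */
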

/**
 * rcu_dereference - fetch an RCU-protected pointer in an
 * RCU read-side critical section. This pointer may later
 * be safely dereferenced.
 *
 * Inserts memory barriers on architectures that require them
 * (currently only the Alpha), and, more importantly, documents
 * exactly which pointers are protected by RCU.
 */

#define rcu_dereference(p)	({ \
				typeof(p) _________p1 = ACCESS_ONCE(p); \
				smp_read_barrier_depends(); \
				(_________p1); \
				})

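/*
 * Illustrative note (not from the original header): given a hypothetical
 * pointer "gp" that updaters publish with rcu_assign_pointer(), fetching
 * it with a plain load is broken on DEC Alpha and invites the compiler
 * to refetch or speculate the value everywhere else:
 *
 *	p = gp;
 *	x = p->field;
 *
 * whereas the equivalent using rcu_dereference() is safe on all
 * architectures:
 *
 *	p = rcu_dereference(gp);
 *	x = p->field;
 *
 * The ACCESS_ONCE() forces a single, non-refetched load of gp, and
 * smp_read_barrier_depends() orders the dependent load of p->field
 * after it.
 */
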
/**
 * rcu_assign_pointer - assign (publicize) a pointer to a newly
 * initialized structure that will be dereferenced by RCU read-side
 * critical sections. Returns the value assigned.
 *
 * Inserts memory barriers on architectures that require them
 * (pretty much all of them other than x86), and also prevents
 * the compiler from reordering the code that initializes the
 * structure after the pointer assignment. More importantly, this
 * call documents which pointers will be dereferenced by RCU read-side
 * code.
 */

#define rcu_assign_pointer(p, v) \
	({ \
		if (!__builtin_constant_p(v) || \
		    ((v) != NULL)) \
			smp_wmb(); \
		(p) = (v); \
	})

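/*
 * Example (illustrative sketch, not from the original header): an updater
 * initializing a structure and then publishing it. "struct foo" and
 * "gbl_foo" are the hypothetical names from the earlier reader sketch,
 * "foo_lock" is a hypothetical spinlock serializing updaters, and error
 * handling is omitted.
 *
 *	void publish_foo(int a)
 *	{
 *		struct foo *new_fp;
 *		struct foo *old_fp;
 *
 *		new_fp = kmalloc(sizeof(*new_fp), GFP_KERNEL);
 *		new_fp->a = a;
 *
 *		spin_lock(&foo_lock);
 *		old_fp = gbl_foo;
 *		rcu_assign_pointer(gbl_foo, new_fp);
 *		spin_unlock(&foo_lock);
 *
 *		synchronize_rcu();
 *		kfree(old_fp);
 *	}
 *
 * The smp_wmb() in rcu_assign_pointer() guarantees that a reader doing
 * rcu_dereference(gbl_foo) never sees a pointer to the new structure
 * before the stores that initialized it. The spinlock only serializes
 * updaters; readers are never excluded.
 */
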
/* Infrastructure to implement the synchronize_() primitives. */

struct rcu_synchronize {
	struct rcu_head head;
	struct completion completion;
};

extern void wakeme_after_rcu(struct rcu_head *head);

#define synchronize_rcu_xxx(name, func) \
void name(void) \
{ \
	struct rcu_synchronize rcu; \
	\
	init_completion(&rcu.completion); \
	/* Will wake me after RCU finished. */ \
	func(&rcu.head, wakeme_after_rcu); \
	/* Wait for it. */ \
	wait_for_completion(&rcu.completion); \
}

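/*
 * Illustrative note (not from the original header): the macro above
 * generates a function that posts an RCU callback and then blocks until
 * that callback runs, which by definition is after a full grace period.
 * For example, a synchronize-style primitive can be generated from a
 * call_rcu()-style primitive (the name "my_synchronize_rcu" is made up):
 *
 *	synchronize_rcu_xxx(my_synchronize_rcu, call_rcu)
 *
 * which expands to roughly:
 *
 *	void my_synchronize_rcu(void)
 *	{
 *		struct rcu_synchronize rcu;
 *
 *		init_completion(&rcu.completion);
 *		call_rcu(&rcu.head, wakeme_after_rcu);
 *		wait_for_completion(&rcu.completion);
 *	}
 */
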
/**
 * synchronize_sched - block until all CPUs have exited any non-preemptive
 * kernel code sequences.
 *
 * This means that all preempt_disable code sequences, including NMI and
 * hardware-interrupt handlers, in progress on entry will have completed
 * before this primitive returns. However, this does not guarantee that
 * softirq handlers will have completed, since in some kernels, these
 * handlers can run in process context, and can block.
 *
 * This primitive provides the guarantees made by the (now removed)
 * synchronize_kernel() API. In contrast, synchronize_rcu() only
 * guarantees that rcu_read_lock() sections will have completed.
 * In "classic RCU", these two guarantees happen to be one and
 * the same, but can differ in realtime RCU implementations.
 */
#define synchronize_sched() __synchronize_sched()

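/*
 * Example (illustrative sketch, not from the original header): waiting
 * for non-preemptive code, such as a hardware-interrupt handler, to stop
 * using a function pointer before tearing it down. "my_handler_fn" is a
 * hypothetical pointer that the interrupt handler checks for NULL before
 * invoking.
 *
 *	void (*my_handler_fn)(void);
 *
 *	void retire_handler(void)
 *	{
 *		my_handler_fn = NULL;
 *		synchronize_sched();
 *	}
 *
 * After synchronize_sched() returns, no interrupt handler or other
 * preempt_disable() region can still be executing the old handler, so it
 * may then be freed or unloaded.
 */
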
/**
 * call_rcu - Queue an RCU callback for invocation after a grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 *
 * The update function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 */
extern void call_rcu(struct rcu_head *head,
		     void (*func)(struct rcu_head *head));

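/*
 * Example (illustrative sketch, not from the original header): deferring
 * the free of a structure until a grace period has elapsed, without
 * blocking the updater. This reuses the hypothetical "struct foo" from
 * the earlier sketches, which embeds a struct rcu_head named "rcu".
 *
 *	static void foo_reclaim(struct rcu_head *rp)
 *	{
 *		struct foo *fp = container_of(rp, struct foo, rcu);
 *
 *		kfree(fp);
 *	}
 *
 *	void retire_foo(struct foo *old_fp)
 *	{
 *		call_rcu(&old_fp->rcu, foo_reclaim);
 *	}
 *
 * Unlike synchronize_rcu(), call_rcu() returns immediately; foo_reclaim()
 * runs later, once all readers that might still hold a reference to
 * *old_fp have left their read-side critical sections.
 */
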
/**
 * call_rcu_bh - Queue an RCU callback for invocation after a quicker grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 *
 * The update function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. call_rcu_bh() assumes
 * that the read-side critical sections end on completion of a softirq
 * handler. This means that read-side critical sections in process
 * context must not be interrupted by softirqs. This interface is to be
 * used when most of the read-side critical sections are in softirq context.
 * RCU read-side critical sections are delimited by:
 *  - rcu_read_lock() and rcu_read_unlock(), if in interrupt context.
 *  OR
 *  - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
 *  These may be nested.
 */
extern void call_rcu_bh(struct rcu_head *head,
			void (*func)(struct rcu_head *head));

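/*
 * Example (illustrative sketch, not from the original header): the update
 * side matching the rcu_read_lock_bh() reader sketched earlier. The names
 * "struct bh_data" and "bh_ptr" are the hypothetical ones from that
 * sketch, and "bh_lock" is a hypothetical spinlock serializing updaters.
 *
 *	static void bh_data_reclaim(struct rcu_head *rp)
 *	{
 *		kfree(container_of(rp, struct bh_data, rcu));
 *	}
 *
 *	void replace_bh_data(struct bh_data *new)
 *	{
 *		struct bh_data *old;
 *
 *		spin_lock(&bh_lock);
 *		old = bh_ptr;
 *		rcu_assign_pointer(bh_ptr, new);
 *		spin_unlock(&bh_lock);
 *		if (old != NULL)
 *			call_rcu_bh(&old->rcu, bh_data_reclaim);
 *	}
 */
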
/* Exported common interfaces */
extern void synchronize_rcu(void);
extern void rcu_barrier(void);
extern void rcu_barrier_bh(void);
extern void rcu_barrier_sched(void);

/* Internal to kernel */
extern void rcu_init(void);
extern int rcu_needs_cpu(int cpu);

#endif /* __LINUX_RCUPDATE_H */