/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2006
 * Copyright (C) Fujitsu, 2012
 *
 * Author: Paul McKenney <paulmck@us.ibm.com>
 *         Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *         Documentation/RCU/ *.txt
 *
 */

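/*
 * A minimal sketch of the life cycle of an SRCU domain as provided by
 * this file.  Each srcu_struct is an independent domain of protection,
 * so a grace period in one domain is never delayed by readers of
 * another.  The names my_srcu, my_init(), and my_exit() below are
 * hypothetical, for illustration only:
 *
 *      static struct srcu_struct my_srcu;
 *
 *      int __init my_init(void)
 *      {
 *              return init_srcu_struct(&my_srcu);
 *      }
 *
 *      void my_exit(void)
 *      {
 *              cleanup_srcu_struct(&my_srcu);
 *      }
 *
 * cleanup_srcu_struct() must not be called until all readers of the
 * domain have finished and all of its call_srcu() callbacks have run.
 */
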
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/srcu.h>

#include <trace/events/rcu.h>

#include "rcu.h"

/*
 * Initialize an rcu_batch structure to empty.
 */
static inline void rcu_batch_init(struct rcu_batch *b)
{
        b->head = NULL;
        b->tail = &b->head;
}

/*
 * Enqueue a callback onto the tail of the specified rcu_batch structure.
 */
static inline void rcu_batch_queue(struct rcu_batch *b, struct rcu_head *head)
{
        *b->tail = head;
        b->tail = &head->next;
}

/*
 * Is the specified rcu_batch structure empty?
 */
static inline bool rcu_batch_empty(struct rcu_batch *b)
{
        return b->tail == &b->head;
}

/*
 * Remove the callback at the head of the specified rcu_batch structure
 * and return a pointer to it, or return NULL if the structure is empty.
 */
static inline struct rcu_head *rcu_batch_dequeue(struct rcu_batch *b)
{
        struct rcu_head *head;

        if (rcu_batch_empty(b))
                return NULL;

        head = b->head;
        b->head = head->next;
        if (b->tail == &head->next)
                rcu_batch_init(b);

        return head;
}

/*
 * Move all callbacks from the rcu_batch structure specified by "from" to
 * the structure specified by "to".
 */
static inline void rcu_batch_move(struct rcu_batch *to, struct rcu_batch *from)
{
        if (!rcu_batch_empty(from)) {
                *to->tail = from->head;
                to->tail = from->tail;
                rcu_batch_init(from);
        }
}
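
/*
 * Illustration of the tail-pointer convention used by the rcu_batch
 * helpers above, with hypothetical callbacks a1 and a2: an empty batch
 * has ->tail pointing at its own ->head, so enqueueing never needs an
 * empty-list special case.
 *
 *      rcu_batch_init(&b);             b.head == NULL,  b.tail == &b.head
 *      rcu_batch_queue(&b, &a1);       b.head == &a1,   b.tail == &a1.next
 *      rcu_batch_queue(&b, &a2);       a1.next == &a2,  b.tail == &a2.next
 *
 * rcu_batch_dequeue(&b) then returns &a1, a second dequeue returns &a2
 * and re-initializes the batch to empty, and rcu_batch_move() splices an
 * entire batch in O(1) by appending "from"'s head at "to"'s tail.
 */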

static int init_srcu_struct_fields(struct srcu_struct *sp)
{
        sp->completed = 0;
        spin_lock_init(&sp->queue_lock);
        sp->running = false;
        rcu_batch_init(&sp->batch_queue);
        rcu_batch_init(&sp->batch_check0);
        rcu_batch_init(&sp->batch_check1);
        rcu_batch_init(&sp->batch_done);
        INIT_DELAYED_WORK(&sp->work, process_srcu);
        sp->per_cpu_ref = alloc_percpu(struct srcu_struct_array);
        return sp->per_cpu_ref ? 0 : -ENOMEM;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *sp, const char *name,
                       struct lock_class_key *key)
{
        /* Don't re-initialize a lock while it is held. */
        debug_check_no_locks_freed((void *)sp, sizeof(*sp));
        lockdep_init_map(&sp->dep_map, name, key, 0);
        return init_srcu_struct_fields(sp);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * init_srcu_struct - initialize a sleep-RCU structure
 * @sp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *sp)
{
        return init_srcu_struct_fields(sp);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/*
 * Returns approximate total of the readers' ->seq[] values for the
 * rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_seq_idx(struct srcu_struct *sp, int idx)
{
        int cpu;
        unsigned long sum = 0;
        unsigned long t;

        for_each_possible_cpu(cpu) {
                t = ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->seq[idx]);
                sum += t;
        }
        return sum;
}

/*
 * Returns approximate number of readers active on the specified rank
 * of the per-CPU ->c[] counters.
 */
static unsigned long srcu_readers_active_idx(struct srcu_struct *sp, int idx)
{
        int cpu;
        unsigned long sum = 0;
        unsigned long t;

        for_each_possible_cpu(cpu) {
                t = ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[idx]);
                sum += t;
        }
        return sum;
}

/*
 * Return true if the number of pre-existing readers is determined to
 * be stably zero.  An example unstable zero can occur if the call
 * to srcu_readers_active_idx() misses an __srcu_read_lock() increment,
 * but due to task migration, sees the corresponding __srcu_read_unlock()
 * decrement.  This can happen because srcu_readers_active_idx() takes
 * time to sum the array, and might in fact be interrupted or preempted
 * partway through the summation.
 */
static bool srcu_readers_active_idx_check(struct srcu_struct *sp, int idx)
{
        unsigned long seq;

        seq = srcu_readers_seq_idx(sp, idx);

        /*
         * The following smp_mb() A pairs with the smp_mb() B located in
         * __srcu_read_lock().  This pairing ensures that if an
         * __srcu_read_lock() increments its counter after the summation
         * in srcu_readers_active_idx(), then the corresponding SRCU read-side
         * critical section will see any changes made prior to the start
         * of the current SRCU grace period.
         *
         * Also, if the above call to srcu_readers_seq_idx() saw the
         * increment of ->seq[], then the call to srcu_readers_active_idx()
         * must see the increment of ->c[].
         */
        smp_mb(); /* A */

        /*
         * Note that srcu_readers_active_idx() can incorrectly return
         * zero even though there is a pre-existing reader throughout.
         * To see this, suppose that task A is in a very long SRCU
         * read-side critical section that started on CPU 0, and that
         * no other reader exists, so that the sum of the counters
         * is equal to one.  Then suppose that task B starts executing
         * srcu_readers_active_idx(), summing up to CPU 1, and then that
         * task C starts reading on CPU 0, so that its increment is not
         * summed, but finishes reading on CPU 2, so that its decrement
         * -is- summed.  Then when task B completes its sum, it will
         * incorrectly get zero, despite the fact that task A has been
         * in its SRCU read-side critical section the whole time.
         *
         * We therefore do a validation step should srcu_readers_active_idx()
         * return zero.
         */
        if (srcu_readers_active_idx(sp, idx) != 0)
                return false;

        /*
         * The remainder of this function is the validation step.
         * The following smp_mb() D pairs with the smp_mb() C in
         * __srcu_read_unlock().  If the __srcu_read_unlock() was seen
         * by srcu_readers_active_idx() above, then any destructive
         * operation performed after the grace period will happen after
         * the corresponding SRCU read-side critical section.
         *
         * Note that there can be at most NR_CPUS worth of readers using
         * the old index, which is not enough to overflow even a 32-bit
         * integer.  (Yes, this does mean that systems having more than
         * a billion or so CPUs need to be 64-bit systems.)  Therefore,
         * the sum of the ->seq[] counters cannot possibly overflow.
         * Therefore, the only way that the return values of the two
         * calls to srcu_readers_seq_idx() can be equal is if there were
         * no increments of the corresponding rank of ->seq[] counts
         * in the interim.  But the missed-increment scenario laid out
         * above includes an increment of the ->seq[] counter by
         * the corresponding __srcu_read_lock().  Therefore, if this
         * scenario occurs, the return values from the two calls to
         * srcu_readers_seq_idx() will differ, and thus the validation
         * step below suffices.
         */
        smp_mb(); /* D */

        return srcu_readers_seq_idx(sp, idx) == seq;
}

/**
 * srcu_readers_active - returns approximate number of readers.
 * @sp: which srcu_struct to count active readers (holding srcu_read_lock).
 *
 * Note that this is not an atomic primitive, and can therefore suffer
 * severe errors when invoked on an active srcu_struct.  That said, it
 * can be useful as an error check at cleanup time.
 */
static int srcu_readers_active(struct srcu_struct *sp)
{
        int cpu;
        unsigned long sum = 0;

        for_each_possible_cpu(cpu) {
                sum += ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[0]);
                sum += ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[1]);
        }
        return sum;
}

/**
 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * @sp: structure to clean up.
 *
 * Must invoke this after you are finished using a given srcu_struct that
 * was initialized via init_srcu_struct(), else you leak memory.
 */
void cleanup_srcu_struct(struct srcu_struct *sp)
{
        if (WARN_ON(srcu_readers_active(sp)))
                return; /* Leakage unless caller handles error. */
        free_percpu(sp->per_cpu_ref);
        sp->per_cpu_ref = NULL;
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);

/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct.  Must be called from process context.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */
int __srcu_read_lock(struct srcu_struct *sp)
{
        int idx;

        idx = ACCESS_ONCE(sp->completed) & 0x1;
        preempt_disable();
        ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
        smp_mb(); /* B */  /* Avoid leaking the critical section. */
        ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
        preempt_enable();
        return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock);

/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct.  Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 * Must be called from process context.
 */
void __srcu_read_unlock(struct srcu_struct *sp, int idx)
{
        smp_mb(); /* C */  /* Avoid leaking the critical section. */
        this_cpu_dec(sp->per_cpu_ref->c[idx]);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);
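
/*
 * Sketch of the read-side pattern built on the two functions above.
 * Callers normally use the srcu_read_lock()/srcu_read_unlock() wrappers
 * from include/linux/srcu.h rather than calling the __srcu variants
 * directly.  Unlike rcu_read_lock(), an SRCU read-side critical section
 * may block, which is why the index returned by the lock operation must
 * be handed to the matching unlock.  The names my_srcu, my_data_p,
 * struct my_data, and do_something_sleepy() are hypothetical:
 *
 *      int idx;
 *      struct my_data *p;
 *
 *      idx = srcu_read_lock(&my_srcu);
 *      p = srcu_dereference(my_data_p, &my_srcu);
 *      if (p)
 *              do_something_sleepy(p);         (sleeping is legal here)
 *      srcu_read_unlock(&my_srcu, idx);
 */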

/*
 * We use an adaptive strategy for synchronize_srcu() and especially for
 * synchronize_srcu_expedited().  We spin for a fixed time period
 * (defined below) to allow SRCU readers to exit their read-side critical
 * sections.  If there are still some readers after 10 microseconds,
 * we repeatedly block for 1-millisecond time periods.  This approach
 * has done well in testing, so there is no need for a config parameter.
 */
#define SRCU_RETRY_CHECK_DELAY          5
#define SYNCHRONIZE_SRCU_TRYCOUNT       2
#define SYNCHRONIZE_SRCU_EXP_TRYCOUNT   12

/*
 * Wait until all pre-existing readers complete.  Such readers will
 * have used the index specified by "idx".  The caller should ensure
 * that ->completed is not changed while this runs, and that
 * idx == (->completed & 1) ^ 1.
 */
static bool try_check_zero(struct srcu_struct *sp, int idx, int trycount)
{
        for (;;) {
                if (srcu_readers_active_idx_check(sp, idx))
                        return true;
                if (--trycount <= 0)
                        return false;
                udelay(SRCU_RETRY_CHECK_DELAY);
        }
}

/*
 * Increment the ->completed counter so that future SRCU readers will
 * use the other rank of the ->c[] and ->seq[] arrays.  This allows
 * us to wait for pre-existing readers in a starvation-free manner.
 */
static void srcu_flip(struct srcu_struct *sp)
{
        sp->completed++;
}

/*
 * Enqueue an SRCU callback on the specified srcu_struct structure,
 * initiating grace-period processing if it is not already running.
 */
void call_srcu(struct srcu_struct *sp, struct rcu_head *head,
               void (*func)(struct rcu_head *head))
{
        unsigned long flags;

        head->next = NULL;
        head->func = func;
        spin_lock_irqsave(&sp->queue_lock, flags);
        rcu_batch_queue(&sp->batch_queue, head);
        if (!sp->running) {
                sp->running = true;
                schedule_delayed_work(&sp->work, 0);
        }
        spin_unlock_irqrestore(&sp->queue_lock, flags);
}
EXPORT_SYMBOL_GPL(call_srcu);
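
/*
 * Sketch of asynchronous use of call_srcu(), assuming a hypothetical
 * structure my_obj that embeds an rcu_head (the names my_obj, my_obj_p,
 * my_srcu, my_free_cb(), and my_retire() are illustrative only).  The
 * callback is invoked from workqueue context by srcu_invoke_callbacks()
 * below once a grace period for my_srcu has elapsed, so the updater
 * never blocks:
 *
 *      struct my_obj {
 *              struct rcu_head rh;
 *              int payload;
 *      };
 *
 *      static void my_free_cb(struct rcu_head *rh)
 *      {
 *              kfree(container_of(rh, struct my_obj, rh));
 *      }
 *
 *      void my_retire(struct my_obj *old)
 *      {
 *              rcu_assign_pointer(my_obj_p, NULL);
 *              call_srcu(&my_srcu, &old->rh, my_free_cb);
 *      }
 */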

struct rcu_synchronize {
        struct rcu_head head;
        struct completion completion;
};

/*
 * Awaken the corresponding synchronize_srcu() instance now that a
 * grace period has elapsed.
 */
static void wakeme_after_rcu(struct rcu_head *head)
{
        struct rcu_synchronize *rcu;

        rcu = container_of(head, struct rcu_synchronize, head);
        complete(&rcu->completion);
}

static void srcu_advance_batches(struct srcu_struct *sp, int trycount);
static void srcu_reschedule(struct srcu_struct *sp);

/*
 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
 */
static void __synchronize_srcu(struct srcu_struct *sp, int trycount)
{
        struct rcu_synchronize rcu;
        struct rcu_head *head = &rcu.head;
        bool done = false;

        rcu_lockdep_assert(!lock_is_held(&sp->dep_map) &&
                           !lock_is_held(&rcu_bh_lock_map) &&
                           !lock_is_held(&rcu_lock_map) &&
                           !lock_is_held(&rcu_sched_lock_map),
                           "Illegal synchronize_srcu() in same-type SRCU (or RCU) read-side critical section");

        might_sleep();
        init_completion(&rcu.completion);

        head->next = NULL;
        head->func = wakeme_after_rcu;
        spin_lock_irq(&sp->queue_lock);
        if (!sp->running) {
                /* steal the processing owner */
                sp->running = true;
                rcu_batch_queue(&sp->batch_check0, head);
                spin_unlock_irq(&sp->queue_lock);

                srcu_advance_batches(sp, trycount);
                if (!rcu_batch_empty(&sp->batch_done)) {
                        BUG_ON(sp->batch_done.head != head);
                        rcu_batch_dequeue(&sp->batch_done);
                        done = true;
                }
                /* give the processing owner to work_struct */
                srcu_reschedule(sp);
        } else {
                rcu_batch_queue(&sp->batch_queue, head);
                spin_unlock_irq(&sp->queue_lock);
        }

        if (!done)
                wait_for_completion(&rcu.completion);
}

/**
 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
 * @sp: srcu_struct with which to synchronize.
 *
 * Wait for the counts of both index ranks to drain to zero.  To avoid
 * starving synchronize_srcu(), it first waits for the count of
 * index ((->completed & 1) ^ 1) to drain to zero, then flips ->completed
 * and waits for the count of the other index to drain.
 *
 * Can block; must be called from process context.
 *
 * Note that it is illegal to call synchronize_srcu() from the corresponding
 * SRCU read-side critical section; doing so will result in deadlock.
 * However, it is perfectly legal to call synchronize_srcu() on one
 * srcu_struct from some other srcu_struct's read-side critical section.
 */
void synchronize_srcu(struct srcu_struct *sp)
{
        __synchronize_srcu(sp, rcu_expedited
                           ? SYNCHRONIZE_SRCU_EXP_TRYCOUNT
                           : SYNCHRONIZE_SRCU_TRYCOUNT);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);
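
/*
 * The classic synchronous update pattern built on synchronize_srcu(),
 * with hypothetical names my_update(), my_lock, my_srcu, my_data_p, and
 * struct my_data.  Because all readers that might still hold a reference
 * to the old structure are guaranteed to have finished, the final
 * kfree() is safe.  Note that synchronize_srcu() is invoked only after
 * my_lock is dropped, since it may block:
 *
 *      void my_update(struct my_data *newp)
 *      {
 *              struct my_data *old;
 *
 *              spin_lock(&my_lock);
 *              old = rcu_dereference_protected(my_data_p,
 *                                              lockdep_is_held(&my_lock));
 *              rcu_assign_pointer(my_data_p, newp);
 *              spin_unlock(&my_lock);
 *              synchronize_srcu(&my_srcu);
 *              kfree(old);
 *      }
 */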

/**
 * synchronize_srcu_expedited - Brute-force SRCU grace period
 * @sp: srcu_struct with which to synchronize.
 *
 * Wait for an SRCU grace period to elapse, but be more aggressive about
 * spinning rather than blocking when waiting.
 *
 * Note that it is also illegal to call synchronize_srcu_expedited()
 * from the corresponding SRCU read-side critical section;
 * doing so will result in deadlock.  However, it is perfectly legal
 * to call synchronize_srcu_expedited() on one srcu_struct from some
 * other srcu_struct's read-side critical section, as long as
 * the resulting graph of srcu_structs is acyclic.
 */
void synchronize_srcu_expedited(struct srcu_struct *sp)
{
        __synchronize_srcu(sp, SYNCHRONIZE_SRCU_EXP_TRYCOUNT);
}
EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);

/**
 * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
 * @sp: srcu_struct on which to wait for in-flight callbacks.
 */
void srcu_barrier(struct srcu_struct *sp)
{
        synchronize_srcu(sp);
}
EXPORT_SYMBOL_GPL(srcu_barrier);

/**
 * srcu_batches_completed - return batches completed.
 * @sp: srcu_struct on which to report batch completion.
 *
 * Report the number of batches, correlated with, but not necessarily
 * precisely the same as, the number of grace periods that have elapsed.
 */
long srcu_batches_completed(struct srcu_struct *sp)
{
        return sp->completed;
}
EXPORT_SYMBOL_GPL(srcu_batches_completed);

#define SRCU_CALLBACK_BATCH     10
#define SRCU_INTERVAL           1
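
/*
 * Overview of the grace-period machinery implemented by the functions
 * below.  A callback queued by call_srcu() moves through four rcu_batch
 * stages of the srcu_struct:
 *
 *      batch_queue -> batch_check0 -> batch_check1 -> batch_done
 *
 * srcu_collect_new() performs the first move, srcu_advance_batches()
 * performs the middle two (waiting for one rank of reader counters to
 * drain before each move, with a counter flip in between), and
 * srcu_invoke_callbacks() invokes whatever has reached ->batch_done.
 * process_srcu() runs these steps in order from workqueue context, and
 * srcu_reschedule() re-arms the work every SRCU_INTERVAL until all four
 * batches are empty.
 */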

/*
 * Move any new SRCU callbacks to the first stage of the SRCU grace
 * period pipeline.
 */
static void srcu_collect_new(struct srcu_struct *sp)
{
        if (!rcu_batch_empty(&sp->batch_queue)) {
                spin_lock_irq(&sp->queue_lock);
                rcu_batch_move(&sp->batch_check0, &sp->batch_queue);
                spin_unlock_irq(&sp->queue_lock);
        }
}

/*
 * Core SRCU state machine.  Advance callbacks from ->batch_check0 to
 * ->batch_check1 and then to ->batch_done as readers drain.
 */
static void srcu_advance_batches(struct srcu_struct *sp, int trycount)
{
        int idx = 1 ^ (sp->completed & 1);

        /*
         * Because readers might be delayed for an extended period after
         * fetching ->completed for their index, at any point in time there
         * might well be readers using both idx=0 and idx=1.  We therefore
         * need to wait for readers to clear from both index values before
         * invoking a callback.
         */

        if (rcu_batch_empty(&sp->batch_check0) &&
            rcu_batch_empty(&sp->batch_check1))
                return; /* no callbacks need to be advanced */

        if (!try_check_zero(sp, idx, trycount))
                return; /* failed to advance, will try after SRCU_INTERVAL */

        /*
         * The callbacks in ->batch_check1 have already done their first
         * zero check and flip, back when they were enqueued on
         * ->batch_check0 in a previous invocation of srcu_advance_batches().
         * (Presumably try_check_zero() returned false during that
         * invocation, leaving the callbacks stranded on ->batch_check1.)
         * They are therefore ready to invoke, so move them to ->batch_done.
         */
        rcu_batch_move(&sp->batch_done, &sp->batch_check1);

        if (rcu_batch_empty(&sp->batch_check0))
                return; /* no callbacks need to be advanced */
        srcu_flip(sp);

        /*
         * The callbacks in ->batch_check0 just finished their first
         * zero check and flip, so move them to ->batch_check1 for
         * future checking on the other idx.
         */
        rcu_batch_move(&sp->batch_check1, &sp->batch_check0);

        /*
         * SRCU read-side critical sections are normally short, so check
         * at least twice in quick succession after a flip.
         */
        trycount = trycount < 2 ? 2 : trycount;
        if (!try_check_zero(sp, idx^1, trycount))
                return; /* failed to advance, will try after SRCU_INTERVAL */

        /*
         * The callbacks in ->batch_check1 have now waited for all
         * pre-existing readers using both idx values.  They are therefore
         * ready to invoke, so move them to ->batch_done.
         */
        rcu_batch_move(&sp->batch_done, &sp->batch_check1);
}

/*
 * Invoke a limited number of SRCU callbacks that have passed through
 * their grace period.  If there are more to do, SRCU will reschedule
 * the workqueue.
 */
static void srcu_invoke_callbacks(struct srcu_struct *sp)
{
        int i;
        struct rcu_head *head;

        for (i = 0; i < SRCU_CALLBACK_BATCH; i++) {
                head = rcu_batch_dequeue(&sp->batch_done);
                if (!head)
                        break;
                local_bh_disable();
                head->func(head);
                local_bh_enable();
        }
}

/*
 * Finished one round of SRCU grace period.  Start another if there are
 * more SRCU callbacks queued, otherwise put SRCU into not-running state.
 */
static void srcu_reschedule(struct srcu_struct *sp)
{
        bool pending = true;

        if (rcu_batch_empty(&sp->batch_done) &&
            rcu_batch_empty(&sp->batch_check1) &&
            rcu_batch_empty(&sp->batch_check0) &&
            rcu_batch_empty(&sp->batch_queue)) {
                spin_lock_irq(&sp->queue_lock);
                if (rcu_batch_empty(&sp->batch_done) &&
                    rcu_batch_empty(&sp->batch_check1) &&
                    rcu_batch_empty(&sp->batch_check0) &&
                    rcu_batch_empty(&sp->batch_queue)) {
                        sp->running = false;
                        pending = false;
                }
                spin_unlock_irq(&sp->queue_lock);
        }

        if (pending)
                schedule_delayed_work(&sp->work, SRCU_INTERVAL);
}

/*
 * This is the work-queue function that handles SRCU grace periods.
 */
void process_srcu(struct work_struct *work)
{
        struct srcu_struct *sp;

        sp = container_of(work, struct srcu_struct, work.work);

        srcu_collect_new(sp);
        srcu_advance_batches(sp, 1);
        srcu_invoke_callbacks(sp);
        srcu_reschedule(sp);
}
EXPORT_SYMBOL_GPL(process_srcu);