#ifndef __LINUX_SEQLOCK_H
#define __LINUX_SEQLOCK_H
/*
 * Reader/writer consistent mechanism without starving writers. This type of
 * lock is for data where the reader wants a consistent set of information
 * and is willing to retry if the information changes. There are two types
 * of readers:
 * 1. Sequence readers which never block a writer but may have to retry
 *    if a writer is in progress, detected by a change in the sequence number.
 *    Writers do not wait for a sequence reader.
 * 2. Locking readers which will wait if a writer or another locking reader
 *    is in progress. A locking reader in progress will also block a writer
 *    from going forward. Unlike the regular rwlock, the read lock here is
 *    exclusive so that only one locking reader can get it.
 *
 * This is not as cache friendly as brlock. Also, this may not work well
 * for data that contains pointers, because any writer could
 * invalidate a pointer that a reader was following.
 *
 * Expected non-blocking reader usage:
 *	do {
 *		seq = read_seqbegin(&foo);
 *		...
 *	} while (read_seqretry(&foo, seq));
 *
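 * Expected locking reader usage (a sketch; read_seqlock_excl() and
 * read_sequnlock_excl() are defined later in this file):
 *	read_seqlock_excl(&foo);
 *	...
 *	read_sequnlock_excl(&foo);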
 *
 * On non-SMP the spin locks disappear but the writer still needs
 * to increment the sequence variables because an interrupt routine could
 * change the state of the data.
 *
 * Based on x86_64 vsyscall gettimeofday
 * by Keith Owens and Andrea Arcangeli
 */

#include <linux/spinlock.h>
#include <linux/preempt.h>
#include <linux/lockdep.h>
#include <asm/processor.h>

/*
 * Version using sequence counter only.
 * This can be used when code has its own mutex protecting the
 * update, starting before the write_seqcount_begin() and ending
 * after the write_seqcount_end().
 */
typedef struct seqcount {
	unsigned sequence;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} seqcount_t;

static inline void __seqcount_init(seqcount_t *s, const char *name,
				   struct lock_class_key *key)
{
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	lockdep_init_map(&s->dep_map, name, key, 0);
	s->sequence = 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define SEQCOUNT_DEP_MAP_INIT(lockname) \
		.dep_map = { .name = #lockname } \

# define seqcount_init(s)				\
	do {						\
		static struct lock_class_key __key;	\
		__seqcount_init((s), #s, &__key);	\
	} while (0)

static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
{
	seqcount_t *l = (seqcount_t *)s;
	unsigned long flags;

	local_irq_save(flags);
	seqcount_acquire_read(&l->dep_map, 0, 0, _RET_IP_);
	seqcount_release(&l->dep_map, 1, _RET_IP_);
	local_irq_restore(flags);
}

#else
# define SEQCOUNT_DEP_MAP_INIT(lockname)
# define seqcount_init(s) __seqcount_init(s, NULL, NULL)
# define seqcount_lockdep_reader_access(x)
#endif

#define SEQCNT_ZERO(lockname) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(lockname)}

/**
 * __read_seqcount_begin - begin a seq-read critical section (without barrier)
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided before actually loading any of the variables that are to be
 * protected in this critical section.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 */
static inline unsigned __read_seqcount_begin(const seqcount_t *s)
{
	unsigned ret;

repeat:
	ret = READ_ONCE(s->sequence);
	if (unlikely(ret & 1)) {
		cpu_relax();
		goto repeat;
	}
	return ret;
}
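
/*
 * Illustrative sketch of __read_seqcount_begin() with a caller-supplied
 * barrier; this pairing is exactly what raw_read_seqcount_begin() below
 * does internally:
 *
 *	seq = __read_seqcount_begin(&s);
 *	smp_rmb();
 *	... load the protected variables ...
 */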

/**
 * raw_read_seqcount - Read the raw seqcount
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * raw_read_seqcount opens a read critical section of the given
 * seqcount without any lockdep checking and without checking or
 * masking the LSB. Calling code is responsible for handling that.
 */
static inline unsigned raw_read_seqcount(const seqcount_t *s)
{
	unsigned ret = READ_ONCE(s->sequence);
	smp_rmb();
	return ret;
}

/**
 * raw_read_seqcount_begin - start seq-read critical section w/o lockdep
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * raw_read_seqcount_begin opens a read critical section of the given
 * seqcount, but without any lockdep checking. Validity of the critical
 * section is tested by calling read_seqcount_retry().
 */
static inline unsigned raw_read_seqcount_begin(const seqcount_t *s)
{
	unsigned ret = __read_seqcount_begin(s);
	smp_rmb();
	return ret;
}

/**
 * read_seqcount_begin - begin a seq-read critical section
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * read_seqcount_begin opens a read critical section of the given seqcount.
 * Validity of the critical section is tested by calling
 * read_seqcount_retry().
 */
static inline unsigned read_seqcount_begin(const seqcount_t *s)
{
	seqcount_lockdep_reader_access(s);
	return raw_read_seqcount_begin(s);
}

/**
 * raw_seqcount_begin - begin a seq-read critical section
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * raw_seqcount_begin opens a read critical section of the given seqcount.
 * Validity of the critical section is tested by calling
 * read_seqcount_retry().
 *
 * Unlike read_seqcount_begin(), this function will not wait for the count
 * to stabilize. If a writer is active when we begin, we will fail the
 * read_seqcount_retry() instead of stabilizing at the beginning of the
 * critical section.
 */
static inline unsigned raw_seqcount_begin(const seqcount_t *s)
{
	unsigned ret = READ_ONCE(s->sequence);
	smp_rmb();
	return ret & ~1;
}

/**
 * __read_seqcount_retry - end a seq-read critical section (without barrier)
 * @s: pointer to seqcount_t
 * @start: count, from read_seqcount_begin
 * Returns: 1 if retry is required, else 0
 *
 * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided after actually loading any of the variables that were protected
 * in this critical section, before checking for a retry.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 */
static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	return unlikely(s->sequence != start);
}

/**
 * read_seqcount_retry - end a seq-read critical section
 * @s: pointer to seqcount_t
 * @start: count, from read_seqcount_begin
 * Returns: 1 if retry is required, else 0
 *
 * read_seqcount_retry closes a read critical section of the given seqcount.
 * If the critical section was invalid, it must be ignored (and typically
 * retried).
 */
static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	smp_rmb();
	return __read_seqcount_retry(s, start);
}
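
/*
 * Typical seqcount read loop (a sketch, mirroring the seqlock example in
 * the header comment above):
 *
 *	do {
 *		seq = read_seqcount_begin(&s);
 *		... read the protected data ...
 *	} while (read_seqcount_retry(&s, seq));
 */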

static inline void raw_write_seqcount_begin(seqcount_t *s)
{
	s->sequence++;
	smp_wmb();
}

static inline void raw_write_seqcount_end(seqcount_t *s)
{
	smp_wmb();
	s->sequence++;
}

/*
 * raw_write_seqcount_latch - redirect readers to even/odd copy
 * @s: pointer to seqcount_t
 */
static inline void raw_write_seqcount_latch(seqcount_t *s)
{
	smp_wmb();	/* prior stores before incrementing "sequence" */
	s->sequence++;
	smp_wmb();	/* increment "sequence" before following stores */
}
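
/*
 * Illustrative sketch of the latch technique (the 'my_latch' structure with
 * two data copies is an assumption of this example, not part of this API):
 *
 *	struct my_latch {
 *		seqcount_t seq;
 *		struct data data[2];
 *	};
 *
 *	Writer (serialized externally):
 *		raw_write_seqcount_latch(&latch->seq);
 *		modify(&latch->data[0], ...);
 *		raw_write_seqcount_latch(&latch->seq);
 *		modify(&latch->data[1], ...);
 *
 *	Reader (always sees the copy the writer is not touching):
 *		do {
 *			seq = raw_read_seqcount(&latch->seq);
 *			idx = seq & 1;
 *			entry = query(&latch->data[idx], ...);
 *			smp_rmb();
 *		} while (seq != latch->seq.sequence);
 */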

/*
 * Sequence counter only version assumes that callers are using their
 * own mutexing.
 */
static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass)
{
	raw_write_seqcount_begin(s);
	seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
}

static inline void write_seqcount_begin(seqcount_t *s)
{
	write_seqcount_begin_nested(s, 0);
}

static inline void write_seqcount_end(seqcount_t *s)
{
	seqcount_release(&s->dep_map, 1, _RET_IP_);
	raw_write_seqcount_end(s);
}
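
/*
 * Example writer side with caller-provided mutual exclusion (a sketch;
 * 'foo_lock' and 'foo_seq' are hypothetical names):
 *
 *	spin_lock(&foo_lock);
 *	write_seqcount_begin(&foo_seq);
 *	... update the protected data ...
 *	write_seqcount_end(&foo_seq);
 *	spin_unlock(&foo_lock);
 */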

/**
 * write_seqcount_barrier - invalidate in-progress read-side seq operations
 * @s: pointer to seqcount_t
 *
 * After write_seqcount_barrier, no read-side seq operations will complete
 * successfully and see data older than this.
 */
static inline void write_seqcount_barrier(seqcount_t *s)
{
	smp_wmb();
	s->sequence += 2;
}

typedef struct {
	struct seqcount seqcount;
	spinlock_t lock;
} seqlock_t;

/*
 * These macros triggered gcc-3.x compile-time problems. We think these are
 * OK now. Be cautious.
 */
#define __SEQLOCK_UNLOCKED(lockname)			\
	{						\
		.seqcount = SEQCNT_ZERO(lockname),	\
		.lock =	__SPIN_LOCK_UNLOCKED(lockname)	\
	}

#define seqlock_init(x)					\
	do {						\
		seqcount_init(&(x)->seqcount);		\
		spin_lock_init(&(x)->lock);		\
	} while (0)

#define DEFINE_SEQLOCK(x) \
		seqlock_t x = __SEQLOCK_UNLOCKED(x)

/*
 * Read side functions for starting and finalizing a read side section.
 */
static inline unsigned read_seqbegin(const seqlock_t *sl)
{
	return read_seqcount_begin(&sl->seqcount);
}

static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
	return read_seqcount_retry(&sl->seqcount, start);
}

/*
 * Lock out other writers and update the count.
 * Acts like a normal spin_lock/unlock.
 * Don't need preempt_disable() because that is in the spin_lock already.
 */
static inline void write_seqlock(seqlock_t *sl)
{
	spin_lock(&sl->lock);
	write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock(seqlock_t *sl)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock(&sl->lock);
}

static inline void write_seqlock_bh(seqlock_t *sl)
{
	spin_lock_bh(&sl->lock);
	write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock_bh(seqlock_t *sl)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock_bh(&sl->lock);
}

static inline void write_seqlock_irq(seqlock_t *sl)
{
	spin_lock_irq(&sl->lock);
	write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock_irq(seqlock_t *sl)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock_irq(&sl->lock);
}

static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
{
	unsigned long flags;

	spin_lock_irqsave(&sl->lock, flags);
	write_seqcount_begin(&sl->seqcount);
	return flags;
}

#define write_seqlock_irqsave(lock, flags)			\
	do { flags = __write_seqlock_irqsave(lock); } while (0)

static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock_irqrestore(&sl->lock, flags);
}

/*
 * A locking reader exclusively locks out other writers and locking readers,
 * but doesn't update the sequence number. Acts like a normal spin_lock/unlock.
 * Don't need preempt_disable() because that is in the spin_lock already.
 */
static inline void read_seqlock_excl(seqlock_t *sl)
{
	spin_lock(&sl->lock);
}

static inline void read_sequnlock_excl(seqlock_t *sl)
{
	spin_unlock(&sl->lock);
}

/**
 * read_seqbegin_or_lock - begin a sequence number check or locking block
 * @lock: sequence lock
 * @seq : sequence number to be checked
 *
 * First try it once optimistically without taking the lock. If that fails,
 * take the lock. The sequence number is also used as a marker for deciding
 * whether to be a reader (even) or writer (odd).
 * N.B. seq must be initialized to an even number to begin with.
 */
static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
{
	if (!(*seq & 1))	/* Even */
		*seq = read_seqbegin(lock);
	else			/* Odd */
		read_seqlock_excl(lock);
}

static inline int need_seqretry(seqlock_t *lock, int seq)
{
	return !(seq & 1) && read_seqretry(lock, seq);
}

static inline void done_seqretry(seqlock_t *lock, int seq)
{
	if (seq & 1)
		read_sequnlock_excl(lock);
}
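
/*
 * Typical usage of the lockless-then-locking pattern (a sketch modeled on
 * in-tree callers such as fs/dcache.c; seq must start even, and is forced
 * odd to take the locking path on retry):
 *
 *	int seq = 0;
 *again:
 *	read_seqbegin_or_lock(&foo, &seq);
 *	... read the protected data ...
 *	if (need_seqretry(&foo, seq)) {
 *		seq = 1;
 *		goto again;
 *	}
 *	done_seqretry(&foo, seq);
 */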

static inline void read_seqlock_excl_bh(seqlock_t *sl)
{
	spin_lock_bh(&sl->lock);
}

static inline void read_sequnlock_excl_bh(seqlock_t *sl)
{
	spin_unlock_bh(&sl->lock);
}

static inline void read_seqlock_excl_irq(seqlock_t *sl)
{
	spin_lock_irq(&sl->lock);
}

static inline void read_sequnlock_excl_irq(seqlock_t *sl)
{
	spin_unlock_irq(&sl->lock);
}

static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
{
	unsigned long flags;

	spin_lock_irqsave(&sl->lock, flags);
	return flags;
}

#define read_seqlock_excl_irqsave(lock, flags)			\
	do { flags = __read_seqlock_excl_irqsave(lock); } while (0)

static inline void
read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
{
	spin_unlock_irqrestore(&sl->lock, flags);
}

static inline unsigned long
read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq)
{
	unsigned long flags = 0;

	if (!(*seq & 1))	/* Even */
		*seq = read_seqbegin(lock);
	else			/* Odd */
		read_seqlock_excl_irqsave(lock, flags);

	return flags;
}

static inline void
done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags)
{
	if (seq & 1)
		read_sequnlock_excl_irqrestore(lock, flags);
}
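
/*
 * Sketch of the irqsave variant of the pattern above (hypothetical 'foo';
 * extrapolated from the non-irqsave usage, so treat it as illustrative):
 *
 *	unsigned long flags;
 *	int seq = 0;
 *again:
 *	flags = read_seqbegin_or_lock_irqsave(&foo, &seq);
 *	... read the protected data ...
 *	if (need_seqretry(&foo, seq)) {
 *		seq = 1;
 *		goto again;
 *	}
 *	done_seqretry_irqrestore(&foo, seq, flags);
 */
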
#endif /* __LINUX_SEQLOCK_H */