#ifndef __LINUX_SEQLOCK_H
#define __LINUX_SEQLOCK_H
/*
 * Reader/writer consistent mechanism without starving writers. This type of
 * lock is for data where the reader wants a consistent set of information
 * and is willing to retry if the information changes. There are two types
 * of readers:
 * 1. Sequence readers which never block a writer but may have to retry
 *    if a writer is in progress, by detecting a change in the sequence
 *    number. Writers do not wait for a sequence reader.
 * 2. Locking readers which will wait if a writer or another locking reader
 *    is in progress. A locking reader in progress will also block a writer
 *    from going forward. Unlike the regular rwlock, the read lock here is
 *    exclusive so that only one locking reader can get it.
 *
 * This is not as cache friendly as brlock. Also, this may not work well
 * for data that contains pointers, because any writer could
 * invalidate a pointer that a reader was following.
 *
 * Expected non-blocking reader usage:
 *	do {
 *		seq = read_seqbegin(&foo);
 *		...
 *	} while (read_seqretry(&foo, seq));
 *
 * On non-SMP the spin locks disappear but the writer still needs
 * to increment the sequence variables because an interrupt routine could
 * change the state of the data.
 *
 * Based on x86_64 vsyscall gettimeofday
 * by Keith Owens and Andrea Arcangeli
 */
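
/*
 * The writer side that pairs with the loop above is a plain lock/unlock
 * bracket around the update (a sketch; &foo as in the reader example,
 * using write_seqlock()/write_sequnlock() defined below):
 *
 *	write_seqlock(&foo);
 *	...
 *	write_sequnlock(&foo);
 */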

#include <linux/spinlock.h>
#include <linux/preempt.h>
#include <asm/processor.h>

/*
 * Version using sequence counter only.
 * This can be used when code has its own mutex protecting the
 * update, starting before the write_seqcount_begin() and ending
 * after the write_seqcount_end().
 */
typedef struct seqcount {
	unsigned sequence;
} seqcount_t;

#define SEQCNT_ZERO { 0 }
#define seqcount_init(x)	do { *(x) = (seqcount_t) SEQCNT_ZERO; } while (0)
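
/*
 * Illustrative declarations (hypothetical name, not part of this header):
 *
 *	static seqcount_t foo_seq = SEQCNT_ZERO;
 *
 * or, for run-time initialization:
 *
 *	seqcount_init(&foo_seq);
 */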

/**
 * __read_seqcount_begin - begin a seq-read critical section (without barrier)
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided before actually loading any of the variables that are to be
 * protected in this critical section.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 */
static inline unsigned __read_seqcount_begin(const seqcount_t *s)
{
	unsigned ret;

repeat:
	ret = ACCESS_ONCE(s->sequence);
	if (unlikely(ret & 1)) {
		cpu_relax();
		goto repeat;
	}
	return ret;
}
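
/*
 * Sketch of a caller supplying the required ordering itself (hypothetical
 * foo_seq and foo.x, not part of this header). The explicit smp_rmb()
 * orders the count load before the protected data loads:
 *
 *	seq = __read_seqcount_begin(&foo_seq);
 *	smp_rmb();
 *	x = foo.x;
 */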

/**
 * read_seqcount_begin - begin a seq-read critical section
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * read_seqcount_begin opens a read critical section of the given seqcount.
 * Validity of the critical section is tested by calling read_seqcount_retry().
 */
static inline unsigned read_seqcount_begin(const seqcount_t *s)
{
	unsigned ret = __read_seqcount_begin(s);
	smp_rmb();
	return ret;
}

/**
 * raw_seqcount_begin - begin a seq-read critical section
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * raw_seqcount_begin opens a read critical section of the given seqcount.
 * Validity of the critical section is tested by calling read_seqcount_retry().
 *
 * Unlike read_seqcount_begin(), this function will not wait for the count
 * to stabilize. If a writer is active when we begin, we will fail the
 * read_seqcount_retry() instead of stabilizing at the beginning of the
 * critical section.
 */
static inline unsigned raw_seqcount_begin(const seqcount_t *s)
{
	unsigned ret = ACCESS_ONCE(s->sequence);
	smp_rmb();
	return ret & ~1;
}
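
/*
 * Sketch of the single-attempt-with-fallback pattern this enables
 * (hypothetical foo_seq and slow_path label, not part of this header):
 *
 *	seq = raw_seqcount_begin(&foo_seq);
 *	...
 *	if (read_seqcount_retry(&foo_seq, seq))
 *		goto slow_path;
 */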

/**
 * __read_seqcount_retry - end a seq-read critical section (without barrier)
 * @s: pointer to seqcount_t
 * @start: count, from read_seqcount_begin
 * Returns: 1 if retry is required, else 0
 *
 * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided after loading the variables protected in this critical section
 * and before the retry check.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 */
static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	return unlikely(s->sequence != start);
}

/**
 * read_seqcount_retry - end a seq-read critical section
 * @s: pointer to seqcount_t
 * @start: count, from read_seqcount_begin
 * Returns: 1 if retry is required, else 0
 *
 * read_seqcount_retry closes a read critical section of the given seqcount.
 * If the critical section was invalid, it must be ignored (and typically
 * retried).
 */
static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	smp_rmb();
	return __read_seqcount_retry(s, start);
}

/*
 * Sequence counter only version assumes that callers are using their
 * own mutexing.
 */
static inline void write_seqcount_begin(seqcount_t *s)
{
	s->sequence++;
	smp_wmb();
}

static inline void write_seqcount_end(seqcount_t *s)
{
	smp_wmb();
	s->sequence++;
}
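
/*
 * Minimal sketch of a counter-only writer under its own lock (hypothetical
 * foo_lock, foo_seq and foo fields, not part of this header):
 *
 *	mutex_lock(&foo_lock);
 *	write_seqcount_begin(&foo_seq);
 *	foo.x = new_x;
 *	foo.y = new_y;
 *	write_seqcount_end(&foo_seq);
 *	mutex_unlock(&foo_lock);
 */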

/**
 * write_seqcount_barrier - invalidate in-progress read-side seq operations
 * @s: pointer to seqcount_t
 *
 * After write_seqcount_barrier, no read-side seq operations will complete
 * successfully and see data older than this.
 */
static inline void write_seqcount_barrier(seqcount_t *s)
{
	smp_wmb();
	s->sequence += 2;
}

typedef struct {
	struct seqcount seqcount;
	spinlock_t lock;
} seqlock_t;

/*
 * These macros triggered gcc-3.x compile-time problems. We think these are
 * OK now. Be cautious.
 */
#define __SEQLOCK_UNLOCKED(lockname)			\
	{						\
		.seqcount = SEQCNT_ZERO,		\
		.lock = __SPIN_LOCK_UNLOCKED(lockname)	\
	}

#define seqlock_init(x)					\
	do {						\
		seqcount_init(&(x)->seqcount);		\
		spin_lock_init(&(x)->lock);		\
	} while (0)

#define DEFINE_SEQLOCK(x) \
		seqlock_t x = __SEQLOCK_UNLOCKED(x)
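
/*
 * Illustrative declarations (hypothetical name, not part of this header):
 *
 *	static DEFINE_SEQLOCK(foo);
 *
 * or, for run-time initialization:
 *
 *	seqlock_t foo;
 *	seqlock_init(&foo);
 */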

/*
 * Read side functions for starting and finalizing a read side section.
 */
static inline unsigned read_seqbegin(const seqlock_t *sl)
{
	return read_seqcount_begin(&sl->seqcount);
}

static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
	return read_seqcount_retry(&sl->seqcount, start);
}

/*
 * Lock out other writers and update the count.
 * Acts like a normal spin_lock/unlock.
 * Don't need preempt_disable() because that is in the spin_lock already.
 */
static inline void write_seqlock(seqlock_t *sl)
{
	spin_lock(&sl->lock);
	write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock(seqlock_t *sl)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock(&sl->lock);
}

static inline void write_seqlock_bh(seqlock_t *sl)
{
	spin_lock_bh(&sl->lock);
	write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock_bh(seqlock_t *sl)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock_bh(&sl->lock);
}

static inline void write_seqlock_irq(seqlock_t *sl)
{
	spin_lock_irq(&sl->lock);
	write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock_irq(seqlock_t *sl)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock_irq(&sl->lock);
}

static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
{
	unsigned long flags;

	spin_lock_irqsave(&sl->lock, flags);
	write_seqcount_begin(&sl->seqcount);
	return flags;
}

#define write_seqlock_irqsave(lock, flags)				\
	do { flags = __write_seqlock_irqsave(lock); } while (0)

static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock_irqrestore(&sl->lock, flags);
}
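
/*
 * Illustrative use of the irqsave variant (hypothetical foo seqlock, not
 * part of this header):
 *
 *	unsigned long flags;
 *
 *	write_seqlock_irqsave(&foo, flags);
 *	...
 *	write_sequnlock_irqrestore(&foo, flags);
 */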

/*
 * A locking reader exclusively locks out other writers and locking readers,
 * but doesn't update the sequence number. Acts like a normal spin_lock/unlock.
 * Don't need preempt_disable() because that is in the spin_lock already.
 */
static inline void read_seqlock_excl(seqlock_t *sl)
{
	spin_lock(&sl->lock);
}

static inline void read_sequnlock_excl(seqlock_t *sl)
{
	spin_unlock(&sl->lock);
}
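
/*
 * Sketch of a locking reader (hypothetical foo seqlock, not part of this
 * header): no retry loop is needed because writers are excluded while the
 * lock is held:
 *
 *	read_seqlock_excl(&foo);
 *	...
 *	read_sequnlock_excl(&foo);
 */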

static inline void read_seqlock_excl_bh(seqlock_t *sl)
{
	spin_lock_bh(&sl->lock);
}

static inline void read_sequnlock_excl_bh(seqlock_t *sl)
{
	spin_unlock_bh(&sl->lock);
}

static inline void read_seqlock_excl_irq(seqlock_t *sl)
{
	spin_lock_irq(&sl->lock);
}

static inline void read_sequnlock_excl_irq(seqlock_t *sl)
{
	spin_unlock_irq(&sl->lock);
}

static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
{
	unsigned long flags;

	spin_lock_irqsave(&sl->lock, flags);
	return flags;
}

#define read_seqlock_excl_irqsave(lock, flags)				\
	do { flags = __read_seqlock_excl_irqsave(lock); } while (0)

static inline void
read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
{
	spin_unlock_irqrestore(&sl->lock, flags);
}

#endif /* __LINUX_SEQLOCK_H */