#ifndef __LINUX_SEQLOCK_H
#define __LINUX_SEQLOCK_H
/*
 * Reader/writer consistent mechanism without starving writers. This type of
 * lock is for data where the reader wants a consistent set of information
 * and is willing to retry if the information changes. There are two types
 * of readers:
 * 1. Sequence readers which never block a writer but they may have to retry
 *    if a writer is in progress by detecting change in sequence number.
 *    Writers do not wait for a sequence reader.
 * 2. Locking readers which will wait if a writer or another locking reader
 *    is in progress. A locking reader in progress will also block a writer
 *    from going forward. Unlike the regular rwlock, the read lock here is
 *    exclusive so that only one locking reader can get it.
 *
 * This is not as cache friendly as brlock. Also, this may not work well
 * for data that contains pointers, because any writer could
 * invalidate a pointer that a reader was following.
 *
 * Expected non-blocking reader usage:
 *	do {
 *		seq = read_seqbegin(&foo);
 *		...
 *	} while (read_seqretry(&foo, seq));
 *
 *
 * On non-SMP the spin locks disappear but the writer still needs
 * to increment the sequence variables because an interrupt routine could
 * change the state of the data.
 *
 * Based on x86_64 vsyscall gettimeofday
 * by Keith Owens and Andrea Arcangeli
 */
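
/*
 * A matching writer side for the reader loop above, shown as an
 * illustrative sketch only ("foo", foo_a and foo_b are hypothetical
 * names, not part of this header):
 *
 *	static DEFINE_SEQLOCK(foo);
 *	static u64 foo_a, foo_b;
 *
 *	void foo_update(u64 a, u64 b)
 *	{
 *		write_seqlock(&foo);	<- locks out other writers, bumps count
 *		foo_a = a;
 *		foo_b = b;
 *		write_sequnlock(&foo);	<- bumps count again, releases lock
 *	}
 */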

#include <linux/spinlock.h>
#include <linux/preempt.h>
#include <linux/lockdep.h>
#include <linux/compiler.h>
#include <asm/processor.h>

/*
 * Version using sequence counter only.
 * This can be used when code has its own mutex protecting the
 * updating starting before the write_seqcount_begin() and ending
 * after the write_seqcount_end().
 */
typedef struct seqcount {
	unsigned sequence;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} seqcount_t;

static inline void __seqcount_init(seqcount_t *s, const char *name,
				   struct lock_class_key *key)
{
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	lockdep_init_map(&s->dep_map, name, key, 0);
	s->sequence = 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define SEQCOUNT_DEP_MAP_INIT(lockname) \
		.dep_map = { .name = #lockname } \

# define seqcount_init(s)				\
	do {						\
		static struct lock_class_key __key;	\
		__seqcount_init((s), #s, &__key);	\
	} while (0)

static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
{
	seqcount_t *l = (seqcount_t *)s;
	unsigned long flags;

	local_irq_save(flags);
	seqcount_acquire_read(&l->dep_map, 0, 0, _RET_IP_);
	seqcount_release(&l->dep_map, 1, _RET_IP_);
	local_irq_restore(flags);
}

#else
# define SEQCOUNT_DEP_MAP_INIT(lockname)
# define seqcount_init(s) __seqcount_init(s, NULL, NULL)
# define seqcount_lockdep_reader_access(x)
#endif

#define SEQCNT_ZERO(lockname) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(lockname)}
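
/*
 * Initialization sketches, for illustration ("static_seq", "mystruct"
 * and "my_seq" are hypothetical names):
 *
 *	static seqcount_t static_seq = SEQCNT_ZERO(static_seq);
 *
 *	struct mystruct {
 *		seqcount_t my_seq;
 *	};
 *
 *	void mystruct_setup(struct mystruct *m)
 *	{
 *		seqcount_init(&m->my_seq);
 *	}
 */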

/**
 * __read_seqcount_begin - begin a seq-read critical section (without barrier)
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided before actually loading any of the variables that are to be
 * protected in this critical section.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 */
static inline unsigned __read_seqcount_begin(const seqcount_t *s)
{
	unsigned ret;

repeat:
	ret = READ_ONCE(s->sequence);
	if (unlikely(ret & 1)) {
		cpu_relax();
		goto repeat;
	}
	return ret;
}
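
/*
 * Illustrative sketch of the barrier-less variants, with the caller
 * providing both smp_rmb()s explicitly (the protected loads are
 * hypothetical):
 *
 *	unsigned seq;
 *
 *	do {
 *		seq = __read_seqcount_begin(&s);
 *		smp_rmb();	<- order the loads after the count read
 *		...load the protected data...
 *		smp_rmb();	<- order the loads before the retry check
 *	} while (__read_seqcount_retry(&s, seq));
 */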

/**
 * raw_read_seqcount - Read the raw seqcount
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * raw_read_seqcount opens a read critical section of the given
 * seqcount without any lockdep checking and without checking or
 * masking the LSB. Calling code is responsible for handling that.
 */
static inline unsigned raw_read_seqcount(const seqcount_t *s)
{
	unsigned ret = READ_ONCE(s->sequence);
	smp_rmb();
	return ret;
}

/**
 * raw_read_seqcount_begin - start seq-read critical section w/o lockdep
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * raw_read_seqcount_begin opens a read critical section of the given
 * seqcount, but without any lockdep checking. Validity of the critical
 * section is tested by calling read_seqcount_retry().
 */
static inline unsigned raw_read_seqcount_begin(const seqcount_t *s)
{
	unsigned ret = __read_seqcount_begin(s);
	smp_rmb();
	return ret;
}

/**
 * read_seqcount_begin - begin a seq-read critical section
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * read_seqcount_begin opens a read critical section of the given seqcount.
 * Validity of the critical section is tested by calling
 * read_seqcount_retry().
 */
static inline unsigned read_seqcount_begin(const seqcount_t *s)
{
	seqcount_lockdep_reader_access(s);
	return raw_read_seqcount_begin(s);
}

/**
 * raw_seqcount_begin - begin a seq-read critical section
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * raw_seqcount_begin opens a read critical section of the given seqcount.
 * Validity of the critical section is tested by calling
 * read_seqcount_retry().
 *
 * Unlike read_seqcount_begin(), this function will not wait for the count
 * to stabilize. If a writer is active when we begin, we will fail the
 * read_seqcount_retry() instead of stabilizing at the beginning of the
 * critical section.
 */
static inline unsigned raw_seqcount_begin(const seqcount_t *s)
{
	unsigned ret = READ_ONCE(s->sequence);
	smp_rmb();
	return ret & ~1;
}
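
/*
 * Illustrative sketch of raw_seqcount_begin(): an optimistic lookup
 * that never spins on an active writer. An odd count is masked even,
 * so a concurrent writer simply makes the final read_seqcount_retry()
 * fail and we fall back ("obj", "value" and slow_path_lookup() are
 * hypothetical):
 *
 *	unsigned seq = raw_seqcount_begin(&obj->seq);
 *	u64 v = obj->value;
 *
 *	if (read_seqcount_retry(&obj->seq, seq))
 *		return slow_path_lookup(obj);
 *	return v;
 */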

/**
 * __read_seqcount_retry - end a seq-read critical section (without barrier)
 * @s: pointer to seqcount_t
 * @start: count, from read_seqcount_begin
 * Returns: 1 if retry is required, else 0
 *
 * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided before actually loading any of the variables that are to be
 * protected in this critical section.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 */
static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	return unlikely(s->sequence != start);
}

/**
 * read_seqcount_retry - end a seq-read critical section
 * @s: pointer to seqcount_t
 * @start: count, from read_seqcount_begin
 * Returns: 1 if retry is required, else 0
 *
 * read_seqcount_retry closes a read critical section of the given seqcount.
 * If the critical section was invalid, it must be ignored (and typically
 * retried).
 */
static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	smp_rmb();
	return __read_seqcount_retry(s, start);
}
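
/*
 * The canonical seqcount read side, for illustration (the protected
 * fields "a" and "b" are hypothetical):
 *
 *	unsigned seq;
 *	u64 a, b;
 *
 *	do {
 *		seq = read_seqcount_begin(&obj->seq);
 *		a = obj->a;
 *		b = obj->b;
 *	} while (read_seqcount_retry(&obj->seq, seq));
 */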

static inline void raw_write_seqcount_begin(seqcount_t *s)
{
	s->sequence++;
	smp_wmb();
}

static inline void raw_write_seqcount_end(seqcount_t *s)
{
	smp_wmb();
	s->sequence++;
}
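
/*
 * Raw write-side sketch, without lockdep annotations; serialization
 * against other writers must be provided by the caller (struct
 * cpu_stats and its fields are hypothetical):
 *
 *	struct cpu_stats {
 *		seqcount_t seq;
 *		u64 packets, bytes;
 *	};
 *
 *	void stats_update(struct cpu_stats *st, u64 len)
 *	{
 *		raw_write_seqcount_begin(&st->seq);
 *		st->packets++;
 *		st->bytes += len;
 *		raw_write_seqcount_end(&st->seq);
 *	}
 */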

/**
 * raw_write_seqcount_barrier - do a seq write barrier
 * @s: pointer to seqcount_t
 *
 * This can be used to provide an ordering guarantee instead of the
 * usual consistency guarantee. It is one wmb cheaper, because we can
 * collapse the two back-to-back wmb()s.
 *
 *	seqcount_t seq;
 *	bool X = true, Y = false;
 *
 *	void read(void)
 *	{
 *		bool x, y;
 *
 *		do {
 *			int s = read_seqcount_begin(&seq);
 *
 *			x = X; y = Y;
 *
 *		} while (read_seqcount_retry(&seq, s));
 *
 *		BUG_ON(!x && !y);
 *	}
 *
 *	void write(void)
 *	{
 *		Y = true;
 *
 *		raw_write_seqcount_barrier(&seq);
 *
 *		X = false;
 *	}
 */
static inline void raw_write_seqcount_barrier(seqcount_t *s)
{
	s->sequence++;
	smp_wmb();
	s->sequence++;
}

static inline int raw_read_seqcount_latch(seqcount_t *s)
{
	return lockless_dereference(s->sequence);
}

/**
 * raw_write_seqcount_latch - redirect readers to even/odd copy
 * @s: pointer to seqcount_t
 *
 * The latch technique is a multiversion concurrency control method that allows
 * queries during non-atomic modifications. If you can guarantee queries never
 * interrupt the modification -- e.g. the concurrency is strictly between CPUs
 * -- you most likely do not need this.
 *
 * Where the traditional RCU/lockless data structures rely on atomic
 * modifications to ensure queries observe either the old or the new state the
 * latch allows the same for non-atomic updates. The trade-off is doubling the
 * cost of storage; we have to maintain two copies of the entire data
 * structure.
 *
 * Very simply put: we first modify one copy and then the other. This ensures
 * there is always one copy in a stable state, ready to give us an answer.
 *
 * The basic form is a data structure like:
 *
 *	struct latch_struct {
 *		seqcount_t		seq;
 *		struct data_struct	data[2];
 *	};
 *
 * Where a modification, which is assumed to be externally serialized, does the
 * following:
 *
 *	void latch_modify(struct latch_struct *latch, ...)
 *	{
 *		smp_wmb();	<- Ensure that the last data[1] update is visible
 *		latch->seq++;
 *		smp_wmb();	<- Ensure that the seqcount update is visible
 *
 *		modify(latch->data[0], ...);
 *
 *		smp_wmb();	<- Ensure that the data[0] update is visible
 *		latch->seq++;
 *		smp_wmb();	<- Ensure that the seqcount update is visible
 *
 *		modify(latch->data[1], ...);
 *	}
 *
 * The query will have a form like:
 *
 *	struct entry *latch_query(struct latch_struct *latch, ...)
 *	{
 *		struct entry *entry;
 *		unsigned seq, idx;
 *
 *		do {
 *			seq = lockless_dereference(latch->seq);
 *
 *			idx = seq & 0x01;
 *			entry = data_query(latch->data[idx], ...);
 *
 *			smp_rmb();
 *		} while (seq != latch->seq);
 *
 *		return entry;
 *	}
 *
 * So during the modification, queries are first redirected to data[1]. Then we
 * modify data[0]. When that is complete, we redirect queries back to data[0]
 * and we can modify data[1].
 *
 * NOTE: The non-requirement for atomic modifications does _NOT_ include
 *       the publishing of new entries in the case where data is a dynamic
 *       data structure.
 *
 *       An iteration might start in data[0] and get suspended long enough
 *       to miss an entire modification sequence; once it resumes it might
 *       observe the new entry.
 *
 * NOTE: When data is a dynamic data structure, one should use regular RCU
 *       patterns to manage the lifetimes of the objects within.
 */
static inline void raw_write_seqcount_latch(seqcount_t *s)
{
	smp_wmb();	/* prior stores before incrementing "sequence" */
	s->sequence++;
	smp_wmb();	/* increment "sequence" before following stores */
}

/*
 * Sequence counter only version assumes that callers are using their
 * own mutexing.
 */
static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass)
{
	raw_write_seqcount_begin(s);
	seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
}

static inline void write_seqcount_begin(seqcount_t *s)
{
	write_seqcount_begin_nested(s, 0);
}

static inline void write_seqcount_end(seqcount_t *s)
{
	seqcount_release(&s->dep_map, 1, _RET_IP_);
	raw_write_seqcount_end(s);
}
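
/*
 * Writer sketch for the seqcount-only version; the mutex stands in
 * for whatever serialization the caller already provides (struct foo
 * and its fields are hypothetical):
 *
 *	struct foo {
 *		struct mutex	lock;
 *		seqcount_t	seq;
 *		u64		a, b;
 *	};
 *
 *	void foo_write(struct foo *f, u64 a, u64 b)
 *	{
 *		mutex_lock(&f->lock);
 *		write_seqcount_begin(&f->seq);
 *		f->a = a;
 *		f->b = b;
 *		write_seqcount_end(&f->seq);
 *		mutex_unlock(&f->lock);
 *	}
 */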

/**
 * write_seqcount_invalidate - invalidate in-progress read-side seq operations
 * @s: pointer to seqcount_t
 *
 * After write_seqcount_invalidate, no read-side seq operations will complete
 * successfully and see data older than this.
 */
static inline void write_seqcount_invalidate(seqcount_t *s)
{
	smp_wmb();
	s->sequence += 2;
}
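
/*
 * Illustrative sketch: force any reader that began before a
 * destructive update to retry, without opening a full write section
 * (reuses the hypothetical struct foo above):
 *
 *	void foo_retire(struct foo *f)
 *	{
 *		mutex_lock(&f->lock);
 *		f->a = f->b = 0;
 *		write_seqcount_invalidate(&f->seq);
 *		mutex_unlock(&f->lock);
 *	}
 */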

typedef struct {
	struct seqcount seqcount;
	spinlock_t lock;
} seqlock_t;

/*
 * These macros triggered gcc-3.x compile-time problems. We think these are
 * OK now. Be cautious.
 */
#define __SEQLOCK_UNLOCKED(lockname)			\
	{						\
		.seqcount = SEQCNT_ZERO(lockname),	\
		.lock =	__SPIN_LOCK_UNLOCKED(lockname)	\
	}

#define seqlock_init(x)					\
	do {						\
		seqcount_init(&(x)->seqcount);		\
		spin_lock_init(&(x)->lock);		\
	} while (0)

#define DEFINE_SEQLOCK(x) \
		seqlock_t x = __SEQLOCK_UNLOCKED(x)
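
/*
 * Declaration sketches ("foo" and "bar" are hypothetical names):
 *
 *	static DEFINE_SEQLOCK(foo);
 *
 * or, for a seqlock_t embedded in a dynamically allocated object:
 *
 *	seqlock_init(&bar->lock);
 */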

/*
 * Read side functions for starting and finalizing a read side section.
 */
static inline unsigned read_seqbegin(const seqlock_t *sl)
{
	return read_seqcount_begin(&sl->seqcount);
}

static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
	return read_seqcount_retry(&sl->seqcount, start);
}

/*
 * Lock out other writers and update the count.
 * Acts like a normal spin_lock/unlock.
 * Don't need preempt_disable() because that is in the spin_lock already.
 */
static inline void write_seqlock(seqlock_t *sl)
{
	spin_lock(&sl->lock);
	write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock(seqlock_t *sl)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock(&sl->lock);
}

static inline void write_seqlock_bh(seqlock_t *sl)
{
	spin_lock_bh(&sl->lock);
	write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock_bh(seqlock_t *sl)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock_bh(&sl->lock);
}

static inline void write_seqlock_irq(seqlock_t *sl)
{
	spin_lock_irq(&sl->lock);
	write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock_irq(seqlock_t *sl)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock_irq(&sl->lock);
}

static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
{
	unsigned long flags;

	spin_lock_irqsave(&sl->lock, flags);
	write_seqcount_begin(&sl->seqcount);
	return flags;
}

#define write_seqlock_irqsave(lock, flags)			\
	do { flags = __write_seqlock_irqsave(lock); } while (0)

static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock_irqrestore(&sl->lock, flags);
}
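
/*
 * If the same seqlock is also taken from hardirq context, a
 * process-context writer must disable interrupts so it cannot
 * deadlock against an interrupt on the same CPU; an illustrative
 * sketch ("foo" is hypothetical):
 *
 *	unsigned long flags;
 *
 *	write_seqlock_irqsave(&foo, flags);
 *	...update the protected data...
 *	write_sequnlock_irqrestore(&foo, flags);
 */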

/*
 * A locking reader exclusively locks out other writers and locking readers,
 * but doesn't update the sequence number. Acts like a normal spin_lock/unlock.
 * Don't need preempt_disable() because that is in the spin_lock already.
 */
static inline void read_seqlock_excl(seqlock_t *sl)
{
	spin_lock(&sl->lock);
}

static inline void read_sequnlock_excl(seqlock_t *sl)
{
	spin_unlock(&sl->lock);
}
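
/*
 * Locking-reader sketch: useful when the reader follows pointers and
 * cannot safely retry, at the cost of excluding writers ("foo" and
 * the walked structure are hypothetical):
 *
 *	read_seqlock_excl(&foo);
 *	...walk a structure that must not change underneath us...
 *	read_sequnlock_excl(&foo);
 */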

/**
 * read_seqbegin_or_lock - begin a sequence number check or locking block
 * @lock: sequence lock
 * @seq: sequence number to be checked
 *
 * First try it once optimistically without taking the lock. If that fails,
 * take the lock. The sequence number is also used as a marker for deciding
 * whether to be a reader (even) or writer (odd).
 * N.B. seq must be initialized to an even number to begin with.
 */
static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
{
	if (!(*seq & 1))	/* Even */
		*seq = read_seqbegin(lock);
	else			/* Odd */
		read_seqlock_excl(lock);
}

static inline int need_seqretry(seqlock_t *lock, int seq)
{
	return !(seq & 1) && read_seqretry(lock, seq);
}

static inline void done_seqretry(seqlock_t *lock, int seq)
{
	if (seq & 1)
		read_sequnlock_excl(lock);
}
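
/*
 * The expected calling pattern, sketched for illustration: the first
 * pass runs lockless; if it raced with a writer, seq is forced odd so
 * the second pass takes the exclusive lock ("foo" is hypothetical):
 *
 *	int seq = 0;
 *
 *	do {
 *		read_seqbegin_or_lock(&foo, &seq);
 *		...read the protected data...
 *		if (!need_seqretry(&foo, seq))
 *			break;
 *		seq = 1;	<- take the lock on the next pass
 *	} while (1);
 *	done_seqretry(&foo, seq);
 */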

static inline void read_seqlock_excl_bh(seqlock_t *sl)
{
	spin_lock_bh(&sl->lock);
}

static inline void read_sequnlock_excl_bh(seqlock_t *sl)
{
	spin_unlock_bh(&sl->lock);
}

static inline void read_seqlock_excl_irq(seqlock_t *sl)
{
	spin_lock_irq(&sl->lock);
}

static inline void read_sequnlock_excl_irq(seqlock_t *sl)
{
	spin_unlock_irq(&sl->lock);
}

static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
{
	unsigned long flags;

	spin_lock_irqsave(&sl->lock, flags);
	return flags;
}

#define read_seqlock_excl_irqsave(lock, flags)			\
	do { flags = __read_seqlock_excl_irqsave(lock); } while (0)

static inline void
read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
{
	spin_unlock_irqrestore(&sl->lock, flags);
}
| 578 | |
Rik van Riel | ef8ac06 | 2014-09-12 09:12:14 -0400 | [diff] [blame] | 579 | static inline unsigned long |
| 580 | read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq) |
| 581 | { |
| 582 | unsigned long flags = 0; |
| 583 | |
| 584 | if (!(*seq & 1)) /* Even */ |
| 585 | *seq = read_seqbegin(lock); |
| 586 | else /* Odd */ |
| 587 | read_seqlock_excl_irqsave(lock, flags); |
| 588 | |
| 589 | return flags; |
| 590 | } |
| 591 | |
| 592 | static inline void |
| 593 | done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags) |
| 594 | { |
| 595 | if (seq & 1) |
| 596 | read_sequnlock_excl_irqrestore(lock, flags); |
| 597 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 598 | #endif /* __LINUX_SEQLOCK_H */ |