/*
 * include/asm-sh/spinlock.h
 *
 * Copyright (C) 2002, 2003 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#ifndef __ASM_SH_SPINLOCK_H
#define __ASM_SH_SPINLOCK_H

#include <asm/atomic.h>
#include <asm/bitops.h>	/* test_and_set_bit(), used by __raw_spin_trylock() */

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere.
 */

#define __raw_spin_is_locked(x)		((x)->lock != 0)
#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
#define __raw_spin_unlock_wait(x) \
	do { cpu_relax(); } while (__raw_spin_is_locked(x))
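
/*
 * Note: __raw_spin_unlock_wait() only waits until the lock has been
 * observed free at least once; it makes no attempt to acquire it, so
 * the lock may already be held again by the time the caller proceeds.
 */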

/*
 * Simple spin lock operations. There are two variants, one that clears
 * IRQs on the local processor and one that does not.
 *
 * We make no fairness assumptions. They have a cost.
 */
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	/*
	 * tas.b sets the T bit if the byte at the given address was zero
	 * and unconditionally sets bit 7 of that byte, so the loop below
	 * spins (bf branches while T is clear) until it observes the lock
	 * byte as zero, at which point it has also marked the lock taken.
	 * The address belongs in an input operand; the original spurious
	 * "=r" output is dropped.
	 */
	__asm__ __volatile__ (
		"1:\n\t"
		"tas.b	@%0\n\t"
		"bf/s	1b\n\t"
		" nop\n\t"
		: /* no outputs */
		: "r" (&lock->lock)
		: "t", "memory"
	);
}

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	assert_spin_locked(lock);

	lock->lock = 0;
}

#define __raw_spin_trylock(x) (!test_and_set_bit(0, &(x)->lock))
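
/*
 * Sketch of how these primitives pair up.  Normally they are reached
 * only through the generic spin_lock()/spin_unlock() wrappers, never
 * called directly:
 *
 *	if (__raw_spin_trylock(&l)) {
 *		... critical section ...
 *		__raw_spin_unlock(&l);
 *	}
 */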

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts but no interrupt
 * writers. For those circumstances we can "mix" irq-safe locks - any writer
 * needs to take an irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
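
/*
 * In this implementation the rwlock is built from a spinlock plus a
 * reader count: rw->counter holds the number of active readers (or -1
 * while a writer owns the lock), and rw->lock serializes updates to
 * it.  A writer keeps rw->lock held for its whole critical section,
 * which is what keeps new readers out.
 */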

static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	__raw_spin_lock(&rw->lock);

	atomic_inc(&rw->counter);

	__raw_spin_unlock(&rw->lock);
}

static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	__raw_spin_lock(&rw->lock);

	atomic_dec(&rw->counter);

	__raw_spin_unlock(&rw->lock);
}

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	__raw_spin_lock(&rw->lock);
	/* rw->lock keeps new readers out; wait for active ones to drain */
	while (atomic_read(&rw->counter) > 0)
		cpu_relax();
	atomic_set(&rw->counter, -1);
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	atomic_set(&rw->counter, 0);
	__raw_spin_unlock(&rw->lock);
}
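
/*
 * Usage sketch for the mix described in the NOTE above (again via the
 * generic wrappers): readers in interrupt context use plain
 * read_lock()/read_unlock(), while a process-context writer takes
 * write_lock_irq() so it cannot be interrupted by a reader while it
 * holds the write lock.
 */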

static inline int __raw_read_trylock(raw_rwlock_t *rw)
{
	/*
	 * Must not spin: if the internal spinlock cannot be taken
	 * immediately (e.g. a writer holds it), give up.  The previous
	 * biased-counter version did not match the counter scheme used
	 * by the rest of this file.
	 */
	if (!__raw_spin_trylock(&rw->lock))
		return 0;

	atomic_inc(&rw->counter);
	__raw_spin_unlock(&rw->lock);

	return 1;
}

static inline int __raw_write_trylock(raw_rwlock_t *rw)
{
	if (!__raw_spin_trylock(&rw->lock))
		return 0;

	if (atomic_read(&rw->counter) != 0) {
		/* Active readers; back out rather than spin. */
		__raw_spin_unlock(&rw->lock);
		return 0;
	}

	atomic_set(&rw->counter, -1);
	return 1;
}
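
/*
 * Both trylock variants follow the usual kernel convention: return 1
 * if the lock was acquired, 0 if it was contended.  Neither may spin.
 */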

/*
 * Busy-wait hints used by the generic lock code while spinning on a
 * contended lock.
 */
#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif /* __ASM_SH_SPINLOCK_H */