/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/spinlock.h>
#include <linux/module.h>
#include <asm/processor.h>

#include "spinlock_common.h"

/*
 * Read the spinlock value without allocating in our cache and without
 * causing an invalidation to another cpu with a copy of the cacheline.
 * This is important when we are spinning waiting for the lock.
 */
static inline u32 arch_spin_read_noalloc(void *lock)
{
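	/*
	 * cmpxchg() with old == new never changes the stored value, so this
	 * is effectively a plain read of the word; presumably the atomic is
	 * carried out at the cacheline's home rather than pulling the line
	 * into our own cache.
	 */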
	return atomic_cmpxchg((atomic_t *)lock, -1, -1);
}

/*
 * Wait until the high bits (current) match my ticket.
 * If we notice the overflow bit set on entry, we clear it.
 */
void arch_spin_lock_slow(arch_spinlock_t *lock, u32 my_ticket)
{
	if (unlikely(my_ticket & __ARCH_SPIN_NEXT_OVERFLOW)) {
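		/*
		 * The "next" counter presumably wrapped when our ticket was
		 * handed out; atomically clear the overflow flag in the lock
		 * word and drop it from our own ticket so the ticket
		 * comparison below works.
		 */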
		__insn_fetchand4(&lock->lock, ~__ARCH_SPIN_NEXT_OVERFLOW);
		my_ticket &= ~__ARCH_SPIN_NEXT_OVERFLOW;
	}

	for (;;) {
		u32 val = arch_spin_read_noalloc(lock);
		u32 delta = my_ticket - arch_spin_current(val);
		if (delta == 0)
			return;
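		/*
		 * Back off in proportion to the number of tickets still
		 * ahead of us, on the rough assumption that each holder
		 * keeps the lock for about 128 cycles.
		 */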
		relax((128 / CYCLES_PER_RELAX_LOOP) * delta);
	}
}
EXPORT_SYMBOL(arch_spin_lock_slow);

/*
 * Check the lock to see if it is plausible, and try to get it with cmpxchg().
 */
int arch_spin_trylock(arch_spinlock_t *lock)
{
	u32 val = arch_spin_read_noalloc(lock);
	if (unlikely(arch_spin_current(val) != arch_spin_next(val)))
		return 0;
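	/*
	 * Take the lock by bumping the "next" ticket, masking off any
	 * overflow bit the increment may produce; the cmpxchg() fails, and
	 * so does the trylock, if another cpu changed the lock meanwhile.
	 */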
	return cmpxchg(&lock->lock, val, (val + 1) & ~__ARCH_SPIN_NEXT_OVERFLOW)
		== val;
}
EXPORT_SYMBOL(arch_spin_trylock);

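/*
 * Wait for whoever held the lock when we were called to release it;
 * we do not insist on seeing the lock actually become free.
 */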
void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	u32 iterations = 0;
	u32 val = READ_ONCE(lock->lock);
	u32 curr = arch_spin_current(val);

	/* Return immediately if unlocked. */
	if (arch_spin_next(val) == curr)
		return;

	/* Wait until the current locker has released the lock. */
	do {
		delay_backoff(iterations++);
	} while (arch_spin_current(READ_ONCE(lock->lock)) == curr);
}
EXPORT_SYMBOL(arch_spin_unlock_wait);

/*
 * If the read lock fails due to a writer, we retry periodically
 * until the value is positive and we write our incremented reader count.
 */
void __read_lock_failed(arch_rwlock_t *rw)
{
	u32 val;
	int iterations = 0;
	do {
		delay_backoff(iterations++);
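		/*
		 * fetchaddgez4 performs the add only if the result would be
		 * non-negative, so the reader count is bumped only while no
		 * writer (presumably the sign bit) holds the lock.
		 */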
		val = __insn_fetchaddgez4(&rw->lock, 1);
	} while (unlikely(arch_write_val_locked(val)));
}
EXPORT_SYMBOL(__read_lock_failed);

/*
 * If we failed because there were readers, clear the "writer" bit
 * so we don't block additional readers. Otherwise, there was another
 * writer anyway, so our "fetchor" made no difference. Then wait,
 * issuing periodic fetchor instructions, till we get the lock.
 */
void __write_lock_failed(arch_rwlock_t *rw, u32 val)
{
	int iterations = 0;
	do {
		if (!arch_write_val_locked(val))
			val = __insn_fetchand4(&rw->lock, ~__WRITE_LOCK_BIT);
		delay_backoff(iterations++);
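		/*
		 * Try again to claim the writer bit; the previous value is
		 * zero only if there were no readers and no other writer,
		 * in which case the lock is now ours.
		 */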
		val = __insn_fetchor4(&rw->lock, __WRITE_LOCK_BIT);
	} while (val != 0);
}
EXPORT_SYMBOL(__write_lock_failed);