#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#if __LINUX_ARM_ARCH__ < 6
#error SMP not supported on pre-ARMv6 CPUs
#endif

#include <asm/processor.h>

/*
 * sev and wfe are ARMv6K extensions. Uniprocessor ARMv6 may not have the K
 * extensions, so when running on UP, we have to patch these instructions away.
 */
#define ALT_SMP(smp, up)					\
	"9998:	" smp "\n"					\
	"	.pushsection \".alt.smp.init\", \"a\"\n"	\
	"	.long	9998b\n"				\
	"	" up "\n"					\
	"	.popsection\n"
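
/*
 * ALT_SMP(smp, up) emits the SMP instruction inline at a local label and
 * records, in the .alt.smp.init section, both that label's address and the
 * UP replacement. The boot-time SMP_ON_UP fixup code walks this section
 * and patches in the "up" encoding when the kernel runs on a uniprocessor.
 */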

#ifdef CONFIG_THUMB2_KERNEL
#define SEV		ALT_SMP("sev.w", "nop.w")
/*
 * For Thumb-2, special care is needed to ensure that the conditional WFE
 * instruction really does assemble to exactly 4 bytes (as required by
 * the SMP_ON_UP fixup code). By itself "wfene" might cause the
 * assembler to insert an extra (16-bit) IT instruction, depending on the
 * presence or absence of neighbouring conditional instructions.
 *
 * To avoid this unpredictability, an appropriate IT is inserted explicitly:
 * the assembler won't change IT instructions which are explicitly present
 * in the input.
 */
#define WFE(cond)	ALT_SMP(		\
	"it " cond "\n\t"			\
	"wfe" cond ".n",			\
						\
	"nop.w"					\
)
#else
#define SEV		ALT_SMP("sev", "nop")
#define WFE(cond)	ALT_SMP("wfe" cond, "nop")
#endif

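/*
 * Ensure that prior stores are visible to other CPUs (DSB), then wake any
 * CPUs parked in WFE (SEV). Pre-ARMv7 builds use the equivalent
 * CP15 c7, c10, 4 barrier encoding instead, since the "dsb" mnemonic is
 * not available there.
 */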
static inline void dsb_sev(void)
{
#if __LINUX_ARM_ARCH__ >= 7
	__asm__ __volatile__ (
		"dsb\n"
		SEV
	);
#else
	__asm__ __volatile__ (
		"mcr p15, 0, %0, c7, c10, 4\n"
		SEV
		: : "r" (0)
	);
#endif
}

/*
 * ARMv6 ticket-based spin-locking.
 *
 * A memory barrier is required after we get a lock, and before we
 * release it, because V6 CPUs are assumed to have weakly ordered
 * memory.
 */
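
/*
 * A sketch of the algorithm (not the generated code), assuming the two
 * 16-bit next/owner ticket halves defined in asm/spinlock_types.h:
 *
 *	lock:	my_ticket = lock->tickets.next++;	// atomic ldrex/strex
 *		while (lock->tickets.owner != my_ticket)
 *			wfe();				// wait for an event
 *	unlock:	lock->tickets.owner++;			// hand over the lock
 *		dsb_sev();				// wake up the waiters
 */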

#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;
	u32 newval;
	arch_spinlock_t lockval;

	__asm__ __volatile__(
"1:	ldrex	%0, [%3]\n"
"	add	%1, %0, %4\n"
"	strex	%2, %1, [%3]\n"
"	teq	%2, #0\n"
"	bne	1b"
	: "=&r" (lockval), "=&r" (newval), "=&r" (tmp)
	: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
	: "cc");

	while (lockval.tickets.next != lockval.tickets.owner) {
		wfe();
		lockval.tickets.owner = ACCESS_ONCE(lock->tickets.owner);
	}

	smp_mb();
}

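/*
 * One attempt at taking a ticket: "subs %1, %0, %0, ror #16" compares the
 * two halves of the lock word (next vs. owner). If they are equal the
 * lock is free, so bump next and try to claim it with strexeq. The loop
 * retries only when the exclusive store is disturbed (res != 0), never
 * because the lock is contended.
 */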
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long contended, res;
	u32 slock;

	do {
		__asm__ __volatile__(
		"	ldrex	%0, [%3]\n"
		"	mov	%2, #0\n"
		"	subs	%1, %0, %0, ror #16\n"
		"	addeq	%0, %0, %4\n"
		"	strexeq	%2, %0, [%3]"
		: "=&r" (slock), "=&r" (contended), "=&r" (res)
		: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
		: "cc");
	} while (res);

	if (!contended) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

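/*
 * Release by handing the ticket to the next waiter. Only the lock holder
 * ever writes ->owner, so (it is assumed) a plain increment rather than
 * another ldrex/strex sequence is sufficient here.
 */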
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();
	lock->tickets.owner++;
	dsb_sev();
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets);
	return tickets.owner != tickets.next;
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets);
	return (tickets.next - tickets.owner) > 1;
}
#define arch_spin_is_contended	arch_spin_is_contended

/*
 * RWLOCKS
 *
 * Write locks are easy - we just set bit 31. When unlocking, we can
 * just write zero since the lock is exclusively held.
 */

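/*
 * Spin (sleeping in WFE while the lock is held) until the lock word
 * reads zero, then claim exclusive write ownership by storing 0x80000000.
 */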
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
	WFE("ne")
"	strexeq	%0, %2, [%1]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	smp_mb();
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long contended, res;

	do {
		__asm__ __volatile__(
		"	ldrex	%0, [%2]\n"
		"	mov	%1, #0\n"
		"	teq	%0, #0\n"
		"	strexeq	%1, %3, [%2]"
		: "=&r" (contended), "=&r" (res)
		: "r" (&rw->lock), "r" (0x80000000)
		: "cc");
	} while (res);

	if (!contended) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

	__asm__ __volatile__(
	"str	%1, [%0]\n"
	:
	: "r" (&rw->lock), "r" (0)
	: "cc");

	dsb_sev();
}

/* write_can_lock - would write_trylock() succeed? */
#define arch_write_can_lock(x)		((x)->lock == 0)

/*
 * Read locks are a bit more hairy:
 *  - Exclusively load the lock value.
 *  - Increment it.
 *  - Store new lock value if positive, and we still own this location.
 *    If the value is negative, we've already failed.
 *  - If we failed to store the value, we want a negative result.
 *  - If we failed, try again.
 * Unlocking is similarly hairy. We may have multiple read locks
 * currently active. However, we know we won't have any write
 * locks.
 */
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	adds	%0, %0, #1\n"
"	strexpl	%1, %0, [%2]\n"
	WFE("mi")
"	rsbpls	%0, %1, #0\n"
"	bmi	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	smp_mb();
}

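/*
 * Drop a read lock: atomically decrement the count. The last reader out
 * (tmp == 0) issues dsb_sev() to wake any writer waiting in WFE.
 */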
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	smp_mb();

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, #1\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	if (tmp == 0)
		dsb_sev();
}

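/*
 * One attempt at taking a read lock: increment the count, but store it
 * back only while the result stays non-negative ("pl"), i.e. while no
 * writer holds bit 31. As with the other trylocks, the loop retries
 * only on a disturbed exclusive store, never on contention.
 */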
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned long contended, res;

	do {
		__asm__ __volatile__(
		"	ldrex	%0, [%2]\n"
		"	mov	%1, #0\n"
		"	adds	%0, %0, #1\n"
		"	strexpl	%1, %0, [%2]"
		: "=&r" (contended), "=&r" (res)
		: "r" (&rw->lock)
		: "cc");
	} while (res);

	/* If the lock is negative, then it is already held for write. */
	if (contended < 0x80000000) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

/* read_can_lock - would read_trylock() succeed? */
#define arch_read_can_lock(x)		((x)->lock < 0x80000000)

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */