/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/spinlock_types.h>
#include <asm/processor.h>
#include <asm/barrier.h>

#define arch_spin_is_locked(x)	((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)
#define arch_spin_lock_flags(lock, flags)	arch_spin_lock(lock)
#define arch_spin_unlock_wait(x) \
	do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0)
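
/*
 * Note: the arch_* routines below are the ARC back-ends used by the
 * generic kernel locking code (spin_lock(), read_lock(), write_lock(),
 * etc.); they are not meant to be called directly.
 */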

#ifdef CONFIG_ARC_HAS_LLSC

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int val;

	smp_mb();

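	/*
	 * Sketch (not a literal translation) of what the LLOCK/SCOND
	 * sequence below does, in the same pseudo-code style as the rwlock
	 * comments further down:
	 *
	 *	while (lock->slock == __ARCH_SPIN_LOCK_LOCKED__)
	 *		;
	 *	lock->slock = __ARCH_SPIN_LOCK_LOCKED__;
	 *
	 * with the store done via SCOND, so the whole sequence retries if
	 * another CPU raced in between the load and the store.
	 */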
	__asm__ __volatile__(
	"1:	llock	%[val], [%[slock]]	\n"
	"	breq	%[val], %[LOCKED], 1b	\n"	/* spin while LOCKED */
	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
	"	bnz	1b			\n"
	"					\n"
	: [val]		"=&r"	(val)
	: [slock]	"r"	(&(lock->slock)),
	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
	: "memory", "cc");

	smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int val, got_it = 0;

	smp_mb();

	__asm__ __volatile__(
	"1:	llock	%[val], [%[slock]]	\n"
	"	breq	%[val], %[LOCKED], 4f	\n"	/* already LOCKED, just bail */
	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
	"	bnz	1b			\n"
	"	mov	%[got_it], 1	\n"
	"4:				\n"
	"				\n"
	: [val]		"=&r"	(val),
	  [got_it]	"+&r"	(got_it)
	: [slock]	"r"	(&(lock->slock)),
	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
	: "memory", "cc");

	smp_mb();

	return got_it;
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();

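	/*
	 * A plain store is sufficient here: only the current owner ever
	 * writes UNLOCKED, and the smp_mb() on either side provides the
	 * required ordering.
	 */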
	lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;

	smp_mb();
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking: writers can be starved indefinitely by readers.
 */

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int val;

	smp_mb();

	/*
	 * Zero means a writer holds the lock exclusively: deny the reader.
	 * Otherwise grant the lock to the first/subsequent reader(s):
	 *
	 * if (rw->counter > 0) {
	 *	rw->counter--;
	 *	ret = 1;
	 * }
	 */

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brls	%[val], %[WR_LOCKED], 1b\n"	/* <= 0: spin while write locked */
	"	sub	%[val], %[val], 1	\n"	/* reader lock */
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"
	"					\n"
	: [val]		"=&r"	(val)
	: [rwlock]	"r"	(&(rw->counter)),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int val, got_it = 0;

	smp_mb();

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brls	%[val], %[WR_LOCKED], 4f\n"	/* <= 0: already write locked, bail */
	"	sub	%[val], %[val], 1	\n"	/* counter-- */
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"	/* retry if collided with someone */
	"	mov	%[got_it], 1	\n"
	"				\n"
	"4: ; --- done ---	\n"

	: [val]		"=&r"	(val),
	  [got_it]	"+&r"	(got_it)
	: [rwlock]	"r"	(&(rw->counter)),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();

	return got_it;
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int val;

	smp_mb();

	/*
	 * If reader(s) hold the lock (counter < __ARCH_RW_LOCK_UNLOCKED__),
	 * deny the writer; otherwise, if unlocked, grant it to the writer.
	 * Hence the claim that Linux rwlocks are unfair to writers: they can
	 * be starved for an indefinite time by readers.
	 *
	 * if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
	 *	rw->counter = 0;
	 *	ret = 1;
	 * }
	 */

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brne	%[val], %[UNLOCKED], 1b	\n"	/* while !UNLOCKED spin */
	"	mov	%[val], %[WR_LOCKED]	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"
	"					\n"
	: [val]		"=&r"	(val)
	: [rwlock]	"r"	(&(rw->counter)),
	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int val, got_it = 0;

	smp_mb();

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brne	%[val], %[UNLOCKED], 4f	\n"	/* !UNLOCKED, bail */
	"	mov	%[val], %[WR_LOCKED]	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"	/* retry if collided with someone */
	"	mov	%[got_it], 1	\n"
	"				\n"
	"4: ; --- done ---	\n"

	: [val]		"=&r"	(val),
	  [got_it]	"+&r"	(got_it)
	: [rwlock]	"r"	(&(rw->counter)),
	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();

	return got_it;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int val;

	smp_mb();

	/*
	 * rw->counter++;
	 */
	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	add	%[val], %[val], 1	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"
	"					\n"
	: [val]		"=&r"	(val)
	: [rwlock]	"r"	(&(rw->counter))
	: "memory", "cc");

	smp_mb();
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;

	smp_mb();
}
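
/*
 * Summary of the rwlock encoding used above (the EX based variant below
 * uses the same scheme, just serialized by @lock_mutex):
 *
 *	counter == __ARCH_RW_LOCK_UNLOCKED__		lock is free
 *	0 < counter < __ARCH_RW_LOCK_UNLOCKED__		held by reader(s)
 *	counter == 0					held by a writer
 *
 * Each reader decrements the counter and a writer drops it straight to
 * zero, which is why a writer only gets in when the counter reads back
 * exactly __ARCH_RW_LOCK_UNLOCKED__.
 */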

#else	/* !CONFIG_ARC_HAS_LLSC */

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

	/*
	 * This smp_mb() is technically superfluous: we only need the one
	 * after the lock to provide the ACQUIRE semantics. However, doing
	 * the "right" thing was regressing hackbench, so this one is kept,
	 * pending further investigation.
	 */
	smp_mb();

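	/*
	 * EX atomically swaps a register with a memory location. val starts
	 * out as LOCKED, so the loop below keeps exchanging LOCKED into the
	 * lock word until it reads back something other than LOCKED, i.e.
	 * until this CPU is the one that flipped it from UNLOCKED to LOCKED.
	 * Roughly (pseudo-code):
	 *
	 *	do {
	 *		swap(val, lock->slock);		(atomic, via EX)
	 *	} while (val == __ARCH_SPIN_LOCK_LOCKED__);
	 */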
	__asm__ __volatile__(
	"1:	ex  %0, [%1]		\n"
	"	breq  %0, %2, 1b	\n"
	: "+&r" (val)
	: "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
	: "memory");

	/*
	 * ACQUIRE barrier to ensure loads/stores after taking the lock
	 * don't "bleed up" out of the critical section (leak-in is allowed).
	 * http://www.spinics.net/lists/kernel/msg2010409.html
	 *
	 * ARCv2 only has load-load, store-store and all-all barriers,
	 * thus the full all-all barrier is needed here.
	 */
	smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

	smp_mb();

	__asm__ __volatile__(
	"1:	ex  %0, [%1]		\n"
	: "+r" (val)
	: "r"(&(lock->slock))
	: "memory");

	smp_mb();

	return (val == __ARCH_SPIN_LOCK_UNLOCKED__);
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_UNLOCKED__;

	/*
	 * RELEASE barrier: given the instructions available on ARCv2, a full
	 * barrier is the only option.
	 */
	smp_mb();

	__asm__ __volatile__(
	"	ex  %0, [%1]		\n"
	: "+r" (val)
	: "r"(&(lock->slock))
	: "memory");

	/*
	 * Superfluous, but kept for now - see the pairing barrier in
	 * arch_spin_lock() above.
	 */
	smp_mb();
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking: writers can be starved indefinitely by readers.
 *
 * The lock state itself is held in @counter and access to it is
 * serialized via @lock_mutex.
 */

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	int ret = 0;

	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * Zero means a writer holds the lock exclusively: deny the reader.
	 * Otherwise grant the lock to the first/subsequent reader(s).
	 */
	if (rw->counter > 0) {
		rw->counter--;
		ret = 1;
	}

	arch_spin_unlock(&(rw->lock_mutex));

	smp_mb();
	return ret;
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	int ret = 0;

	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * If reader(s) hold the lock (counter < __ARCH_RW_LOCK_UNLOCKED__),
	 * deny the writer; otherwise, if unlocked, grant it to the writer.
	 * Hence the claim that Linux rwlocks are unfair to writers: they can
	 * be starved for an indefinite time by readers.
	 */
	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
		rw->counter = 0;
		ret = 1;
	}
	arch_spin_unlock(&(rw->lock_mutex));

	return ret;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (!arch_read_trylock(rw))
		cpu_relax();
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (!arch_write_trylock(rw))
		cpu_relax();
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter++;
	arch_spin_unlock(&(rw->lock_mutex));
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
	arch_spin_unlock(&(rw->lock_mutex));
}

#endif	/* CONFIG_ARC_HAS_LLSC */

#define arch_read_can_lock(x)	((x)->counter > 0)
#define arch_write_can_lock(x)	((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)

#define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags)	arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif	/* __ASM_SPINLOCK_H */