/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/spinlock_types.h>
#include <asm/processor.h>
#include <asm/barrier.h>

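/* The lock word holds __ARCH_SPIN_LOCK_UNLOCKED__ while the lock is free */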
#define arch_spin_is_locked(x)	((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)
#define arch_spin_lock_flags(lock, flags)	arch_spin_lock(lock)
#define arch_spin_unlock_wait(x) \
	do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0)

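/*
 * Acquire: spin using the EX (atomic exchange) instruction, repeatedly
 * swapping LOCKED into the lock word until the value swapped out is no
 * longer LOCKED, i.e. this CPU is the one that grabbed the free lock.
 */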
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__;

	__asm__ __volatile__(
	"1:	ex  %0, [%1]		\n"
	"	breq  %0, %2, 1b	\n"
	: "+&r" (tmp)
	: "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
	: "memory");
}

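/* Single EX attempt: the lock was taken iff the old value was UNLOCKED */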
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__;

	__asm__ __volatile__(
	"1:	ex  %0, [%1]		\n"
	: "+r" (tmp)
	: "r"(&(lock->slock))
	: "memory");

	return (tmp == __ARCH_SPIN_LOCK_UNLOCKED__);
}

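/* Release: swap UNLOCKED back into the lock word (again a single atomic EX) */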
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned int tmp = __ARCH_SPIN_LOCK_UNLOCKED__;

	__asm__ __volatile__(
	"	ex  %0, [%1]		\n"
	: "+r" (tmp)
	: "r"(&(lock->slock))
	: "memory");

	smp_mb();
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * The rwlock state itself is kept in @counter; access to it is
 * serialized with @lock_mutex.
 *
 * Unfair locking: writers can be starved indefinitely by reader(s).
 */

/* Would read_trylock() succeed? */
#define arch_read_can_lock(x)	((x)->counter > 0)

/* Would write_trylock() succeed? */
#define arch_write_can_lock(x)	((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	int ret = 0;

	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * A zero count means a writer holds the lock exclusively: deny the
	 * reader. Otherwise grant the lock to the first/subsequent reader.
	 */
	if (rw->counter > 0) {
		rw->counter--;
		ret = 1;
	}

	arch_spin_unlock(&(rw->lock_mutex));

	smp_mb();
	return ret;
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	int ret = 0;

	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * If reader(s) hold the lock (counter < __ARCH_RW_LOCK_UNLOCKED__),
	 * deny the writer; otherwise, if unlocked, grant it to the writer.
	 * Hence the claim that Linux rwlocks are unfair to writers: they
	 * can be starved for an indefinite time by readers.
	 */
	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
		rw->counter = 0;
		ret = 1;
	}
	arch_spin_unlock(&(rw->lock_mutex));

	return ret;
}

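/* The blocking variants simply retry the trylock ops: no queueing, no fairness */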
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (!arch_read_trylock(rw))
		cpu_relax();
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (!arch_write_trylock(rw))
		cpu_relax();
}

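/* Reader done: give one reader slot back to @counter, under @lock_mutex */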
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter++;
	arch_spin_unlock(&(rw->lock_mutex));
}

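/* Writer done: restore @counter to its fully unlocked value */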
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
	arch_spin_unlock(&(rw->lock_mutex));
}

#define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags)	arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */