/*
 *  linux/include/asm-arm/locks.h
 *
 *  Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Interrupt safe locking assembler.
 */
#ifndef __ASM_PROC_LOCKS_H
#define __ASM_PROC_LOCKS_H

#if __LINUX_ARM_ARCH__ >= 6
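
/*
 * ARMv6 and later: the count is updated with ldrex/strex
 * exclusive-access loops, so the read-modify-write is atomic without
 * having to mask interrupts.  Pre-ARMv6 CPUs (the #else branch below)
 * have no exclusive accessors and instead briefly disable IRQs around
 * a plain load/modify/store.
 */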

#define __down_op(ptr,fail) \
	({ \
	__asm__ __volatile__( \
	"@ down_op\n" \
"1:	ldrex	lr, [%0]\n" \
"	sub	lr, lr, %1\n" \
"	strex	ip, lr, [%0]\n" \
"	teq	ip, #0\n" \
"	bne	1b\n" \
"	teq	lr, #0\n" \
"	movmi	ip, %0\n" \
"	blmi	" #fail \
	: \
	: "r" (ptr), "I" (1) \
	: "ip", "lr", "cc", "memory"); \
	})
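
/*
 * A sketch of the intended use, modelled on the old asm-arm semaphore
 * code (the exact caller lives outside this header):
 *
 *	static inline void down(struct semaphore *sem)
 *	{
 *		might_sleep();
 *		__down_op(sem, __down_failed);
 *	}
 *
 * The count is assumed to be the first word of *ptr.  On the contended
 * path (count went negative) the pointer is handed to the out-of-line
 * failure routine in ip, not via the normal C calling convention.
 */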

#define __down_op_ret(ptr,fail) \
	({ \
	unsigned int ret; \
	__asm__ __volatile__( \
	"@ down_op_ret\n" \
"1:	ldrex	lr, [%1]\n" \
"	sub	lr, lr, %2\n" \
"	strex	ip, lr, [%1]\n" \
"	teq	ip, #0\n" \
"	bne	1b\n" \
"	teq	lr, #0\n" \
"	movmi	ip, %1\n" \
"	movpl	ip, #0\n" \
"	blmi	" #fail "\n" \
"	mov	%0, ip" \
	: "=&r" (ret) \
	: "r" (ptr), "I" (1) \
	: "ip", "lr", "cc", "memory"); \
	ret; \
	})
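
/*
 * Value-returning variant, assumed to back down_interruptible() and
 * friends.  The uncontended path yields 0 (movpl ip, #0); the
 * contended path branches to the failure handler and returns whatever
 * that handler leaves in ip.  A sketch of the assumed caller:
 *
 *	static inline int down_interruptible(struct semaphore *sem)
 *	{
 *		might_sleep();
 *		return __down_op_ret(sem, __down_interruptible_failed);
 *	}
 */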

#define __up_op(ptr,wake) \
	({ \
	__asm__ __volatile__( \
	"@ up_op\n" \
"1:	ldrex	lr, [%0]\n" \
"	add	lr, lr, %1\n" \
"	strex	ip, lr, [%0]\n" \
"	teq	ip, #0\n" \
"	bne	1b\n" \
"	teq	lr, #0\n" \
"	movle	ip, %0\n" \
"	blle	" #wake \
	: \
	: "r" (ptr), "I" (1) \
	: "ip", "lr", "cc", "memory"); \
	})

/*
 * The value 0x01000000 supports up to 128 processors and
 * lots of processes.  BIAS must be chosen such that subtracting
 * BIAS once per CPU still leaves the count negative
 * (128 * 0x01000000 == 0x80000000).
 */
#define RW_LOCK_BIAS      0x01000000
#define RW_LOCK_BIAS_STR "0x01000000"
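
/*
 * Worked example of the assumed bias scheme (the rw-semaphore callers
 * are expected to initialise the count to RW_LOCK_BIAS):
 *
 *	free lock:                count == 0x01000000
 *	one reader holds it:      count == 0x00ffffff  (subtracted 1)
 *	writer takes a free lock: count == 0x00000000  (subtracted BIAS,
 *	                                                zero => success)
 *	reader vs. active writer: count == 0xffffffff  (negative => fail)
 *
 * So a writer succeeds only when it finds the lock completely free,
 * and a reader only hits the failure path once a writer has
 * subtracted the bias.
 */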

#define __down_op_write(ptr,fail) \
	({ \
	__asm__ __volatile__( \
	"@ down_op_write\n" \
"1:	ldrex	lr, [%0]\n" \
"	sub	lr, lr, %1\n" \
"	strex	ip, lr, [%0]\n" \
"	teq	ip, #0\n" \
"	bne	1b\n" \
"	teq	lr, #0\n" \
"	movne	ip, %0\n" \
"	blne	" #fail \
	: \
	: "r" (ptr), "I" (RW_LOCK_BIAS) \
	: "ip", "lr", "cc", "memory"); \
	})

#define __up_op_write(ptr,wake) \
	({ \
	__asm__ __volatile__( \
	"@ up_op_write\n" \
"1:	ldrex	lr, [%0]\n" \
"	adds	lr, lr, %1\n" \
"	strex	ip, lr, [%0]\n" \
"	teq	ip, #0\n" \
"	bne	1b\n" \
"	movcs	ip, %0\n" \
"	blcs	" #wake \
	: \
	: "r" (ptr), "I" (RW_LOCK_BIAS) \
	: "ip", "lr", "cc", "memory"); \
	})

#define __down_op_read(ptr,fail) \
	__down_op(ptr, fail)

#define __up_op_read(ptr,wake) \
	({ \
	__asm__ __volatile__( \
	"@ up_op_read\n" \
"1:	ldrex	lr, [%0]\n" \
"	add	lr, lr, %1\n" \
"	strex	ip, lr, [%0]\n" \
"	teq	ip, #0\n" \
"	bne	1b\n" \
"	teq	lr, #0\n" \
"	moveq	ip, %0\n" \
"	bleq	" #wake \
	: \
	: "r" (ptr), "I" (1) \
	: "ip", "lr", "cc", "memory"); \
	})

#else
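
/*
 * Pre-ARMv6 fallback: no ldrex/strex, so atomicity comes from masking
 * IRQs for the duration of the load/modify/store.  "orr lr, ip, #128"
 * sets the I (IRQ disable) bit in the CPSR; the saved CPSR in ip is
 * restored afterwards.  This protects only against local interrupts,
 * which is sufficient on the uniprocessor systems these CPUs target.
 */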

#define __down_op(ptr,fail) \
	({ \
	__asm__ __volatile__( \
	"@ down_op\n" \
"	mrs	ip, cpsr\n" \
"	orr	lr, ip, #128\n" \
"	msr	cpsr_c, lr\n" \
"	ldr	lr, [%0]\n" \
"	subs	lr, lr, %1\n" \
"	str	lr, [%0]\n" \
"	msr	cpsr_c, ip\n" \
"	movmi	ip, %0\n" \
"	blmi	" #fail \
	: \
	: "r" (ptr), "I" (1) \
	: "ip", "lr", "cc", "memory"); \
	})

#define __down_op_ret(ptr,fail) \
	({ \
	unsigned int ret; \
	__asm__ __volatile__( \
	"@ down_op_ret\n" \
"	mrs	ip, cpsr\n" \
"	orr	lr, ip, #128\n" \
"	msr	cpsr_c, lr\n" \
"	ldr	lr, [%1]\n" \
"	subs	lr, lr, %2\n" \
"	str	lr, [%1]\n" \
"	msr	cpsr_c, ip\n" \
"	movmi	ip, %1\n" \
"	movpl	ip, #0\n" \
"	blmi	" #fail "\n" \
"	mov	%0, ip" \
	: "=&r" (ret) \
	: "r" (ptr), "I" (1) \
	: "ip", "lr", "cc", "memory"); \
	ret; \
	})

#define __up_op(ptr,wake) \
	({ \
	__asm__ __volatile__( \
	"@ up_op\n" \
"	mrs	ip, cpsr\n" \
"	orr	lr, ip, #128\n" \
"	msr	cpsr_c, lr\n" \
"	ldr	lr, [%0]\n" \
"	adds	lr, lr, %1\n" \
"	str	lr, [%0]\n" \
"	msr	cpsr_c, ip\n" \
"	movle	ip, %0\n" \
"	blle	" #wake \
	: \
	: "r" (ptr), "I" (1) \
	: "ip", "lr", "cc", "memory"); \
	})

/*
 * The value 0x01000000 supports up to 128 processors and
 * lots of processes.  BIAS must be chosen such that subtracting
 * BIAS once per CPU still leaves the count negative
 * (128 * 0x01000000 == 0x80000000).
 */
#define RW_LOCK_BIAS      0x01000000
#define RW_LOCK_BIAS_STR "0x01000000"
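
/*
 * Same RW_LOCK_BIAS scheme as the ARMv6 block above; only the way the
 * update is made atomic differs.
 */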

#define __down_op_write(ptr,fail) \
	({ \
	__asm__ __volatile__( \
	"@ down_op_write\n" \
"	mrs	ip, cpsr\n" \
"	orr	lr, ip, #128\n" \
"	msr	cpsr_c, lr\n" \
"	ldr	lr, [%0]\n" \
"	subs	lr, lr, %1\n" \
"	str	lr, [%0]\n" \
"	msr	cpsr_c, ip\n" \
"	movne	ip, %0\n" \
"	blne	" #fail \
	: \
	: "r" (ptr), "I" (RW_LOCK_BIAS) \
	: "ip", "lr", "cc", "memory"); \
	})

#define __up_op_write(ptr,wake) \
	({ \
	__asm__ __volatile__( \
	"@ up_op_write\n" \
"	mrs	ip, cpsr\n" \
"	orr	lr, ip, #128\n" \
"	msr	cpsr_c, lr\n" \
"	ldr	lr, [%0]\n" \
"	adds	lr, lr, %1\n" \
"	str	lr, [%0]\n" \
"	msr	cpsr_c, ip\n" \
"	movcs	ip, %0\n" \
"	blcs	" #wake \
	: \
	: "r" (ptr), "I" (RW_LOCK_BIAS) \
	: "ip", "lr", "cc", "memory"); \
	})

#define __down_op_read(ptr,fail) \
	__down_op(ptr, fail)

#define __up_op_read(ptr,wake) \
	({ \
	__asm__ __volatile__( \
	"@ up_op_read\n" \
"	mrs	ip, cpsr\n" \
"	orr	lr, ip, #128\n" \
"	msr	cpsr_c, lr\n" \
"	ldr	lr, [%0]\n" \
"	adds	lr, lr, %1\n" \
"	str	lr, [%0]\n" \
"	msr	cpsr_c, ip\n" \
"	moveq	ip, %0\n" \
"	bleq	" #wake \
	: \
	: "r" (ptr), "I" (1) \
	: "ip", "lr", "cc", "memory"); \
	})

#endif

#endif