/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 97, 99, 2000, 03, 04 by Ralf Baechle
 */

/*
 * As a workaround for the ATOMIC_DEC_AND_LOCK / atomic_dec_and_lock mess in
 * <linux/spinlock.h> we have to include <linux/spinlock.h> outside the
 * main big wrapper ...
 */
#include <linux/config.h>
#include <linux/spinlock.h>

#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <asm/cpu-features.h>
#include <asm/war.h>

extern spinlock_t atomic_lock;

typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)    { (i) }

/*
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#define atomic_read(v)		((v)->counter)

/*
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic_set(v,i)		((v)->counter = (i))

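/*
 * Illustrative usage sketch, not part of this header; identifiers are
 * hypothetical.  Declaring, reading and (re)setting an atomic counter:
 *
 *	static atomic_t nr_events = ATOMIC_INIT(0);
 *
 *	void reset_events(void)
 *	{
 *		atomic_set(&nr_events, 0);
 *	}
 *
 *	int current_events(void)
 *	{
 *		return atomic_read(&nr_events);
 *	}
 */
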
/*
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static __inline__ void atomic_add(int i, atomic_t * v)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	ll	%0, %1		# atomic_add		\n"
		"	addu	%0, %2					\n"
		"	sc	%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (v->counter)
		: "Ir" (i), "m" (v->counter));
	} else if (cpu_has_llsc) {
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	ll	%0, %1		# atomic_add		\n"
		"	addu	%0, %2					\n"
		"	sc	%0, %1					\n"
		"	beqz	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (v->counter)
		: "Ir" (i), "m" (v->counter));
	} else {
		unsigned long flags;

		spin_lock_irqsave(&atomic_lock, flags);
		v->counter += i;
		spin_unlock_irqrestore(&atomic_lock, flags);
	}
}

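/*
 * A note on the LL/SC sequences above: ll loads a value and tags the
 * location, sc stores only if nothing modified the location in
 * between, leaving 1 (success) or 0 (failure) in its source register.
 * Conceptually atomic_add() is the retry loop below (sketch only; the
 * atomicity comes from the hardware ll/sc pair, not from C):
 *
 *	do {
 *		temp = v->counter;	(ll)
 *		temp += i;
 *	} while (conditional store of temp to v->counter failed);  (sc)
 *
 * The R10000_LLSC_WAR variant differs only in using the branch-likely
 * beqzl instead of beqz, as a workaround for an erratum in early
 * R10000 silicon.
 */
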
/*
 * atomic_sub - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.
 */
static __inline__ void atomic_sub(int i, atomic_t * v)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	ll	%0, %1		# atomic_sub		\n"
		"	subu	%0, %2					\n"
		"	sc	%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (v->counter)
		: "Ir" (i), "m" (v->counter));
	} else if (cpu_has_llsc) {
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	ll	%0, %1		# atomic_sub		\n"
		"	subu	%0, %2					\n"
		"	sc	%0, %1					\n"
		"	beqz	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (v->counter)
		: "Ir" (i), "m" (v->counter));
	} else {
		unsigned long flags;

		spin_lock_irqsave(&atomic_lock, flags);
		v->counter -= i;
		spin_unlock_irqrestore(&atomic_lock, flags);
	}
}

/*
 * Same as above, but return the result value
 */
static __inline__ int atomic_add_return(int i, atomic_t * v)
{
	unsigned long result;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	ll	%1, %2		# atomic_add_return	\n"
		"	addu	%0, %1, %3				\n"
		"	sc	%0, %2					\n"
		"	beqzl	%0, 1b					\n"
		"	addu	%0, %1, %3				\n"
		"	sync						\n"
		"	.set	mips0					\n"
		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
		: "Ir" (i), "m" (v->counter)
		: "memory");
	} else if (cpu_has_llsc) {
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	ll	%1, %2		# atomic_add_return	\n"
		"	addu	%0, %1, %3				\n"
		"	sc	%0, %2					\n"
		"	beqz	%0, 1b					\n"
		"	addu	%0, %1, %3				\n"
		"	sync						\n"
		"	.set	mips0					\n"
		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
		: "Ir" (i), "m" (v->counter)
		: "memory");
	} else {
		unsigned long flags;

		spin_lock_irqsave(&atomic_lock, flags);
		result = v->counter;
		result += i;
		v->counter = result;
		spin_unlock_irqrestore(&atomic_lock, flags);
	}

	return result;
}

static __inline__ int atomic_sub_return(int i, atomic_t * v)
{
	unsigned long result;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	ll	%1, %2		# atomic_sub_return	\n"
		"	subu	%0, %1, %3				\n"
		"	sc	%0, %2					\n"
		"	beqzl	%0, 1b					\n"
		"	subu	%0, %1, %3				\n"
		"	sync						\n"
		"	.set	mips0					\n"
		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
		: "Ir" (i), "m" (v->counter)
		: "memory");
	} else if (cpu_has_llsc) {
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	ll	%1, %2		# atomic_sub_return	\n"
		"	subu	%0, %1, %3				\n"
		"	sc	%0, %2					\n"
		"	beqz	%0, 1b					\n"
		"	subu	%0, %1, %3				\n"
		"	sync						\n"
		"	.set	mips0					\n"
		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
		: "Ir" (i), "m" (v->counter)
		: "memory");
	} else {
		unsigned long flags;

		spin_lock_irqsave(&atomic_lock, flags);
		result = v->counter;
		result -= i;
		v->counter = result;
		spin_unlock_irqrestore(&atomic_lock, flags);
	}

	return result;
}

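/*
 * Illustrative sketch with hypothetical identifiers: the *_return
 * forms are what you want when the updated value itself matters,
 * e.g. handing out monotonically increasing sequence numbers:
 *
 *	static atomic_t next_seq = ATOMIC_INIT(0);
 *
 *	int get_seq(void)
 *	{
 *		return atomic_add_return(1, &next_seq);
 *	}
 */
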
/*
 * atomic_sub_if_positive - conditionally subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically test @v and subtract @i if @v is greater or equal than @i.
 * The function returns the old value of @v minus @i.
 */
static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
{
	unsigned long result;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	ll	%1, %2		# atomic_sub_if_positive\n"
		"	subu	%0, %1, %3				\n"
		"	bltz	%0, 1f					\n"
		"	sc	%0, %2					\n"
		"	beqzl	%0, 1b					\n"
		"	sync						\n"
		"1:							\n"
		"	.set	mips0					\n"
		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
		: "Ir" (i), "m" (v->counter)
		: "memory");
	} else if (cpu_has_llsc) {
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	ll	%1, %2		# atomic_sub_if_positive\n"
		"	subu	%0, %1, %3				\n"
		"	bltz	%0, 1f					\n"
		"	sc	%0, %2					\n"
		"	beqz	%0, 1b					\n"
		"	sync						\n"
		"1:							\n"
		"	.set	mips0					\n"
		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
		: "Ir" (i), "m" (v->counter)
		: "memory");
	} else {
		unsigned long flags;

		spin_lock_irqsave(&atomic_lock, flags);
		result = v->counter;
		result -= i;
		if (result >= 0)
			v->counter = result;
		spin_unlock_irqrestore(&atomic_lock, flags);
	}

	return result;
}

#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))

/**
 * atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
#define atomic_add_unless(v, a, u)				\
({								\
	int c, old;						\
	c = atomic_read(v);					\
	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
		c = old;					\
	c != (u);						\
})
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

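/*
 * Illustrative sketch with hypothetical types and identifiers: a
 * typical use of atomic_inc_not_zero() is taking a reference only
 * while the object is still live, i.e. its refcount has not already
 * dropped to zero:
 *
 *	struct obj {
 *		atomic_t refcnt;
 *	};
 *
 *	struct obj *obj_get(struct obj *o)
 *	{
 *		return atomic_inc_not_zero(&o->refcnt) ? o : NULL;
 *	}
 */
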
#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic_inc_return(v) atomic_add_return(1,(v))

/*
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

/*
 * atomic_dec_and_test - decrement by 1 and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)

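/*
 * Illustrative sketch with hypothetical identifiers: the classic
 * refcounting "put" side.  Exactly one caller, the one that takes the
 * count to zero, sees a true return and may free the object:
 *
 *	void obj_put(struct obj *o)
 *	{
 *		if (atomic_dec_and_test(&o->refcnt))
 *			kfree(o);
 *	}
 */
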
/*
 * atomic_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic_t
 */
#define atomic_dec_if_positive(v)	atomic_sub_if_positive(1, v)

/*
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
#define atomic_inc(v) atomic_add(1,(v))

/*
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
#define atomic_dec(v) atomic_sub(1,(v))

/*
 * atomic_add_negative - add and test if negative
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
#define atomic_add_negative(i,v) (atomic_add_return(i, (v)) < 0)

#ifdef CONFIG_64BIT

typedef struct { volatile __s64 counter; } atomic64_t;

#define ATOMIC64_INIT(i)    { (i) }

/*
 * atomic64_read - read atomic variable
 * @v: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 */
#define atomic64_read(v)	((v)->counter)

/*
 * atomic64_set - set atomic variable
 * @v: pointer of type atomic64_t
 * @i: required value
 */
#define atomic64_set(v,i)	((v)->counter = (i))

/*
 * atomic64_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic64_t
 *
 * Atomically adds @i to @v.
 */
static __inline__ void atomic64_add(long i, atomic64_t * v)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	lld	%0, %1		# atomic64_add		\n"
		"	daddu	%0, %2					\n"
		"	scd	%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (v->counter)
		: "Ir" (i), "m" (v->counter));
	} else if (cpu_has_llsc) {
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	lld	%0, %1		# atomic64_add		\n"
		"	daddu	%0, %2					\n"
		"	scd	%0, %1					\n"
		"	beqz	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (v->counter)
		: "Ir" (i), "m" (v->counter));
	} else {
		unsigned long flags;

		spin_lock_irqsave(&atomic_lock, flags);
		v->counter += i;
		spin_unlock_irqrestore(&atomic_lock, flags);
	}
}

/*
 * atomic64_sub - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic64_t
 *
 * Atomically subtracts @i from @v.
 */
static __inline__ void atomic64_sub(long i, atomic64_t * v)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	lld	%0, %1		# atomic64_sub		\n"
		"	dsubu	%0, %2					\n"
		"	scd	%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (v->counter)
		: "Ir" (i), "m" (v->counter));
	} else if (cpu_has_llsc) {
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	lld	%0, %1		# atomic64_sub		\n"
		"	dsubu	%0, %2					\n"
		"	scd	%0, %1					\n"
		"	beqz	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (v->counter)
		: "Ir" (i), "m" (v->counter));
	} else {
		unsigned long flags;

		spin_lock_irqsave(&atomic_lock, flags);
		v->counter -= i;
		spin_unlock_irqrestore(&atomic_lock, flags);
	}
}

/*
 * Same as above, but return the result value
 */
static __inline__ long atomic64_add_return(long i, atomic64_t * v)
{
	unsigned long result;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	lld	%1, %2		# atomic64_add_return	\n"
		"	daddu	%0, %1, %3				\n"
		"	scd	%0, %2					\n"
		"	beqzl	%0, 1b					\n"
		"	daddu	%0, %1, %3				\n"
		"	sync						\n"
		"	.set	mips0					\n"
		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
		: "Ir" (i), "m" (v->counter)
		: "memory");
	} else if (cpu_has_llsc) {
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	lld	%1, %2		# atomic64_add_return	\n"
		"	daddu	%0, %1, %3				\n"
		"	scd	%0, %2					\n"
		"	beqz	%0, 1b					\n"
		"	daddu	%0, %1, %3				\n"
		"	sync						\n"
		"	.set	mips0					\n"
		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
		: "Ir" (i), "m" (v->counter)
		: "memory");
	} else {
		unsigned long flags;

		spin_lock_irqsave(&atomic_lock, flags);
		result = v->counter;
		result += i;
		v->counter = result;
		spin_unlock_irqrestore(&atomic_lock, flags);
	}

	return result;
}

static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
{
	unsigned long result;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	lld	%1, %2		# atomic64_sub_return	\n"
		"	dsubu	%0, %1, %3				\n"
		"	scd	%0, %2					\n"
		"	beqzl	%0, 1b					\n"
		"	dsubu	%0, %1, %3				\n"
		"	sync						\n"
		"	.set	mips0					\n"
		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
		: "Ir" (i), "m" (v->counter)
		: "memory");
	} else if (cpu_has_llsc) {
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	lld	%1, %2		# atomic64_sub_return	\n"
		"	dsubu	%0, %1, %3				\n"
		"	scd	%0, %2					\n"
		"	beqz	%0, 1b					\n"
		"	dsubu	%0, %1, %3				\n"
		"	sync						\n"
		"	.set	mips0					\n"
		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
		: "Ir" (i), "m" (v->counter)
		: "memory");
	} else {
		unsigned long flags;

		spin_lock_irqsave(&atomic_lock, flags);
		result = v->counter;
		result -= i;
		v->counter = result;
		spin_unlock_irqrestore(&atomic_lock, flags);
	}

	return result;
}

/*
 * atomic64_sub_if_positive - conditionally subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic64_t
 *
 * Atomically test @v and subtract @i if @v is greater or equal than @i.
 * The function returns the old value of @v minus @i.
 */
static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
{
	unsigned long result;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	lld	%1, %2		# atomic64_sub_if_positive\n"
		"	dsubu	%0, %1, %3				\n"
		"	bltz	%0, 1f					\n"
		"	scd	%0, %2					\n"
		"	beqzl	%0, 1b					\n"
		"	sync						\n"
		"1:							\n"
		"	.set	mips0					\n"
		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
		: "Ir" (i), "m" (v->counter)
		: "memory");
	} else if (cpu_has_llsc) {
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	lld	%1, %2		# atomic64_sub_if_positive\n"
		"	dsubu	%0, %1, %3				\n"
		"	bltz	%0, 1f					\n"
		"	scd	%0, %2					\n"
		"	beqz	%0, 1b					\n"
		"	sync						\n"
		"1:							\n"
		"	.set	mips0					\n"
		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
		: "Ir" (i), "m" (v->counter)
		: "memory");
	} else {
		unsigned long flags;

		spin_lock_irqsave(&atomic_lock, flags);
		result = v->counter;
		result -= i;
		if (result >= 0)
			v->counter = result;
		spin_unlock_irqrestore(&atomic_lock, flags);
	}

	return result;
}

#define atomic64_dec_return(v) atomic64_sub_return(1,(v))
#define atomic64_inc_return(v) atomic64_add_return(1,(v))

/*
 * atomic64_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic64_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)

/*
 * atomic64_inc_and_test - increment and test
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)

/*
 * atomic64_dec_and_test - decrement by 1 and test
 * @v: pointer of type atomic64_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)

/*
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 */
#define atomic64_dec_if_positive(v)	atomic64_sub_if_positive(1, v)

/*
 * atomic64_inc - increment atomic variable
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1.
 */
#define atomic64_inc(v) atomic64_add(1,(v))

/*
 * atomic64_dec - decrement atomic variable
 * @v: pointer of type atomic64_t
 *
 * Atomically decrements @v by 1.
 */
#define atomic64_dec(v) atomic64_sub(1,(v))

/*
 * atomic64_add_negative - add and test if negative
 * @v: pointer of type atomic64_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
#define atomic64_add_negative(i,v) (atomic64_add_return(i, (v)) < 0)

#endif /* CONFIG_64BIT */

/*
 * atomic*_return operations are serializing but not the non-*_return
 * versions.
 */
#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

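/*
 * Illustrative sketch with hypothetical identifiers: a caller that
 * needs ordering around one of the non-serializing ops must request
 * it explicitly, e.g. to make prior stores visible before a counter
 * is bumped:
 *
 *	setup_buffer(buf);
 *	smp_mb__before_atomic_inc();
 *	atomic_inc(&buf_ready);
 */
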
#endif /* _ASM_ATOMIC_H */