Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1 | /* |
| 2 | * x86 version of "atomic_dec_and_lock()" using |
| 3 | * the atomic "cmpxchg" instruction. |
| 4 | * |
| 5 |  * (For CPUs lacking cmpxchg, we use the slow |
| 6 | * generic version, and this one never even gets |
| 7 | * compiled). |
| 8 | */ |
| 9 | |
| 10 | #include <linux/spinlock.h> |
Alexey Dobriyan | 129f694 | 2005-06-23 00:08:33 -0700 | [diff] [blame] | 11 | #include <linux/module.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 12 | #include <asm/atomic.h> |
| 13 | |
| 14 | int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock) |
| 15 | { |
| 16 | int counter; |
| 17 | int newcount; |
| 18 | |
| 19 | repeat: |
| 20 | counter = atomic_read(atomic); |
| 21 | newcount = counter-1; |
| 22 | |
| 23 | if (!newcount) |
| 24 | goto slow_path; |
| 25 | |
| 26 | asm volatile("lock; cmpxchgl %1,%2" |
| 27 | :"=a" (newcount) |
| 28 | :"r" (newcount), "m" (atomic->counter), "0" (counter)); |
| 29 | |
| 30 | /* If the above failed, "eax" will have changed */ |
| 31 | if (newcount != counter) |
| 32 | goto repeat; |
| 33 | return 0; |
| 34 | |
| 35 | slow_path: |
| 36 | spin_lock(lock); |
| 37 | if (atomic_dec_and_test(atomic)) |
| 38 | return 1; |
| 39 | spin_unlock(lock); |
| 40 | return 0; |
| 41 | } |
Alexey Dobriyan | 129f694 | 2005-06-23 00:08:33 -0700 | [diff] [blame] | 42 | EXPORT_SYMBOL(_atomic_dec_and_lock); |