#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>

/*
 * This is an architecture-neutral, but slow,
 * implementation of the notion of "decrement
 * a reference count, and return locked if it
 * decremented to zero".
 *
 * NOTE NOTE NOTE! This is _not_ equivalent to
 *
 *	if (atomic_dec_and_test(&atomic)) {
 *		spin_lock(&lock);
 *		return 1;
 *	}
 *	return 0;
 *
 * because the spin-lock and the decrement must be
 * "atomic".
 *
 * This slow version takes the spinlock unconditionally,
 * and releases it if it isn't needed.  Architectures
 * are encouraged to come up with better approaches;
 * this can be done efficiently using a load-locked
 * store-conditional sequence, for example.
 */
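
/*
 * A minimal usage sketch (illustrative only; put_object(), struct my_obj
 * and list_lock are hypothetical, not part of this file).  It shows the
 * typical pattern: the final reference drop must also unlink the object
 * from a list protected by list_lock, so the decrement and the lock
 * acquisition have to appear atomic to other CPUs, or another CPU could
 * find the object on the list with a zero reference count.
 *
 *	static void put_object(struct my_obj *obj)
 *	{
 *		if (atomic_dec_and_lock(&obj->refcount, &list_lock)) {
 *			list_del(&obj->node);
 *			spin_unlock(&list_lock);
 *			kfree(obj);
 *		}
 *	}
 */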

#ifndef ATOMIC_DEC_AND_LOCK
int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
{
	spin_lock(lock);
	if (atomic_dec_and_test(atomic))
		return 1;
	spin_unlock(lock);
	return 0;
}

EXPORT_SYMBOL(_atomic_dec_and_lock);
#endif
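
/*
 * A sketch of the kind of faster variant the comment above alludes to
 * (illustrative only, not part of this file): skip the spinlock entirely
 * unless the counter might actually reach zero.  This assumes an
 * atomic_add_unless() primitive is available; architectures with
 * load-locked/store-conditional instructions can open-code an
 * equivalent sequence instead.
 *
 *	int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 *	{
 *		if (atomic_add_unless(atomic, -1, 1))
 *			return 0;	// decremented, did not reach zero
 *
 *		spin_lock(lock);
 *		if (atomic_dec_and_test(atomic))
 *			return 1;	// reached zero, return with lock held
 *		spin_unlock(lock);
 *		return 0;
 *	}
 */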