blob: a7c418ac26afbb46500ff812d939a8aefff27945 [file] [log] [blame]
/*
 * atomic32.c: 32-bit atomic_t implementation
 *
 * Copyright (C) 2004 Keith M Wesolowski
 * Copyright (C) 2007 Kyle McMartin
 *
 * Based on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf
 */
9
Arun Sharma600634972011-07-26 16:09:06 -070010#include <linux/atomic.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070011#include <linux/spinlock.h>
12#include <linux/module.h>
13
#ifdef CONFIG_SMP
/*
 * Atomic and bitop RMW sequences are serialized through a small hash
 * table of spinlocks keyed by the operand's address.  The low 8 address
 * bits are discarded before hashing, so objects in the same 256-byte
 * region share a lock while unrelated ones usually do not.
 */
#define ATOMIC_HASH_SIZE 4
#define ATOMIC_HASH(a) (&__atomic_hash[(((unsigned long)a)>>8) & (ATOMIC_HASH_SIZE-1)])

spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] = {
	[0 ... (ATOMIC_HASH_SIZE-1)] = __SPIN_LOCK_UNLOCKED(__atomic_hash)
};

#else /* SMP */

/* UP: one dummy lock is enough; taking it only disables local interrupts. */
static DEFINE_SPINLOCK(dummy);
#define ATOMIC_HASH_SIZE 1
#define ATOMIC_HASH(a) (&dummy)

#endif /* SMP */
29
/*
 * Generate a locked read-modify-write "op-and-return" function for a
 * compound-assignment operator.  ATOMIC_OP(add, +=) expands to
 * atomic_add_return(), which applies the operator to v->counter under
 * the address-hashed spinlock and returns the resulting value.
 */
#define ATOMIC_OP(op, cop)						\
int atomic_##op##_return(int i, atomic_t *v)				\
{									\
	int ret;							\
	unsigned long flags;						\
	spin_lock_irqsave(ATOMIC_HASH(v), flags);			\
									\
	ret = (v->counter cop i);					\
									\
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);			\
	return ret;							\
}									\
EXPORT_SYMBOL(atomic_##op##_return);

ATOMIC_OP(add, +=)

#undef ATOMIC_OP
Nick Piggin4a6dae62005-11-13 16:07:24 -080047
48int atomic_cmpxchg(atomic_t *v, int old, int new)
49{
50 int ret;
51 unsigned long flags;
52
53 spin_lock_irqsave(ATOMIC_HASH(v), flags);
54 ret = v->counter;
55 if (likely(ret == old))
56 v->counter = new;
57
58 spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
59 return ret;
60}
Robert Reif74e61de2007-03-26 19:10:43 -070061EXPORT_SYMBOL(atomic_cmpxchg);
Linus Torvalds1da177e2005-04-16 15:20:36 -070062
Stephen Rothwell678624e402011-07-27 12:49:44 -070063int __atomic_add_unless(atomic_t *v, int a, int u)
Nick Piggin8426e1f2005-11-13 16:07:25 -080064{
65 int ret;
66 unsigned long flags;
67
68 spin_lock_irqsave(ATOMIC_HASH(v), flags);
69 ret = v->counter;
70 if (ret != u)
71 v->counter += a;
72 spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
Josip Rodina61b5822011-08-04 02:47:40 -070073 return ret;
Nick Piggin8426e1f2005-11-13 16:07:25 -080074}
Stephen Rothwell678624e402011-07-27 12:49:44 -070075EXPORT_SYMBOL(__atomic_add_unless);
Nick Piggin8426e1f2005-11-13 16:07:25 -080076
Nick Piggin8426e1f2005-11-13 16:07:25 -080077/* Atomic operations are already serializing */
Linus Torvalds1da177e2005-04-16 15:20:36 -070078void atomic_set(atomic_t *v, int i)
79{
80 unsigned long flags;
Nick Piggin4a6dae62005-11-13 16:07:24 -080081
Linus Torvalds1da177e2005-04-16 15:20:36 -070082 spin_lock_irqsave(ATOMIC_HASH(v), flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -070083 v->counter = i;
Linus Torvalds1da177e2005-04-16 15:20:36 -070084 spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
85}
Linus Torvalds1da177e2005-04-16 15:20:36 -070086EXPORT_SYMBOL(atomic_set);
David S. Miller8a8b8362006-12-17 16:18:47 -080087
/*
 * Set the bits of @mask in *addr under the hashed lock.
 * Returns the previously-set subset of @mask (non-zero iff any
 * masked bit was already set).
 */
unsigned long ___set_bit(unsigned long *addr, unsigned long mask)
{
	unsigned long flags;
	unsigned long prev;

	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
	prev = *addr;
	*addr = prev | mask;
	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);

	return prev & mask;
}
EXPORT_SYMBOL(___set_bit);
100
/*
 * Clear the bits of @mask in *addr under the hashed lock.
 * Returns the previously-set subset of @mask (non-zero iff any
 * masked bit was set before the clear).
 */
unsigned long ___clear_bit(unsigned long *addr, unsigned long mask)
{
	unsigned long flags;
	unsigned long prev;

	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
	prev = *addr;
	*addr = prev & ~mask;
	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);

	return prev & mask;
}
EXPORT_SYMBOL(___clear_bit);
113
/*
 * Toggle the bits of @mask in *addr under the hashed lock.
 * Returns the previously-set subset of @mask (non-zero iff any
 * masked bit was set before the flip).
 */
unsigned long ___change_bit(unsigned long *addr, unsigned long mask)
{
	unsigned long flags;
	unsigned long prev;

	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
	prev = *addr;
	*addr = prev ^ mask;
	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);

	return prev & mask;
}
EXPORT_SYMBOL(___change_bit);
Kyle McMartin6197fe42007-05-29 02:51:13 -0700126
127unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
128{
129 unsigned long flags;
130 u32 prev;
131
Andrew Morton1fb88122007-05-31 01:19:24 -0700132 spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
Kyle McMartin6197fe42007-05-29 02:51:13 -0700133 if ((prev = *ptr) == old)
134 *ptr = new;
Andrew Morton1fb88122007-05-31 01:19:24 -0700135 spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);
Kyle McMartin6197fe42007-05-29 02:51:13 -0700136
137 return (unsigned long)prev;
138}
139EXPORT_SYMBOL(__cmpxchg_u32);