/*
 * atomic32.c: 32-bit atomic_t implementation
 *
 * Copyright (C) 2004 Keith M Wesolowski
 * Copyright (C) 2007 Kyle McMartin
 *
 * Based on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf
 */

#include <linux/atomic.h>
#include <linux/spinlock.h>
#include <linux/module.h>

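/*
 * Sparc32 has no compare-and-swap instruction, so atomic_t operations
 * are emulated under a spinlock.  Rather than funnel every atomic
 * through one global lock, the lock is chosen from a small array by
 * hashing the variable's address; only addresses that collide in the
 * hash contend with each other.  On UP a single dummy lock suffices,
 * since taking it disables interrupts, which is already enough.
 */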
#ifdef CONFIG_SMP
#define ATOMIC_HASH_SIZE	4
#define ATOMIC_HASH(a)	(&__atomic_hash[(((unsigned long)a)>>8) & (ATOMIC_HASH_SIZE-1)])

spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] = {
	[0 ... (ATOMIC_HASH_SIZE-1)] = __SPIN_LOCK_UNLOCKED(__atomic_hash)
};

#else /* SMP */

static DEFINE_SPINLOCK(dummy);
#define ATOMIC_HASH_SIZE	1
#define ATOMIC_HASH(a)	(&dummy)

#endif /* SMP */

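/*
 * ATOMIC_FETCH_OP() expands to atomic_fetch_<op>(): apply <op> to the
 * counter under the hashed lock, with interrupts disabled, and return
 * the value the counter held *before* the operation.
 */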
#define ATOMIC_FETCH_OP(op, c_op)					\
int atomic_fetch_##op(int i, atomic_t *v)				\
{									\
	int ret;							\
	unsigned long flags;						\
	spin_lock_irqsave(ATOMIC_HASH(v), flags);			\
									\
	ret = v->counter;						\
	v->counter c_op i;						\
									\
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);			\
	return ret;							\
}									\
EXPORT_SYMBOL(atomic_fetch_##op);

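/*
 * ATOMIC_OP_RETURN() expands to atomic_<op>_return(): like the fetch
 * variant above, but returns the *new* value of the counter.
 */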
#define ATOMIC_OP_RETURN(op, c_op)					\
int atomic_##op##_return(int i, atomic_t *v)			\
{									\
	int ret;							\
	unsigned long flags;						\
	spin_lock_irqsave(ATOMIC_HASH(v), flags);			\
									\
	ret = (v->counter c_op i);					\
									\
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);			\
	return ret;							\
}									\
EXPORT_SYMBOL(atomic_##op##_return);

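/*
 * Instantiate only the variants the rest of the kernel needs from this
 * file; the remaining atomics (inc, dec, sub, ...) are presumably
 * derived from these in the matching asm/atomic_32.h header, e.g. by
 * negating the operand of atomic_add_return().
 */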
ATOMIC_OP_RETURN(add, +=)

ATOMIC_FETCH_OP(add, +=)
ATOMIC_FETCH_OP(and, &=)
ATOMIC_FETCH_OP(or, |=)
ATOMIC_FETCH_OP(xor, ^=)

#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN

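/* Swap the counter for a new value, returning the old one. */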
int atomic_xchg(atomic_t *v, int new)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(ATOMIC_HASH(v), flags);
	ret = v->counter;
	v->counter = new;
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
	return ret;
}
EXPORT_SYMBOL(atomic_xchg);

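/*
 * Store @new only if the counter still holds @old; either way, return
 * what the counter held when the lock was taken.
 */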
int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(ATOMIC_HASH(v), flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;

	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
	return ret;
}
EXPORT_SYMBOL(atomic_cmpxchg);

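/*
 * Add @a to the counter unless it currently holds @u.  Returns the old
 * value, so callers can tell whether the add happened (ret != u).
 */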
int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(ATOMIC_HASH(v), flags);
	ret = v->counter;
	if (ret != u)
		v->counter += a;
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
	return ret;
}
EXPORT_SYMBOL(__atomic_add_unless);

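/*
 * atomic_set() cannot be a plain store here: it must take the same
 * hashed lock so it cannot race with a locked read-modify-write of the
 * same counter on another CPU.
 */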
/* Atomic operations are already serializing */
void atomic_set(atomic_t *v, int i)
{
	unsigned long flags;

	spin_lock_irqsave(ATOMIC_HASH(v), flags);
	v->counter = i;
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
}
EXPORT_SYMBOL(atomic_set);

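/*
 * The ___set_bit()/___clear_bit()/___change_bit() helpers back the
 * sparc32 bitops, including the test_and_* forms: each returns the old
 * state of the masked bits, not just the written word.
 */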
unsigned long ___set_bit(unsigned long *addr, unsigned long mask)
{
	unsigned long old, flags;

	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
	old = *addr;
	*addr = old | mask;
	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);

	return old & mask;
}
EXPORT_SYMBOL(___set_bit);

unsigned long ___clear_bit(unsigned long *addr, unsigned long mask)
{
	unsigned long old, flags;

	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
	old = *addr;
	*addr = old & ~mask;
	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);

	return old & mask;
}
EXPORT_SYMBOL(___clear_bit);

unsigned long ___change_bit(unsigned long *addr, unsigned long mask)
{
	unsigned long old, flags;

	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
	old = *addr;
	*addr = old ^ mask;
	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);

	return old & mask;
}
EXPORT_SYMBOL(___change_bit);

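/*
 * Word-sized cmpxchg on an arbitrary u32, used by the arch's cmpxchg()
 * wrapper: store @new only if *@ptr still equals @old, and return the
 * previous contents either way.
 */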
unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
{
	unsigned long flags;
	u32 prev;

	spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
	if ((prev = *ptr) == old)
		*ptr = new;
	spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);

	return (unsigned long)prev;
}
EXPORT_SYMBOL(__cmpxchg_u32);

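/* Unconditional word exchange, backing the arch's xchg() wrapper. */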
unsigned long __xchg_u32(volatile u32 *ptr, u32 new)
{
	unsigned long flags;
	u32 prev;

	spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
	prev = *ptr;
	*ptr = new;
	spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);

	return (unsigned long)prev;
}
EXPORT_SYMBOL(__xchg_u32);