/*
 *  linux/include/asm-arm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/config.h>
#include <linux/compiler.h>

typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

#define atomic_read(v)	((v)->counter)

#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.  Writing to 'v->counter'
 * without using the following operations WILL break the atomic
 * nature of these ops.
 */
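/*
 * For example (illustrative only): a bare 'v->counter++' compiles to a
 * separate load, add and store, so an update made by another CPU in
 * between can be lost; atomic_inc(v) below performs the same update
 * inside an ldrex/strex retry loop and is safe.
 */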
static inline void atomic_set(atomic_t *v, int i)
{
	unsigned long tmp;

	__asm__ __volatile__("@ atomic_set\n"
"1:	ldrex	%0, [%1]\n"
"	strex	%0, %2, [%1]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&v->counter), "r" (i)
	: "cc");
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ atomic_add_return\n"
"1:	ldrex	%0, [%2]\n"
"	add	%0, %0, %3\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	return result;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ atomic_sub_return\n"
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, %3\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	return result;
}

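/*
 * Atomically compare ptr->counter with 'old' and, if they are equal,
 * replace it with 'new'.  The value actually read is returned, so the
 * caller tests for success with (returned value == old).  'res' is
 * preset to 0 and only written by the conditional strexeq, so the loop
 * retries whenever the exclusive store was attempted but failed.
 */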
static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
	u32 oldval, res;

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex	%1, [%2]\n"
		"mov	%0, #0\n"
		"teq	%1, %3\n"
		"strexeq %0, %4, [%2]\n"
		    : "=&r" (res), "=&r" (oldval)
		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
		    : "cc");
	} while (res);

	return oldval;
}

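/*
 * Atomically clear the bits set in 'mask' at '*addr', again using an
 * ldrex/strex retry loop so a concurrent update cannot be lost.
 */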
static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__("@ atomic_clear_mask\n"
"1:	ldrex	%0, [%2]\n"
"	bic	%0, %0, %3\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (addr), "Ir" (mask)
	: "cc");
}

#else /* ARM_ARCH_6 */

#include <asm/system.h>

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif

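/*
 * Pre-ARMv6 CPUs have no exclusive load/store, but they are also UP only
 * (see the #error above), so disabling interrupts around the
 * read-modify-write sequence is enough to make these operations atomic.
 * A plain store suffices for atomic_set().
 */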
#define atomic_set(v,i)	(((v)->counter) = (i))

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	local_irq_save(flags);
	val = v->counter;
	v->counter = val += i;
	local_irq_restore(flags);

	return val;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	local_irq_save(flags);
	val = v->counter;
	v->counter = val -= i;
	local_irq_restore(flags);

	return val;
}

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	local_irq_restore(flags);

	return ret;
}

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long flags;

	local_irq_save(flags);
	*addr &= ~mask;
	local_irq_restore(flags);
}

#endif /* __LINUX_ARM_ARCH__ */

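/*
 * Atomically add 'a' to 'v', but only if 'v' was not already 'u'.
 * Returns non-zero if the add happened, zero otherwise.  Built on
 * atomic_cmpxchg(): re-read and retry whenever the counter changed
 * between the read and the compare-and-swap.
 */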
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
		c = old;
	return c != u;
}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
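/*
 * Illustrative use only (the 'obj'/'refcount' names are not part of this
 * file): a lookup that must not resurrect a dying object can take a
 * reference with
 *
 *	if (!atomic_inc_not_zero(&obj->refcount))
 *		return NULL;
 */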

#define atomic_add(i, v)	(void) atomic_add_return(i, v)
#define atomic_inc(v)		(void) atomic_add_return(1, v)
#define atomic_sub(i, v)	(void) atomic_sub_return(i, v)
#define atomic_dec(v)		(void) atomic_sub_return(1, v)

#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)	(atomic_add_return(1, v))
#define atomic_dec_return(v)	(atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)

/* Atomic operations are already serializing on ARM */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#endif
#endif