/*
 *  linux/include/asm-arm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/config.h>

typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

#define atomic_read(v)	((v)->counter)

#if __LINUX_ARM_ARCH__ >= 6
/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.  Writing to 'v->counter'
 * without using the following operations WILL break the atomic
 * nature of these ops.
 */
static inline void atomic_set(atomic_t *v, int i)
{
	unsigned long tmp;

	__asm__ __volatile__("@ atomic_set\n"
"1:	ldrex	%0, [%1]\n"
"	strex	%0, %2, [%1]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&v->counter), "r" (i)
	: "cc");
}

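/*
 * Illustrative sketch, not part of the original header: the
 * ldrex/strex pair above behaves roughly like the loop below.
 * load_exclusive() and try_store_exclusive() are hypothetical
 * helpers standing in for the exclusive monitor; the store fails
 * (returns non-zero) if the location was touched since the load,
 * in which case the whole sequence is retried.
 */
#if 0
static inline void atomic_set_sketch(atomic_t *v, int i)
{
	do {
		(void)load_exclusive(&v->counter);	/* ldrex */
	} while (try_store_exclusive(&v->counter, i));	/* strex: 0 on success */
}
#endif
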
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ atomic_add_return\n"
"1:	ldrex	%0, [%2]\n"
"	add	%0, %0, %3\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	return result;
}

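/*
 * Usage sketch, not part of the original header: atomic_add_return()
 * hands back the post-add value, so a caller can update and test in
 * one atomic step.  'users' and first_user() are hypothetical.
 */
#if 0
	static atomic_t users = ATOMIC_INIT(0);

	if (atomic_add_return(1, &users) == 1)
		first_user();	/* we performed the 0 -> 1 transition */
#endif
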
static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ atomic_sub_return\n"
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, %3\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	return result;
}

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__("@ atomic_clear_mask\n"
"1:	ldrex	%0, [%2]\n"
"	bic	%0, %0, %3\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (addr), "Ir" (mask)
	: "cc");
}

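/*
 * Usage sketch, not part of the original header: atomic_clear_mask()
 * atomically performs *addr &= ~mask.
 */
#if 0
	unsigned long flag_word = 0x7;

	atomic_clear_mask(0x4, &flag_word);	/* flag_word is now 0x3 */
#endif
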
#else /* ARM_ARCH_6 */

#include <asm/system.h>

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif

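/*
 * Pre-ARMv6 cores have no load/store-exclusive instructions, so the
 * read-modify-write sequences below are made atomic on UP by briefly
 * disabling interrupts around them.
 */
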
#define atomic_set(v,i)	(((v)->counter) = (i))

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	local_irq_save(flags);
	val = v->counter;
	v->counter = val += i;
	local_irq_restore(flags);

	return val;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	local_irq_save(flags);
	val = v->counter;
	v->counter = val -= i;
	local_irq_restore(flags);

	return val;
}

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long flags;

	local_irq_save(flags);
	*addr &= ~mask;
	local_irq_restore(flags);
}

#endif /* __LINUX_ARM_ARCH__ */

#define atomic_add(i, v)	(void) atomic_add_return(i, v)
#define atomic_inc(v)		(void) atomic_add_return(1, v)
#define atomic_sub(i, v)	(void) atomic_sub_return(i, v)
#define atomic_dec(v)		(void) atomic_sub_return(1, v)

#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)	(atomic_add_return(1, v))
#define atomic_dec_return(v)	(atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)

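/*
 * Usage sketch, not part of the original header: the classic
 * reference-count release pattern built from the helpers above.
 * 'obj', its 'refcount' member and put_object() are hypothetical.
 */
#if 0
	if (atomic_dec_and_test(&obj->refcount))
		put_object(obj);	/* last reference dropped */
#endif
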
/* Atomic operations are already serializing on ARM */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#endif /* __KERNEL__ */
#endif /* __ASM_ARM_ATOMIC_H */