#ifndef __ASM_SH_ATOMIC_H
#define __ASM_SH_ATOMIC_H

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 */

typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)	( (atomic_t) { (i) } )

#define atomic_read(v)		((v)->counter)
#define atomic_set(v,i)		((v)->counter = (i))

#include <asm/system.h>

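/*
 * Minimal usage sketch (illustrative only, not part of this header):
 * atomic_t is intended for simple counters, e.g. tracking how many
 * users a hypothetical resource currently has:
 *
 *	static atomic_t nr_users = ATOMIC_INIT(0);
 *
 *	atomic_inc(&nr_users);
 *	printk("users: %d\n", atomic_read(&nr_users));
 *	atomic_dec(&nr_users);
 */
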
/*
 * The SH port implements these operations by disabling interrupts
 * around an ordinary read-modify-write, which makes them atomic with
 * respect to interrupts (and hence sufficient on a uniprocessor).
 */

static __inline__ void atomic_add(int i, atomic_t * v)
{
	unsigned long flags;

	local_irq_save(flags);
	*(long *)v += i;
	local_irq_restore(flags);
}

static __inline__ void atomic_sub(int i, atomic_t *v)
{
	unsigned long flags;

	local_irq_save(flags);
	*(long *)v -= i;
	local_irq_restore(flags);
}

static __inline__ int atomic_add_return(int i, atomic_t * v)
{
	unsigned long temp, flags;

	local_irq_save(flags);
	temp = *(long *)v;
	temp += i;
	*(long *)v = temp;
	local_irq_restore(flags);

	return temp;
}

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

static __inline__ int atomic_sub_return(int i, atomic_t * v)
{
	unsigned long temp, flags;

	local_irq_save(flags);
	temp = *(long *)v;
	temp -= i;
	*(long *)v = temp;
	local_irq_restore(flags);

	return temp;
}

#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic_inc_return(v) atomic_add_return(1,(v))

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)

#define atomic_inc(v) atomic_add(1,(v))
#define atomic_dec(v) atomic_sub(1,(v))

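/*
 * Illustrative sketch (not part of this header): the *_and_test
 * helpers are typically used for reference counting, where the last
 * user frees the object.  Assuming a hypothetical struct with an
 * atomic_t reference count:
 *
 *	struct foo {
 *		atomic_t refcnt;
 *	};
 *
 *	void put_foo(struct foo *f)
 *	{
 *		if (atomic_dec_and_test(&f->refcnt))
 *			kfree(f);
 *	}
 */
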
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	local_irq_restore(flags);

	return ret;
}

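/*
 * Illustrative sketch (not part of this header): atomic_cmpxchg()
 * returns the value that was previously in *v, and only stores 'new'
 * when that value equalled 'old'.  It is typically wrapped in a retry
 * loop; for example, a hypothetical helper that increments a counter
 * only while it is below some limit:
 *
 *	static inline int counter_inc_below(atomic_t *v, int limit)
 *	{
 *		int old;
 *
 *		do {
 *			old = atomic_read(v);
 *			if (old >= limit)
 *				return 0;
 *		} while (atomic_cmpxchg(v, old, old + 1) != old);
 *
 *		return 1;
 *	}
 */
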
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	int ret;
	unsigned long flags;

	local_irq_save(flags);
	ret = v->counter;
	if (ret != u)
		v->counter += a;
	local_irq_restore(flags);

	return ret != u;
}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

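/*
 * Illustrative sketch (not part of this header): atomic_add_unless()
 * adds 'a' only if the counter does not equal 'u', and returns
 * non-zero when the add actually happened.  atomic_inc_not_zero()
 * builds on it to take a new reference only while an object is still
 * live (refcount != 0), e.g. in a hypothetical lookup path:
 *
 *	struct foo *f = lookup_in_table(key);
 *
 *	if (f && !atomic_inc_not_zero(&f->refcnt))
 *		f = NULL;
 */
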
static __inline__ void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	unsigned long flags;

	local_irq_save(flags);
	*(long *)v &= ~mask;
	local_irq_restore(flags);
}

static __inline__ void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	unsigned long flags;

	local_irq_save(flags);
	*(long *)v |= mask;
	local_irq_restore(flags);
}

/* Atomic operations are already serializing on SH */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

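/*
 * Illustrative sketch (not part of this header): these macros let
 * generic code request a memory barrier next to an atomic_inc() or
 * atomic_dec() without paying twice on architectures where the atomic
 * op already provides the ordering.  A hypothetical caller might do:
 *
 *	obj->status = OBJ_DONE;
 *	smp_mb__before_atomic_dec();
 *	atomic_dec(&obj->pending);
 */
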
#include <asm-generic/atomic.h>
#endif /* __ASM_SH_ATOMIC_H */