#ifndef __ARCH_M68KNOMMU_ATOMIC__
#define __ARCH_M68KNOMMU_ATOMIC__

#include <asm/system.h>	/* local_irq_XXX() */

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 */

/*
 * We do not have SMP m68k systems, so we don't have to deal with that.
 */

typedef struct { int counter; } atomic_t;
#define ATOMIC_INIT(i)	{ (i) }

#define atomic_read(v)		((v)->counter)
#define atomic_set(v, i)	(((v)->counter) = (i))

static __inline__ void atomic_add(int i, atomic_t *v)
{
#ifdef CONFIG_COLDFIRE
	/* ColdFire has no immediate-to-memory add, so the addend must
	 * live in a data register; plain m68k can also take an immediate. */
	__asm__ __volatile__("addl %1,%0" : "+m" (*v) : "d" (i));
#else
	__asm__ __volatile__("addl %1,%0" : "+m" (*v) : "di" (i));
#endif
}

static __inline__ void atomic_sub(int i, atomic_t *v)
{
#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__("subl %1,%0" : "+m" (*v) : "d" (i));
#else
	__asm__ __volatile__("subl %1,%0" : "+m" (*v) : "di" (i));
#endif
}
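
/*
 * Illustrative sketch only (kept out of the build with #if 0): how
 * atomic_add()/atomic_sub() might back a simple event counter.  The
 * names pkt_count, rx_packets and drop_packets are hypothetical.
 */
#if 0
static atomic_t pkt_count = ATOMIC_INIT(0);

static void rx_packets(int n)
{
	atomic_add(n, &pkt_count);	/* account n received packets */
}

static void drop_packets(int n)
{
	atomic_sub(n, &pkt_count);	/* undo the accounting */
}
#endif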

static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
{
	char c;
#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__("subl %2,%1; seq %0"
			     : "=d" (c), "+m" (*v)
			     : "d" (i));
#else
	__asm__ __volatile__("subl %2,%1; seq %0"
			     : "=d" (c), "+m" (*v)
			     : "di" (i));
#endif
	return c != 0;
}

static __inline__ void atomic_inc(volatile atomic_t *v)
{
	__asm__ __volatile__("addql #1,%0" : "+m" (*v));
}

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static __inline__ int atomic_inc_and_test(volatile atomic_t *v)
{
	char c;
	__asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}

static __inline__ void atomic_dec(volatile atomic_t *v)
{
	__asm__ __volatile__("subql #1,%0" : "+m" (*v));
}

static __inline__ int atomic_dec_and_test(volatile atomic_t *v)
{
	char c;
	__asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}
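
/*
 * Illustrative sketch only (kept out of the build with #if 0): the
 * classic reference-count pattern these primitives serve.  struct foo,
 * foo_get() and foo_put() are hypothetical; kfree() assumes the usual
 * slab allocator is available.
 */
#if 0
struct foo {
	atomic_t refcnt;
	/* ... payload ... */
};

static void foo_get(struct foo *f)
{
	atomic_inc(&f->refcnt);			/* take a reference */
}

static void foo_put(struct foo *f)
{
	if (atomic_dec_and_test(&f->refcnt))	/* dropped the last one? */
		kfree(f);
}
#endif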

static __inline__ void atomic_clear_mask(unsigned long mask, unsigned long *v)
{
	__asm__ __volatile__("andl %1,%0" : "+m" (*v) : "id" (~(mask)));
}

static __inline__ void atomic_set_mask(unsigned long mask, unsigned long *v)
{
	__asm__ __volatile__("orl %1,%0" : "+m" (*v) : "id" (mask));
}
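
/*
 * Illustrative sketch only (kept out of the build with #if 0): note that
 * the mask operations act on a plain unsigned long of flag bits, not on
 * an atomic_t.  FLAG_BUSY, dev_flags, mark_busy() and mark_idle() are
 * hypothetical.
 */
#if 0
#define FLAG_BUSY	0x01

static unsigned long dev_flags;

static void mark_busy(void)
{
	atomic_set_mask(FLAG_BUSY, &dev_flags);		/* set the bit */
}

static void mark_idle(void)
{
	atomic_clear_mask(FLAG_BUSY, &dev_flags);	/* clear it again */
}
#endif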

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

static __inline__ int atomic_add_return(int i, atomic_t *v)
{
	int temp;
	unsigned long flags;

	/* No SMP here; masking interrupts makes the read-modify-write atomic. */
	local_irq_save(flags);
	temp = v->counter;
	temp += i;
	v->counter = temp;
	local_irq_restore(flags);

	return temp;
}

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
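
/*
 * Illustrative sketch only (kept out of the build with #if 0):
 * atomic_add_negative() reports whether the sum dropped below zero,
 * e.g. for a simple credit pool.  credits and consume_credit() are
 * hypothetical.
 */
#if 0
static atomic_t credits = ATOMIC_INIT(10);

/* Returns nonzero while credit remains, zero once the pool is overdrawn. */
static int consume_credit(void)
{
	return !atomic_add_negative(-1, &credits);
}
#endif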

static __inline__ int atomic_sub_return(int i, atomic_t *v)
{
	int temp;
	unsigned long flags;

	local_irq_save(flags);
	temp = v->counter;
	temp -= i;
	v->counter = temp;
	local_irq_restore(flags);

	return temp;
}

#define atomic_dec_return(v)	atomic_sub_return(1, (v))
#define atomic_inc_return(v)	atomic_add_return(1, (v))
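
/*
 * Illustrative sketch only (kept out of the build with #if 0):
 * atomic_inc_return() is a convenient way to hand out monotonically
 * increasing ids.  next_id and new_id() are hypothetical.
 */
#if 0
static atomic_t next_id = ATOMIC_INIT(0);

static int new_id(void)
{
	return atomic_inc_return(&next_id);	/* first caller gets 1 */
}
#endif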

#endif /* __ARCH_M68KNOMMU_ATOMIC__ */