#ifndef __ASM_SH64_ATOMIC_H
#define __ASM_SH64_ATOMIC_H

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * include/asm-sh64/atomic.h
 *
 * Copyright (C) 2000, 2001 Paolo Alberelli
 * Copyright (C) 2003 Paul Mundt
 *
 */

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 */

typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)	( (atomic_t) { (i) } )

#define atomic_read(v)		((v)->counter)
#define atomic_set(v,i)		((v)->counter = (i))
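
/*
 * Minimal usage sketch (not part of the original header; the counter
 * name below is hypothetical).  An atomic_t is initialised with
 * ATOMIC_INIT() or atomic_set() before it is read or modified:
 *
 *	static atomic_t nr_users = ATOMIC_INIT(0);
 *
 *	atomic_set(&nr_users, 5);
 *	printk("%d users\n", atomic_read(&nr_users));
 */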

#include <asm/system.h>

/*
 * These operations are implemented by disabling local interrupts
 * around a plain read-modify-write of the counter, which serializes
 * them against anything else running on the same CPU.
 */

static __inline__ void atomic_add(int i, atomic_t * v)
{
	unsigned long flags;

	local_irq_save(flags);
	*(long *)v += i;
	local_irq_restore(flags);
}

static __inline__ void atomic_sub(int i, atomic_t *v)
{
	unsigned long flags;

	local_irq_save(flags);
	*(long *)v -= i;
	local_irq_restore(flags);
}

static __inline__ int atomic_add_return(int i, atomic_t * v)
{
	unsigned long temp, flags;

	local_irq_save(flags);
	temp = *(long *)v;
	temp += i;
	*(long *)v = temp;
	local_irq_restore(flags);

	return temp;
}

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

static __inline__ int atomic_sub_return(int i, atomic_t * v)
{
	unsigned long temp, flags;

	local_irq_save(flags);
	temp = *(long *)v;
	temp -= i;
	*(long *)v = temp;
	local_irq_restore(flags);

	return temp;
}

#define atomic_dec_return(v)	atomic_sub_return(1,(v))
#define atomic_inc_return(v)	atomic_add_return(1,(v))
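
/*
 * A sketch (the "seq" counter is hypothetical, not part of this header):
 * atomic_inc_return() hands back the value after the increment, so it
 * can be used to hand out unique, monotonically increasing ids:
 *
 *	static atomic_t seq = ATOMIC_INIT(0);
 *
 *	int id = atomic_inc_return(&seq);	(first caller gets 1)
 */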

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v)	(atomic_inc_return(v) == 0)

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)

#define atomic_inc(v)	atomic_add(1,(v))
#define atomic_dec(v)	atomic_sub(1,(v))
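
/*
 * Typical reference-counting pattern built on these helpers (a sketch
 * only; "obj", "refcnt" and obj_release() are hypothetical names):
 *
 *	atomic_inc(&obj->refcnt);			take a reference
 *	...
 *	if (atomic_dec_and_test(&obj->refcnt))		last reference gone
 *		obj_release(obj);
 */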

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	local_irq_restore(flags);

	return ret;
}
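
/*
 * atomic_cmpxchg() returns the value the counter held before the call,
 * so callers normally re-read and retry until it matches.  A sketch of
 * "add a to v unless v equals u" (hypothetical helper, not part of this
 * header):
 *
 *	static inline int atomic_add_unless_sketch(atomic_t *v, int a, int u)
 *	{
 *		int c, old;
 *
 *		c = atomic_read(v);
 *		while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
 *			c = old;
 *		return c != u;
 *	}
 */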
115
Linus Torvalds1da177e2005-04-16 15:20:36 -0700116static __inline__ void atomic_clear_mask(unsigned int mask, atomic_t *v)
117{
118 unsigned long flags;
119
120 local_irq_save(flags);
121 *(long *)v &= ~mask;
122 local_irq_restore(flags);
123}
124
125static __inline__ void atomic_set_mask(unsigned int mask, atomic_t *v)
126{
127 unsigned long flags;
128
129 local_irq_save(flags);
130 *(long *)v |= mask;
131 local_irq_restore(flags);
132}
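
/*
 * The mask helpers treat the counter as a word of flag bits.  A hedged
 * example (FLAG_BUSY and "state" are made-up names):
 *
 *	#define FLAG_BUSY	(1 << 0)
 *	static atomic_t state = ATOMIC_INIT(0);
 *
 *	atomic_set_mask(FLAG_BUSY, &state);		mark busy
 *	atomic_clear_mask(FLAG_BUSY, &state);		clear it again
 */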

/* Atomic operations are already serializing on SH */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#endif /* __ASM_SH64_ATOMIC_H */