/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * Do not include directly; use <linux/atomic.h>.
 */

#ifndef _ASM_TILE_ATOMIC_64_H
#define _ASM_TILE_ATOMIC_64_H

#ifndef __ASSEMBLY__

#include <asm/barrier.h>
#include <arch/spr_def.h>

/* First, the 32-bit atomic ops that are "real" on our 64-bit platform. */

#define atomic_set(v, i) WRITE_ONCE((v)->counter, (i))

/*
 * The smp_mb() operations throughout are to support the fact that
 * Linux requires memory barriers before and after the operation,
 * on any routine which updates memory and returns a value.
 */

/*
 * Note a subtlety of the locking here.  We are required to provide a
 * full memory barrier before and after the operation.  However, we
 * only provide an explicit mb before the operation.  After the
 * operation, we use barrier() to get a full mb for free, because:
 *
 * (1) The barrier directive to the compiler prohibits any instructions
 * being statically hoisted before the barrier;
 * (2) the microarchitecture will not issue any further instructions
 * until the fetchadd result is available for the "+ i" add instruction;
 * (3) the smp_mb() before the fetchadd ensures that no other memory
 * operations are in flight at this point.
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	int val;
	smp_mb();  /* barrier for proper semantics */
	val = __insn_fetchadd4((void *)&v->counter, i) + i;
	barrier();  /* equivalent to smp_mb(); see block comment above */
	return val;
}
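
/*
 * Illustrative example: __insn_fetchadd4() returns the value the
 * counter held *before* the add (which is what atomic_fetch_add()
 * below relies on), so the "+ i" above recomputes the post-add value
 * locally.  With v->counter at 10, atomic_add_return(5, v) leaves 15
 * in memory, the fetchadd returns 10, and the function returns 15.
 */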

#define ATOMIC_OPS(op)						\
static inline int atomic_fetch_##op(int i, atomic_t *v)	\
{								\
	int val;						\
	smp_mb();						\
	val = __insn_fetch##op##4((void *)&v->counter, i);	\
	smp_mb();						\
	return val;						\
}								\
static inline void atomic_##op(int i, atomic_t *v)		\
{								\
	__insn_fetch##op##4((void *)&v->counter, i);		\
}

ATOMIC_OPS(add)
ATOMIC_OPS(and)
ATOMIC_OPS(or)

#undef ATOMIC_OPS
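
/*
 * For reference, ATOMIC_OPS(add) above expands to roughly:
 *
 *	static inline int atomic_fetch_add(int i, atomic_t *v)
 *	{
 *		int val;
 *		smp_mb();
 *		val = __insn_fetchadd4((void *)&v->counter, i);
 *		smp_mb();
 *		return val;
 *	}
 *	static inline void atomic_add(int i, atomic_t *v)
 *	{
 *		__insn_fetchadd4((void *)&v->counter, i);
 *	}
 *
 * ATOMIC_OPS(and) and ATOMIC_OPS(or) generate atomic_fetch_and(),
 * atomic_and(), atomic_fetch_or() and atomic_or() the same way, backed
 * by __insn_fetchand4() and __insn_fetchor4() respectively.
 */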

static inline int atomic_fetch_xor(int i, atomic_t *v)
{
	int guess, oldval = v->counter;
	smp_mb();
	do {
		guess = oldval;
		__insn_mtspr(SPR_CMPEXCH_VALUE, guess);
		oldval = __insn_cmpexch4(&v->counter, guess ^ i);
	} while (guess != oldval);
	smp_mb();
	return oldval;
}

static inline void atomic_xor(int i, atomic_t *v)
{
	int guess, oldval = v->counter;
	do {
		guess = oldval;
		__insn_mtspr(SPR_CMPEXCH_VALUE, guess);
		oldval = __insn_cmpexch4(&v->counter, guess ^ i);
	} while (guess != oldval);
}
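
/*
 * Note on the two xor routines above: unlike add, and, and or, xor is
 * not implemented here with a single fetch instruction; it is built
 * from a compare-exchange loop.  The expected old value is placed in
 * SPR_CMPEXCH_VALUE, __insn_cmpexch4() stores "guess ^ i" only if the
 * counter still holds that value and returns whatever it actually
 * observed, and the loop retries until the observed value matches the
 * guess.
 */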

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int guess, oldval = v->counter;
	do {
		if (oldval == u)
			break;
		guess = oldval;
		oldval = cmpxchg(&v->counter, guess, guess + a);
	} while (guess != oldval);
	return oldval;
}
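
/*
 * __atomic_add_unless() returns the value it observed in v->counter
 * before any add took place; the generic atomic_add_unless() wrapper
 * in <linux/atomic.h> turns that into a "did we add?" result by
 * comparing the return value against u.
 */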

/* Now the true 64-bit operations. */

#define ATOMIC64_INIT(i)	{ (i) }

#define atomic64_read(v)	READ_ONCE((v)->counter)
#define atomic64_set(v, i)	WRITE_ONCE((v)->counter, (i))

static inline long atomic64_add_return(long i, atomic64_t *v)
{
	long val;
	smp_mb();  /* barrier for proper semantics */
	val = __insn_fetchadd((void *)&v->counter, i) + i;
	barrier();  /* equivalent to smp_mb; see atomic_add_return() */
	return val;
}

#define ATOMIC64_OPS(op)					\
static inline long atomic64_fetch_##op(long i, atomic64_t *v)	\
{								\
	long val;						\
	smp_mb();						\
	val = __insn_fetch##op((void *)&v->counter, i);		\
	smp_mb();						\
	return val;						\
}								\
static inline void atomic64_##op(long i, atomic64_t *v)	\
{								\
	__insn_fetch##op((void *)&v->counter, i);		\
}

ATOMIC64_OPS(add)
ATOMIC64_OPS(and)
ATOMIC64_OPS(or)

#undef ATOMIC64_OPS

static inline long atomic64_fetch_xor(long i, atomic64_t *v)
{
	long guess, oldval = v->counter;
	smp_mb();
	do {
		guess = oldval;
		__insn_mtspr(SPR_CMPEXCH_VALUE, guess);
		oldval = __insn_cmpexch(&v->counter, guess ^ i);
	} while (guess != oldval);
	smp_mb();
	return oldval;
}

static inline void atomic64_xor(long i, atomic64_t *v)
{
	long guess, oldval = v->counter;
	do {
		guess = oldval;
		__insn_mtspr(SPR_CMPEXCH_VALUE, guess);
		oldval = __insn_cmpexch(&v->counter, guess ^ i);
	} while (guess != oldval);
}

static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long guess, oldval = v->counter;
	do {
		if (oldval == u)
			break;
		guess = oldval;
		oldval = cmpxchg(&v->counter, guess, guess + a);
	} while (guess != oldval);
	return oldval != u;
}
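
/*
 * atomic64_add_unless() returns nonzero iff it performed the add;
 * atomic64_inc_not_zero() below relies on that to report whether the
 * counter was actually incremented from a nonzero value.
 */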

#define atomic64_sub_return(i, v)	atomic64_add_return(-(i), (v))
#define atomic64_fetch_sub(i, v)	atomic64_fetch_add(-(i), (v))
#define atomic64_sub(i, v)		atomic64_add(-(i), (v))
#define atomic64_inc_return(v)		atomic64_add_return(1, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1, (v))
#define atomic64_inc(v)			atomic64_add(1, (v))
#define atomic64_dec(v)			atomic64_sub(1, (v))

#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i, v)	(atomic64_sub_return((i), (v)) == 0)
#define atomic64_add_negative(i, v)	(atomic64_add_return((i), (v)) < 0)

#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)
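
/*
 * Usage sketch (hypothetical "obj" with a 64-bit refcount):
 *
 *	if (atomic64_dec_and_test(&obj->refcount))
 *		release(obj);
 *
 * atomic64_dec_and_test() expands to atomic64_sub_return(1, v) == 0,
 * i.e. atomic64_add_return(-1, v) == 0, so the decrement and the zero
 * test are carried out with a single atomic fetchadd plus barriers.
 */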
197
Chris Metcalf18aecc22011-05-04 14:38:26 -0400198#endif /* !__ASSEMBLY__ */
199
200#endif /* _ASM_TILE_ATOMIC_64_H */