/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 *
 * Do not include directly; use <linux/atomic.h>.
 */

#ifndef _ASM_TILE_ATOMIC_64_H
#define _ASM_TILE_ATOMIC_64_H

#ifndef __ASSEMBLY__

#include <asm/barrier.h>
#include <arch/spr_def.h>

/* First, the 32-bit atomic ops that are "real" on our 64-bit platform. */

#define atomic_set(v, i) WRITE_ONCE((v)->counter, (i))

/*
 * The smp_mb() operations throughout are to support the fact that
 * Linux requires memory barriers before and after the operation,
 * on any routine which updates memory and returns a value.
 */

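/*
 * atomic_add() returns no value, so by the rule above it needs no
 * barriers; the fetchadd4 instruction performs the read-modify-write
 * atomically.
 */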
static inline void atomic_add(int i, atomic_t *v)
{
	__insn_fetchadd4((void *)&v->counter, i);
}

/*
 * Note a subtlety of the locking here. We are required to provide a
 * full memory barrier before and after the operation. However, we
 * only provide an explicit mb before the operation. After the
 * operation, we use barrier() to get a full mb for free, because:
 *
 * (1) The barrier directive to the compiler prohibits any instructions
 * being statically hoisted before the barrier;
 * (2) the microarchitecture will not issue any further instructions
 * until the fetchadd result is available for the "+ i" add instruction;
 * (3) the smp_mb() before the fetchadd ensures that no other memory
 * operations are in flight at this point.
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	int val;
	smp_mb();  /* barrier for proper semantics */
	val = __insn_fetchadd4((void *)&v->counter, i) + i;
	barrier();  /* equivalent to smp_mb(); see block comment above */
	return val;
}

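/*
 * Atomically add "a" to v->counter unless the counter currently equals
 * "u", using a cmpxchg() retry loop. Returns the value observed before
 * the (possible) add, so callers can compare it against "u" to tell
 * whether the add happened.
 */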
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int guess, oldval = v->counter;
	do {
		if (oldval == u)
			break;
		guess = oldval;
		oldval = cmpxchg(&v->counter, guess, guess + a);
	} while (guess != oldval);
	return oldval;
}

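/*
 * Bitwise AND and OR have dedicated fetchand4/fetchor4 instructions;
 * like atomic_add() they return no value, so no barriers are needed.
 */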
static inline void atomic_and(int i, atomic_t *v)
{
	__insn_fetchand4((void *)&v->counter, i);
}

static inline void atomic_or(int i, atomic_t *v)
{
	__insn_fetchor4((void *)&v->counter, i);
}

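/*
 * XOR has no fetch* instruction form here, so it is built from a
 * compare-and-exchange loop: the expected old value goes into
 * SPR_CMPEXCH_VALUE, and cmpexch4 stores the xor'ed value only if
 * the counter still holds that expected value.
 */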
static inline void atomic_xor(int i, atomic_t *v)
{
	int guess, oldval = v->counter;
	do {
		guess = oldval;
		__insn_mtspr(SPR_CMPEXCH_VALUE, guess);
		oldval = __insn_cmpexch4(&v->counter, guess ^ i);
	} while (guess != oldval);
}

/* Now the true 64-bit operations. */

#define ATOMIC64_INIT(i)	{ (i) }

#define atomic64_read(v)	READ_ONCE((v)->counter)
#define atomic64_set(v, i)	WRITE_ONCE((v)->counter, (i))

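/*
 * These mirror the 32-bit routines above, using the 8-byte
 * fetchadd/fetchand/fetchor/cmpexch forms of the instructions.
 */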
static inline void atomic64_add(long i, atomic64_t *v)
{
	__insn_fetchadd((void *)&v->counter, i);
}

static inline long atomic64_add_return(long i, atomic64_t *v)
{
	long val;
	smp_mb();  /* barrier for proper semantics */
	val = __insn_fetchadd((void *)&v->counter, i) + i;
	barrier();  /* equivalent to smp_mb; see atomic_add_return() */
	return val;
}

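/*
 * 64-bit version of the add-unless loop above. Note that this one
 * returns non-zero if the add was performed (i.e. the counter did not
 * equal "u"), rather than the old value.
 */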
static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long guess, oldval = v->counter;
	do {
		if (oldval == u)
			break;
		guess = oldval;
		oldval = cmpxchg(&v->counter, guess, guess + a);
	} while (guess != oldval);
	return oldval != u;
}

static inline void atomic64_and(long i, atomic64_t *v)
{
	__insn_fetchand((void *)&v->counter, i);
}

static inline void atomic64_or(long i, atomic64_t *v)
{
	__insn_fetchor((void *)&v->counter, i);
}

static inline void atomic64_xor(long i, atomic64_t *v)
{
	long guess, oldval = v->counter;
	do {
		guess = oldval;
		__insn_mtspr(SPR_CMPEXCH_VALUE, guess);
		oldval = __insn_cmpexch(&v->counter, guess ^ i);
	} while (guess != oldval);
}

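/*
 * The remaining 64-bit operations are derived from the primitives
 * above: sub is add of the negated value, and inc/dec are add/sub of 1.
 */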
#define atomic64_sub_return(i, v)	atomic64_add_return(-(i), (v))
#define atomic64_sub(i, v)		atomic64_add(-(i), (v))
#define atomic64_inc_return(v)		atomic64_add_return(1, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1, (v))
#define atomic64_inc(v)			atomic64_add(1, (v))
#define atomic64_dec(v)			atomic64_sub(1, (v))

#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i, v)	(atomic64_sub_return((i), (v)) == 0)
#define atomic64_add_negative(i, v)	(atomic64_add_return((i), (v)) < 0)

#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_ATOMIC_64_H */