/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * Do not include directly; use <linux/atomic.h>.
 */

#ifndef _ASM_TILE_ATOMIC_64_H
#define _ASM_TILE_ATOMIC_64_H

#ifndef __ASSEMBLY__

#include <asm/barrier.h>
#include <arch/spr_def.h>

/* First, the 32-bit atomic ops that are "real" on our 64-bit platform. */

#define atomic_set(v, i)	WRITE_ONCE((v)->counter, (i))

/*
 * The smp_mb() operations throughout are to support the fact that
 * Linux requires memory barriers before and after the operation,
 * on any routine which updates memory and returns a value.
 */
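
/*
 * For example, atomic_add() below returns void and so needs no barriers,
 * while atomic_add_return() gets its ordering from the explicit smp_mb()
 * beforehand plus the dependency on the fetchadd result (the "+ i")
 * afterwards.
 */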

static inline void atomic_add(int i, atomic_t *v)
{
	__insn_fetchadd4((void *)&v->counter, i);
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	int val;
	smp_mb();  /* barrier for proper semantics */
	val = __insn_fetchadd4((void *)&v->counter, i) + i;
	barrier();  /* the "+ i" above will wait on memory */
	return val;
}

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int guess, oldval = v->counter;
	do {
		if (oldval == u)
			break;
		guess = oldval;
		oldval = cmpxchg(&v->counter, guess, guess + a);
	} while (guess != oldval);
	return oldval;
}
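
/*
 * Illustrative sketch only: the generic atomic_add_unless() and
 * atomic_inc_not_zero() wrappers in <linux/atomic.h> are built on
 * __atomic_add_unless(), roughly as:
 *
 *	static inline int atomic_add_unless(atomic_t *v, int a, int u)
 *	{
 *		return __atomic_add_unless(v, a, u) != u;
 *	}
 */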

static inline void atomic_and(int i, atomic_t *v)
{
	__insn_fetchand4((void *)&v->counter, i);
}

static inline void atomic_or(int i, atomic_t *v)
{
	__insn_fetchor4((void *)&v->counter, i);
}

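/*
 * There is no fetch-and-xor instruction here (unlike fetchadd/fetchand/
 * fetchor), so atomic_xor() and atomic64_xor() below use a compare-exchange
 * loop instead: the expected old value is placed in the SPR_CMPEXCH_VALUE
 * special-purpose register, and __insn_cmpexch4() stores the new value only
 * if memory still holds that expected value, returning what was actually
 * there.
 */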
static inline void atomic_xor(int i, atomic_t *v)
{
	int guess, oldval = v->counter;
	do {
		guess = oldval;
		__insn_mtspr(SPR_CMPEXCH_VALUE, guess);
		oldval = __insn_cmpexch4(&v->counter, guess ^ i);
	} while (guess != oldval);
}

/* Now the true 64-bit operations. */

#define ATOMIC64_INIT(i)	{ (i) }

#define atomic64_read(v)	READ_ONCE((v)->counter)
#define atomic64_set(v, i)	WRITE_ONCE((v)->counter, (i))
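
/*
 * Aligned 64-bit loads and stores are naturally atomic on this 64-bit
 * platform, so atomic64_read()/atomic64_set() need nothing beyond
 * READ_ONCE()/WRITE_ONCE() to keep the compiler from tearing the access.
 */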

static inline void atomic64_add(long i, atomic64_t *v)
{
	__insn_fetchadd((void *)&v->counter, i);
}

static inline long atomic64_add_return(long i, atomic64_t *v)
{
	long val;  /* "long", not "int": the 64-bit result must not be truncated */
	smp_mb();  /* barrier for proper semantics */
	val = __insn_fetchadd((void *)&v->counter, i) + i;
	barrier();  /* the "+ i" above will wait on memory */
	return val;
}

static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long guess, oldval = v->counter;
	do {
		if (oldval == u)
			break;
		guess = oldval;
		oldval = cmpxchg(&v->counter, guess, guess + a);
	} while (guess != oldval);
	return oldval != u;
}
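
/*
 * Note the asymmetry with __atomic_add_unless() above: this 64-bit variant
 * returns non-zero if the add was performed (i.e. the old value was not "u"),
 * which is what atomic64_inc_not_zero() below expects, rather than returning
 * the old value itself.
 */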

static inline void atomic64_and(long i, atomic64_t *v)
{
	__insn_fetchand((void *)&v->counter, i);
}

static inline void atomic64_or(long i, atomic64_t *v)
{
	__insn_fetchor((void *)&v->counter, i);
}

static inline void atomic64_xor(long i, atomic64_t *v)
{
	long guess, oldval = v->counter;
	do {
		guess = oldval;
		__insn_mtspr(SPR_CMPEXCH_VALUE, guess);
		oldval = __insn_cmpexch(&v->counter, guess ^ i);
	} while (guess != oldval);
}

#define atomic64_sub_return(i, v)	atomic64_add_return(-(i), (v))
#define atomic64_sub(i, v)		atomic64_add(-(i), (v))
#define atomic64_inc_return(v)		atomic64_add_return(1, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1, (v))
#define atomic64_inc(v)			atomic64_add(1, (v))
#define atomic64_dec(v)			atomic64_sub(1, (v))

#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i, v)	(atomic64_sub_return((i), (v)) == 0)
#define atomic64_add_negative(i, v)	(atomic64_add_return((i), (v)) < 0)

#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)
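
/*
 * Usage sketch (illustrative only; "obj", "refcnt" and "free_obj" are made
 * up): the inc_not_zero/dec_and_test pair gives the usual refcounting idiom:
 *
 *	if (!atomic64_inc_not_zero(&obj->refcnt))
 *		return NULL;
 *	...
 *	if (atomic64_dec_and_test(&obj->refcnt))
 *		free_obj(obj);
 */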

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_ATOMIC_64_H */