/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * Do not include directly; use <linux/atomic.h>.
 */

#ifndef _ASM_TILE_ATOMIC_64_H
#define _ASM_TILE_ATOMIC_64_H

#ifndef __ASSEMBLY__

#include <asm/barrier.h>
#include <arch/spr_def.h>

/* First, the 32-bit atomic ops that are "real" on our 64-bit platform. */

#define atomic_set(v, i) ((v)->counter = (i))

/*
 * The smp_mb() operations throughout are to support the fact that
 * Linux requires memory barriers before and after the operation,
 * on any routine which updates memory and returns a value.
 */
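
/*
 * The plain atomic_add()/atomic64_add() routines below update memory
 * but return no value, so per the rule above they need no barriers.
 */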

static inline void atomic_add(int i, atomic_t *v)
{
	__insn_fetchadd4((void *)&v->counter, i);
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	int val;
	smp_mb();  /* barrier for proper semantics */
	val = __insn_fetchadd4((void *)&v->counter, i) + i;
	barrier();  /* the "+ i" above will wait on memory */
	return val;
}
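
/*
 * Atomically add @a to @v, so long as @v was not already @u.
 * Returns the old value of @v; callers compare it against @u to see
 * whether the add happened.  Since fetchadd cannot skip the add when
 * the old value matches @u, this is built from a cmpxchg() retry loop.
 */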

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int guess, oldval = v->counter;
	do {
		if (oldval == u)
			break;
		guess = oldval;
		oldval = cmpxchg(&v->counter, guess, guess + a);
	} while (guess != oldval);
	return oldval;
}

/* Now the true 64-bit operations. */

#define ATOMIC64_INIT(i) { (i) }

#define atomic64_read(v) ((v)->counter)
#define atomic64_set(v, i) ((v)->counter = (i))

static inline void atomic64_add(long i, atomic64_t *v)
{
	__insn_fetchadd((void *)&v->counter, i);
}

static inline long atomic64_add_return(long i, atomic64_t *v)
{
	long val;
	smp_mb();  /* barrier for proper semantics */
	val = __insn_fetchadd((void *)&v->counter, i) + i;
	barrier();  /* the "+ i" above will wait on memory */
	return val;
}
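
/*
 * 64-bit counterpart of __atomic_add_unless(), but with a different
 * return convention: this returns non-zero if the add was performed
 * (i.e. @v did not already equal @u), zero otherwise.
 */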

static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long guess, oldval = v->counter;
	do {
		if (oldval == u)
			break;
		guess = oldval;
		oldval = cmpxchg(&v->counter, guess, guess + a);
	} while (guess != oldval);
	return oldval != u;
}

#define atomic64_sub_return(i, v) atomic64_add_return(-(i), (v))
#define atomic64_sub(i, v) atomic64_add(-(i), (v))
#define atomic64_inc_return(v) atomic64_add_return(1, (v))
#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
#define atomic64_inc(v) atomic64_add(1, (v))
#define atomic64_dec(v) atomic64_sub(1, (v))

#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v) (atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i, v) (atomic64_sub_return((i), (v)) == 0)
#define atomic64_add_negative(i, v) (atomic64_add_return((i), (v)) < 0)

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

/* Atomic dec and inc don't implement barrier, so provide them if needed. */
#define smp_mb__before_atomic_dec() smp_mb()
#define smp_mb__after_atomic_dec() smp_mb()
#define smp_mb__before_atomic_inc() smp_mb()
#define smp_mb__after_atomic_inc() smp_mb()
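
/*
 * For example, a (hypothetical) caller that needs its earlier stores
 * ordered before a reference-count decrement would write:
 *
 *	smp_mb__before_atomic_dec();
 *	atomic_dec(&obj->refcount);
 */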

/* Define this to indicate that cmpxchg is an efficient operation. */
#define __HAVE_ARCH_CMPXCHG

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_ATOMIC_64_H */