/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * Do not include directly; use <linux/atomic.h>.
 */

#ifndef _ASM_TILE_ATOMIC_32_H
#define _ASM_TILE_ATOMIC_32_H

#include <asm/barrier.h>
#include <arch/chip.h>

#ifndef __ASSEMBLY__

/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	_atomic_xchg_add(&v->counter, i);
}

#define ATOMIC_OP(op)							\
unsigned long _atomic_##op(volatile unsigned long *p, unsigned long mask); \
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	_atomic_##op((unsigned long *)&v->counter, i);			\
}

ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)

#undef ATOMIC_OP
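
/*
 * For reference, ATOMIC_OP(and) above expands to:
 *
 *	unsigned long _atomic_and(volatile unsigned long *p,
 *				  unsigned long mask);
 *	static inline void atomic_and(int i, atomic_t *v)
 *	{
 *		_atomic_and((unsigned long *)&v->counter, i);
 *	}
 *
 * so each bitwise op is a thin inline wrapper around an out-of-line
 * helper (see the internal definitions below for the locking
 * machinery those helpers rely on).
 */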

/**
 * atomic_add_return - add integer and return
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic_xchg_add(&v->counter, i) + i;
}

/**
 * __atomic_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns the old value of @v.
 */
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic_xchg_add_unless(&v->counter, a, u);
}
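
/*
 * Note that __atomic_add_unless() returns the old value rather than a
 * success flag; the generic atomic_add_unless() in <linux/atomic.h>
 * layers the comparison on top, roughly like this sketch:
 *
 *	static inline int atomic_add_unless(atomic_t *v, int a, int u)
 *	{
 *		return __atomic_add_unless(v, a, u) != u;
 *	}
 */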

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @n: required value
 *
 * Atomically sets the value of @v to @n.
 *
 * atomic_set() can't be just a raw store, since it would be lost if it
 * fell between the load and store of one of the other atomic ops.
 */
static inline void atomic_set(atomic_t *v, int n)
{
	_atomic_xchg(&v->counter, n);
}
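
/*
 * To see why, consider this interleaving if atomic_set() were a plain
 * store while another cpu runs a load/store-style atomic op (a sketch;
 * the real helpers run out of line under the per-address atomic lock):
 *
 *	cpu 0: atomic_add(1, v)		cpu 1: atomic_set(v, 5)
 *	  tmp = v->counter;
 *					  v->counter = 5;
 *	  v->counter = tmp + 1;		  (the store of 5 is lost)
 *
 * Routing atomic_set() through _atomic_xchg() makes it serialize with
 * the other ops on the same lock, so the update cannot be lost.
 */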

/* A 64-bit atomic type */

typedef struct {
	long long counter;
} atomic64_t;

#define ATOMIC64_INIT(val) { (val) }

/**
 * atomic64_read - read atomic variable
 * @v: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 */
static inline long long atomic64_read(const atomic64_t *v)
{
	/*
	 * Requires an atomic op to read both 32-bit parts consistently.
	 * Casting away const is safe since the atomic support routines
	 * do not write to memory if the value has not been modified.
	 */
	return _atomic64_xchg_add((long long *)&v->counter, 0);
}

/**
 * atomic64_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic64_t
 *
 * Atomically adds @i to @v.
 */
static inline void atomic64_add(long long i, atomic64_t *v)
{
	_atomic64_xchg_add(&v->counter, i);
}

#define ATOMIC64_OP(op)						\
long long _atomic64_##op(long long *v, long long n);		\
static inline void atomic64_##op(long long i, atomic64_t *v)	\
{								\
	_atomic64_##op(&v->counter, i);				\
}

ATOMIC64_OP(and)
ATOMIC64_OP(or)
ATOMIC64_OP(xor)

#undef ATOMIC64_OP

/**
 * atomic64_add_return - add integer and return
 * @i: integer value to add
 * @v: pointer of type atomic64_t
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic64_xchg_add(&v->counter, i) + i;
}

/**
 * atomic64_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static inline long long atomic64_add_unless(atomic64_t *v, long long a,
					    long long u)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic64_xchg_add_unless(&v->counter, a, u) != u;
}

/**
 * atomic64_set - set atomic variable
 * @v: pointer of type atomic64_t
 * @n: required value
 *
 * Atomically sets the value of @v to @n.
 *
 * atomic64_set() can't be just a raw store, since it would be lost if it
 * fell between the load and store of one of the other atomic ops.
 */
static inline void atomic64_set(atomic64_t *v, long long n)
{
	_atomic64_xchg(&v->counter, n);
}

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_return(i, v)	atomic64_add_return(-(i), (v))
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_sub(i, v)		atomic64_add(-(i), (v))
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)
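
/*
 * Example (illustrative only; example_get_ref() is hypothetical):
 * atomic64_inc_not_zero() is the usual building block for "take a
 * reference unless the object is already dead":
 *
 *	static inline bool example_get_ref(atomic64_t *refcnt)
 *	{
 *		return atomic64_inc_not_zero(refcnt) != 0;
 *	}
 *
 * It refuses the increment (and returns zero) only when the count was
 * already zero, per atomic64_add_unless(v, 1LL, 0LL) above.
 */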

#endif /* !__ASSEMBLY__ */

/*
 * Internal definitions only beyond this point.
 */

/*
 * Number of atomic locks in atomic_locks[].  Must be a power of two.
 * There is no reason for more than PAGE_SIZE / 8 entries, since that
 * is the maximum number of pointer bits we can use to index this.
 * And we cannot have more than PAGE_SIZE / 4, since this has to
 * fit on a single page and each entry takes 4 bytes.
 */
#define ATOMIC_HASH_SHIFT	(PAGE_SHIFT - 3)
#define ATOMIC_HASH_SIZE	(1 << ATOMIC_HASH_SHIFT)

#ifndef __ASSEMBLY__
extern int atomic_locks[];
#endif
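
/*
 * A minimal sketch of one plausible hash (illustrative only; the real
 * routine is __atomic_hashed_lock(), declared below, and may hash
 * differently): use the address bits within the page, at long-long
 * granularity, to index atomic_locks[]:
 *
 *	static inline int *example_hashed_lock(volatile void *v)
 *	{
 *		unsigned long i = (unsigned long)v >> 3;
 *		return &atomic_locks[i & (ATOMIC_HASH_SIZE - 1)];
 *	}
 *
 * With ATOMIC_HASH_SHIFT = PAGE_SHIFT - 3, each 8-byte slot in a page
 * gets its own lock, which is why more than PAGE_SIZE / 8 entries
 * would buy nothing.
 */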

/*
 * All the code that may fault while holding an atomic lock must
 * place the pointer to the lock in ATOMIC_LOCK_REG so the fault code
 * can correctly release and reacquire the lock.  Note that we
 * mention the register number in a comment in "lib/atomic_asm_32.S" to
 * keep assembly coders from using this register by mistake, so if it
 * is changed here, change that comment as well.
 */
#define ATOMIC_LOCK_REG 20
#define ATOMIC_LOCK_REG_NAME r20

#ifndef __ASSEMBLY__
/* Called from setup to initialize a hash table to point to per_cpu locks. */
void __init_atomic_per_cpu(void);

#ifdef CONFIG_SMP
/* Support releasing the atomic lock in do_page_fault_ics(). */
void __atomic_fault_unlock(int *lock_ptr);
#endif

/* Return a pointer to the lock for the given address. */
int *__atomic_hashed_lock(volatile void *v);

/* Private helper routines in lib/atomic_asm_32.S */
struct __get_user {
	unsigned long val;
	int err;
};
extern struct __get_user __atomic_cmpxchg(volatile int *p,
					  int *lock, int o, int n);
extern struct __get_user __atomic_xchg(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xchg_add(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xchg_add_unless(volatile int *p,
						  int *lock, int o, int n);
extern struct __get_user __atomic_or(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_and(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n);
extern long long __atomic64_cmpxchg(volatile long long *p, int *lock,
				    long long o, long long n);
extern long long __atomic64_xchg(volatile long long *p, int *lock, long long n);
extern long long __atomic64_xchg_add(volatile long long *p, int *lock,
				     long long n);
extern long long __atomic64_xchg_add_unless(volatile long long *p,
					    int *lock, long long o, long long n);
extern long long __atomic64_and(volatile long long *p, int *lock, long long n);
extern long long __atomic64_or(volatile long long *p, int *lock, long long n);
extern long long __atomic64_xor(volatile long long *p, int *lock, long long n);
Chris Metcalf0707ad32010-06-25 17:04:17 -0400265
Chris Metcalf47d632f2012-03-29 13:39:51 -0400266/* Return failure from the atomic wrappers. */
267struct __get_user __atomic_bad_address(int __user *addr);
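
/*
 * Each helper hands back both the old value and an error indication,
 * so callers that touch user memory (e.g. the futex paths) can detect
 * a fault.  A caller might check the result like this sketch:
 *
 *	struct __get_user ret = __atomic_xchg_add(p, lock, n);
 *	if (ret.err)
 *		return -EFAULT;
 *	old = ret.val;
 */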

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_ATOMIC_32_H */