/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _ASM_ARC_ATOMIC_H
#define _ASM_ARC_ATOMIC_H

#ifdef __KERNEL__

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/smp.h>

#define atomic_read(v)	((v)->counter)

#ifdef CONFIG_ARC_HAS_LLSC

#define atomic_set(v, i) (((v)->counter) = (i))

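/*
 * Hardware assisted Atomic-R-M-W: LLOCK loads the word and marks it for
 * exclusive access; SCOND stores the updated value only if nothing else
 * wrote the location in the meantime, leaving the status flag clear on
 * failure, in which case BNZ retries the whole load-op-store sequence.
 */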
#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned int temp;						\
									\
	__asm__ __volatile__(						\
	"1:	llock   %0, [%1]	\n"				\
	"	" #asm_op " %0, %0, %2	\n"				\
	"	scond   %0, [%1]	\n"				\
	"	bnz     1b		\n"				\
	: "=&r"(temp)	/* Early clobber, to prevent reg reuse */	\
	: "r"(&v->counter), "ir"(i)					\
	: "cc");							\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned int temp;						\
									\
	__asm__ __volatile__(						\
	"1:	llock   %0, [%1]	\n"				\
	"	" #asm_op " %0, %0, %2	\n"				\
	"	scond   %0, [%1]	\n"				\
	"	bnz     1b		\n"				\
	: "=&r"(temp)							\
	: "r"(&v->counter), "ir"(i)					\
	: "cc");							\
									\
	return temp;							\
}

#else	/* !CONFIG_ARC_HAS_LLSC */

#ifndef CONFIG_SMP

 /* violating atomic_xxx API locking protocol in UP for optimization sake */
#define atomic_set(v, i) (((v)->counter) = (i))

#else

static inline void atomic_set(atomic_t *v, int i)
{
	/*
	 * Independent of hardware support, all of the atomic_xxx() APIs need
	 * to follow the same locking rules to make sure that a "hardware"
	 * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
	 * sequence
	 *
	 * Thus atomic_set() despite being 1 insn (and seemingly atomic)
	 * requires the locking.
	 */
	unsigned long flags;

	atomic_ops_lock(flags);
	v->counter = i;
	atomic_ops_unlock(flags);
}

#endif

/*
 * Non hardware assisted Atomic-R-M-W
 * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
 */

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	atomic_ops_lock(flags);						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	unsigned long temp;						\
									\
	atomic_ops_lock(flags);						\
	temp = v->counter;						\
	temp c_op i;							\
	v->counter = temp;						\
	atomic_ops_unlock(flags);					\
									\
	return temp;							\
}

#endif /* !CONFIG_ARC_HAS_LLSC */

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)

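/*
 * Instantiate the templates: each ATOMIC_OPS() line generates both
 * atomic_<op>() and atomic_<op>_return(); the lone ATOMIC_OP() line
 * generates only atomic_and().
 */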
ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
ATOMIC_OP(and, &=, and)

#define atomic_clear_mask(mask, v) atomic_and(~(mask), (v))

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v
 */
#define __atomic_add_unless(v, a, u)					\
({									\
	int c, old;							\
	c = atomic_read(v);						\
	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c)\
		c = old;						\
	c;								\
})

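/*
 * atomic_inc_not_zero() relies on the generic atomic_add_unless() wrapper,
 * which returns non-zero iff the add (here: the increment) was performed.
 */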
#define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)

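/*
 * The inc/dec and *_test helpers below are built purely from the add/sub
 * primitives generated above.
 */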
#define atomic_inc(v)			atomic_add(1, v)
#define atomic_dec(v)			atomic_sub(1, v)

#define atomic_inc_and_test(v)		(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)		atomic_add_return(1, (v))
#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_sub_and_test(i, v)	(atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i, v)	(atomic_add_return(i, v) < 0)

#define ATOMIC_INIT(i)			{ (i) }

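/*
 * No native 64-bit atomics here: fall back to the generic, spinlock
 * based atomic64_t implementation.
 */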
#include <asm-generic/atomic64.h>

#endif	/* !__ASSEMBLY__ */

#endif	/* __KERNEL__ */

#endif	/* _ASM_ARC_ATOMIC_H */