/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __ASM_ARC_CMPXCHG_H
#define __ASM_ARC_CMPXCHG_H

#include <linux/types.h>

#include <asm/barrier.h>
#include <asm/smp.h>

#ifdef CONFIG_ARC_HAS_LLSC

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
{
	unsigned long prev;

	/*
	 * Explicit full memory barrier needed before/after as
	 * LLOCK/SCOND themselves don't provide any such semantics
	 */
	smp_mb();

	__asm__ __volatile__(
	"1:	llock   %0, [%1]	\n"
	"	brne    %0, %2, 2f	\n"
	"	scond   %3, [%1]	\n"
	"	bnz     1b		\n"
	"2:				\n"
	: "=&r"(prev)	/* Early clobber, to prevent reg reuse */
	: "r"(ptr),	/* Not "m": llock only supports reg direct addr mode */
	  "ir"(expected),
	  "r"(new)	/* can't be "ir". scond can't take LIMM for "b" */
	: "cc", "memory"); /* so that gcc knows memory is being written here */

	smp_mb();

	return prev;
}
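
/*
 * Rough C model of the LLOCK/SCOND sequence above: a documentation-only
 * sketch (hence #if 0, never compiled), not part of the kernel API.
 * load_locked()/store_cond() are hypothetical stand-ins for the llock and
 * scond instructions: load the word with a reservation, bail out if it
 * doesn't hold @expected, otherwise attempt the conditional store and
 * retry the whole sequence if the reservation was lost.
 */
#if 0
static inline unsigned long __cmpxchg_model(volatile unsigned long *p,
					    unsigned long expected,
					    unsigned long new)
{
	unsigned long prev;

	do {
		prev = load_locked(p);		/* llock %0, [%1]          */
		if (prev != expected)
			break;			/* brne  %0, %2, 2f        */
	} while (!store_cond(p, new));		/* scond %3, [%1] + bnz 1b */

	return prev;
}
#endif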

#elif !defined(CONFIG_ARC_PLAT_EZNPS)

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
{
	unsigned long flags;
	int prev;
	volatile unsigned long *p = ptr;

	/*
	 * spin lock/unlock provide the needed smp_mb() before/after
	 */
	atomic_ops_lock(flags);
	prev = *p;
	if (prev == expected)
		*p = new;
	atomic_ops_unlock(flags);
	return prev;
}

#else /* CONFIG_ARC_PLAT_EZNPS */

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
{
	/*
	 * Explicit full memory barrier needed before/after
	 */
	smp_mb();

	write_aux_reg(CTOP_AUX_GPA1, expected);

	__asm__ __volatile__(
	"	mov r2, %0\n"
	"	mov r3, %1\n"
	"	.word %2\n"
	"	mov %0, r2"
	: "+r"(new)
	: "r"(ptr), "i"(CTOP_INST_EXC_DI_R2_R2_R3)
	: "r2", "r3", "memory");

	smp_mb();

	return new;
}

#endif /* CONFIG_ARC_HAS_LLSC */

#define cmpxchg(ptr, o, n) ((typeof(*(ptr)))__cmpxchg((ptr), \
				(unsigned long)(o), (unsigned long)(n)))

/*
 * atomic_cmpxchg is the same as cmpxchg
 *   LLSC: only the data type differs, the semantics are exactly the same
 *  !LLSC: cmpxchg() has to use an external lock (atomic_ops_lock) to guarantee
 *         the semantics, and this lock also happens to be used by atomic_*()
 */
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
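
/*
 * Usage sketch (illustrative only, not part of this header): the classic
 * cmpxchg() retry loop, shown once on a bare word and once on an atomic_t
 * via atomic_cmpxchg(); as noted above, only the data type differs. The
 * helper names are hypothetical.
 */
static inline void __cmpxchg_example_add(unsigned long *word, unsigned long a)
{
	unsigned long old;

	do {
		old = *word;	/* racy snapshot, revalidated by cmpxchg() */
	} while (cmpxchg(word, old, old + a) != old);	/* lost a race: retry */
}

static inline void __cmpxchg_example_atomic_add(atomic_t *v, int a)
{
	int old;

	do {
		old = v->counter;	/* same pattern, atomic_t flavour */
	} while (atomic_cmpxchg(v, old, old + a) != old);
}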


#ifndef CONFIG_ARC_PLAT_EZNPS

/*
 * xchg (reg with memory) based on "Native atomic" EX insn
 */
static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
				   int size)
{
	extern unsigned long __xchg_bad_pointer(void);

	switch (size) {
	case 4:
		smp_mb();

		__asm__ __volatile__(
		"	ex  %0, [%1]	\n"
		: "+r"(val)
		: "r"(ptr)
		: "memory");

		smp_mb();

		return val;
	}
	return __xchg_bad_pointer();
}

#define _xchg(ptr, with) ((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), \
				 sizeof(*(ptr))))

/*
 * xchg() maps directly to the ARC EX instruction, which guarantees atomicity.
 * However, in the !LLSC config it also needs to take the @atomic_ops_lock
 * spinlock, for a subtle reason:
 * - For !LLSC, cmpxchg() needs to use that lock (see above), and there is a
 *   lot of kernel code which calls xchg()/cmpxchg() on the same data (see
 *   llist.h). Hence xchg() needs to follow the same locking rules.
 *
 * Technically the lock is also needed for UP (it boils down to irq
 * save/restore), but we can cheat a bit: cmpxchg()'s atomic_ops_lock() keeps
 * irqs disabled, so it can't possibly be interrupted/preempted/clobbered by
 * xchg(). The other way around, xchg() is a single instruction anyway, so it
 * can't be interrupted as such.
 */

#if !defined(CONFIG_ARC_HAS_LLSC) && defined(CONFIG_SMP)

#define xchg(ptr, with)			\
({					\
	unsigned long flags;		\
	typeof(*(ptr)) old_val;		\
					\
	atomic_ops_lock(flags);		\
	old_val = _xchg(ptr, with);	\
	atomic_ops_unlock(flags);	\
	old_val;			\
})

#else

#define xchg(ptr, with) _xchg(ptr, with)

#endif

#else /* CONFIG_ARC_PLAT_EZNPS */

static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
				   int size)
{
	extern unsigned long __xchg_bad_pointer(void);

	switch (size) {
	case 4:
		/*
		 * Explicit full memory barrier needed before/after
		 */
		smp_mb();

		__asm__ __volatile__(
		"	mov r2, %0\n"
		"	mov r3, %1\n"
		"	.word %2\n"
		"	mov %0, r2\n"
		: "+r"(val)
		: "r"(ptr), "i"(CTOP_INST_XEX_DI_R2_R2_R3)
		: "r2", "r3", "memory");

		smp_mb();

		return val;
	}
	return __xchg_bad_pointer();
}

#define xchg(ptr, with) ((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), \
				sizeof(*(ptr))))

#endif /* CONFIG_ARC_PLAT_EZNPS */
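
/*
 * Usage sketch (illustrative only, not part of this header): the llist.h
 * style pattern referred to above, where producers push with a cmpxchg()
 * retry loop and a consumer detaches the whole list with a single xchg().
 * Having both primitives operate on the same data is exactly why they must
 * honour the same locking rules in the !LLSC configuration. The struct and
 * function names are hypothetical.
 */
struct __xchg_example_node {
	struct __xchg_example_node *next;
};

static inline void
__xchg_example_push(struct __xchg_example_node **head,
		    struct __xchg_example_node *node)
{
	struct __xchg_example_node *old;

	do {
		old = *head;		/* snapshot; revalidated by cmpxchg() */
		node->next = old;
	} while (cmpxchg(head, old, node) != old);	/* lost a race: retry */
}

static inline struct __xchg_example_node *
__xchg_example_del_all(struct __xchg_example_node **head)
{
	/* atomically hand the entire chain to the caller */
	return xchg(head, (struct __xchg_example_node *)0);
}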

/*
 * "atomic" variant of xchg()
 * REQ: It needs to follow the same serialization rules as other atomic_xxx()
 * Since xchg() doesn't always do that, it would seem that the following
 * definition is incorrect. But here's the rationale:
 *   SMP : Even xchg() takes the atomic_ops_lock, so OK.
 *   LLSC: atomic_ops_lock is not relevant at all (even with SMP, since LLSC
 *         is natively "SMP safe", no serialization is required).
 *   UP  : other atomics disable IRQs, so there is no way an atomic_xchg()
 *         from a different context could clobber them. atomic_xchg() itself
 *         is a single instruction, so it can't be clobbered by others. Thus
 *         no serialization is required when atomic_xchg() is involved.
 */
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
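
/*
 * Usage sketch (illustrative only, not part of this header): atomic_xchg()
 * consuming a "pending" flag exactly once, no matter how many CPUs race to
 * claim it. The helper name is hypothetical.
 */
static inline int __atomic_xchg_example_claim(atomic_t *pending)
{
	/* only one racing caller observes the 1 -> 0 transition */
	return atomic_xchg(pending, 0) != 0;
}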

#endif