/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __ASM_ARC_CMPXCHG_H
#define __ASM_ARC_CMPXCHG_H

#include <linux/types.h>

#include <asm/barrier.h>
#include <asm/smp.h>

#ifdef CONFIG_ARC_HAS_LLSC

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
{
	unsigned long prev;

	/*
	 * Explicit full memory barrier needed before/after as
	 * LLOCK/SCOND themselves don't provide any such semantics
	 */
	smp_mb();

	__asm__ __volatile__(
	"1:	llock   %0, [%1]	\n"
	"	brne    %0, %2, 2f	\n"
	"	scond   %3, [%1]	\n"
	"	bnz     1b		\n"
	"2:				\n"
	: "=&r"(prev)	/* Early clobber, to prevent reg reuse */
	: "r"(ptr),	/* Not "m": llock only supports reg direct addr mode */
	  "ir"(expected),
	  "r"(new)	/* can't be "ir". scond can't take LIMM for "b" */
	: "cc", "memory"); /* so that gcc knows memory is being written here */

	smp_mb();

	return prev;
}

#elif !defined(CONFIG_ARC_PLAT_EZNPS)

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
{
	unsigned long flags;
	unsigned long prev;
	volatile unsigned long *p = ptr;

	/*
	 * spin lock/unlock provide the needed smp_mb() before/after
	 */
	atomic_ops_lock(flags);
	prev = *p;
	if (prev == expected)
		*p = new;
	atomic_ops_unlock(flags);
	return prev;
}

#else /* CONFIG_ARC_PLAT_EZNPS */

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
{
	/*
	 * Explicit full memory barrier needed before/after
	 */
	smp_mb();

	write_aux_reg(CTOP_AUX_GPA1, expected);

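	/*
	 * The custom EZNPS exchange instruction (the .word below) takes its
	 * compare value from CTOP_AUX_GPA1: it compares the word at [@ptr]
	 * (r3) against GPA1 and, on a match, stores r2 (@new); either way
	 * r2 receives the old memory value, giving cmpxchg() semantics.
	 */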
	__asm__ __volatile__(
	"	mov r2, %0\n"
	"	mov r3, %1\n"
	"	.word %2\n"
	"	mov %0, r2"
	: "+r"(new)
	: "r"(ptr), "i"(CTOP_INST_EXC_DI_R2_R2_R3)
	: "r2", "r3", "memory");

	smp_mb();

	return new;
}

#endif /* CONFIG_ARC_HAS_LLSC */

#define cmpxchg(ptr, o, n) ({				\
	(typeof(*(ptr)))__cmpxchg((ptr),		\
				  (unsigned long)(o),	\
				  (unsigned long)(n));	\
})
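
/*
 * Usage sketch (illustrative only; example_add() is a hypothetical helper,
 * not part of this header's API): the canonical cmpxchg() retry loop.
 * It keeps retrying until no other CPU updated *p between the read and
 * the compare-and-exchange.
 */
static inline int example_add(volatile int *p, int a)
{
	int cur = *p;
	int old;

	/* a return value other than @cur means we raced: retry with it */
	while ((old = cmpxchg(p, cur, cur + a)) != cur)
		cur = old;

	return cur + a;	/* the value we actually stored */
}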

/*
 * atomic_cmpxchg is the same as cmpxchg:
 *   LLSC: only differs in data type, the semantics are exactly the same
 *  !LLSC: cmpxchg() has to use the external lock atomic_ops_lock to guarantee
 *         the semantics, and this lock also happens to be used by atomic_*()
 */
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
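
/*
 * Example sketch (example_add_unless() is a hypothetical helper, not part
 * of this header): the classic add-unless pattern built on atomic_cmpxchg().
 * Adds @a to @v unless @v currently holds @u; returns non-zero if the add
 * happened.
 */
static inline int example_add_unless(atomic_t *v, int a, int u)
{
	int c = v->counter;	/* plain read; the cmpxchg validates it */

	while (c != u) {
		int old = atomic_cmpxchg(v, c, c + a);

		if (old == c)
			return 1;	/* swap succeeded */
		c = old;		/* raced with another update: retry */
	}
	return 0;
}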


#ifndef CONFIG_ARC_PLAT_EZNPS

/*
 * xchg (reg with memory) based on "Native atomic" EX insn
 */
static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
				   int size)
{
	extern unsigned long __xchg_bad_pointer(void);

	switch (size) {
	case 4:
		smp_mb();

		__asm__ __volatile__(
		"	ex  %0, [%1]	\n"
		: "+r"(val)
		: "r"(ptr)
		: "memory");

		smp_mb();

		return val;
	}
	return __xchg_bad_pointer();
}

#define _xchg(ptr, with) ((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), \
						 sizeof(*(ptr))))

/*
 * xchg() maps directly to ARC EX instruction which guarantees atomicity.
 * However in !LLSC config, it also needs to use @atomic_ops_lock spinlock
 * due to a subtle reason:
 * - For !LLSC, cmpxchg() needs to use that lock (see above) and there is
 *   a lot of kernel code which calls xchg()/cmpxchg() on same data (see
 *   llist.h). Hence xchg() needs to follow the same locking rules.
 *   (A sketch of such mixed xchg()/cmpxchg() usage follows the #endif below.)
 *
 * Technically the lock is also needed for UP (boils down to irq save/restore)
 * but we can cheat a bit since cmpxchg() atomic_ops_lock() would cause irqs to
 * be disabled, thus can't possibly be interrupted/preempted/clobbered by xchg()
 * The other way around, xchg() is a single instruction anyway, so it can't be
 * interrupted as such.
 */

#if !defined(CONFIG_ARC_HAS_LLSC) && defined(CONFIG_SMP)

#define xchg(ptr, with)			\
({					\
	unsigned long flags;		\
	typeof(*(ptr)) old_val;		\
					\
	atomic_ops_lock(flags);		\
	old_val = _xchg(ptr, with);	\
	atomic_ops_unlock(flags);	\
	old_val;			\
})

#else

#define xchg(ptr, with) _xchg(ptr, with)

#endif
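
/*
 * Sketch of the mixed usage referenced above (hypothetical types and
 * helpers, not this header's API): an llist-style lock-free list where
 * producers push with cmpxchg() while a consumer detaches the whole list
 * with xchg(). Since both operate on the same head pointer, xchg() must
 * honour the same locking rules as cmpxchg() in the !LLSC configuration.
 */
struct example_node {
	struct example_node *next;
};

static inline void example_push(struct example_node **head,
				struct example_node *n)
{
	struct example_node *first = *head;

	/* retry until the head we linked against is the head we swapped */
	do {
		n->next = first;
	} while ((first = cmpxchg(head, first, n)) != n->next);
}

static inline struct example_node *example_del_all(struct example_node **head)
{
	return xchg(head, NULL);	/* atomically take the whole list */
}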

#else /* CONFIG_ARC_PLAT_EZNPS */

static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
				   int size)
{
	extern unsigned long __xchg_bad_pointer(void);

	switch (size) {
	case 4:
		/*
		 * Explicit full memory barrier needed before/after
		 */
		smp_mb();

		__asm__ __volatile__(
		"	mov r2, %0\n"
		"	mov r3, %1\n"
		"	.word %2\n"
		"	mov %0, r2\n"
		: "+r"(val)
		: "r"(ptr), "i"(CTOP_INST_XEX_DI_R2_R2_R3)
		: "r2", "r3", "memory");

		smp_mb();

		return val;
	}
	return __xchg_bad_pointer();
}

#define xchg(ptr, with) ({					\
	(typeof(*(ptr)))__xchg((unsigned long)(with),		\
			       (ptr),				\
			       sizeof(*(ptr)));			\
})

#endif /* CONFIG_ARC_PLAT_EZNPS */

/*
 * "atomic" variant of xchg()
 * REQ: It needs to follow the same serialization rules as other atomic_xxx()
 * Since xchg() doesn't always do that, it would seem that the following
 * definition is incorrect. But here's the rationale:
 *   SMP : Even xchg() takes the atomic_ops_lock, so OK.
 *   LLSC: atomic_ops_lock is not relevant at all (even if SMP, since LLSC
 *         is natively "SMP safe", no serialization required).
 *   UP  : other atomics disable IRQ, so there is no way an atomic_xchg()
 *         from a different context could clobber them. atomic_xchg() itself
 *         is a single insn, so it can't be clobbered by others. Thus no
 *         serialization is required when atomic_xchg() is involved.
 */
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
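
/*
 * Example sketch (example_claim_once() is a hypothetical helper): one-shot
 * claim via atomic_xchg(); only the caller that observes the old value 0
 * "wins".
 */
static inline int example_claim_once(atomic_t *once)
{
	return atomic_xchg(once, 1) == 0;
}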

#endif