/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __ASM_ARC_CMPXCHG_H
#define __ASM_ARC_CMPXCHG_H

#include <linux/types.h>

#include <asm/barrier.h>
#include <asm/smp.h>

#ifdef CONFIG_ARC_HAS_LLSC

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
{
	unsigned long prev;

	/*
	 * Explicit full memory barrier needed before/after as
	 * LLOCK/SCOND themselves don't provide any such semantics
	 */
	smp_mb();

	__asm__ __volatile__(
	"1:	llock   %0, [%1]	\n"
	"	brne    %0, %2, 2f	\n"
	"	scond   %3, [%1]	\n"
	"	bnz     1b		\n"
	"2:				\n"
	: "=&r"(prev)	/* Early clobber, to prevent reg reuse */
	: "r"(ptr),	/* Not "m": llock only supports reg direct addr mode */
	  "ir"(expected),
	  "r"(new)	/* can't be "ir". scond can't take LIMM for "b" */
	: "cc", "memory"); /* so that gcc knows memory is being written here */

	smp_mb();

	return prev;
}
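
/*
 * For reference, a pseudo-C sketch of the LLOCK/SCOND sequence above
 * (illustrative only; store_conditional() is a made-up stand-in for SCOND,
 * which fails if the location was written to since the matching LLOCK):
 *
 *	do {
 *		prev = *ptr;		// llock: load and mark reservation
 *		if (prev != expected)
 *			break;		// mismatch: return the value we saw
 *	} while (!store_conditional(ptr, new));	// lost reservation: retry
 */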

#else

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
{
	unsigned long flags;
	unsigned long prev;
	volatile unsigned long *p = ptr;

	/*
	 * spin lock/unlock provide the needed smp_mb() before/after
	 */
	atomic_ops_lock(flags);
	prev = *p;
	if (prev == expected)
		*p = new;
	atomic_ops_unlock(flags);
	return prev;
}

#endif /* CONFIG_ARC_HAS_LLSC */

#define cmpxchg(ptr, o, n) ((typeof(*(ptr)))__cmpxchg((ptr), \
				(unsigned long)(o), (unsigned long)(n)))
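
/*
 * Illustrative usage (not part of the original header): the canonical
 * optimistic-update loop built on cmpxchg(). The variable names below
 * are hypothetical:
 *
 *	unsigned long old, cur = *v;		// v: unsigned long *
 *
 *	do {
 *		old = cur;
 *		cur = cmpxchg(v, old, old + 1);	// returns value found at *v
 *	} while (cur != old);	// raced with another writer: retry
 */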

/*
 * Since not supported natively, ARC cmpxchg() uses atomic_ops_lock (UP/SMP)
 * just to guarantee the semantics.
 * atomic_cmpxchg() needs to use the same lock as its other atomic siblings,
 * which also happens to be atomic_ops_lock.
 *
 * Thus despite being semantically different, the implementation of
 * atomic_cmpxchg() is the same as that of cmpxchg().
 */
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
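
/*
 * Illustrative sketch (not from the original file): how generic code
 * typically builds on atomic_cmpxchg(), e.g. an "add unless equal" helper.
 * The function name and logic below are hypothetical:
 *
 *	static inline int my_atomic_add_unless(atomic_t *v, int a, int u)
 *	{
 *		int c = atomic_read(v);
 *
 *		while (c != u) {
 *			int old = atomic_cmpxchg(v, c, c + a);
 *			if (old == c)
 *				return 1;	// swap succeeded
 *			c = old;	// lost a race: retry with the new value
 *		}
 *		return 0;
 *	}
 */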

/*
 * xchg (reg with memory) based on "Native atomic" EX insn
 */
static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
				   int size)
{
	extern unsigned long __xchg_bad_pointer(void);

	switch (size) {
	case 4:
		smp_mb();

		__asm__ __volatile__(
		"	ex  %0, [%1]	\n"
		: "+r"(val)
		: "r"(ptr)
		: "memory");

		smp_mb();

		return val;
	}
	return __xchg_bad_pointer();
}

#define _xchg(ptr, with) ((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), \
						 sizeof(*(ptr))))
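
/*
 * Illustrative (not from the original file): EX atomically swaps a register
 * with a memory word, so a 32-bit exchange is a single instruction. A
 * hypothetical test-and-set style use, with "addr" an unsigned long *:
 *
 *	// store 1, get back the previous value; spin while it was already 1
 *	while (_xchg(addr, 1))
 *		cpu_relax();
 */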

/*
 * On ARC700, the EX insn is inherently atomic, so by default a "vanilla"
 * xchg() doesn't require any locking. However there's a quirk.
 * ARC lacks native CMPXCHG, which is thus emulated (see above) using
 * external locking - incidentally it "reuses" the same atomic_ops_lock
 * used by the atomic APIs.
 * Now, llist code uses cmpxchg() and xchg() on the same data, so xchg()
 * needs to abide by the same serializing rules, and thus ends up using
 * atomic_ops_lock as well.
 *
 * This however is only relevant if SMP and/or ARC lacks LLSC
 *	if (UP or LLSC)
 *		xchg doesn't need serialization
 *	else <==> !(UP or LLSC) <==> (!UP and !LLSC) <==> (SMP and !LLSC)
 *		xchg needs serialization
 */

#if !defined(CONFIG_ARC_HAS_LLSC) && defined(CONFIG_SMP)

#define xchg(ptr, with)			\
({					\
	unsigned long flags;		\
	typeof(*(ptr)) old_val;		\
					\
	atomic_ops_lock(flags);		\
	old_val = _xchg(ptr, with);	\
	atomic_ops_unlock(flags);	\
	old_val;			\
})

#else

#define xchg(ptr, with) _xchg(ptr, with)

#endif
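
/*
 * Illustrative race (hypothetical timeline, not from the original file)
 * showing why xchg() must take atomic_ops_lock once cmpxchg() is
 * lock-emulated: a bare EX on CPU1 could slip between the read and write
 * steps of CPU0's locked cmpxchg() emulation:
 *
 *	CPU0: cmpxchg(p, A, B)		CPU1: xchg(p, C)
 *	  lock; prev = *p (== A)
 *					  EX: *p <- C, returns A
 *	  *p = B; unlock
 *
 * Both sides observe A as the old value, and CPU1's store of C is lost.
 * Serializing xchg() on the same lock forbids this interleaving.
 */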

/*
 * "atomic" variant of xchg()
 * REQ: It needs to follow the same serialization rules as other atomic_xxx()
 * Since xchg() doesn't always do that, it would seem that the following
 * definition is incorrect. But here's the rationale:
 *   SMP : Even xchg() takes the atomic_ops_lock, so OK.
 *   LLSC: atomic_ops_lock is not relevant at all (even if SMP, since LLSC
 *	   is natively "SMP safe", no serialization is required).
 *   UP  : other atomics disable IRQs, so there's no way an atomic_xchg()
 *	   from a different context could clobber them. atomic_xchg() itself
 *	   would be 1 insn, so it can't be clobbered by others. Thus no
 *	   serialization is required when atomic_xchg() is involved.
 */
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
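
/*
 * Illustrative use (hypothetical names): atomic_xchg() reads and replaces
 * in one atomic step, e.g. consuming a set of pending flags:
 *
 *	int pending = atomic_xchg(&st->flags, 0);	// fetch and clear
 *
 *	if (pending)
 *		process(pending);
 */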

#endif