#ifndef _ASM_M32R_SYSTEM_H
#define _ASM_M32R_SYSTEM_H

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 by Hiroyuki Kondo, Hirokazu Takata, and Hitoshi Yamamoto
 * Copyright (C) 2004 Hirokazu Takata <takata at linux-m32r.org>
 */

#include <linux/config.h>
#include <asm/assembler.h>

#ifdef __KERNEL__

/*
 * switch_to(prev, next, last) should switch from task `prev' to `next',
 * returning the task we switched away from in `last'.
 * `prev' will never be the same as `next'.
 *
 * `next' and `prev' should be struct task_struct pointers, but that type
 * isn't always defined when this header is included.
 */

#ifndef CONFIG_SMP
#define prepare_to_switch()  do { } while(0)
#endif /* not CONFIG_SMP */

#define switch_to(prev, next, last)  do { \
	register unsigned long arg0 __asm__ ("r0") = (unsigned long)prev; \
	register unsigned long arg1 __asm__ ("r1") = (unsigned long)next; \
	register unsigned long *oldsp __asm__ ("r2") = &(prev->thread.sp); \
	register unsigned long *newsp __asm__ ("r3") = &(next->thread.sp); \
	register unsigned long *oldlr __asm__ ("r4") = &(prev->thread.lr); \
	register unsigned long *newlr __asm__ ("r5") = &(next->thread.lr); \
	register struct task_struct *__last __asm__ ("r6"); \
	__asm__ __volatile__ ( \
		"st	r8, @-r15	\n\t" \
		"st	r9, @-r15	\n\t" \
		"st	r10, @-r15	\n\t" \
		"st	r11, @-r15	\n\t" \
		"st	r12, @-r15	\n\t" \
		"st	r13, @-r15	\n\t" \
		"st	r14, @-r15	\n\t" \
		"seth	r14, #high(1f)	\n\t" \
		"or3	r14, r14, #low(1f)	\n\t" \
		"st	r14, @r4	; store old LR	\n\t" \
		"st	r15, @r2	; store old SP	\n\t" \
		"ld	r15, @r3	; load new SP	\n\t" \
		"st	r0, @-r15	; store 'prev' onto new stack	\n\t" \
		"ld	r14, @r5	; load new LR	\n\t" \
		"jmp	r14	\n\t" \
		".fillinsn	\n" \
		"1:	\n\t" \
		"ld	r6, @r15+	; load 'prev' from new stack	\n\t" \
		"ld	r14, @r15+	\n\t" \
		"ld	r13, @r15+	\n\t" \
		"ld	r12, @r15+	\n\t" \
		"ld	r11, @r15+	\n\t" \
		"ld	r10, @r15+	\n\t" \
		"ld	r9, @r15+	\n\t" \
		"ld	r8, @r15+	\n\t" \
		: "=&r" (__last) \
		: "r" (arg0), "r" (arg1), "r" (oldsp), "r" (newsp), \
		  "r" (oldlr), "r" (newlr) \
		: "memory" \
	); \
	last = __last; \
} while(0)
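
/*
 * Illustrative sketch (an assumption, not part of this header): the
 * scheduler core would invoke switch_to() roughly as below, passing
 * `prev' as `last' so it receives the task that was switched away from.
 * `current_task' and `picked_task' are hypothetical names:
 *
 *	struct task_struct *prev = current_task, *next = picked_task;
 *	switch_to(prev, next, prev);
 *	// here `prev' names the task we just came from
 */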

/* Interrupt Control */
#if !defined(CONFIG_CHIP_M32102)
#define local_irq_enable() \
	__asm__ __volatile__ ("setpsw #0x40 -> nop": : :"memory")
#define local_irq_disable() \
	__asm__ __volatile__ ("clrpsw #0x40 -> nop": : :"memory")
#else	/* CONFIG_CHIP_M32102 */
static inline void local_irq_enable(void)
{
	unsigned long tmpreg;
	__asm__ __volatile__(
		"mvfc	%0, psw;	\n\t"
		"or3	%0, %0, #0x0040;	\n\t"
		"mvtc	%0, psw;	\n\t"
		: "=&r" (tmpreg) : : "cbit", "memory");
}

static inline void local_irq_disable(void)
{
	unsigned long tmpreg0, tmpreg1;
	__asm__ __volatile__(
		"ld24	%0, #0	; Use 32-bit insn.	\n\t"
		"mvfc	%1, psw	; No interrupt can be accepted here.	\n\t"
		"mvtc	%0, psw	\n\t"
		"and3	%0, %1, #0xffbf	\n\t"
		"mvtc	%0, psw	\n\t"
		: "=&r" (tmpreg0), "=&r" (tmpreg1) : : "cbit", "memory");
}
#endif	/* CONFIG_CHIP_M32102 */

#define local_save_flags(x) \
	__asm__ __volatile__("mvfc %0,psw" : "=r"(x) : /* no input */)

#define local_irq_restore(x) \
	__asm__ __volatile__("mvtc %0,psw" : /* no outputs */ \
		: "r" (x) : "cbit", "memory")

#if !defined(CONFIG_CHIP_M32102)
#define local_irq_save(x) \
	__asm__ __volatile__( \
		"mvfc	%0, psw;	\n\t" \
		"clrpsw	#0x40 -> nop;	\n\t" \
		: "=r" (x) : /* no input */ : "memory")
#else	/* CONFIG_CHIP_M32102 */
#define local_irq_save(x) \
	({ \
		unsigned long tmpreg; \
		__asm__ __volatile__( \
			"ld24	%1, #0	\n\t" \
			"mvfc	%0, psw	\n\t" \
			"mvtc	%1, psw	\n\t" \
			"and3	%1, %0, #0xffbf	\n\t" \
			"mvtc	%1, psw	\n\t" \
			: "=r" (x), "=&r" (tmpreg) \
			: : "cbit", "memory"); \
	})
#endif	/* CONFIG_CHIP_M32102 */

#define irqs_disabled() \
	({ \
		unsigned long flags; \
		local_save_flags(flags); \
		!(flags & 0x40); \
	})
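
/*
 * Usage sketch (illustrative assumption, not defined by this header):
 * a typical critical section saves the PSW, disables interrupts, and
 * restores the saved state on exit, so it nests safely:
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	// ...touch state shared with interrupt handlers...
 *	local_irq_restore(flags);
 */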

#define nop()	__asm__ __volatile__ ("nop" : : )

#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

#define tas(ptr)	(xchg((ptr),1))
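
/*
 * Example (illustrative only, not part of this header): xchg() atomically
 * swaps in a new value and returns the old one, so tas() can implement a
 * crude test-and-set spinlock.  `lock_word' is a hypothetical variable:
 *
 *	static volatile int lock_word;
 *
 *	while (tas(&lock_word))
 *		;			// spin until the old value was 0
 *	// ...critical section...
 *	lock_word = 0;			// release
 */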

#ifdef CONFIG_SMP
extern void  __xchg_called_with_bad_pointer(void);
#endif

#ifdef CONFIG_CHIP_M32700_TS1
#define DCACHE_CLEAR(reg0, reg1, addr) \
	"seth "reg1", #high(dcache_dummy); \n\t" \
	"or3 "reg1", "reg1", #low(dcache_dummy); \n\t" \
	"lock "reg0", @"reg1"; \n\t" \
	"add3 "reg0", "addr", #0x1000; \n\t" \
	"ld "reg0", @"reg0"; \n\t" \
	"add3 "reg0", "addr", #0x2000; \n\t" \
	"ld "reg0", @"reg0"; \n\t" \
	"unlock "reg0", @"reg1"; \n\t"
	/* FIXME: This workaround code cannot handle kernel modules
	 * correctly in an SMP environment.
	 */
#else	/* CONFIG_CHIP_M32700_TS1 */
#define DCACHE_CLEAR(reg0, reg1, addr)
#endif	/* CONFIG_CHIP_M32700_TS1 */

static __inline__ unsigned long
__xchg(unsigned long x, volatile void *ptr, int size)
{
	unsigned long flags;
	unsigned long tmp = 0;

	local_irq_save(flags);

	switch (size) {
#ifndef CONFIG_SMP
	case 1:
		__asm__ __volatile__ (
			"ldb	%0, @%2	\n\t"
			"stb	%1, @%2	\n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
		break;
	case 2:
		__asm__ __volatile__ (
			"ldh	%0, @%2	\n\t"
			"sth	%1, @%2	\n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
		break;
	case 4:
		__asm__ __volatile__ (
			"ld	%0, @%2	\n\t"
			"st	%1, @%2	\n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
		break;
#else  /* CONFIG_SMP */
	case 4:
		__asm__ __volatile__ (
			DCACHE_CLEAR("%0", "r4", "%2")
			"lock	%0, @%2;	\n\t"
			"unlock	%1, @%2;	\n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr)
			: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
			, "r4"
#endif	/* CONFIG_CHIP_M32700_TS1 */
		);
		break;
	default:
		__xchg_called_with_bad_pointer();
#endif  /* CONFIG_SMP */
	}

	local_irq_restore(flags);

	return (tmp);
}

#define __HAVE_ARCH_CMPXCHG	1

static __inline__ unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned int old, unsigned int new)
{
	unsigned long flags;
	unsigned int retval;

	local_irq_save(flags);
	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "r4", "%1")
		M32R_LOCK" %0, @%1;	\n"
		"	bne	%0, %2, 1f;	\n"
		M32R_UNLOCK" %3, @%1;	\n"
		"	bra	2f;	\n"
		"	.fillinsn	\n"
		"1:"
		M32R_UNLOCK" %2, @%1;	\n"
		"	.fillinsn	\n"
		"2:"
		: "=&r" (retval)
		: "r" (p), "r" (old), "r" (new)
		: "cbit", "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r4"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
	local_irq_restore(flags);

	return retval;
}

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg(). */
extern void __cmpxchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
#if 0	/* we don't have __cmpxchg_u64 */
	case 8:
		return __cmpxchg_u64(ptr, old, new);
#endif	/* 0 */
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n) \
	({ \
		__typeof__(*(ptr)) _o_ = (o); \
		__typeof__(*(ptr)) _n_ = (n); \
		(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
			(unsigned long)_n_, sizeof(*(ptr))); \
	})
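
/*
 * Usage sketch (illustrative assumption): cmpxchg() supports lock-free
 * read-modify-write loops that retry until no other update intervened,
 * e.g. an atomic increment of a hypothetical counter:
 *
 *	static volatile unsigned int counter;
 *	unsigned int old, new;
 *
 *	do {
 *		old = counter;
 *		new = old + 1;
 *	} while (cmpxchg(&counter, old, new) != old);
 */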

#endif  /* __KERNEL__ */

/*
 * Memory barrier.
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 */
#define mb()   barrier()
#define rmb()  mb()
#define wmb()  mb()
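
/*
 * Illustrative pairing (an assumption, not from this file): a producer
 * publishes data before setting a flag, and a consumer orders its reads
 * to match, so `data' is observed before `ready':
 *
 *	CPU 0 (producer)		CPU 1 (consumer)
 *
 *	data = 42;			while (!ready)
 *	wmb();					;
 *	ready = 1;			rmb();
 *					use(data);
 */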

/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 **/

#define read_barrier_depends()	do { } while (0)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while (0)
#endif

#define set_mb(var, value) do { xchg(&var, value); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)

#define arch_align_stack(x) (x)

#endif	/* _ASM_M32R_SYSTEM_H */