/*
 * include/asm-xtensa/system.h
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_SYSTEM_H
#define _XTENSA_SYSTEM_H

#include <linux/stringify.h>

#include <asm/processor.h>

/* interrupt control */

#define local_save_flags(x) \
        __asm__ __volatile__ ("rsr %0,"__stringify(PS) : "=a" (x));
#define local_irq_restore(x) do { \
        __asm__ __volatile__ ("wsr %0, "__stringify(PS)" ; rsync" \
                              :: "a" (x) : "memory"); } while(0);
#define local_irq_save(x) do { \
        __asm__ __volatile__ ("rsil %0, "__stringify(LOCKLEVEL) \
                              : "=a" (x) :: "memory");} while(0);

static inline void local_irq_disable(void)
{
        unsigned long flags;
        __asm__ __volatile__ ("rsil %0, "__stringify(LOCKLEVEL)
                              : "=a" (flags) :: "memory");
}

static inline void local_irq_enable(void)
{
        unsigned long flags;
        __asm__ __volatile__ ("rsil %0, 0" : "=a" (flags) :: "memory");
}

static inline int irqs_disabled(void)
{
        unsigned long flags;
        local_save_flags(flags);
        return flags & 0xf;
}
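
/*
 * Usage sketch (illustrative only, not taken from this file): the
 * flag-based primitives bracket a critical section and put PS back
 * exactly as it was found, e.g.
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	... code that must not be interrupted ...
 *	local_irq_restore(flags);
 *
 * irqs_disabled() reads the low four bits of PS (the interrupt-level
 * field), so it returns non-zero whenever the current level is above 0.
 */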

#define RSR_CPENABLE(x) do { \
        __asm__ __volatile__("rsr %0," __stringify(CPENABLE) : "=a" (x)); \
        } while(0);
#define WSR_CPENABLE(x) do { \
        __asm__ __volatile__("wsr %0," __stringify(CPENABLE)";rsync" \
                             :: "a" (x));} while(0);

#define clear_cpenable() __clear_cpenable()

static inline void __clear_cpenable(void)
{
#if XCHAL_HAVE_CP
        unsigned long i = 0;
        WSR_CPENABLE(i);
#endif
}

static inline void enable_coprocessor(int i)
{
#if XCHAL_HAVE_CP
        int cp;
        RSR_CPENABLE(cp);
        cp |= 1 << i;
        WSR_CPENABLE(cp);
#endif
}

static inline void disable_coprocessor(int i)
{
#if XCHAL_HAVE_CP
        int cp;
        RSR_CPENABLE(cp);
        cp &= ~(1 << i);
        WSR_CPENABLE(cp);
#endif
}
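
/*
 * Usage sketch (illustrative): CPENABLE holds one enable bit per
 * coprocessor, so code that needs coprocessor 2 live would do
 *
 *	enable_coprocessor(2);
 *	... execute coprocessor 2 instructions ...
 *	disable_coprocessor(2);
 *
 * On configurations without coprocessors (XCHAL_HAVE_CP unset) the
 * helpers above compile to empty functions.
 */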

#define smp_read_barrier_depends() do { } while(0)
#define read_barrier_depends() do { } while(0)

#define mb()  barrier()
#define rmb() mb()
#define wmb() mb()

#ifdef CONFIG_SMP
#error smp_* not defined
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#endif

#define set_mb(var, value) do { var = value; mb(); } while (0)
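
/*
 * Note that this is a uniprocessor port: the hardware barriers above
 * reduce to a compiler barrier(), the smp_* variants are only provided
 * for the !CONFIG_SMP case, and an SMP configuration trips the #error
 * on purpose.
 */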

#if !defined (__ASSEMBLY__)

/*
 * switch_to(n) should switch tasks to task nr n, first checking that n
 * isn't the current task, in which case it does nothing.
 */
extern void *_switch_to(void *last, void *next);

#endif	/* __ASSEMBLY__ */

#define switch_to(prev,next,last)		\
do {						\
	clear_cpenable();			\
	(last) = _switch_to(prev, next);	\
} while(0)
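
/*
 * clear_cpenable() runs before the low-level switch, presumably so that
 * the first coprocessor instruction issued by the incoming task traps
 * and its coprocessor state can be set up on demand.
 */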

/*
 * cmpxchg
 */

static inline unsigned long
__cmpxchg_u32(volatile int *p, int old, int new)
{
        __asm__ __volatile__("rsil a15, "__stringify(LOCKLEVEL)"\n\t"
                             "l32i %0, %1, 0       \n\t"
                             "bne  %0, %2, 1f      \n\t"
                             "s32i %3, %1, 0       \n\t"
                             "1:                   \n\t"
                             "wsr  a15, "__stringify(PS)" \n\t"
                             "rsync                \n\t"
                             : "=&a" (old)
                             : "a" (p), "a" (old), "r" (new)
                             : "a15", "memory");
        return old;
}

/* This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid cmpxchg(). */

extern void __cmpxchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
        switch (size) {
        case 4:  return __cmpxchg_u32(ptr, old, new);
        default: __cmpxchg_called_with_bad_pointer();
                 return old;
        }
}

#define cmpxchg(ptr,o,n)						\
	({ __typeof__(*(ptr)) _o_ = (o);				\
	   __typeof__(*(ptr)) _n_ = (n);				\
	   (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	\
				(unsigned long)_n_, sizeof (*(ptr)));	\
	})
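
/*
 * Example (sketch only, assuming a shared "int counter"): 32-bit operands
 * are the only supported size, so a lock-free increment would look like
 *
 *	int old, new;
 *
 *	do {
 *		old = counter;
 *		new = old + 1;
 *	} while (cmpxchg(&counter, old, new) != old);
 */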

/*
 * xchg_u32
 *
 * Note that a15 is used here because register allocation is left to the
 * compiler and cannot be relied upon, yet a window overflow must not
 * occur between the rsil and wsr instructions.  By using a15 in the
 * rsil, the machine is guaranteed to be in a state where no register
 * reference will cause an overflow.
 */

static inline unsigned long xchg_u32(volatile int *m, unsigned long val)
{
        unsigned long tmp;
        __asm__ __volatile__("rsil a15, "__stringify(LOCKLEVEL)"\n\t"
                             "l32i %0, %1, 0       \n\t"
                             "s32i %2, %1, 0       \n\t"
                             "wsr  a15, "__stringify(PS)" \n\t"
                             "rsync                \n\t"
                             : "=&a" (tmp)
                             : "a" (m), "a" (val)
                             : "a15", "memory");
        return tmp;
}

#define tas(ptr) (xchg((ptr),1))

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
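
/*
 * Example (sketch only, assuming an "int flag"): 32-bit objects are the
 * only supported size, so a typical call is
 *
 *	old = xchg(&flag, 1);	atomically store 1, return the old value
 *
 * which is what tas(ptr) above expands to.
 */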

/*
 * This only works if the compiler isn't horribly bad at optimizing.
 * gcc-2.5.8 reportedly can't handle this, but I define that one to
 * be dead anyway.
 */

extern void __xchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__xchg(unsigned long x, volatile void *ptr, int size)
{
        switch (size) {
        case 4:
                return xchg_u32(ptr, x);
        }
        __xchg_called_with_bad_pointer();
        return x;
}

extern void set_except_vector(int n, void *addr);
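
/*
 * Spill all live register windows to the stack: the inline asm below
 * saves a0, SAR and PS, switches PS to exception mode at interrupt
 * level 1 (PS_EXCM_MASK | 1), calls the assembly helper _spill_registers
 * through a0, and then restores the saved state.
 */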
static inline void spill_registers(void)
{
        unsigned int a0, ps;

        __asm__ __volatile__ (
                "movi   a14," __stringify(PS_EXCM_MASK) " | 1\n\t"
                "mov    a12, a0\n\t"
                "rsr    a13," __stringify(SAR) "\n\t"
                "xsr    a14," __stringify(PS) "\n\t"
                "movi   a0, _spill_registers\n\t"
                "rsync\n\t"
                "callx0 a0\n\t"
                "mov    a0, a12\n\t"
                "wsr    a13," __stringify(SAR) "\n\t"
                "wsr    a14," __stringify(PS) "\n\t"
                :: "a" (&a0), "a" (&ps)
                : "a2", "a3", "a12", "a13", "a14", "a15", "memory");
}

#define arch_align_stack(x) (x)

#endif	/* _XTENSA_SYSTEM_H */