/* system.h: FR-V CPU control definitions
 *
 * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#ifndef _ASM_SYSTEM_H
#define _ASM_SYSTEM_H

#include <linux/types.h>
#include <linux/linkage.h>
#include <linux/kernel.h>

struct thread_struct;

/*
 * switch_to(prev, next, last) should switch from task `prev' to `next';
 * `prev' will never be the same as `next'.
 * The `mb' is to tell GCC not to cache `current' across this call.
 */
extern asmlinkage
struct task_struct *__switch_to(struct thread_struct *prev_thread,
				struct thread_struct *next_thread,
				struct task_struct *prev);

#define switch_to(prev, next, last)					\
do {									\
	(prev)->thread.sched_lr =					\
		(unsigned long) __builtin_return_address(0);		\
	(last) = __switch_to(&(prev)->thread, &(next)->thread, (prev)); \
	mb();								\
} while(0)
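
/*
 * Illustrative sketch only (not part of this header): the scheduler's
 * context-switch path is expected to use switch_to() roughly as below,
 * passing `prev' for `last' so that, when this code path is resumed later,
 * the macro yields the task that was running immediately beforehand:
 *
 *	switch_to(prev, next, prev);
 *	barrier();
 *	// `prev' now names the task that ran just before this one resumed
 */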

/*
 * interrupt flag manipulation
 * - use virtual interrupt management since touching the PSR is slow
 *   - ICC2.Z: T if interrupts virtually disabled
 *   - ICC2.C: F if interrupts really disabled
 *   - if Z==1 upon interrupt:
 *     - C is set to 0
 *     - interrupts are really disabled
 *     - entry.S returns immediately
 *   - uses TIHI (TRAP if Z==0 && C==0) #2 to really reenable interrupts
 *     - if taken, the trap:
 *       - sets ICC2.C
 *       - enables interrupts
 */
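
/*
 * Summary of the (ICC2.Z, ICC2.C) states implied by the scheme above
 * (derived from the description, for orientation only):
 *
 *	Z=0 C=1		interrupts virtually and really enabled
 *	Z=1 C=1		virtually disabled, but not yet really disabled
 *	Z=1 C=0		an interrupt arrived while virtually disabled;
 *			interrupts are now really disabled as well
 *	Z=0 C=0		transient state during local_irq_enable(): the
 *			TIHI #2 trap fires, sets C and really reenables
 */
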
#define local_irq_disable()					\
do {								\
	/* set Z flag, but don't change the C flag */		\
	asm volatile("	andcc	gr0,gr0,gr0,icc2	\n"	\
		     :						\
		     :						\
		     : "memory", "icc2"				\
		     );						\
} while(0)

#define local_irq_enable()					\
do {								\
	/* clear Z flag and then test the C flag */		\
	asm volatile("	oricc	gr0,#1,gr0,icc2		\n"	\
		     "	tihi	icc2,gr0,#2		\n"	\
		     :						\
		     :						\
		     : "memory", "icc2"				\
		     );						\
} while(0)

#define local_save_flags(flags)					\
do {								\
	typecheck(unsigned long, flags);			\
	asm volatile("movsg ccr,%0"				\
		     : "=r"(flags)				\
		     :						\
		     : "memory");				\
								\
	/* shift ICC2.Z to bit 0 */				\
	flags >>= 26;						\
								\
	/* make flags 1 if interrupts disabled, 0 otherwise */	\
	flags &= 1UL;						\
} while(0)

#define irqs_disabled() \
	({ unsigned long flags; local_save_flags(flags); flags; })

#define local_irq_save(flags)			\
do {						\
	typecheck(unsigned long, flags);	\
	local_save_flags(flags);		\
	local_irq_disable();			\
} while(0)

#define local_irq_restore(flags)					\
do {									\
	typecheck(unsigned long, flags);				\
									\
	/* restore the Z flag: flags is 1 if interrupts were		\
	 * disabled, so flags ^ 1 is 0 (setting Z) in that case and	\
	 * 1 (clearing Z) otherwise; the C flag is left untouched */	\
	asm volatile("	xoricc	%0,#1,gr0,icc2		\n"		\
		     /* then trap if Z=0 and C=0 */			\
		     "	tihi	icc2,gr0,#2		\n"		\
		     :							\
		     : "r"(flags)					\
		     : "memory", "icc2"					\
		     );							\
									\
} while(0)
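
/*
 * Illustrative sketch only (not part of this header): a typical short
 * critical section protecting data against local interrupts, using a
 * hypothetical counter `example_count':
 *
 *	static unsigned long example_count;
 *
 *	static unsigned long example_bump(void)
 *	{
 *		unsigned long flags, old;
 *
 *		local_irq_save(flags);
 *		old = example_count++;
 *		local_irq_restore(flags);
 *		return old;
 *	}
 *
 * Because the disable is only virtual, the PSR is not touched unless an
 * interrupt actually arrives inside the region.
 */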

/*
 * real interrupt flag manipulation
 */
#define __local_irq_disable()				\
do {							\
	unsigned long psr;				\
	asm volatile("	movsg	psr,%0		\n"	\
		     "	andi	%0,%2,%0	\n"	\
		     "	ori	%0,%1,%0	\n"	\
		     "	movgs	%0,psr		\n"	\
		     : "=r"(psr)			\
		     : "i" (PSR_PIL_14), "i" (~PSR_PIL)	\
		     : "memory");			\
} while(0)

#define __local_irq_enable()				\
do {							\
	unsigned long psr;				\
	asm volatile("	movsg	psr,%0		\n"	\
		     "	andi	%0,%1,%0	\n"	\
		     "	movgs	%0,psr		\n"	\
		     : "=r"(psr)			\
		     : "i" (~PSR_PIL)			\
		     : "memory");			\
} while(0)

#define __local_save_flags(flags)		\
do {						\
	typecheck(unsigned long, flags);	\
	asm("movsg psr,%0"			\
	    : "=r"(flags)			\
	    :					\
	    : "memory");			\
} while(0)

#define __local_irq_save(flags)				\
do {							\
	unsigned long npsr;				\
	typecheck(unsigned long, flags);		\
	asm volatile("	movsg	psr,%0		\n"	\
		     "	andi	%0,%3,%1	\n"	\
		     "	ori	%1,%2,%1	\n"	\
		     "	movgs	%1,psr		\n"	\
		     : "=r"(flags), "=r"(npsr)		\
		     : "i" (PSR_PIL_14), "i" (~PSR_PIL)	\
		     : "memory");			\
} while(0)

#define __local_irq_restore(flags)		\
do {						\
	typecheck(unsigned long, flags);	\
	asm volatile("	movgs	%0,psr	\n"	\
		     :				\
		     : "r" (flags)		\
		     : "memory");		\
} while(0)

#define __irqs_disabled() \
	((__get_PSR() & PSR_PIL) >= PSR_PIL_14)
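
/*
 * Illustrative contrast between the two families (sketch only): the
 * double-underscore variants mask interrupts in the PSR itself, while the
 * plain variants above only set ICC2.Z, so the PSR may still permit
 * interrupts until one actually arrives:
 *
 *	local_irq_disable();
 *	BUG_ON(!irqs_disabled());	// virtual state says disabled
 *	// __irqs_disabled() may still report false at this point
 */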

/*
 * Force strict CPU ordering.
 */
#define nop()			asm volatile ("nop"::)
#define mb()			asm volatile ("membar" : : :"memory")
#define rmb()			asm volatile ("membar" : : :"memory")
#define wmb()			asm volatile ("membar" : : :"memory")
#define set_mb(var, value)	do { var = value; mb(); } while (0)

#define smp_mb()		mb()
#define smp_rmb()		rmb()
#define smp_wmb()		wmb()

#define read_barrier_depends()		do {} while(0)
#define smp_read_barrier_depends()	read_barrier_depends()
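
/*
 * Illustrative sketch only: a typical pairing of the SMP barriers defined
 * above, with hypothetical shared variables `data' and `ready':
 *
 *	// producer (CPU 0)
 *	data = 42;
 *	smp_wmb();		// order the data store before the flag store
 *	ready = 1;
 *
 *	// consumer (CPU 1)
 *	if (ready) {
 *		smp_rmb();	// order the flag load before the data load
 *		use(data);
 *	}
 */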

#define HARD_RESET_NOW()			\
do {						\
	cli();					\
} while(1)

extern void die_if_kernel(const char *, ...) __attribute__((format(printf, 1, 2)));
extern void free_initmem(void);

#define arch_align_stack(x) (x)

/*****************************************************************************/
/*
 * compare and conditionally exchange value with memory
 * - if (*ptr == test) then orig = *ptr; *ptr = new;
 * - if (*ptr != test) then orig = *ptr;
 */
#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS

#define cmpxchg(ptr, test, new)							\
({										\
	__typeof__(ptr) __xg_ptr = (ptr);					\
	__typeof__(*(ptr)) __xg_orig, __xg_tmp;					\
	__typeof__(*(ptr)) __xg_test = (test);					\
	__typeof__(*(ptr)) __xg_new = (new);					\
										\
	switch (sizeof(__xg_orig)) {						\
	case 4:									\
		asm volatile(							\
			"0:						\n"	\
			"	orcc		gr0,gr0,gr0,icc3	\n"	\
			"	ckeq		icc3,cc7		\n"	\
			"	ld.p		%M0,%1			\n"	\
			"	orcr		cc7,cc7,cc3		\n"	\
			"	sub%I4cc	%1,%4,%2,icc0		\n"	\
			"	bne		icc0,#0,1f		\n"	\
			"	cst.p		%3,%M0		,cc3,#1	\n"	\
			"	corcc		gr29,gr29,gr0	,cc3,#1	\n"	\
			"	beq		icc3,#0,0b		\n"	\
			"1:						\n"	\
			: "+U"(*__xg_ptr), "=&r"(__xg_orig), "=&r"(__xg_tmp)	\
			: "r"(__xg_new), "NPr"(__xg_test)			\
			: "memory", "cc7", "cc3", "icc3", "icc0"		\
			);							\
		break;								\
										\
	default:								\
		__xg_orig = 0;							\
		asm volatile("break");						\
		break;								\
	}									\
										\
	__xg_orig;								\
})

#else

extern uint32_t __cmpxchg_32(uint32_t *v, uint32_t test, uint32_t new);

#define cmpxchg(ptr, test, new)							\
({										\
	__typeof__(ptr) __xg_ptr = (ptr);					\
	__typeof__(*(ptr)) __xg_orig;						\
	__typeof__(*(ptr)) __xg_test = (test);					\
	__typeof__(*(ptr)) __xg_new = (new);					\
										\
	switch (sizeof(__xg_orig)) {						\
	case 4: __xg_orig = (__force __typeof__(*ptr))				\
			__cmpxchg_32((__force uint32_t *)__xg_ptr,		\
				     (__force uint32_t)__xg_test,		\
				     (__force uint32_t)__xg_new); break;	\
	default:								\
		__xg_orig = 0;							\
		asm volatile("break");						\
		break;								\
	}									\
										\
	__xg_orig;								\
})

#endif
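
/*
 * Illustrative sketch only: a lock-free "add unless zero" built on cmpxchg(),
 * using a hypothetical counter; the loop retries whenever another CPU changed
 * the value between the load and the cmpxchg():
 *
 *	static int example_add_unless_zero(unsigned long *counter, unsigned long n)
 *	{
 *		unsigned long old, seen;
 *
 *		for (;;) {
 *			old = *counter;
 *			if (old == 0)
 *				return 0;
 *			seen = cmpxchg(counter, old, old + n);
 *			if (seen == old)
 *				return 1;	// nobody raced with us
 *		}
 *	}
 */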
271
Mathieu Desnoyers14e0cb32008-02-07 00:16:14 -0800272#include <asm-generic/cmpxchg-local.h>
273
274static inline unsigned long __cmpxchg_local(volatile void *ptr,
275 unsigned long old,
276 unsigned long new, int size)
277{
278 switch (size) {
279 case 4:
Mathieu Desnoyers6784fd52008-02-08 15:00:45 -0800280 return cmpxchg((unsigned long *)ptr, old, new);
Mathieu Desnoyers14e0cb32008-02-07 00:16:14 -0800281 default:
282 return __cmpxchg_local_generic(ptr, old, new, size);
283 }
284
285 return old;
286}
287
288/*
289 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
290 * them available.
291 */
292#define cmpxchg_local(ptr, o, n) \
293 ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
294 (unsigned long)(n), sizeof(*(ptr))))
295#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
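
/*
 * Illustrative sketch only: cmpxchg_local() fits data touched only by the
 * local CPU (e.g. a hypothetical per-CPU slot), where atomicity against
 * interrupts on this CPU is enough and cross-CPU atomicity is not required:
 *
 *	static unsigned long example_slot;	// assumed per-CPU in real use
 *
 *	static int example_claim_slot(unsigned long token)
 *	{
 *		return cmpxchg_local(&example_slot, 0UL, token) == 0UL;
 *	}
 */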

#endif /* _ASM_SYSTEM_H */