/*
 * File:         include/asm/system.h
 * Based on:
 * Author:       Tony Kou (tonyko@lineo.ca)
 *               Copyright (c) 2002 Arcturus Networks Inc.
 *               (www.arcturusnetworks.com)
 *               Copyright (c) 2003 Metrowerks (www.metrowerks.com)
 *               Copyright (c) 2004 Analog Device Inc.
 * Created:      25Jan2001 - Tony Kou
 * Description:  system.h include file
 *
 * Modified:     22Sep2006 - Robin Getz
 *               - move include blackfin.h down, so I can get access to
 *                 irq functions in other include files.
 *
 * Bugs:         Enter bugs at http://blackfin.uclinux.org/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.
 * If not, write to the Free Software Foundation,
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#ifndef _BLACKFIN_SYSTEM_H
#define _BLACKFIN_SYSTEM_H

#include <linux/linkage.h>
#include <linux/compiler.h>
#include <asm/mach/anomaly.h>

/*
 * Interrupt configuring macros.
 */

extern unsigned long irq_flags;

#define local_irq_enable() \
	__asm__ __volatile__( \
		"sti %0;" \
		: \
		: "d" (irq_flags) \
	)

#define local_irq_disable() \
	do { \
		int __tmp_dummy; \
		__asm__ __volatile__( \
			"cli %0;" \
			: "=d" (__tmp_dummy) \
		); \
	} while (0)

#if ANOMALY_05000244 && defined(CONFIG_BFIN_ICACHE)
# define NOP_PAD_ANOMALY_05000244 "nop; nop;"
#else
# define NOP_PAD_ANOMALY_05000244
#endif

#define idle_with_irq_disabled() \
	__asm__ __volatile__( \
		NOP_PAD_ANOMALY_05000244 \
		".align 8;" \
		"sti %0;" \
		"idle;" \
		: \
		: "d" (irq_flags) \
	)

#ifdef CONFIG_DEBUG_HWERR
# define __save_and_cli(x) \
	__asm__ __volatile__( \
		"cli %0;" \
		"sti %1;" \
		: "=&d" (x) \
		: "d" (0x3F) \
	)
#else
# define __save_and_cli(x) \
	__asm__ __volatile__( \
		"cli %0;" \
		: "=&d" (x) \
	)
#endif

#define local_save_flags(x) \
	__asm__ __volatile__( \
		"cli %0;" \
		"sti %0;" \
		: "=d" (x) \
	)

#ifdef CONFIG_DEBUG_HWERR
#define irqs_enabled_from_flags(x) (((x) & ~0x3f) != 0)
#else
#define irqs_enabled_from_flags(x) ((x) != 0x1f)
#endif

#define local_irq_restore(x) \
	do { \
		if (irqs_enabled_from_flags(x)) \
			local_irq_enable(); \
	} while (0)

/* For spinlocks etc */
#define local_irq_save(x) __save_and_cli(x)

#define irqs_disabled() \
({ \
	unsigned long flags; \
	local_save_flags(flags); \
	!irqs_enabled_from_flags(flags); \
})

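/*
 * Illustrative sketch of the intended use of the macros above; the
 * helper name bfin_example_update_shared() is hypothetical and not part
 * of this header's API.  local_irq_save() stashes the current interrupt
 * state in a flags word, and local_irq_restore() only re-enables
 * interrupts if they were enabled on entry, so the pair nests safely.
 */
static inline void bfin_example_update_shared(unsigned long *shared,
					      unsigned long value)
{
	unsigned long flags;

	local_irq_save(flags);		/* mask interrupts on this CPU */
	*shared = value;		/* safe against interrupt handlers */
	local_irq_restore(flags);	/* re-enable only if enabled on entry */
}
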
/*
 * Force strict CPU ordering.
 */
#define nop()	asm volatile ("nop;\n\t"::)
#define mb()	asm volatile ("" : : :"memory")
#define rmb()	asm volatile ("" : : :"memory")
#define wmb()	asm volatile ("" : : :"memory")
#define set_rmb(var, value)	do { (void) xchg(&var, value); } while (0)
#define set_mb(var, value)	set_rmb(var, value)
#define set_wmb(var, value)	do { var = value; wmb(); } while (0)

#define read_barrier_depends()	do { } while (0)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while (0)
#endif

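/*
 * Illustrative sketch of why the ordering macros matter; the
 * bfin_example_publish()/bfin_example_consume() helpers are hypothetical
 * and not part of this header's API.  The producer orders the data store
 * before the flag store with wmb(), and the consumer orders the flag
 * load before the data load with rmb().
 */
static inline void bfin_example_publish(volatile unsigned long *data,
					volatile unsigned long *ready,
					unsigned long value)
{
	*data = value;
	wmb();			/* data must be visible before the flag */
	*ready = 1;
}

static inline unsigned long bfin_example_consume(volatile unsigned long *data,
						 volatile unsigned long *ready)
{
	while (!*ready)
		;		/* spin until the producer sets the flag */
	rmb();			/* order the flag load before the data load */
	return *data;
}
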
#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

struct __xchg_dummy {
	unsigned long a[100];
};
#define __xg(x) ((volatile struct __xchg_dummy *)(x))

/*
 * Exchange *ptr with x; interrupts are masked around the access so the
 * swap is atomic with respect to interrupt handlers on this CPU.
 */
static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
				   int size)
{
	unsigned long tmp = 0;
	unsigned long flags = 0;

	local_irq_save(flags);

	switch (size) {
	case 1:
		__asm__ __volatile__
			("%0 = b%2 (z);\n\t"
			 "b%2 = %1;\n\t"
			 : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	case 2:
		__asm__ __volatile__
			("%0 = w%2 (z);\n\t"
			 "w%2 = %1;\n\t"
			 : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	case 4:
		__asm__ __volatile__
			("%0 = %2;\n\t"
			 "%2 = %1;\n\t"
			 : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	}
	local_irq_restore(flags);
	return tmp;
}

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long tmp = 0;
	unsigned long flags = 0;

	local_irq_save(flags);

	switch (size) {
	case 1:
		__asm__ __volatile__
			("%0 = b%3 (z);\n\t"
			 "CC = %1 == %0;\n\t"
			 "IF !CC JUMP 1f;\n\t"
			 "b%3 = %2;\n\t"
			 "1:\n\t"
			 : "=&d" (tmp) : "d" (old), "d" (new), "m" (*__xg(ptr)) : "memory");
		break;
	case 2:
		__asm__ __volatile__
			("%0 = w%3 (z);\n\t"
			 "CC = %1 == %0;\n\t"
			 "IF !CC JUMP 1f;\n\t"
			 "w%3 = %2;\n\t"
			 "1:\n\t"
			 : "=&d" (tmp) : "d" (old), "d" (new), "m" (*__xg(ptr)) : "memory");
		break;
	case 4:
		__asm__ __volatile__
			("%0 = %3;\n\t"
			 "CC = %1 == %0;\n\t"
			 "IF !CC JUMP 1f;\n\t"
			 "%3 = %2;\n\t"
			 "1:\n\t"
			 : "=&d" (tmp) : "d" (old), "d" (new), "m" (*__xg(ptr)) : "memory");
		break;
	}
	local_irq_restore(flags);
	return tmp;
}

#define cmpxchg(ptr,o,n) \
	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
		(unsigned long)(n),sizeof(*(ptr))))

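/*
 * Illustrative sketch of the retry-loop idiom the comment above
 * __cmpxchg() describes; bfin_example_atomic_add() is a hypothetical
 * helper, not part of this header's API.  Success is detected by
 * comparing the value cmpxchg() returns with the expected old value.
 */
static inline unsigned long bfin_example_atomic_add(unsigned long *ptr,
						    unsigned long inc)
{
	unsigned long old, new;

	do {
		old = *ptr;		/* snapshot the current value */
		new = old + inc;	/* compute the desired value */
	} while (cmpxchg(ptr, old, new) != old);	/* retry if it changed */

	return new;
}
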
#define prepare_to_switch()	do { } while (0)

/*
 * switch_to(prev, next, last) should switch tasks from prev to next,
 * first checking that next isn't the current task, in which case it
 * does nothing.  When prev runs again, last holds the task that was
 * running immediately beforehand.
 */

#include <asm/blackfin.h>

asmlinkage struct task_struct *resume(struct task_struct *prev, struct task_struct *next);

#define switch_to(prev,next,last) \
do { \
	memcpy(&task_thread_info(prev)->l1_task_info, L1_SCRATCH_TASK_INFO, \
	       sizeof *L1_SCRATCH_TASK_INFO); \
	memcpy(L1_SCRATCH_TASK_INFO, &task_thread_info(next)->l1_task_info, \
	       sizeof *L1_SCRATCH_TASK_INFO); \
	(last) = resume(prev, next); \
} while (0)

#endif	/* _BLACKFIN_SYSTEM_H */