#ifndef __ASM_ARM_SYSTEM_H
#define __ASM_ARM_SYSTEM_H

#ifdef __KERNEL__

#define CPU_ARCH_UNKNOWN	0
#define CPU_ARCH_ARMv3		1
#define CPU_ARCH_ARMv4		2
#define CPU_ARCH_ARMv4T		3
#define CPU_ARCH_ARMv5		4
#define CPU_ARCH_ARMv5T		5
#define CPU_ARCH_ARMv5TE	6
#define CPU_ARCH_ARMv5TEJ	7
#define CPU_ARCH_ARMv6		8
#define CPU_ARCH_ARMv7		9

/*
 * CR1 bits (CP#15 CR1)
 */
#define CR_M	(1 << 0)	/* MMU enable				*/
#define CR_A	(1 << 1)	/* Alignment abort enable		*/
#define CR_C	(1 << 2)	/* Dcache enable			*/
#define CR_W	(1 << 3)	/* Write buffer enable			*/
#define CR_P	(1 << 4)	/* 32-bit exception handler		*/
#define CR_D	(1 << 5)	/* 32-bit data address range		*/
#define CR_L	(1 << 6)	/* Implementation defined		*/
#define CR_B	(1 << 7)	/* Big endian				*/
#define CR_S	(1 << 8)	/* System MMU protection		*/
#define CR_R	(1 << 9)	/* ROM MMU protection			*/
#define CR_F	(1 << 10)	/* Implementation defined		*/
#define CR_Z	(1 << 11)	/* Implementation defined		*/
#define CR_I	(1 << 12)	/* Icache enable			*/
#define CR_V	(1 << 13)	/* Vectors relocated to 0xffff0000	*/
#define CR_RR	(1 << 14)	/* Round Robin cache replacement	*/
#define CR_L4	(1 << 15)	/* LDR pc can set T bit			*/
#define CR_DT	(1 << 16)
#define CR_IT	(1 << 18)
#define CR_ST	(1 << 19)
#define CR_FI	(1 << 21)	/* Fast interrupt (lower latency mode)	*/
#define CR_U	(1 << 22)	/* Unaligned access operation		*/
#define CR_XP	(1 << 23)	/* Extended page tables			*/
#define CR_VE	(1 << 24)	/* Vectored interrupts			*/
#define CR_EE	(1 << 25)	/* Exception (Big) Endian		*/
#define CR_TRE	(1 << 28)	/* TEX remap enable			*/
#define CR_AFE	(1 << 29)	/* Access flag enable			*/
#define CR_TE	(1 << 30)	/* Thumb exception enable		*/

/*
 * This is used to ensure the compiler did actually allocate the register we
 * asked it for in some inline assembly sequences.  Apparently we can't trust
 * the compiler from one version to another so a bit of paranoia won't hurt.
 * This string is meant to be concatenated with the inline asm string and
 * will cause compilation to stop on mismatch.
 * (for details, see gcc PR 15089)
 */
#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"
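
/*
 * Illustrative use of __asmeq (a sketch, not code from this file): a
 * caller that fixes its operands to specific registers can make the
 * build fail if the compiler ever allocates them differently.  Here
 * 'helper' is a hypothetical assembly routine taking its argument in r0:
 *
 *	register unsigned long r0_arg asm("r0") = arg;
 *	asm volatile(
 *		__asmeq("%0", "r0")
 *		"bl	helper"
 *		: "+r" (r0_arg) : : "lr", "cc");
 *
 * If %0 were not r0, the generated ".ifnc" directive would trip the
 * assembler's .err and stop compilation, as described above.
 */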

#ifndef __ASSEMBLY__

#include <linux/linkage.h>
#include <linux/irqflags.h>

#include <asm/outercache.h>

#define __exception	__attribute__((section(".exception.text")))

struct thread_info;
struct task_struct;

/* information about the system we're running on */
extern unsigned int system_rev;
extern unsigned int system_serial_low;
extern unsigned int system_serial_high;
extern unsigned int mem_fclk_21285;

struct pt_regs;

void die(const char *msg, struct pt_regs *regs, int err);

struct siginfo;
void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
		unsigned long err, unsigned long trap);

void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
				       struct pt_regs *),
		     int sig, int code, const char *name);

#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
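
/*
 * Example usage (a sketch; 'lock' is a hypothetical variable): xchg()
 * atomically stores the new value and returns the previous one, the
 * classic building block of a test-and-set lock:
 *
 *	static unsigned long lock;
 *
 *	while (xchg(&lock, 1UL) != 0)
 *		cpu_relax();		(spin until we observe 0 -> 1)
 *	(critical section)
 *	lock = 0;
 *
 * The smp_mb() calls inside __xchg() below give the acquire/release
 * ordering such a lock needs.
 */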

extern asmlinkage void __backtrace(void);
extern asmlinkage void c_backtrace(unsigned long fp, int pmode);

struct mm_struct;
extern void show_pte(struct mm_struct *mm, unsigned long addr);
extern void __show_regs(struct pt_regs *);

extern int cpu_architecture(void);
extern void cpu_init(void);

void arm_machine_restart(char mode, const char *cmd);
extern void (*arm_pm_restart)(char str, const char *cmd);

#define UDBG_UNDEFINED	(1 << 0)
#define UDBG_SYSCALL	(1 << 1)
#define UDBG_BADABORT	(1 << 2)
#define UDBG_SEGV	(1 << 3)
#define UDBG_BUS	(1 << 4)

extern unsigned int user_debug;

#if __LINUX_ARM_ARCH__ >= 4
#define vectors_high()	(cr_alignment & CR_V)
#else
#define vectors_high()	(0)
#endif

#if __LINUX_ARM_ARCH__ >= 7
#define isb() __asm__ __volatile__ ("isb" : : : "memory")
#define dsb() __asm__ __volatile__ ("dsb" : : : "memory")
#define dmb() __asm__ __volatile__ ("dmb" : : : "memory")
#elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6
#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
				    : : "r" (0) : "memory")
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
				    : : "r" (0) : "memory")
#define dmb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
				    : : "r" (0) : "memory")
#elif defined(CONFIG_CPU_FA526)
#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
				    : : "r" (0) : "memory")
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
				    : : "r" (0) : "memory")
#define dmb() __asm__ __volatile__ ("" : : : "memory")
#else
#define isb() __asm__ __volatile__ ("" : : : "memory")
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
				    : : "r" (0) : "memory")
#define dmb() __asm__ __volatile__ ("" : : : "memory")
#endif

#ifdef CONFIG_ARCH_HAS_BARRIERS
#include <mach/barriers.h>
#elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP)
#define mb()		do { dsb(); outer_sync(); } while (0)
#define rmb()		dmb()
#define wmb()		mb()
#else
#define mb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define rmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define wmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#endif

#ifndef CONFIG_SMP
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#else
#define smp_mb()	dmb()
#define smp_rmb()	dmb()
#define smp_wmb()	dmb()
#endif
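
/*
 * Example (a sketch with hypothetical 'data' and 'ready' variables): a
 * producer/consumer pair must pair a write barrier with a read barrier
 * so the flag never becomes visible before the payload:
 *
 *	data = compute();		(producer)
 *	smp_wmb();
 *	ready = 1;
 *
 *	while (!ready)			(consumer)
 *		cpu_relax();
 *	smp_rmb();
 *	consume(data);
 */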

#define read_barrier_depends()		do { } while(0)
#define smp_read_barrier_depends()	do { } while(0)

#define set_mb(var, value)	do { var = value; smp_mb(); } while (0)
#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");

extern unsigned long cr_no_alignment;	/* defined in entry-armv.S */
extern unsigned long cr_alignment;	/* defined in entry-armv.S */

static inline unsigned int get_cr(void)
{
	unsigned int val;
	asm("mrc p15, 0, %0, c1, c0, 0	@ get CR" : "=r" (val) : : "cc");
	return val;
}

static inline void set_cr(unsigned int val)
{
	asm volatile("mcr p15, 0, %0, c1, c0, 0	@ set CR"
	  : : "r" (val) : "cc");
	isb();
}
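
/*
 * Illustrative read-modify-write of the control register (a sketch,
 * not a call made by this header), e.g. to enable alignment fault
 * checking:
 *
 *	set_cr(get_cr() | CR_A);
 *
 * Callers normally go through adjust_cr() below, which also keeps the
 * cr_alignment/cr_no_alignment copies in step.
 */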

#ifndef CONFIG_SMP
extern void adjust_cr(unsigned long mask, unsigned long set);
#endif

#define CPACC_FULL(n)		(3 << (n * 2))
#define CPACC_SVC(n)		(1 << (n * 2))
#define CPACC_DISABLE(n)	(0 << (n * 2))

static inline unsigned int get_copro_access(void)
{
	unsigned int val;
	asm("mrc p15, 0, %0, c1, c0, 2 @ get copro access"
	  : "=r" (val) : : "cc");
	return val;
}

static inline void set_copro_access(unsigned int val)
{
	asm volatile("mcr p15, 0, %0, c1, c0, 2 @ set copro access"
	  : : "r" (val) : "cc");
	isb();
}
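
/*
 * Example (a sketch): grant full (user + kernel) access to
 * coprocessors 10 and 11, the way a VFP-style setup would, using the
 * CPACC helpers above:
 *
 *	set_copro_access(get_copro_access()
 *			 | CPACC_FULL(10) | CPACC_FULL(11));
 */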

/*
 * switch_mm() may do a full cache flush over the context switch,
 * so enable interrupts over the context switch to avoid high
 * latency.
 */
#define __ARCH_WANT_INTERRUPTS_ON_CTXSW

/*
 * switch_to(prev, next) should switch from task `prev' to `next';
 * `prev' will never be the same as `next'.  schedule() itself
 * contains the memory barrier to tell GCC not to cache `current'.
 */
extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *);

#define switch_to(prev,next,last)					\
do {									\
	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next));	\
} while (0)

#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two work-arounds:
 *	1. Disable interrupts and emulate the atomic swap
 *	2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 *
 * NOTE that this solution won't work on an SMP system, so explicitly
 * forbid it here.
 */
#define swp_is_buggy
#endif

static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);
	unsigned long ret;
#ifdef swp_is_buggy
	unsigned long flags;
#endif
#if __LINUX_ARM_ARCH__ >= 6
	unsigned int tmp;
#endif

	smp_mb();

	switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
	case 1:
		asm volatile("@	__xchg1\n"
		"1:	ldrexb	%0, [%3]\n"
		"	strexb	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"1:	ldrex	%0, [%3]\n"
		"	strex	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#elif defined(swp_is_buggy)
#ifdef CONFIG_SMP
#error SMP is not supported on this platform
#endif
	case 1:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned char *)ptr;
		*(volatile unsigned char *)ptr = x;
		raw_local_irq_restore(flags);
		break;

	case 4:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned long *)ptr;
		*(volatile unsigned long *)ptr = x;
		raw_local_irq_restore(flags);
		break;
#else
	case 1:
		asm volatile("@	__xchg1\n"
		"	swpb	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"	swp	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#endif
	default:
		__bad_xchg(ptr, size), ret = 0;
		break;
	}
	smp_mb();

	return ret;
}

extern void disable_hlt(void);
extern void enable_hlt(void);

void cpu_idle_wait(void);

#include <asm-generic/cmpxchg-local.h>

#if __LINUX_ARM_ARCH__ < 6

#ifdef CONFIG_SMP
#error "SMP is not supported on this platform"
#endif

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n)					       \
	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
			(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

#ifndef CONFIG_SMP
#include <asm-generic/cmpxchg.h>
#endif

#else	/* __LINUX_ARM_ARCH__ >= 6 */

extern void __bad_cmpxchg(volatile void *ptr, int size);

/*
 * cmpxchg only supports 32-bit operands on ARMv6.
 */

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long oldval, res;

	switch (size) {
#ifdef CONFIG_CPU_32v6K
	case 1:
		do {
			asm volatile("@ __cmpxchg1\n"
			"	ldrexb	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexbeq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
	case 2:
		do {
			asm volatile("@ __cmpxchg2\n"
			"	ldrexh	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexheq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
#endif /* CONFIG_CPU_32v6K */
	case 4:
		do {
			asm volatile("@ __cmpxchg4\n"
			"	ldrex	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexeq	%0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
	default:
		__bad_cmpxchg(ptr, size);
		oldval = 0;
	}

	return oldval;
}

static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
					 unsigned long new, int size)
{
	unsigned long ret;

	smp_mb();
	ret = __cmpxchg(ptr, old, new, size);
	smp_mb();

	return ret;
}

#define cmpxchg(ptr,o,n)						\
	((__typeof__(*(ptr)))__cmpxchg_mb((ptr),			\
					  (unsigned long)(o),		\
					  (unsigned long)(n),		\
					  sizeof(*(ptr))))
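
/*
 * Example usage (a sketch; 'counter' is a hypothetical variable): the
 * standard compare-and-swap retry loop, which only publishes the new
 * value if nobody raced in between the read and the update:
 *
 *	unsigned long old, new;
 *
 *	do {
 *		old = counter;
 *		new = old + 1;
 *	} while (cmpxchg(&counter, old, new) != old);
 */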

static inline unsigned long __cmpxchg_local(volatile void *ptr,
					    unsigned long old,
					    unsigned long new, int size)
{
	unsigned long ret;

	switch (size) {
#ifndef CONFIG_CPU_32v6K
	case 1:
	case 2:
		ret = __cmpxchg_local_generic(ptr, old, new, size);
		break;
#endif	/* !CONFIG_CPU_32v6K */
	default:
		ret = __cmpxchg(ptr, old, new, size);
	}

	return ret;
}

#define cmpxchg_local(ptr,o,n)						\
	((__typeof__(*(ptr)))__cmpxchg_local((ptr),			\
				       (unsigned long)(o),		\
				       (unsigned long)(n),		\
				       sizeof(*(ptr))))

#ifdef CONFIG_CPU_32v6K

/*
 * Note: ARMv7-M (currently unsupported by Linux) does not support
 * ldrexd/strexd. If ARMv7-M is ever supported by the Linux kernel, it should
 * not be allowed to use __cmpxchg64.
 */
static inline unsigned long long __cmpxchg64(volatile void *ptr,
					     unsigned long long old,
					     unsigned long long new)
{
	register unsigned long long oldval asm("r0");
	register unsigned long long __old asm("r2") = old;
	register unsigned long long __new asm("r4") = new;
	unsigned long res;

	do {
		asm volatile(
		"	@ __cmpxchg8\n"
		"	ldrexd	%1, %H1, [%2]\n"
		"	mov	%0, #0\n"
		"	teq	%1, %3\n"
		"	teqeq	%H1, %H3\n"
		"	strexdeq %0, %4, %H4, [%2]\n"
			: "=&r" (res), "=&r" (oldval)
			: "r" (ptr), "Ir" (__old), "r" (__new)
			: "memory", "cc");
	} while (res);

	return oldval;
}

static inline unsigned long long __cmpxchg64_mb(volatile void *ptr,
						unsigned long long old,
						unsigned long long new)
{
	unsigned long long ret;

	smp_mb();
	ret = __cmpxchg64(ptr, old, new);
	smp_mb();

	return ret;
}

#define cmpxchg64(ptr,o,n)						\
	((__typeof__(*(ptr)))__cmpxchg64_mb((ptr),			\
					    (unsigned long long)(o),	\
					    (unsigned long long)(n)))
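
/*
 * Example usage (a sketch; 'stamp' is a hypothetical 64-bit variable):
 * the same retry pattern as cmpxchg(), applied to a 64-bit quantity
 * via ldrexd/strexd:
 *
 *	unsigned long long old, new;
 *
 *	do {
 *		old = stamp;
 *		new = old + 1;
 *	} while (cmpxchg64(&stamp, old, new) != old);
 */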

#define cmpxchg64_local(ptr,o,n)					\
	((__typeof__(*(ptr)))__cmpxchg64((ptr),				\
					 (unsigned long long)(o),	\
					 (unsigned long long)(n)))

#else /* !CONFIG_CPU_32v6K */

#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

#endif /* CONFIG_CPU_32v6K */

#endif /* __LINUX_ARM_ARCH__ >= 6 */

#endif /* __ASSEMBLY__ */

#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */

#endif