/*
 * arch/arm/include/asm/assembler.h
 *
 * Copyright (C) 1996-2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This file contains arm architecture specific defines
 * for the different processors.
 *
 * Do not include any C declarations in this file - it is included by
 * assembler source.
 */
#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#include <asm/ptrace.h>

/*
 * Endian independent macros for shifting bytes within registers.
 */
#ifndef __ARMEB__
#define pull		lsr
#define push		lsl
#define get_byte_0	lsl #0
#define get_byte_1	lsr #8
#define get_byte_2	lsr #16
#define get_byte_3	lsr #24
#define put_byte_0	lsl #0
#define put_byte_1	lsl #8
#define put_byte_2	lsl #16
#define put_byte_3	lsl #24
#else
#define pull		lsl
#define push		lsr
#define get_byte_0	lsr #24
#define get_byte_1	lsr #16
#define get_byte_2	lsr #8
#define get_byte_3	lsl #0
#define put_byte_0	lsl #24
#define put_byte_1	lsl #16
#define put_byte_2	lsl #8
#define put_byte_3	lsl #0
#endif
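
/*
 * Illustrative sketch (not from the original header; register choices are
 * arbitrary): strb only stores bits 7:0 of its source register, so shifting
 * the wanted byte down with get_byte_N gives an endian-independent byte
 * store:
 *
 *	ldr	r4, [r1], #4		@ load a whole word
 *	mov	r5, r4, get_byte_1	@ byte 1 of r4 -> bits 7:0 of r5
 *	strb	r5, [r0], #1		@ same result on LE and BE builds
 */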

/*
 * Data preload for architectures that support it
 */
#if __LINUX_ARM_ARCH__ >= 5
#define PLD(code...)	code
#else
#define PLD(code...)
#endif
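
/*
 * Usage sketch (illustrative only; the offset is an arbitrary example):
 * prefetch ahead in a copy loop.  On pre-ARMv5 builds the whole line
 * assembles to nothing:
 *
 *	PLD(	pld	[r1, #32]	)
 */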
57
Linus Torvalds1da177e2005-04-16 15:20:36 -070058/*
Nicolas Pitre2239aff2008-03-31 12:38:31 -040059 * This can be used to enable code to cacheline align the destination
60 * pointer when bulk writing to memory. Experiments on StrongARM and
61 * XScale didn't show this a worthwhile thing to do when the cache is not
62 * set to write-allocate (this would need further testing on XScale when WA
63 * is used).
64 *
65 * On Feroceon there is much to gain however, regardless of cache mode.
66 */
67#ifdef CONFIG_CPU_FEROCEON
68#define CALGN(code...) code
69#else
70#define CALGN(code...)
71#endif
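
/*
 * Usage sketch (illustrative; register choices are assumptions): compute
 * how far the destination is from the next cacheline boundary so a copy
 * loop can align itself; on other CPUs these lines vanish:
 *
 *	CALGN(	ands	ip, r0, #31	)	@ bytes into the line
 *	CALGN(	rsb	ip, ip, #32	)	@ bytes to the boundary
 */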

/*
 * Enable and disable interrupts
 */
#if __LINUX_ARM_ARCH__ >= 6
	.macro	disable_irq
	cpsid	i
	.endm

	.macro	enable_irq
	cpsie	i
	.endm
#else
	.macro	disable_irq
	msr	cpsr_c, #PSR_I_BIT | SVC_MODE
	.endm

	.macro	enable_irq
	msr	cpsr_c, #SVC_MODE
	.endm
#endif
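
/*
 * Usage sketch (illustrative only): bracket a short critical section in
 * SVC-mode code; the macros pick the right encoding for the build's
 * architecture level:
 *
 *	disable_irq
 *	@ ... code that must not be interrupted ...
 *	enable_irq
 */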

/*
 * Save the current IRQ state and disable IRQs.  Note that this macro
 * assumes FIQs are enabled, and that the processor is in SVC mode.
 */
	.macro	save_and_disable_irqs, oldcpsr
	mrs	\oldcpsr, cpsr
	disable_irq
	.endm

/*
 * Restore interrupt state previously stored in a register.  We don't
 * guarantee that this will preserve the flags.
 */
	.macro	restore_irqs, oldcpsr
	msr	cpsr_c, \oldcpsr
	.endm
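
/*
 * Usage sketch (illustrative; r9 is an arbitrary scratch register): the two
 * macros above are normally used as a pair when the caller cannot assume
 * IRQs were enabled on entry:
 *
 *	save_and_disable_irqs r9	@ old CPSR kept in r9
 *	@ ... critical section ...
 *	restore_irqs r9			@ put the saved I bit back
 */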

#define USER(x...)				\
9999:	x;					\
	.section __ex_table,"a";	\
	.align	3;			\
	.long	9999b,9001f;		\
	.previous
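
/*
 * Usage sketch (illustrative only; #-EFAULT assumes <asm/errno.h> is
 * included): wrap a user-space access so that a fault is redirected, via
 * the exception table entry, to a local fixup label 9001 that the
 * including code must provide:
 *
 *	USER(	ldrt	r2, [r0], #4	)	@ may fault
 *	...
 * 9001:	mov	r0, #-EFAULT		@ runs only if the ldrt faulted
 */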