#ifndef __ASM_ARM_SYSTEM_H
#define __ASM_ARM_SYSTEM_H

#ifdef __KERNEL__

#define CPU_ARCH_UNKNOWN	0
#define CPU_ARCH_ARMv3		1
#define CPU_ARCH_ARMv4		2
#define CPU_ARCH_ARMv4T		3
#define CPU_ARCH_ARMv5		4
#define CPU_ARCH_ARMv5T		5
#define CPU_ARCH_ARMv5TE	6
#define CPU_ARCH_ARMv5TEJ	7
#define CPU_ARCH_ARMv6		8
#define CPU_ARCH_ARMv7		9

/*
 * CR1 bits (CP#15 CR1)
 */
#define CR_M	(1 << 0)	/* MMU enable				*/
#define CR_A	(1 << 1)	/* Alignment abort enable		*/
#define CR_C	(1 << 2)	/* Dcache enable			*/
#define CR_W	(1 << 3)	/* Write buffer enable			*/
#define CR_P	(1 << 4)	/* 32-bit exception handler		*/
#define CR_D	(1 << 5)	/* 32-bit data address range		*/
#define CR_L	(1 << 6)	/* Implementation defined		*/
#define CR_B	(1 << 7)	/* Big endian				*/
#define CR_S	(1 << 8)	/* System MMU protection		*/
#define CR_R	(1 << 9)	/* ROM MMU protection			*/
#define CR_F	(1 << 10)	/* Implementation defined		*/
#define CR_Z	(1 << 11)	/* Implementation defined		*/
#define CR_I	(1 << 12)	/* Icache enable			*/
#define CR_V	(1 << 13)	/* Vectors relocated to 0xffff0000	*/
#define CR_RR	(1 << 14)	/* Round Robin cache replacement	*/
#define CR_L4	(1 << 15)	/* LDR pc can set T bit			*/
#define CR_DT	(1 << 16)
#define CR_IT	(1 << 18)
#define CR_ST	(1 << 19)
#define CR_FI	(1 << 21)	/* Fast interrupt (lower latency mode)	*/
#define CR_U	(1 << 22)	/* Unaligned access operation		*/
#define CR_XP	(1 << 23)	/* Extended page tables			*/
#define CR_VE	(1 << 24)	/* Vectored interrupts			*/
#define CR_EE	(1 << 25)	/* Exception (Big) Endian		*/
#define CR_TRE	(1 << 28)	/* TEX remap enable			*/
#define CR_AFE	(1 << 29)	/* Access flag enable			*/
#define CR_TE	(1 << 30)	/* Thumb exception enable		*/
/*
 * This is used to ensure the compiler did actually allocate the register we
 * asked it for in some inline assembly sequences.  Apparently we can't trust
 * the compiler from one version to another so a bit of paranoia won't hurt.
 * This string is meant to be concatenated with the inline asm string and
 * will cause compilation to stop on mismatch.
 * (for details, see gcc PR 15089)
 */
#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"
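/*
 * Illustrative use (hypothetical example, not taken from this file): prefix
 * an inline asm body with
 *	__asmeq("%0", "r0") __asmeq("%1", "r1")
 * so that assembly stops with .err if %0/%1 were not allocated to r0/r1.
 */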

#ifndef __ASSEMBLY__

#include <linux/linkage.h>
#include <linux/irqflags.h>

#include <asm/outercache.h>

void cpu_idle_wait(void);

struct thread_info;
struct task_struct;

/* information about the system we're running on */
extern unsigned int system_rev;
extern unsigned int system_serial_low;
extern unsigned int system_serial_high;
extern unsigned int mem_fclk_21285;

struct pt_regs;

void die(const char *msg, struct pt_regs *regs, int err);

struct siginfo;
void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
		unsigned long err, unsigned long trap);

void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
				       struct pt_regs *),
		     int sig, int code, const char *name);

void hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int,
					struct pt_regs *),
		      int sig, int code, const char *name);

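/*
 * xchg(ptr, x) atomically stores x in *ptr and returns the value previously
 * held there; the size-dependent work is done by __xchg() further down.
 */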
#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

extern asmlinkage void __backtrace(void);
extern asmlinkage void c_backtrace(unsigned long fp, int pmode);

struct mm_struct;
extern void show_pte(struct mm_struct *mm, unsigned long addr);
extern void __show_regs(struct pt_regs *);

extern int cpu_architecture(void);
extern void cpu_init(void);

void arm_machine_restart(char mode, const char *cmd);
extern void (*arm_pm_restart)(char str, const char *cmd);

#define UDBG_UNDEFINED	(1 << 0)
#define UDBG_SYSCALL	(1 << 1)
#define UDBG_BADABORT	(1 << 2)
#define UDBG_SEGV	(1 << 3)
#define UDBG_BUS	(1 << 4)

extern unsigned int user_debug;

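/*
 * vectors_high() reports whether the exception vectors live at the high
 * address 0xffff0000 (CR_V set) rather than at 0x00000000; pre-ARMv4 cores
 * have no such feature, so the macro is hard-wired to 0 there.
 */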
#if __LINUX_ARM_ARCH__ >= 4
#define vectors_high()	(cr_alignment & CR_V)
#else
#define vectors_high()	(0)
#endif

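/*
 * ARMv6K/ARMv7 hint instructions: sev() signals an event to other cores,
 * wfe() waits for such an event and wfi() waits for an interrupt, letting
 * the core sleep meanwhile; used e.g. by the ARM spinlock wait/wake paths.
 */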
#if __LINUX_ARM_ARCH__ >= 7 ||		\
	(__LINUX_ARM_ARCH__ == 6 && defined(CONFIG_CPU_32v6K))
#define sev()	__asm__ __volatile__ ("sev" : : : "memory")
#define wfe()	__asm__ __volatile__ ("wfe" : : : "memory")
#define wfi()	__asm__ __volatile__ ("wfi" : : : "memory")
#endif

#if __LINUX_ARM_ARCH__ >= 7
#define isb() __asm__ __volatile__ ("isb" : : : "memory")
#define dsb() __asm__ __volatile__ ("dsb" : : : "memory")
#define dmb() __asm__ __volatile__ ("dmb" : : : "memory")
#elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6
#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
				    : : "r" (0) : "memory")
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
				    : : "r" (0) : "memory")
#define dmb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
				    : : "r" (0) : "memory")
#elif defined(CONFIG_CPU_FA526)
#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
				    : : "r" (0) : "memory")
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
				    : : "r" (0) : "memory")
#define dmb() __asm__ __volatile__ ("" : : : "memory")
#else
#define isb() __asm__ __volatile__ ("" : : : "memory")
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
				    : : "r" (0) : "memory")
#define dmb() __asm__ __volatile__ ("" : : : "memory")
#endif

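/*
 * mb()/rmb()/wmb() are mandatory barriers: with the outer-cache/DMA options
 * below they expand to real DSB/DMB sequences (plus an outer_sync() for mb())
 * so that device and DMA accesses are ordered.  The smp_*() variants only
 * need to order accesses between CPUs, so they collapse to plain compiler
 * barriers on !CONFIG_SMP builds.
 */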
#ifdef CONFIG_ARCH_HAS_BARRIERS
#include <mach/barriers.h>
#elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP)
#define mb()		do { dsb(); outer_sync(); } while (0)
#define rmb()		dsb()
#define wmb()		mb()
#else
#include <asm/memory.h>
#define mb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define rmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define wmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#endif

#ifndef CONFIG_SMP
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#else
#define smp_mb()	dmb()
#define smp_rmb()	dmb()
#define smp_wmb()	dmb()
#endif

#define read_barrier_depends()		do { } while(0)
#define smp_read_barrier_depends()	do { } while(0)

#define set_mb(var, value)	do { var = value; smp_mb(); } while (0)
#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");

extern unsigned long cr_no_alignment;	/* defined in entry-armv.S */
extern unsigned long cr_alignment;	/* defined in entry-armv.S */

static inline unsigned int get_cr(void)
{
	unsigned int val;
	asm("mrc p15, 0, %0, c1, c0, 0	@ get CR" : "=r" (val) : : "cc");
	return val;
}

static inline void set_cr(unsigned int val)
{
	asm volatile("mcr p15, 0, %0, c1, c0, 0	@ set CR"
	  : : "r" (val) : "cc");
	isb();
}

#ifndef CONFIG_SMP
extern void adjust_cr(unsigned long mask, unsigned long set);
#endif

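/*
 * Values for the per-coprocessor fields of the CP15 c1 Coprocessor Access
 * Control Register: full (user + privileged) access, privileged-only access,
 * or no access to coprocessor n.  Written via set_copro_access() below, for
 * example when enabling VFP (coprocessors 10 and 11).
 */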
#define CPACC_FULL(n)		(3 << (n * 2))
#define CPACC_SVC(n)		(1 << (n * 2))
#define CPACC_DISABLE(n)	(0 << (n * 2))

static inline unsigned int get_copro_access(void)
{
	unsigned int val;
	asm("mrc p15, 0, %0, c1, c0, 2 @ get copro access"
	  : "=r" (val) : : "cc");
	return val;
}

static inline void set_copro_access(unsigned int val)
{
	asm volatile("mcr p15, 0, %0, c1, c0, 2 @ set copro access"
	  : : "r" (val) : "cc");
	isb();
}

/*
 * switch_mm() may do a full cache flush over the context switch,
 * so enable interrupts over the context switch to avoid high
 * latency.
 */
#define __ARCH_WANT_INTERRUPTS_ON_CTXSW

/*
 * switch_to(prev, next) should switch from task `prev' to `next';
 * `prev' will never be the same as `next'.  schedule() itself
 * contains the memory barrier to tell GCC not to cache `current'.
 */
extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *);

#define switch_to(prev,next,last)					\
do {									\
	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next));	\
} while (0)

#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two work-arounds:
 *  1. Disable interrupts and emulate the atomic swap
 *  2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 *
 * NOTE that this solution won't work on an SMP system, so explicitly
 * forbid it here.
 */
#define swp_is_buggy
#endif

static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);
	unsigned long ret;
#ifdef swp_is_buggy
	unsigned long flags;
#endif
#if __LINUX_ARM_ARCH__ >= 6
	unsigned int tmp;
#endif

	smp_mb();

	switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
	case 1:
		asm volatile("@	__xchg1\n"
		"1:	ldrexb	%0, [%3]\n"
		"	strexb	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"1:	ldrex	%0, [%3]\n"
		"	strex	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#elif defined(swp_is_buggy)
#ifdef CONFIG_SMP
#error SMP is not supported on this platform
#endif
	case 1:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned char *)ptr;
		*(volatile unsigned char *)ptr = x;
		raw_local_irq_restore(flags);
		break;

	case 4:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned long *)ptr;
		*(volatile unsigned long *)ptr = x;
		raw_local_irq_restore(flags);
		break;
#else
	case 1:
		asm volatile("@	__xchg1\n"
		"	swpb	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"	swp	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#endif
	default:
		__bad_xchg(ptr, size), ret = 0;
		break;
	}
	smp_mb();

	return ret;
}


extern void disable_hlt(void);
extern void enable_hlt(void);

void cpu_idle_wait(void);

#include <asm-generic/cmpxchg-local.h>

#if __LINUX_ARM_ARCH__ < 6
/* min ARCH < ARMv6 */

#ifdef CONFIG_SMP
#error "SMP is not supported on this platform"
#endif

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n)					       \
	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
			(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

#ifndef CONFIG_SMP
#include <asm-generic/cmpxchg.h>
#endif

#else	/* min ARCH >= ARMv6 */

extern void __bad_cmpxchg(volatile void *ptr, int size);

/*
 * cmpxchg only supports 32-bit operands on ARMv6.
 */

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long oldval, res;

	switch (size) {
#ifndef CONFIG_CPU_V6	/* min ARCH >= ARMv6K */
	case 1:
		do {
			asm volatile("@ __cmpxchg1\n"
			"	ldrexb	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexbeq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
	case 2:
		do {
			asm volatile("@ __cmpxchg2\n"
			"	ldrexh	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexheq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
#endif
	case 4:
		do {
			asm volatile("@ __cmpxchg4\n"
			"	ldrex	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexeq	%0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
	default:
		__bad_cmpxchg(ptr, size);
		oldval = 0;
	}

	return oldval;
}

static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
					 unsigned long new, int size)
{
	unsigned long ret;

	smp_mb();
	ret = __cmpxchg(ptr, old, new, size);
	smp_mb();

	return ret;
}

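/*
 * cmpxchg(ptr, old, new): if *ptr equals old, atomically replace it with new;
 * either way the value that was found in *ptr is returned, so a caller tests
 * for success by comparing the return value against old.
 */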
#define cmpxchg(ptr,o,n)						\
	((__typeof__(*(ptr)))__cmpxchg_mb((ptr),			\
					  (unsigned long)(o),		\
					  (unsigned long)(n),		\
					  sizeof(*(ptr))))

static inline unsigned long __cmpxchg_local(volatile void *ptr,
					    unsigned long old,
					    unsigned long new, int size)
{
	unsigned long ret;

	switch (size) {
#ifdef CONFIG_CPU_V6	/* min ARCH == ARMv6 */
	case 1:
	case 2:
		ret = __cmpxchg_local_generic(ptr, old, new, size);
		break;
#endif
	default:
		ret = __cmpxchg(ptr, old, new, size);
	}

	return ret;
}

#define cmpxchg_local(ptr,o,n)						\
	((__typeof__(*(ptr)))__cmpxchg_local((ptr),			\
				       (unsigned long)(o),		\
				       (unsigned long)(n),		\
				       sizeof(*(ptr))))

#ifndef CONFIG_CPU_V6	/* min ARCH >= ARMv6K */

/*
 * Note : ARMv7-M (currently unsupported by Linux) does not support
 * ldrexd/strexd. If ARMv7-M is ever supported by the Linux kernel, it should
 * not be allowed to use __cmpxchg64.
 */
static inline unsigned long long __cmpxchg64(volatile void *ptr,
					     unsigned long long old,
					     unsigned long long new)
{
	register unsigned long long oldval asm("r0");
	register unsigned long long __old asm("r2") = old;
	register unsigned long long __new asm("r4") = new;
	unsigned long res;

	do {
		asm volatile(
		"	@ __cmpxchg8\n"
		"	ldrexd	%1, %H1, [%2]\n"
		"	mov	%0, #0\n"
		"	teq	%1, %3\n"
		"	teqeq	%H1, %H3\n"
		"	strexdeq %0, %4, %H4, [%2]\n"
			: "=&r" (res), "=&r" (oldval)
			: "r" (ptr), "Ir" (__old), "r" (__new)
			: "memory", "cc");
	} while (res);

	return oldval;
}

static inline unsigned long long __cmpxchg64_mb(volatile void *ptr,
						unsigned long long old,
						unsigned long long new)
{
	unsigned long long ret;

	smp_mb();
	ret = __cmpxchg64(ptr, old, new);
	smp_mb();

	return ret;
}

#define cmpxchg64(ptr,o,n)						\
	((__typeof__(*(ptr)))__cmpxchg64_mb((ptr),			\
					    (unsigned long long)(o),	\
					    (unsigned long long)(n)))

#define cmpxchg64_local(ptr,o,n)					\
	((__typeof__(*(ptr)))__cmpxchg64((ptr),				\
					 (unsigned long long)(o),	\
					 (unsigned long long)(n)))

#else /* min ARCH = ARMv6 */

#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

#endif

#endif	/* __LINUX_ARM_ARCH__ >= 6 */

#endif /* __ASSEMBLY__ */

#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */

#endif