/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef _ASM_POWERPC_SYSTEM_H
#define _ASM_POWERPC_SYSTEM_H

#include <linux/kernel.h>
#include <linux/irqflags.h>

#include <asm/hw_irq.h>

/*
 * Memory barrier.
 * The sync instruction guarantees that all memory accesses initiated
 * by this processor have been performed (with respect to all other
 * mechanisms that access memory).  The eieio instruction is a barrier
 * providing an ordering (separately) for (a) cacheable stores and (b)
 * loads and stores to non-cacheable memory (e.g. I/O devices).
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 * read_barrier_depends() prevents data-dependent loads being reordered
 *	across this point (nop on PPC).
 *
 * *mb() variants without smp_ prefix must order all types of memory
 * operations with one another.  sync is the only instruction sufficient
 * to do this.
 *
 * For the smp_ barriers, ordering is for cacheable memory operations
 * only.  We have to use the sync instruction for smp_mb(), since lwsync
 * doesn't order loads with respect to previous stores.  Lwsync can be
 * used for smp_rmb() and smp_wmb().
 *
 * However, on CPUs that don't support lwsync, lwsync actually maps to a
 * heavy-weight sync, so smp_wmb() can be a lighter-weight eieio.
 */
#define mb()   __asm__ __volatile__ ("sync" : : : "memory")
#define rmb()  __asm__ __volatile__ ("sync" : : : "memory")
#define wmb()  __asm__ __volatile__ ("sync" : : : "memory")
#define read_barrier_depends()  do { } while (0)

#define set_mb(var, value)	do { var = value; mb(); } while (0)

#ifdef __KERNEL__
#define AT_VECTOR_SIZE_ARCH 6 /* entries in ARCH_DLINFO */
#ifdef CONFIG_SMP

#ifdef __SUBARCH_HAS_LWSYNC
#    define SMPWMB      LWSYNC
#else
#    define SMPWMB      eieio
#endif

#define smp_mb()	mb()
#define smp_rmb()	__asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory")
#define smp_wmb()	__asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while (0)
#endif /* CONFIG_SMP */
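
/*
 * Illustrative sketch (not part of this header's API): the usual
 * producer/consumer pairing of the smp_ barriers defined above.  The
 * variable and function names here are hypothetical; 'data' and 'flag'
 * are assumed to be shared, cacheable memory.
 *
 *	CPU 0 (producer)		CPU 1 (consumer)
 *	----------------		----------------
 *	data = compute();		while (!flag)
 *	smp_wmb();				cpu_relax();
 *	flag = 1;			smp_rmb();
 *					consume(data);	// sees the new data
 *
 * smp_wmb() (LWSYNC or eieio) keeps the store to 'data' ahead of the
 * store to 'flag'; smp_rmb() (LWSYNC) keeps the load of 'flag' ahead of
 * the load of 'data'.  Ordering against non-cacheable (I/O) accesses
 * needs the non-smp_ mb()/wmb() variants, as noted above.
 */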

/*
 * This is a barrier which prevents following instructions from being
 * started until the value of the argument x is known.  For example, if
 * x is a variable loaded from memory, this prevents following
 * instructions from being executed until the load has been performed.
 */
#define data_barrier(x)	\
	asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory");
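
/*
 * Usage sketch (illustrative only; the register layout, 'regs->status'
 * and STATUS_READY are hypothetical): after loading a value from an MMIO
 * register, data_barrier() keeps later instructions from starting until
 * that load has completed and the value is known.
 *
 *	unsigned int status;
 *
 *	status = in_be32(&regs->status);
 *	data_barrier(status);
 *	if (status & STATUS_READY)	// runs only once the load is done
 *		handle_ready();
 */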

struct task_struct;
struct pt_regs;

#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)

extern int (*__debugger)(struct pt_regs *regs);
extern int (*__debugger_ipi)(struct pt_regs *regs);
extern int (*__debugger_bpt)(struct pt_regs *regs);
extern int (*__debugger_sstep)(struct pt_regs *regs);
extern int (*__debugger_iabr_match)(struct pt_regs *regs);
extern int (*__debugger_dabr_match)(struct pt_regs *regs);
extern int (*__debugger_fault_handler)(struct pt_regs *regs);

#define DEBUGGER_BOILERPLATE(__NAME) \
static inline int __NAME(struct pt_regs *regs) \
{ \
	if (unlikely(__ ## __NAME)) \
		return __ ## __NAME(regs); \
	return 0; \
}

DEBUGGER_BOILERPLATE(debugger)
DEBUGGER_BOILERPLATE(debugger_ipi)
DEBUGGER_BOILERPLATE(debugger_bpt)
DEBUGGER_BOILERPLATE(debugger_sstep)
DEBUGGER_BOILERPLATE(debugger_iabr_match)
DEBUGGER_BOILERPLATE(debugger_dabr_match)
DEBUGGER_BOILERPLATE(debugger_fault_handler)

#else
static inline int debugger(struct pt_regs *regs) { return 0; }
static inline int debugger_ipi(struct pt_regs *regs) { return 0; }
static inline int debugger_bpt(struct pt_regs *regs) { return 0; }
static inline int debugger_sstep(struct pt_regs *regs) { return 0; }
static inline int debugger_iabr_match(struct pt_regs *regs) { return 0; }
static inline int debugger_dabr_match(struct pt_regs *regs) { return 0; }
static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; }
#endif

extern int set_dabr(unsigned long dabr);
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
extern void do_send_trap(struct pt_regs *regs, unsigned long address,
			 unsigned long error_code, int signal_code, int brkpt);
#else
extern void do_dabr(struct pt_regs *regs, unsigned long address,
		    unsigned long error_code);
#endif
extern void print_backtrace(unsigned long *);
extern void flush_instruction_cache(void);
extern void hard_reset_now(void);
extern void poweroff_now(void);

#ifdef CONFIG_6xx
extern long _get_L2CR(void);
extern long _get_L3CR(void);
extern void _set_L2CR(unsigned long);
extern void _set_L3CR(unsigned long);
#else
#define _get_L2CR()	0L
#define _get_L3CR()	0L
#define _set_L2CR(val)	do { } while (0)
#define _set_L3CR(val)	do { } while (0)
#endif

extern void via_cuda_init(void);
extern void read_rtc_time(void);
extern void pmac_find_display(void);
extern void giveup_fpu(struct task_struct *);
extern void disable_kernel_fp(void);
extern void enable_kernel_fp(void);
extern void flush_fp_to_thread(struct task_struct *);
extern void enable_kernel_altivec(void);
extern void giveup_altivec(struct task_struct *);
extern void load_up_altivec(struct task_struct *);
extern int emulate_altivec(struct pt_regs *);
extern void __giveup_vsx(struct task_struct *);
extern void giveup_vsx(struct task_struct *);
extern void enable_kernel_spe(void);
extern void giveup_spe(struct task_struct *);
extern void load_up_spe(struct task_struct *);
extern int fix_alignment(struct pt_regs *);
extern void cvt_fd(float *from, double *to);
extern void cvt_df(double *from, float *to);

#ifndef CONFIG_SMP
extern void discard_lazy_cpu_state(void);
#else
static inline void discard_lazy_cpu_state(void)
{
}
#endif

#ifdef CONFIG_ALTIVEC
extern void flush_altivec_to_thread(struct task_struct *);
#else
static inline void flush_altivec_to_thread(struct task_struct *t)
{
}
#endif

#ifdef CONFIG_VSX
extern void flush_vsx_to_thread(struct task_struct *);
#else
static inline void flush_vsx_to_thread(struct task_struct *t)
{
}
#endif

#ifdef CONFIG_SPE
extern void flush_spe_to_thread(struct task_struct *);
#else
static inline void flush_spe_to_thread(struct task_struct *t)
{
}
#endif

extern int call_rtas(const char *, int, int, unsigned long *, ...);
extern void cacheable_memzero(void *p, unsigned int nb);
extern void *cacheable_memcpy(void *, const void *, unsigned int);
extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long);
extern void bad_page_fault(struct pt_regs *, unsigned long, int);
extern void _exception(int, struct pt_regs *, int, unsigned long);
extern void die(const char *, struct pt_regs *, long);
extern void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val);

#ifdef CONFIG_BOOKE_WDT
extern u32 booke_wdt_enabled;
extern u32 booke_wdt_period;
#endif /* CONFIG_BOOKE_WDT */

struct device_node;
extern void note_scsi_host(struct device_node *, void *);

extern struct task_struct *__switch_to(struct task_struct *,
	struct task_struct *);
#define switch_to(prev, next, last)	((last) = __switch_to((prev), (next)))

struct thread_struct;
extern struct task_struct *_switch(struct thread_struct *prev,
				   struct thread_struct *next);

extern unsigned int rtas_data;
extern int mem_init_done;	/* set on boot once kmalloc can be called */
extern int init_bootmem_done;	/* set once bootmem is available */
extern phys_addr_t memory_limit;
extern unsigned long klimit;
extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);

extern int powersave_nap;	/* set if nap mode can be used in idle loop */
void cpu_idle_wait(void);

#ifdef CONFIG_PSERIES_IDLE
extern void update_smt_snooze_delay(int snooze);
extern int pseries_notify_cpuidle_add_cpu(int cpu);
#else
static inline void update_smt_snooze_delay(int snooze) {}
static inline int pseries_notify_cpuidle_add_cpu(int cpu) { return 0; }
#endif

/*
 * Atomic exchange
 *
 * Changes the memory location '*ptr' to be val and returns
 * the previous value stored there.
 */
static __always_inline unsigned long
__xchg_u32(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
	PPC_RELEASE_BARRIER
"1:	lwarx	%0,0,%2 \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%3,0,%2 \n\
	bne-	1b"
	PPC_ACQUIRE_BARRIER
	: "=&r" (prev), "+m" (*(volatile unsigned int *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}

/*
 * Atomic exchange without the acquire/release barriers.
 *
 * Changes the memory location '*ptr' to be val and returns the previous
 * value stored there.  Unlike __xchg_u32(), no memory barriers are
 * emitted, so this is only suitable when ordering against other CPUs is
 * not required (see xchg_local()).
 */
static __always_inline unsigned long
__xchg_u32_local(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2 \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%3,0,%2 \n\
	bne-	1b"
	: "=&r" (prev), "+m" (*(volatile unsigned int *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}

#ifdef CONFIG_PPC64
static __always_inline unsigned long
__xchg_u64(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
	PPC_RELEASE_BARRIER
"1:	ldarx	%0,0,%2 \n"
	PPC405_ERR77(0,%2)
"	stdcx.	%3,0,%2 \n\
	bne-	1b"
	PPC_ACQUIRE_BARRIER
	: "=&r" (prev), "+m" (*(volatile unsigned long *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__xchg_u64_local(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2 \n"
	PPC405_ERR77(0,%2)
"	stdcx.	%3,0,%2 \n\
	bne-	1b"
	: "=&r" (prev), "+m" (*(volatile unsigned long *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}
#endif

/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid xchg().
 */
extern void __xchg_called_with_bad_pointer(void);

static __always_inline unsigned long
__xchg(volatile void *ptr, unsigned long x, unsigned int size)
{
	switch (size) {
	case 4:
		return __xchg_u32(ptr, x);
#ifdef CONFIG_PPC64
	case 8:
		return __xchg_u64(ptr, x);
#endif
	}
	__xchg_called_with_bad_pointer();
	return x;
}

static __always_inline unsigned long
__xchg_local(volatile void *ptr, unsigned long x, unsigned int size)
{
	switch (size) {
	case 4:
		return __xchg_u32_local(ptr, x);
#ifdef CONFIG_PPC64
	case 8:
		return __xchg_u64_local(ptr, x);
#endif
	}
	__xchg_called_with_bad_pointer();
	return x;
}
#define xchg(ptr,x)							     \
  ({									     \
     __typeof__(*(ptr)) _x_ = (x);					     \
     (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
  })

#define xchg_local(ptr,x)						     \
  ({									     \
     __typeof__(*(ptr)) _x_ = (x);					     \
     (__typeof__(*(ptr))) __xchg_local((ptr),				     \
		(unsigned long)_x_, sizeof(*(ptr)));			     \
  })
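
/*
 * Usage sketch (illustrative only; 'pending' and handle() are
 * hypothetical, not part of this header): xchg() atomically swaps in a
 * new value and returns the old one, with the acquire/release ordering
 * provided by the barriers above.  xchg_local() performs the same atomic
 * swap without those barriers and is meant for data that only the local
 * CPU ever modifies (e.g. per-cpu state).
 *
 *	static unsigned long pending;
 *
 *	unsigned long old = xchg(&pending, 0);	// atomically take all pending bits
 *	if (old)
 *		handle(old);
 */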

/*
 * Compare and exchange - if *p == old, set it to new,
 * and return the old value of *p.
 */
#define __HAVE_ARCH_CMPXCHG	1

static __always_inline unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
{
	unsigned int prev;

	__asm__ __volatile__ (
	PPC_RELEASE_BARRIER
"1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
	cmpw	0,%0,%3\n\
	bne-	2f\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%4,0,%2\n\
	bne-	1b"
	PPC_ACQUIRE_BARRIER
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u32_local(volatile unsigned int *p, unsigned long old,
			unsigned long new)
{
	unsigned int prev;

	__asm__ __volatile__ (
"1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
	cmpw	0,%0,%3\n\
	bne-	2f\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%4,0,%2\n\
	bne-	1b"
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

#ifdef CONFIG_PPC64
static __always_inline unsigned long
__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
	PPC_RELEASE_BARRIER
"1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
	cmpd	0,%0,%3\n\
	bne-	2f\n\
	stdcx.	%4,0,%2\n\
	bne-	1b"
	PPC_ACQUIRE_BARRIER
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u64_local(volatile unsigned long *p, unsigned long old,
			unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
	cmpd	0,%0,%3\n\
	bne-	2f\n\
	stdcx.	%4,0,%2\n\
	bne-	1b"
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}
#endif

/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid cmpxchg().
 */
extern void __cmpxchg_called_with_bad_pointer(void);

static __always_inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
	  unsigned int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
#ifdef CONFIG_PPC64
	case 8:
		return __cmpxchg_u64(ptr, old, new);
#endif
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

static __always_inline unsigned long
__cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
	  unsigned int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32_local(ptr, old, new);
#ifdef CONFIG_PPC64
	case 8:
		return __cmpxchg_u64_local(ptr, old, new);
#endif
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr, o, n)						 \
  ({									 \
     __typeof__(*(ptr)) _o_ = (o);					 \
     __typeof__(*(ptr)) _n_ = (n);					 \
     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		 \
				    (unsigned long)_n_, sizeof(*(ptr))); \
  })


#define cmpxchg_local(ptr, o, n)					 \
  ({									 \
     __typeof__(*(ptr)) _o_ = (o);					 \
     __typeof__(*(ptr)) _n_ = (n);					 \
     (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_,	 \
				    (unsigned long)_n_, sizeof(*(ptr))); \
  })
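
/*
 * Usage sketch (illustrative only; 'seq' and the retry loop are
 * hypothetical, not part of this header): the usual cmpxchg() retry loop
 * for a lock-free read-modify-write.
 *
 *	static unsigned long seq;
 *
 *	unsigned long old, new;
 *
 *	do {
 *		old = seq;
 *		new = old + 1;
 *	} while (cmpxchg(&seq, old, new) != old);
 *
 * cmpxchg_local() follows the same pattern but omits the acquire/release
 * barriers, so it is only suitable for data that is never concurrently
 * modified by another CPU.
 */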

#ifdef CONFIG_PPC64
/*
 * We handle most unaligned accesses in hardware.  On the other hand
 * unaligned DMA can be very expensive on some ppc64 IO chips (it does
 * powers of 2 writes until it reaches sufficient alignment).
 *
 * Based on this we disable the IP header alignment in network drivers.
 */
#define NET_IP_ALIGN	0

#define cmpxchg64(ptr, o, n)						\
  ({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg((ptr), (o), (n));					\
  })
#define cmpxchg64_local(ptr, o, n)					\
  ({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg_local((ptr), (o), (n));					\
  })
#else
#include <asm-generic/cmpxchg-local.h>
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#endif

extern unsigned long arch_align_stack(unsigned long sp);

/* Used in very early kernel initialization. */
extern unsigned long reloc_offset(void);
extern unsigned long add_reloc_offset(unsigned long);
extern void reloc_got2(unsigned long);

#define PTRRELOC(x)	((typeof(x)) add_reloc_offset((unsigned long)(x)))

extern struct dentry *powerpc_debugfs_root;

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_SYSTEM_H */