#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/kernel.h>
#include <asm/segment.h>
#include <asm/cpufeature.h>
#include <asm/cmpxchg.h>

#ifdef __KERNEL__
#define AT_VECTOR_SIZE_ARCH 2 /* entries in ARCH_DLINFO */

struct task_struct;	/* one of the stranger aspects of C forward declarations.. */
extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));

/*
 * Saving eflags is important.  It switches not only the IOPL between tasks;
 * it also protects other tasks from NT leaking in through sysenter, etc.
 */
#define switch_to(prev,next,last) do {					\
	unsigned long esi,edi;						\
	asm volatile("pushfl\n\t"		/* Save flags */	\
		     "pushl %%ebp\n\t"					\
		     "movl %%esp,%0\n\t"	/* save ESP */		\
		     "movl %5,%%esp\n\t"	/* restore ESP */	\
		     "movl $1f,%1\n\t"		/* save EIP */		\
		     "pushl %6\n\t"		/* restore EIP */	\
		     "jmp __switch_to\n"				\
		     "1:\t"						\
		     "popl %%ebp\n\t"					\
		     "popfl"						\
		     :"=m" (prev->thread.esp),"=m" (prev->thread.eip),	\
		      "=a" (last),"=S" (esi),"=D" (edi)			\
		     :"m" (next->thread.esp),"m" (next->thread.eip),	\
		      "2" (prev), "d" (next));				\
} while (0)
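
/*
 * Illustrative only: the generic scheduler's context-switch path is
 * expected to invoke this roughly as
 *
 *	switch_to(prev, next, prev);
 *
 * where "last" receives the task we actually switched away from, which
 * matters because the stack we return on may belong to a third task.
 * The exact call site is version-dependent.
 */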

#define _set_base(addr,base) do { unsigned long __pr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
	"rorl $16,%%edx\n\t" \
	"movb %%dl,%2\n\t" \
	"movb %%dh,%3" \
	:"=&d" (__pr) \
	:"m" (*((addr)+2)), \
	 "m" (*((addr)+4)), \
	 "m" (*((addr)+7)), \
	 "0" (base) \
	); } while(0)

#define _set_limit(addr,limit) do { unsigned long __lr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
	"rorl $16,%%edx\n\t" \
	"movb %2,%%dh\n\t" \
	"andb $0xf0,%%dh\n\t" \
	"orb %%dh,%%dl\n\t" \
	"movb %%dl,%2" \
	:"=&d" (__lr) \
	:"m" (*(addr)), \
	 "m" (*((addr)+6)), \
	 "0" (limit) \
	); } while(0)

#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1) )
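
/*
 * Illustrative only: these helpers patch the base/limit fields of a
 * descriptor in place.  A typical (hypothetical) caller is BIOS interface
 * setup code pointing a GDT entry at a real-mode segment, e.g.:
 *
 *	set_base(gdt[APM_CS >> 3], __va((unsigned long)apm_info.bios.cseg << 4));
 *	set_limit(gdt[APM_CS >> 3], 64 * 1024);
 *
 * The selectors and fields shown are version-dependent; see the APM code
 * for the real thing.
 */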

/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong..
 */
#define loadsegment(seg,value)			\
	asm volatile("\n"			\
		"1:\t"				\
		"mov %0,%%" #seg "\n"		\
		"2:\n"				\
		".section .fixup,\"ax\"\n"	\
		"3:\t"				\
		"pushl $0\n\t"			\
		"popl %%" #seg "\n\t"		\
		"jmp 2b\n"			\
		".previous\n"			\
		".section __ex_table,\"a\"\n\t"	\
		".align 4\n\t"			\
		".long 1b,3b\n"			\
		".previous"			\
		: :"rm" (value))

/*
 * Save a segment register away
 */
#define savesegment(seg, value) \
	asm volatile("mov %%" #seg ",%0":"=rm" (value))
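
/*
 * Illustrative only: a common pairing is to stash a segment register for
 * the outgoing task and reload it for the incoming one in a context-switch
 * path (the field names here are hypothetical):
 *
 *	savesegment(gs, prev->gs);
 *	loadsegment(gs, next->gs);
 *
 * loadsegment() tolerates a bogus selector by loading the null selector
 * via the .fixup/__ex_table machinery instead of faulting.
 */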

static inline void native_clts(void)
{
	asm volatile ("clts");
}

static inline unsigned long native_read_cr0(void)
{
	unsigned long val;
	asm volatile("movl %%cr0,%0\n\t" :"=r" (val));
	return val;
}

static inline void native_write_cr0(unsigned long val)
{
	asm volatile("movl %0,%%cr0": :"r" (val));
}

static inline unsigned long native_read_cr2(void)
{
	unsigned long val;
	asm volatile("movl %%cr2,%0\n\t" :"=r" (val));
	return val;
}

static inline void native_write_cr2(unsigned long val)
{
	asm volatile("movl %0,%%cr2": :"r" (val));
}

static inline unsigned long native_read_cr3(void)
{
	unsigned long val;
	asm volatile("movl %%cr3,%0\n\t" :"=r" (val));
	return val;
}

static inline void native_write_cr3(unsigned long val)
{
	asm volatile("movl %0,%%cr3": :"r" (val));
}

static inline unsigned long native_read_cr4(void)
{
	unsigned long val;
	asm volatile("movl %%cr4,%0\n\t" :"=r" (val));
	return val;
}

static inline unsigned long native_read_cr4_safe(void)
{
	unsigned long val;
	/* This could fault if %cr4 does not exist */
	asm volatile("1: movl %%cr4, %0		\n"
		"2:				\n"
		".section __ex_table,\"a\"	\n"
		".long 1b,2b			\n"
		".previous			\n"
		: "=r" (val): "0" (0));
	return val;
}

static inline void native_write_cr4(unsigned long val)
{
	asm volatile("movl %0,%%cr4": :"r" (val));
}

static inline void native_wbinvd(void)
{
	asm volatile("wbinvd": : :"memory");
}

static inline void clflush(volatile void *__p)
{
	asm volatile("clflush %0" : "+m" (*(char __force *)__p));
}
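
/*
 * Illustrative only: clflush evicts a single cache line, so flushing a
 * whole buffer means walking it in cache-line-sized steps (a hypothetical
 * sketch; the real helper lives elsewhere in arch code):
 *
 *	void flush_buffer(void *vaddr, size_t size)
 *	{
 *		char *p;
 *
 *		for (p = vaddr; p < (char *)vaddr + size;
 *		     p += boot_cpu_data.x86_clflush_size)
 *			clflush(p);
 *	}
 *
 * Callers normally want an mb() before and after such a loop to order the
 * flushes against surrounding accesses.
 */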

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define read_cr0()	(native_read_cr0())
#define write_cr0(x)	(native_write_cr0(x))
#define read_cr2()	(native_read_cr2())
#define write_cr2(x)	(native_write_cr2(x))
#define read_cr3()	(native_read_cr3())
#define write_cr3(x)	(native_write_cr3(x))
#define read_cr4()	(native_read_cr4())
#define read_cr4_safe()	(native_read_cr4_safe())
#define write_cr4(x)	(native_write_cr4(x))
#define wbinvd()	(native_wbinvd())

/* Clear the 'TS' bit */
#define clts()		(native_clts())

#endif	/* CONFIG_PARAVIRT */

/* Set the 'TS' bit */
#define stts() write_cr0(8 | read_cr0())
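
/*
 * Illustrative only: stts()/clts() drive lazy FPU context switching.
 * Roughly (exact function names are version-dependent):
 *
 *	- the context-switch code sets CR0.TS for the incoming task, so its
 *	  first FPU instruction traps with a device-not-available fault;
 *	- that trap handler then does
 *
 *		clts();		// allow FPU use again
 *		... restore the task's saved FPU state ...
 *
 * stts() writes CR0 with bit 3 (CR0.TS) ORed in, hence the literal 8.
 */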

#endif	/* __KERNEL__ */

static inline unsigned long get_limit(unsigned long segment)
{
	unsigned long __limit;
	__asm__("lsll %1,%0"
		:"=r" (__limit):"r" (segment));
	return __limit+1;
}
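
/*
 * Illustrative only: get_limit() wraps LSL, which takes a segment selector
 * and yields the segment limit; the +1 turns the limit into a byte count.
 * A hypothetical bounds check against a user code segment might look like:
 *
 *	if (addr + len > get_limit(regs->xcs))
 *		return -EFAULT;
 */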

#define nop() __asm__ __volatile__ ("nop")

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * For now, "wmb()" doesn't actually do anything, as all
 * Intel CPUs follow what Intel calls a *Processor Order*,
 * in which all writes are seen in the program order even
 * outside the CPU.
 *
 * I expect future Intel CPUs to have a weaker ordering,
 * but I'd also expect them to finally get their act together
 * and add some real memory barriers if so.
 *
 * Some non-Intel clones support out-of-order stores. wmb() ceases
 * to be a nop for these.
 */

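/*
 * Note (added for clarity): the definitions below use the alternatives
 * mechanism; on CPUs that advertise SSE/SSE2 the locked
 * "addl $0,0(%%esp)" is patched at boot into the corresponding fence
 * instruction, while older CPUs keep the locked add, which is itself
 * fully serializing.
 */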
#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)

/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 **/

#define read_barrier_depends()	do { } while(0)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#ifdef CONFIG_X86_PPRO_FENCE
# define smp_rmb()	rmb()
#else
# define smp_rmb()	barrier()
#endif
#ifdef CONFIG_X86_OOSTORE
# define smp_wmb()	wmb()
#else
# define smp_wmb()	barrier()
#endif
#define smp_read_barrier_depends()	read_barrier_depends()
#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif
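
/*
 * Illustrative only: the classic producer/consumer pairing of the SMP
 * barriers above (a minimal sketch, not taken from this file):
 *
 *	producer:			consumer:
 *		data = 42;			while (!ready)
 *		smp_wmb();				cpu_relax();
 *		ready = 1;			smp_rmb();
 *						use(data);
 *
 * smp_wmb() keeps the data store ordered before the flag store;
 * smp_rmb() keeps the flag read ordered before the data read.  set_mb()
 * combines the store with a full barrier via xchg()'s implied lock.
 */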

#include <linux/irqflags.h>

/*
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);

extern int es7000_plat;
void cpu_idle_wait(void);

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

void default_idle(void);
void __show_registers(struct pt_regs *, int all);

#endif