#ifndef _ASM_X86_SYSTEM_H_
#define _ASM_X86_SYSTEM_H_

#include <asm/asm.h>

#include <linux/kernel.h>

#ifdef CONFIG_X86_32
#define AT_VECTOR_SIZE_ARCH 2 /* entries in ARCH_DLINFO */

struct task_struct; /* one of the stranger aspects of C forward declarations */
extern struct task_struct *FASTCALL(__switch_to(struct task_struct *prev,
						struct task_struct *next));

/*
 * Saving eflags is important. It not only switches IOPL between tasks,
 * it also protects other tasks from NT leaking through sysenter etc.
 */
#define switch_to(prev, next, last) do {				\
	unsigned long esi, edi;						\
	asm volatile("pushfl\n\t"		/* Save flags */	\
		     "pushl %%ebp\n\t"					\
		     "movl %%esp,%0\n\t"	/* save ESP */		\
		     "movl %5,%%esp\n\t"	/* restore ESP */	\
		     "movl $1f,%1\n\t"		/* save EIP */		\
		     "pushl %6\n\t"		/* restore EIP */	\
		     "jmp __switch_to\n"				\
		     "1:\t"						\
		     "popl %%ebp\n\t"					\
		     "popfl"						\
		     :"=m" (prev->thread.sp), "=m" (prev->thread.ip),	\
		      "=a" (last), "=S" (esi), "=D" (edi)		\
		     :"m" (next->thread.sp), "m" (next->thread.ip),	\
		      "2" (prev), "d" (next));				\
} while (0)
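/*
 * Illustrative sketch (not part of this header, names hypothetical):
 * roughly how the scheduler's context-switch path hands off to
 * switch_to(). Note the third argument: after the switch, execution
 * resumes on the new task's stack, and "last" receives the task we
 * actually switched away from.
 */
#if 0	/* example only */
static void example_context_switch(struct task_struct *prev,
				   struct task_struct *next)
{
	/* ... switch the mm, update accounting, etc. ... */
	switch_to(prev, next, prev);	/* 'prev' now holds the task we
					 * really came from */
	/* ... finish up, running on the new task's stack ... */
}
#endif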

# include "system_32.h"
#else
#define __SAVE(reg, offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
#define __RESTORE(reg, offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"

/* frame pointer must be last for get_wchan */
#define SAVE_CONTEXT    "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"

#define __EXTRA_CLOBBER \
	, "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
	  "r12", "r13", "r14", "r15"

/* Save/restore flags to prevent a leaked NT flag from crossing tasks */
#define switch_to(prev, next, last) \
	asm volatile(SAVE_CONTEXT					  \
	     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */	  \
	     "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */	  \
	     "call __switch_to\n\t"					  \
	     ".globl thread_return\n"					  \
	     "thread_return:\n\t"					  \
	     "movq %%gs:%P[pda_pcurrent],%%rsi\n\t"			  \
	     "movq %P[thread_info](%%rsi),%%r8\n\t"			  \
	     LOCK_PREFIX "btr %[tif_fork],%P[ti_flags](%%r8)\n\t"	  \
	     "movq %%rax,%%rdi\n\t"					  \
	     "jc ret_from_fork\n\t"					  \
	     RESTORE_CONTEXT						  \
	     : "=a" (last)						  \
	     : [next] "S" (next), [prev] "D" (prev),			  \
	       [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
	       [ti_flags] "i" (offsetof(struct thread_info, flags)),	  \
	       [tif_fork] "i" (TIF_FORK),				  \
	       [thread_info] "i" (offsetof(struct task_struct, stack)),  \
	       [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent))  \
	     : "memory", "cc" __EXTRA_CLOBBER)
# include "system_64.h"
#endif

#ifdef __KERNEL__
#define _set_base(addr, base) do { unsigned long __pr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
	"rorl $16,%%edx\n\t" \
	"movb %%dl,%2\n\t" \
	"movb %%dh,%3" \
	:"=&d" (__pr) \
	:"m" (*((addr)+2)), \
	 "m" (*((addr)+4)), \
	 "m" (*((addr)+7)), \
	 "0" (base) \
	); } while (0)

#define _set_limit(addr, limit) do { unsigned long __lr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
	"rorl $16,%%edx\n\t" \
	"movb %2,%%dh\n\t" \
	"andb $0xf0,%%dh\n\t" \
	"orb %%dh,%%dl\n\t" \
	"movb %%dl,%2" \
	:"=&d" (__lr) \
	:"m" (*(addr)), \
	 "m" (*((addr)+6)), \
	 "0" (limit) \
	); } while (0)

#define set_base(ldt, base) _set_base(((char *)&(ldt)) , (base))
#define set_limit(ldt, limit) _set_limit(((char *)&(ldt)) , ((limit)-1))
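
/*
 * For orientation (illustrative only, not used by the code): the
 * 8-byte segment-descriptor layout the macros above poke at. The base
 * is split across bytes 2-4 and 7, and the high limit nibble shares
 * byte 6 with the flags, which is why _set_limit masks with 0xf0.
 */
#if 0	/* example only */
struct example_descriptor_bytes {
	unsigned short	limit_lo;	/* bytes 0-1: limit 15:0  */
	unsigned short	base_lo;	/* bytes 2-3: base 15:0   */
	unsigned char	base_mid;	/* byte  4:   base 23:16  */
	unsigned char	access;		/* byte  5:   type/access */
	unsigned char	limit_hi;	/* byte  6:   limit 19:16 + flags */
	unsigned char	base_hi;	/* byte  7:   base 31:24  */
};
#endif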

extern void load_gs_index(unsigned);

/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong.
 */
#define loadsegment(seg, value)			\
	asm volatile("\n"			\
		"1:\t"				\
		"movl %k0,%%" #seg "\n"		\
		"2:\n"				\
		".section .fixup,\"ax\"\n"	\
		"3:\t"				\
		"movl %k1, %%" #seg "\n\t"	\
		"jmp 2b\n"			\
		".previous\n"			\
		".section __ex_table,\"a\"\n\t"	\
		_ASM_ALIGN "\n\t"		\
		_ASM_PTR " 1b,3b\n"		\
		".previous"			\
		: :"r" (value), "r" (0))


/*
 * Save a segment register away
 */
#define savesegment(seg, value) \
	asm volatile("mov %%" #seg ",%0":"=rm" (value))

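/*
 * Illustrative usage (hypothetical helper, example only): stash the
 * current selector with savesegment() and reload it later with
 * loadsegment(), which falls back to the null selector if the load
 * faults via the fixup section above.
 */
#if 0	/* example only */
static inline void example_borrow_gs(unsigned int tmp_gs)
{
	unsigned int saved_gs;

	savesegment(gs, saved_gs);	/* remember the current %gs */
	loadsegment(gs, tmp_gs);	/* a faulting load ends up as 0 */
	/* ... use the temporary %gs here ... */
	loadsegment(gs, saved_gs);	/* put the old selector back */
}
#endif
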
static inline unsigned long get_limit(unsigned long segment)
{
	unsigned long __limit;
	__asm__("lsll %1,%0"
		:"=r" (__limit):"r" (segment));
	return __limit + 1;
}

static inline void native_clts(void)
{
	asm volatile("clts");
}

/*
 * Volatile isn't enough to prevent the compiler from reordering the
 * read/write functions for the control registers and messing everything up.
 * A memory clobber would solve the problem, but would prevent reordering of
 * all loads/stores around it, which can hurt performance. The solution is to
 * use a variable and mimic reads and writes to it to enforce serialization.
 */
static unsigned long __force_order;

static inline unsigned long native_read_cr0(void)
{
	unsigned long val;
	asm volatile("mov %%cr0,%0\n\t" :"=r" (val), "=m" (__force_order));
	return val;
}

static inline void native_write_cr0(unsigned long val)
{
	asm volatile("mov %0,%%cr0": :"r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr2(void)
{
	unsigned long val;
	asm volatile("mov %%cr2,%0\n\t" :"=r" (val), "=m" (__force_order));
	return val;
}

static inline void native_write_cr2(unsigned long val)
{
	asm volatile("mov %0,%%cr2": :"r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr3(void)
{
	unsigned long val;
	asm volatile("mov %%cr3,%0\n\t" :"=r" (val), "=m" (__force_order));
	return val;
}

static inline void native_write_cr3(unsigned long val)
{
	asm volatile("mov %0,%%cr3": :"r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr4(void)
{
	unsigned long val;
	asm volatile("mov %%cr4,%0\n\t" :"=r" (val), "=m" (__force_order));
	return val;
}

static inline unsigned long native_read_cr4_safe(void)
{
	unsigned long val;
	/*
	 * This could fault if %cr4 does not exist. On x86_64, CR4
	 * always exists, so it will never fail.
	 */
#ifdef CONFIG_X86_32
	asm volatile("1: mov %%cr4, %0		\n"
		"2:				\n"
		".section __ex_table,\"a\"	\n"
		".long 1b,2b			\n"
		".previous			\n"
		: "=r" (val), "=m" (__force_order) : "0" (0));
#else
	val = native_read_cr4();
#endif
	return val;
}

static inline void native_write_cr4(unsigned long val)
{
	asm volatile("mov %0,%%cr4": :"r" (val), "m" (__force_order));
}

static inline void native_wbinvd(void)
{
	asm volatile("wbinvd": : :"memory");
}
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define read_cr0()	(native_read_cr0())
#define write_cr0(x)	(native_write_cr0(x))
#define read_cr2()	(native_read_cr2())
#define write_cr2(x)	(native_write_cr2(x))
#define read_cr3()	(native_read_cr3())
#define write_cr3(x)	(native_write_cr3(x))
#define read_cr4()	(native_read_cr4())
#define read_cr4_safe()	(native_read_cr4_safe())
#define write_cr4(x)	(native_write_cr4(x))
#define wbinvd()	(native_wbinvd())

/* Clear the 'TS' bit */
#define clts()		(native_clts())

#endif /* CONFIG_PARAVIRT */

#define stts() write_cr0(8 | read_cr0())
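
/*
 * Illustrative sketch (example only, function names hypothetical):
 * the classic lazy-FPU pattern clts()/stts() serve. clts() clears
 * CR0.TS before touching FPU state; stts() sets it again (bit 3, the
 * "8" above) so the next FPU instruction traps with #NM.
 */
#if 0	/* example only */
static inline void example_fpu_begin(void)
{
	clts();			/* allow FPU instructions */
	/* ... restore this task's FPU registers ... */
}

static inline void example_fpu_end(void)
{
	/* ... save this task's FPU registers ... */
	stts();			/* re-arm the #NM trap */
}
#endif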

#endif /* __KERNEL__ */

static inline void clflush(void *__p)
{
	asm volatile("clflush %0" : "+m" (*(char __force *)__p));
}
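
/*
 * Illustrative helper (example only): flushing a whole buffer with
 * clflush(). clflush is only ordered with respect to fence
 * instructions, hence the mb() on either side. The 64-byte line size
 * is an assumption; real code should query the CPU's cache line size.
 */
#if 0	/* example only */
static inline void example_clflush_range(void *addr, unsigned long size)
{
	char *p;

	mb();				/* order earlier stores first */
	for (p = addr; p < (char *)addr + size; p += 64)
		clflush(p);
	mb();				/* wait for the flushes */
}
#endif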

#define nop() __asm__ __volatile__ ("nop")

void disable_hlt(void);
void enable_hlt(void);

extern int es7000_plat;
void cpu_idle_wait(void);

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

void default_idle(void);

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */
#ifdef CONFIG_X86_32
/*
 * For now, "wmb()" doesn't actually do anything, as all
 * Intel CPUs follow what Intel calls a *Processor Order*,
 * in which all writes are seen in program order even
 * outside the CPU.
 *
 * I expect future Intel CPUs to have a weaker ordering,
 * but I'd also expect them to finally get their act together
 * and add some real memory barriers if so.
 *
 * Some non-Intel clones support out-of-order stores. wmb()
 * ceases to be a nop for these.
 */
#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else
#define mb()	asm volatile("mfence":::"memory")
#define rmb()	asm volatile("lfence":::"memory")
#define wmb()	asm volatile("sfence" ::: "memory")
#endif

/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier. All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads. This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than is
 * rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies. See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends(). However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b". Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
 * in cases like this where there are no data dependencies.
 **/

#define read_barrier_depends()	do { } while (0)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#ifdef CONFIG_X86_PPRO_FENCE
# define smp_rmb()	rmb()
#else
# define smp_rmb()	barrier()
#endif
#ifdef CONFIG_X86_OOSTORE
# define smp_wmb()	wmb()
#else
# define smp_wmb()	barrier()
#endif
#define smp_read_barrier_depends()	read_barrier_depends()
#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while (0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif
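
/*
 * Illustrative producer/consumer sketch (example only, all names
 * hypothetical) showing the intended pairing of smp_wmb() and
 * smp_rmb(): the writer publishes data before the flag, the reader
 * checks the flag before reading the data.
 */
#if 0	/* example only */
static int example_data, example_flag;

static void example_producer(void)
{
	example_data = 42;
	smp_wmb();		/* publish the data before the flag */
	example_flag = 1;
}

static int example_consumer(void)
{
	while (!example_flag)
		cpu_relax();
	smp_rmb();		/* read the data only after the flag */
	return example_data;
}
#endif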


#endif /* _ASM_X86_SYSTEM_H_ */