#ifndef _ASM_X86_SYSTEM_H_
#define _ASM_X86_SYSTEM_H_

#include <asm/asm.h>

#include <linux/kernel.h>

#ifdef CONFIG_X86_32
# include "system_32.h"
#else
# include "system_64.h"
#endif

#ifdef __KERNEL__
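
/*
 * Helpers to rewrite the base and limit fields of a GDT/LDT segment
 * descriptor in place.  The x86 descriptor layout scatters the 32-bit
 * base across bytes 2-4 and 7, and the 20-bit limit across bytes 0-1
 * plus the low nibble of byte 6, whose high nibble holds the flags
 * and is preserved below.
 */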
#define _set_base(addr, base) do { unsigned long __pr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
	"rorl $16,%%edx\n\t" \
	"movb %%dl,%2\n\t" \
	"movb %%dh,%3" \
	:"=&d" (__pr) \
	:"m" (*((addr)+2)), \
	 "m" (*((addr)+4)), \
	 "m" (*((addr)+7)), \
	 "0" (base) \
	); } while (0)

#define _set_limit(addr, limit) do { unsigned long __lr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
	"rorl $16,%%edx\n\t" \
	"movb %2,%%dh\n\t" \
	"andb $0xf0,%%dh\n\t" \
	"orb %%dh,%%dl\n\t" \
	"movb %%dl,%2" \
	:"=&d" (__lr) \
	:"m" (*(addr)), \
	 "m" (*((addr)+6)), \
	 "0" (limit) \
	); } while (0)

#define set_base(ldt, base) _set_base(((char *)&(ldt)), (base))
#define set_limit(ldt, limit) _set_limit(((char *)&(ldt)), ((limit)-1))

extern void load_gs_index(unsigned);

/*
 * Load a segment register. Fall back on loading the null segment
 * if something goes wrong.
 */
#define loadsegment(seg, value)			\
	asm volatile("\n"			\
		"1:\t"				\
		"movl %k0,%%" #seg "\n"		\
		"2:\n"				\
		".section .fixup,\"ax\"\n"	\
		"3:\t"				\
		"movl %k1, %%" #seg "\n\t"	\
		"jmp 2b\n"			\
		".previous\n"			\
		".section __ex_table,\"a\"\n\t"	\
		_ASM_ALIGN "\n\t"		\
		_ASM_PTR " 1b,3b\n"		\
		".previous"			\
		: :"r" (value), "r" (0))

/*
 * Save a segment register away
 */
#define savesegment(seg, value) \
	asm volatile("mov %%" #seg ",%0":"=rm" (value))
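
/*
 * Usage sketch (illustrative only; "sel" is a hypothetical local):
 *
 *	unsigned short sel;
 *	savesegment(fs, sel);		save the current %fs selector
 *	loadsegment(fs, sel);		reload it; a faulting selector
 *					falls back to the null segment
 */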

/* Return the segment's size in bytes: its hardware limit ("lsl") plus one. */
static inline unsigned long get_limit(unsigned long segment)
{
	unsigned long __limit;
	__asm__("lsll %1,%0"
		:"=r" (__limit):"r" (segment));
	return __limit + 1;
}

static inline void native_clts(void)
{
	asm volatile ("clts");
}

/*
 * Volatile isn't enough to prevent the compiler from reordering the
 * read/write functions for the control registers and messing everything up.
 * A memory clobber would solve the problem, but would prevent reordering of
 * all loads and stores around it, which can hurt performance. The solution
 * is to use a variable and mimic reads and writes to it to enforce
 * serialization.
 */
static unsigned long __force_order;

static inline unsigned long native_read_cr0(void)
{
	unsigned long val;
	asm volatile("mov %%cr0,%0\n\t" :"=r" (val), "=m" (__force_order));
	return val;
}

static inline void native_write_cr0(unsigned long val)
{
	asm volatile("mov %0,%%cr0": :"r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr2(void)
{
	unsigned long val;
	asm volatile("mov %%cr2,%0\n\t" :"=r" (val), "=m" (__force_order));
	return val;
}

static inline void native_write_cr2(unsigned long val)
{
	asm volatile("mov %0,%%cr2": :"r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr3(void)
{
	unsigned long val;
	asm volatile("mov %%cr3,%0\n\t" :"=r" (val), "=m" (__force_order));
	return val;
}

static inline void native_write_cr3(unsigned long val)
{
	asm volatile("mov %0,%%cr3": :"r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr4(void)
{
	unsigned long val;
	asm volatile("mov %%cr4,%0\n\t" :"=r" (val), "=m" (__force_order));
	return val;
}

static inline unsigned long native_read_cr4_safe(void)
{
	unsigned long val;
	/* This could fault if %cr4 does not exist. On x86_64, CR4 always
	 * exists, so the read can never fail. */
#ifdef CONFIG_X86_32
	asm volatile("1: mov %%cr4, %0		\n"
		     "2:			\n"
		     ".section __ex_table,\"a\"	\n"
		     ".long 1b,2b		\n"
		     ".previous			\n"
		     : "=r" (val), "=m" (__force_order) : "0" (0));
#else
	val = native_read_cr4();
#endif
	return val;
}

static inline void native_write_cr4(unsigned long val)
{
	asm volatile("mov %0,%%cr4": :"r" (val), "m" (__force_order));
}

static inline void native_wbinvd(void)
{
	asm volatile("wbinvd": : :"memory");
}
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define read_cr0()	(native_read_cr0())
#define write_cr0(x)	(native_write_cr0(x))
#define read_cr2()	(native_read_cr2())
#define write_cr2(x)	(native_write_cr2(x))
#define read_cr3()	(native_read_cr3())
#define write_cr3(x)	(native_write_cr3(x))
#define read_cr4()	(native_read_cr4())
#define read_cr4_safe()	(native_read_cr4_safe())
#define write_cr4(x)	(native_write_cr4(x))
#define wbinvd()	(native_wbinvd())

/* Clear the 'TS' bit */
#define clts()		(native_clts())

#endif /* CONFIG_PARAVIRT */

/* Set the 'TS' bit: CR0 bit 3 (0x8) */
#define stts() write_cr0(8 | read_cr0())
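
/*
 * Background (context, not from this file): with CR0.TS set, the next
 * x87/SSE instruction raises a Device Not Available (#NM) trap, which
 * the kernel uses to implement lazy FPU context switching; clts()
 * disarms that trap again.
 */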

#endif /* __KERNEL__ */

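/*
 * clflush() flushes the cache line containing __p from all levels of
 * the cache hierarchy.  Note (context, not in the original): clflush
 * is not ordered against ordinary stores, so callers that need
 * ordering typically pair it with mb().
 */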
static inline void clflush(void *__p)
{
	asm volatile("clflush %0" : "+m" (*(char __force *)__p));
}

#define nop() __asm__ __volatile__ ("nop")

void disable_hlt(void);
void enable_hlt(void);

extern int es7000_plat;
void cpu_idle_wait(void);

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

void default_idle(void);

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */
#ifdef CONFIG_X86_32
/*
 * For now, "wmb()" doesn't actually do anything, as all
 * Intel CPUs follow what Intel calls a *Processor Order*,
 * in which all writes are seen in the program order even
 * outside the CPU.
 *
 * I expect future Intel CPUs to have a weaker ordering,
 * but I'd also expect them to finally get their act together
 * and add some real memory barriers if so.
 *
 * Some non-Intel clones support out-of-order stores. wmb() ceases to
 * be a nop for these.
 */
#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else
#define mb()	asm volatile("mfence":::"memory")
#define rmb()	asm volatile("lfence":::"memory")
#define wmb()	asm volatile("sfence" ::: "memory")
#endif
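
/*
 * Usage sketch (illustrative only; "data" and "ready" are hypothetical):
 *
 *	CPU 0				CPU 1
 *
 *	data = 42;			while (!ready)
 *	wmb();					;
 *	ready = 1;			rmb();
 *					x = data;
 *
 * The wmb()/rmb() pair keeps the store to "data" visible before the
 * store to "ready", and the load of "data" ordered after the load of
 * "ready".
 */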

/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier. All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads. This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies. See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends(). However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b". Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
 * in cases like this where there are no data dependencies.
 **/

#define read_barrier_depends()	do { } while (0)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#ifdef CONFIG_X86_PPRO_FENCE
# define smp_rmb()	rmb()
#else
# define smp_rmb()	barrier()
#endif
#ifdef CONFIG_X86_OOSTORE
# define smp_wmb()	wmb()
#else
# define smp_wmb()	barrier()
#endif
#define smp_read_barrier_depends()	read_barrier_depends()
#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while (0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif


#endif /* _ASM_X86_SYSTEM_H_ */