#ifndef _ASM_X86_SYSTEM_H
#define _ASM_X86_SYSTEM_H

#include <asm/asm.h>
#include <asm/segment.h>
#include <asm/cpufeature.h>
#include <asm/cmpxchg.h>
#include <asm/nops.h>

#include <linux/kernel.h>
#include <linux/irqflags.h>

/* entries in ARCH_DLINFO: */
#if defined(CONFIG_IA32_EMULATION) || !defined(CONFIG_X86_64)
# define AT_VECTOR_SIZE_ARCH 2
#else /* else it's non-compat x86-64 */
# define AT_VECTOR_SIZE_ARCH 1
#endif

struct task_struct; /* one of the stranger aspects of C forward declarations */
struct task_struct *__switch_to(struct task_struct *prev,
				struct task_struct *next);
struct tss_struct;
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
		      struct tss_struct *tss);
extern void show_regs_common(void);

#ifdef CONFIG_X86_32

#ifdef CONFIG_CC_STACKPROTECTOR
#define __switch_canary						\
	"movl %P[task_canary](%[next]), %%ebx\n\t"		\
	"movl %%ebx, "__percpu_arg([stack_canary])"\n\t"
#define __switch_canary_oparam					\
	, [stack_canary] "=m" (stack_canary.canary)
#define __switch_canary_iparam					\
	, [task_canary] "i" (offsetof(struct task_struct, stack_canary))
#else	/* CC_STACKPROTECTOR */
#define __switch_canary
#define __switch_canary_oparam
#define __switch_canary_iparam
#endif	/* CC_STACKPROTECTOR */

/*
 * Saving eflags is important. Not only does it switch IOPL between
 * tasks, it also protects other tasks from NT leaking in via sysenter,
 * etc.
 */
#define switch_to(prev, next, last)				\
do {								\
	/*							\
	 * Context-switching clobbers all registers, so we clobber	\
	 * them explicitly, via unused output variables.	\
	 * (EAX and EBP are not listed because EBP is saved/restored	\
	 * explicitly for wchan access and EAX is the return value of	\
	 * __switch_to())					\
	 */							\
	unsigned long ebx, ecx, edx, esi, edi;			\
								\
	asm volatile("pushfl\n\t"		/* save    flags */	\
		     "pushl %%ebp\n\t"		/* save    EBP   */	\
		     "movl %%esp,%[prev_sp]\n\t" /* save    ESP   */	\
		     "movl %[next_sp],%%esp\n\t" /* restore ESP   */	\
		     "movl $1f,%[prev_ip]\n\t"	/* save    EIP   */	\
		     "pushl %[next_ip]\n\t"	/* restore EIP   */	\
		     __switch_canary					\
		     "jmp __switch_to\n"	/* regparm call  */	\
		     "1:\t"						\
		     "popl %%ebp\n\t"		/* restore EBP   */	\
		     "popfl\n"			/* restore flags */	\
									\
		     /* output parameters */				\
		     : [prev_sp] "=m" (prev->thread.sp),		\
		       [prev_ip] "=m" (prev->thread.ip),		\
		       "=a" (last),					\
									\
		       /* clobbered output registers: */		\
		       "=b" (ebx), "=c" (ecx), "=d" (edx),		\
		       "=S" (esi), "=D" (edi)				\
									\
		       __switch_canary_oparam				\
									\
		       /* input parameters: */				\
		     : [next_sp]  "m" (next->thread.sp),		\
		       [next_ip]  "m" (next->thread.ip),		\
									\
		       /* regparm parameters for __switch_to(): */	\
		       [prev]     "a" (prev),				\
		       [next]     "d" (next)				\
									\
		       __switch_canary_iparam				\
									\
		     : /* reloaded segment registers */			\
			"memory");					\
} while (0)
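
/*
 * Illustrative note (not part of the original header): the scheduler's
 * context_switch() invokes this macro roughly as
 *
 *	switch_to(prev, next, prev);
 *
 * so that, after the stack switch, "last" tells the resumed task which
 * task was actually running before it.
 */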

/*
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
#else
#define __SAVE(reg, offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
#define __RESTORE(reg, offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"

/* frame pointer must be last for get_wchan */
#define SAVE_CONTEXT    "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"

#define __EXTRA_CLOBBER  \
	, "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
	  "r12", "r13", "r14", "r15"

#ifdef CONFIG_CC_STACKPROTECTOR
#define __switch_canary						  \
	"movq %P[task_canary](%%rsi),%%r8\n\t"			  \
	"movq %%r8,"__percpu_arg([gs_canary])"\n\t"
#define __switch_canary_oparam					  \
	, [gs_canary] "=m" (irq_stack_union.stack_canary)
#define __switch_canary_iparam					  \
	, [task_canary] "i" (offsetof(struct task_struct, stack_canary))
#else	/* CC_STACKPROTECTOR */
#define __switch_canary
#define __switch_canary_oparam
#define __switch_canary_iparam
#endif	/* CC_STACKPROTECTOR */

/* Save and restore flags to clear and handle the leaking NT flag */
#define switch_to(prev, next, last) \
	asm volatile(SAVE_CONTEXT					  \
	     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */	  \
	     "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */	  \
	     "call __switch_to\n\t"					  \
	     "movq "__percpu_arg([current_task])",%%rsi\n\t"		  \
	     __switch_canary						  \
	     "movq %P[thread_info](%%rsi),%%r8\n\t"			  \
	     "movq %%rax,%%rdi\n\t"					  \
	     "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t"		  \
	     "jnz ret_from_fork\n\t"					  \
	     RESTORE_CONTEXT						  \
	     : "=a" (last)						  \
	       __switch_canary_oparam					  \
	     : [next] "S" (next), [prev] "D" (prev),			  \
	       [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
	       [ti_flags] "i" (offsetof(struct thread_info, flags)),	  \
	       [_tif_fork] "i" (_TIF_FORK),				  \
	       [thread_info] "i" (offsetof(struct task_struct, stack)),  \
	       [current_task] "m" (current_task)			  \
	       __switch_canary_iparam					  \
	     : "memory", "cc" __EXTRA_CLOBBER)
#endif

#ifdef __KERNEL__

extern void native_load_gs_index(unsigned);

/*
 * Load a segment. Fall back on loading the zero segment
 * if something goes wrong.
 */
#define loadsegment(seg, value)					\
do {								\
	unsigned short __val = (value);				\
								\
	asm volatile("						\n"	\
		     "1:	movl %k0,%%" #seg "		\n"	\
								\
		     ".section .fixup,\"ax\"			\n"	\
		     "2:	xorl %k0,%k0			\n"	\
		     "		jmp 1b				\n"	\
		     ".previous					\n"	\
								\
		     _ASM_EXTABLE(1b, 2b)			\
								\
		     : "+r" (__val) : : "memory");		\
} while (0)

/*
 * Save a segment register away
 */
#define savesegment(seg, value)				\
	asm("mov %%" #seg ",%0":"=r" (value) : : "memory")
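
/*
 * Illustrative usage sketch (not part of the original header): the
 * context-switch path typically saves the outgoing task's selectors and
 * reloads the incoming task's with these helpers, e.g.
 *
 *	savesegment(es, prev->es);
 *	loadsegment(es, next->es);
 *
 * where "prev" and "next" point at the tasks' thread_struct.
 */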

/*
 * x86_32 user gs accessors.
 */
#ifdef CONFIG_X86_32
#ifdef CONFIG_X86_32_LAZY_GS
#define get_user_gs(regs)	(u16)({unsigned long v; savesegment(gs, v); v;})
#define set_user_gs(regs, v)	loadsegment(gs, (unsigned long)(v))
#define task_user_gs(tsk)	((tsk)->thread.gs)
#define lazy_save_gs(v)		savesegment(gs, (v))
#define lazy_load_gs(v)		loadsegment(gs, (v))
#else	/* X86_32_LAZY_GS */
#define get_user_gs(regs)	(u16)((regs)->gs)
#define set_user_gs(regs, v)	do { (regs)->gs = (v); } while (0)
#define task_user_gs(tsk)	(task_pt_regs(tsk)->gs)
#define lazy_save_gs(v)		do { } while (0)
#define lazy_load_gs(v)		do { } while (0)
#endif	/* X86_32_LAZY_GS */
#endif	/* X86_32 */

static inline unsigned long get_limit(unsigned long segment)
{
	unsigned long __limit;
	asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
	return __limit + 1;
}

static inline void native_clts(void)
{
	asm volatile("clts");
}

/*
 * Volatile isn't enough to prevent the compiler from reordering the
 * read/write functions for the control registers and messing everything up.
 * A memory clobber would solve the problem, but would prevent reordering of
 * all loads and stores around it, which can hurt performance. The solution
 * is to use a variable and mimic reads and writes to it to enforce
 * serialization.
 */
static unsigned long __force_order;

static inline unsigned long native_read_cr0(void)
{
	unsigned long val;
	asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
	return val;
}

static inline void native_write_cr0(unsigned long val)
{
	asm volatile("mov %0,%%cr0": : "r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr2(void)
{
	unsigned long val;
	asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order));
	return val;
}

static inline void native_write_cr2(unsigned long val)
{
	asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr3(void)
{
	unsigned long val;
	asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order));
	return val;
}

static inline void native_write_cr3(unsigned long val)
{
	asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr4(void)
{
	unsigned long val;
	asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
	return val;
}

static inline unsigned long native_read_cr4_safe(void)
{
	unsigned long val;
	/* This could fault if %cr4 does not exist. On x86_64, CR4 always
	 * exists, so it will never fail. */
#ifdef CONFIG_X86_32
	asm volatile("1: mov %%cr4, %0\n"
		     "2:\n"
		     _ASM_EXTABLE(1b, 2b)
		     : "=r" (val), "=m" (__force_order) : "0" (0));
#else
	val = native_read_cr4();
#endif
	return val;
}

static inline void native_write_cr4(unsigned long val)
{
	asm volatile("mov %0,%%cr4": : "r" (val), "m" (__force_order));
}

#ifdef CONFIG_X86_64
static inline unsigned long native_read_cr8(void)
{
	unsigned long cr8;
	asm volatile("movq %%cr8,%0" : "=r" (cr8));
	return cr8;
}

static inline void native_write_cr8(unsigned long val)
{
	asm volatile("movq %0,%%cr8" :: "r" (val) : "memory");
}
#endif

static inline void native_wbinvd(void)
{
	asm volatile("wbinvd": : :"memory");
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define read_cr0()	(native_read_cr0())
#define write_cr0(x)	(native_write_cr0(x))
#define read_cr2()	(native_read_cr2())
#define write_cr2(x)	(native_write_cr2(x))
#define read_cr3()	(native_read_cr3())
#define write_cr3(x)	(native_write_cr3(x))
#define read_cr4()	(native_read_cr4())
#define read_cr4_safe()	(native_read_cr4_safe())
#define write_cr4(x)	(native_write_cr4(x))
#define wbinvd()	(native_wbinvd())
#ifdef CONFIG_X86_64
#define read_cr8()	(native_read_cr8())
#define write_cr8(x)	(native_write_cr8(x))
#define load_gs_index	native_load_gs_index
#endif

/* Clear the 'TS' bit */
#define clts()		(native_clts())

#endif /* CONFIG_PARAVIRT */

#define stts() write_cr0(read_cr0() | X86_CR0_TS)

#endif /* __KERNEL__ */

static inline void clflush(volatile void *__p)
{
	asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p));
}
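
/*
 * Illustrative note (not part of the original header): clflush is only
 * ordered with respect to mfence, so callers that flush a range usually
 * bracket the loop with mb(), roughly:
 *
 *	mb();
 *	for (p = start; p < end; p += boot_cpu_data.x86_clflush_size)
 *		clflush(p);
 *	mb();
 */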

#define nop() asm volatile ("nop")

void disable_hlt(void);
void enable_hlt(void);

void cpu_idle_wait(void);

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

void default_idle(void);

void stop_this_cpu(void *dummy);

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */
#ifdef CONFIG_X86_32
/*
 * Some non-Intel clones support out of order store. wmb() ceases to be a
 * nop for these.
 */
#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else
#define mb()	asm volatile("mfence":::"memory")
#define rmb()	asm volatile("lfence":::"memory")
#define wmb()	asm volatile("sfence" ::: "memory")
#endif

/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than is
 * rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 **/

#define read_barrier_depends()	do { } while (0)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#ifdef CONFIG_X86_PPRO_FENCE
# define smp_rmb()	rmb()
#else
# define smp_rmb()	barrier()
#endif
#ifdef CONFIG_X86_OOSTORE
# define smp_wmb()	wmb()
#else
# define smp_wmb()	barrier()
#endif
#define smp_read_barrier_depends()	read_barrier_depends()
#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while (0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif

/*
 * Stop RDTSC speculation. This is needed when you need to use RDTSC
 * (or get_cycles or vread that possibly accesses the TSC) in a defined
 * code region.
 *
 * (Could use a three-way alternative for this if there was one.)
 */
static __always_inline void rdtsc_barrier(void)
{
	alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
	alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
}
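
/*
 * Illustrative usage sketch (not part of the original header): a reader
 * that wants a non-speculated timestamp brackets the TSC read, e.g.
 *
 *	rdtsc_barrier();
 *	cycles = get_cycles();
 *	rdtsc_barrier();
 *
 * which is how the vsyscall TSC read path uses this barrier.
 */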

/*
 * We handle most unaligned accesses in hardware. On the other hand,
 * unaligned DMA can be quite expensive on some Nehalem processors.
 *
 * Based on this we disable the IP header alignment in network drivers.
 */
#define NET_IP_ALIGN	0
#endif /* _ASM_X86_SYSTEM_H */