#ifndef _ASM_X86_SYSTEM_H
#define _ASM_X86_SYSTEM_H

#include <asm/asm.h>
#include <asm/segment.h>
#include <asm/cpufeature.h>
#include <asm/cmpxchg.h>
#include <asm/nops.h>

#include <linux/kernel.h>
#include <linux/irqflags.h>

/* entries in ARCH_DLINFO: */
#ifdef CONFIG_IA32_EMULATION
# define AT_VECTOR_SIZE_ARCH 2
#else
# define AT_VECTOR_SIZE_ARCH 1
#endif

struct task_struct; /* one of the stranger aspects of C forward declarations */
struct task_struct *__switch_to(struct task_struct *prev,
                                struct task_struct *next);

#ifdef CONFIG_X86_32

#ifdef CONFIG_CC_STACKPROTECTOR
#define __switch_canary                                                 \
        "movl "__percpu_arg([current_task])",%%ebx\n\t"                 \
        "movl %P[task_canary](%%ebx),%%ebx\n\t"                         \
        "movl %%ebx,"__percpu_arg([stack_canary])"\n\t"
#define __switch_canary_oparam                                          \
        , [stack_canary] "=m" (per_cpu_var(stack_canary))
#define __switch_canary_iparam                                          \
        , [current_task] "m" (per_cpu_var(current_task))                \
        , [task_canary] "i" (offsetof(struct task_struct, stack_canary))
#else   /* CC_STACKPROTECTOR */
#define __switch_canary
#define __switch_canary_oparam
#define __switch_canary_iparam
#endif  /* CC_STACKPROTECTOR */

/*
 * Saving eflags is important. It switches not only IOPL between tasks,
 * it also protects other tasks from NT leaking through sysenter etc.
 */
#define switch_to(prev, next, last)                                     \
do {                                                                    \
        /*                                                              \
         * Context-switching clobbers all registers, so we clobber     \
         * them explicitly, via unused output variables.               \
         * (EAX and EBP are not listed because EBP is saved/restored   \
         * explicitly for wchan access and EAX is the return value of  \
         * __switch_to())                                               \
         */                                                             \
        unsigned long ebx, ecx, edx, esi, edi;                          \
                                                                        \
        asm volatile("pushfl\n\t"               /* save flags */        \
                     "pushl %%ebp\n\t"          /* save EBP */          \
                     "movl %%esp,%[prev_sp]\n\t"  /* save ESP */        \
                     "movl %[next_sp],%%esp\n\t"  /* restore ESP */     \
                     "movl $1f,%[prev_ip]\n\t"  /* save EIP */          \
                     "pushl %[next_ip]\n\t"     /* restore EIP */       \
                     "jmp __switch_to\n"        /* regparm call */      \
                     "1:\t"                                             \
                     __switch_canary                                    \
                     "popl %%ebp\n\t"           /* restore EBP */       \
                     "popfl\n"                  /* restore flags */     \
                                                                        \
                     /* output parameters */                            \
                     : [prev_sp] "=m" (prev->thread.sp),                \
                       [prev_ip] "=m" (prev->thread.ip),                \
                       "=a" (last),                                     \
                                                                        \
                       /* clobbered output registers: */                \
                       "=b" (ebx), "=c" (ecx), "=d" (edx),              \
                       "=S" (esi), "=D" (edi)                           \
                                                                        \
                       __switch_canary_oparam                           \
                                                                        \
                     /* input parameters: */                            \
                     : [next_sp]  "m" (next->thread.sp),                \
                       [next_ip]  "m" (next->thread.ip),                \
                                                                        \
                       /* regparm parameters for __switch_to(): */      \
                       [prev]     "a" (prev),                           \
                       [next]     "d" (next)                            \
                                                                        \
                       __switch_canary_iparam                           \
                                                                        \
                     : /* reloaded segment registers */                 \
                        "memory");                                      \
} while (0)

/*
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
#else
#define __SAVE(reg, offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
#define __RESTORE(reg, offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"

/* frame pointer must be last for get_wchan */
#define SAVE_CONTEXT    "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"

#define __EXTRA_CLOBBER                                                 \
        , "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11",                \
          "r12", "r13", "r14", "r15"

#ifdef CONFIG_CC_STACKPROTECTOR
#define __switch_canary                                                 \
        "movq %P[task_canary](%%rsi),%%r8\n\t"                          \
        "movq %%r8,"__percpu_arg([gs_canary])"\n\t"
#define __switch_canary_oparam                                          \
        , [gs_canary] "=m" (per_cpu_var(irq_stack_union.stack_canary))
#define __switch_canary_iparam                                          \
        , [task_canary] "i" (offsetof(struct task_struct, stack_canary))
#else   /* CC_STACKPROTECTOR */
#define __switch_canary
#define __switch_canary_oparam
#define __switch_canary_iparam
#endif  /* CC_STACKPROTECTOR */

/* Save/restore flags to clear/handle leaking NT */
#define switch_to(prev, next, last)                                       \
        asm volatile(SAVE_CONTEXT                                         \
             "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */       \
             "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */    \
             "call __switch_to\n\t"                                       \
             ".globl thread_return\n"                                     \
             "thread_return:\n\t"                                         \
             "movq "__percpu_arg([current_task])",%%rsi\n\t"              \
             __switch_canary                                              \
             "movq %P[thread_info](%%rsi),%%r8\n\t"                       \
             LOCK_PREFIX "btr %[tif_fork],%P[ti_flags](%%r8)\n\t"         \
             "movq %%rax,%%rdi\n\t"                                       \
             "jc ret_from_fork\n\t"                                       \
             RESTORE_CONTEXT                                              \
             : "=a" (last)                                                \
               __switch_canary_oparam                                     \
             : [next] "S" (next), [prev] "D" (prev),                      \
               [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
               [ti_flags] "i" (offsetof(struct thread_info, flags)),      \
               [tif_fork] "i" (TIF_FORK),                                 \
               [thread_info] "i" (offsetof(struct task_struct, stack)),   \
               [current_task] "m" (per_cpu_var(current_task))             \
               __switch_canary_iparam                                     \
             : "memory", "cc" __EXTRA_CLOBBER)
#endif

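/*
 * Usage sketch (illustration only): the scheduler core invokes switch_to()
 * from context_switch() roughly as below; pick_next() is just a stand-in
 * for the scheduler's real task-selection logic.
 *
 *	struct task_struct *prev = current, *next = pick_next(), *last;
 *
 *	switch_to(prev, next, last);
 *
 * Execution resumes past the macro only when 'prev' is scheduled back in,
 * at which point 'last' names the task that ran right before it on this CPU.
 */
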
#ifdef __KERNEL__
#define _set_base(addr, base) do { unsigned long __pr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
        "rorl $16,%%edx\n\t" \
        "movb %%dl,%2\n\t" \
        "movb %%dh,%3" \
        :"=&d" (__pr) \
        :"m" (*((addr)+2)), \
         "m" (*((addr)+4)), \
         "m" (*((addr)+7)), \
         "0" (base) \
        ); } while (0)

#define _set_limit(addr, limit) do { unsigned long __lr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
        "rorl $16,%%edx\n\t" \
        "movb %2,%%dh\n\t" \
        "andb $0xf0,%%dh\n\t" \
        "orb %%dh,%%dl\n\t" \
        "movb %%dl,%2" \
        :"=&d" (__lr) \
        :"m" (*(addr)), \
         "m" (*((addr)+6)), \
         "0" (limit) \
        ); } while (0)

#define set_base(ldt, base) _set_base(((char *)&(ldt)) , (base))
#define set_limit(ldt, limit) _set_limit(((char *)&(ldt)) , ((limit)-1))

extern void native_load_gs_index(unsigned);

/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong..
 */
#define loadsegment(seg, value)                 \
        asm volatile("\n"                       \
                     "1:\t"                     \
                     "movl %k0,%%" #seg "\n"    \
                     "2:\n"                     \
                     ".section .fixup,\"ax\"\n" \
                     "3:\t"                     \
                     "movl %k1, %%" #seg "\n\t" \
                     "jmp 2b\n"                 \
                     ".previous\n"              \
                     _ASM_EXTABLE(1b,3b)        \
                     : :"r" (value), "r" (0) : "memory")


/*
 * Save a segment register away
 */
#define savesegment(seg, value)                         \
        asm("mov %%" #seg ",%0":"=r" (value) : : "memory")

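/*
 * Usage sketch (illustration only): a typical save/switch/restore sequence
 * built from the two macros above; 'old_fs' is just an example variable.
 *
 *	unsigned int old_fs;
 *
 *	savesegment(fs, old_fs);	(remember the current %fs selector)
 *	loadsegment(fs, 0);		(load the null selector)
 *	...
 *	loadsegment(fs, old_fs);	(put it back; a bad value faults and
 *					 falls back to the zero segment)
 */
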
/*
 * x86_32 user gs accessors.
 */
#ifdef CONFIG_X86_32
#ifdef CONFIG_X86_32_LAZY_GS
#define get_user_gs(regs)       (u16)({unsigned long v; savesegment(gs, v); v;})
#define set_user_gs(regs, v)    loadsegment(gs, (unsigned long)(v))
#define task_user_gs(tsk)       ((tsk)->thread.gs)
#define lazy_save_gs(v)         savesegment(gs, (v))
#define lazy_load_gs(v)         loadsegment(gs, (v))
#else   /* X86_32_LAZY_GS */
#define get_user_gs(regs)       (u16)((regs)->gs)
#define set_user_gs(regs, v)    do { (regs)->gs = (v); } while (0)
#define task_user_gs(tsk)       (task_pt_regs(tsk)->gs)
#define lazy_save_gs(v)         do { } while (0)
#define lazy_load_gs(v)         do { } while (0)
#endif  /* X86_32_LAZY_GS */
#endif  /* X86_32 */

static inline unsigned long get_limit(unsigned long segment)
{
        unsigned long __limit;
        asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
        return __limit + 1;
}

static inline void native_clts(void)
{
        asm volatile("clts");
}

/*
 * Volatile isn't enough to prevent the compiler from reordering the
 * read/write functions for the control registers and messing everything up.
 * A memory clobber would solve the problem, but would prevent reordering of
 * all loads/stores around it, which can hurt performance. The solution is to
 * use a variable and mimic reads and writes to it to enforce serialization.
 */
static unsigned long __force_order;

static inline unsigned long native_read_cr0(void)
{
        unsigned long val;
        asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
        return val;
}

static inline void native_write_cr0(unsigned long val)
{
        asm volatile("mov %0,%%cr0": : "r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr2(void)
{
        unsigned long val;
        asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order));
        return val;
}

static inline void native_write_cr2(unsigned long val)
{
        asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr3(void)
{
        unsigned long val;
        asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order));
        return val;
}

static inline void native_write_cr3(unsigned long val)
{
        asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr4(void)
{
        unsigned long val;
        asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
        return val;
}

static inline unsigned long native_read_cr4_safe(void)
{
        unsigned long val;
        /* This could fault if %cr4 does not exist. In x86_64, a cr4 always
         * exists, so it will never fail. */
#ifdef CONFIG_X86_32
        asm volatile("1: mov %%cr4, %0\n"
                     "2:\n"
                     _ASM_EXTABLE(1b, 2b)
                     : "=r" (val), "=m" (__force_order) : "0" (0));
#else
        val = native_read_cr4();
#endif
        return val;
}

static inline void native_write_cr4(unsigned long val)
{
        asm volatile("mov %0,%%cr4": : "r" (val), "m" (__force_order));
}

#ifdef CONFIG_X86_64
static inline unsigned long native_read_cr8(void)
{
        unsigned long cr8;
        asm volatile("movq %%cr8,%0" : "=r" (cr8));
        return cr8;
}

static inline void native_write_cr8(unsigned long val)
{
        asm volatile("movq %0,%%cr8" :: "r" (val) : "memory");
}
#endif

static inline void native_wbinvd(void)
{
        asm volatile("wbinvd": : :"memory");
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define read_cr0()      (native_read_cr0())
#define write_cr0(x)    (native_write_cr0(x))
#define read_cr2()      (native_read_cr2())
#define write_cr2(x)    (native_write_cr2(x))
#define read_cr3()      (native_read_cr3())
#define write_cr3(x)    (native_write_cr3(x))
#define read_cr4()      (native_read_cr4())
#define read_cr4_safe() (native_read_cr4_safe())
#define write_cr4(x)    (native_write_cr4(x))
#define wbinvd()        (native_wbinvd())
#ifdef CONFIG_X86_64
#define read_cr8()      (native_read_cr8())
#define write_cr8(x)    (native_write_cr8(x))
#define load_gs_index   native_load_gs_index
#endif

/* Clear the 'TS' bit */
#define clts()          (native_clts())

#endif /* CONFIG_PARAVIRT */

#define stts() write_cr0(read_cr0() | X86_CR0_TS)

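/*
 * Usage sketch (illustration only): clts()/stts() toggle CR0.TS around
 * FPU/SSE use, which is what lazy FPU context switching relies on.
 *
 *	clts();			(TS clear: FPU instructions run normally)
 *	...touch FPU/SSE state...
 *	stts();			(TS set: the next FPU use raises #NM)
 */
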
#endif /* __KERNEL__ */

static inline void clflush(volatile void *__p)
{
        asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p));
}

#define nop() asm volatile ("nop")

void disable_hlt(void);
void enable_hlt(void);

void cpu_idle_wait(void);

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

void default_idle(void);

void stop_this_cpu(void *dummy);

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */
#ifdef CONFIG_X86_32
/*
 * Some non-Intel clones support out of order store. wmb() ceases to be a
 * nop for these.
 */
#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else
#define mb()    asm volatile("mfence":::"memory")
#define rmb()   asm volatile("lfence":::"memory")
#define wmb()   asm volatile("sfence" ::: "memory")
#endif

/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than is
 * rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 **/

#define read_barrier_depends()  do { } while (0)

#ifdef CONFIG_SMP
#define smp_mb()        mb()
#ifdef CONFIG_X86_PPRO_FENCE
# define smp_rmb()      rmb()
#else
# define smp_rmb()      barrier()
#endif
#ifdef CONFIG_X86_OOSTORE
# define smp_wmb()      wmb()
#else
# define smp_wmb()      barrier()
#endif
#define smp_read_barrier_depends()      read_barrier_depends()
#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
#else
#define smp_mb()        barrier()
#define smp_rmb()       barrier()
#define smp_wmb()       barrier()
#define smp_read_barrier_depends()      do { } while (0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif

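/*
 * Usage sketch (illustration only): the usual producer/consumer pairing of
 * smp_wmb() and smp_rmb(); "data" and "flag" are example shared variables,
 * both initially zero.
 *
 *	CPU 0				CPU 1
 *
 *	data = 42;
 *	smp_wmb();
 *	flag = 1;			while (flag == 0)
 *						cpu_relax();
 *					smp_rmb();
 *					x = data;	(x is guaranteed to be 42)
 */
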
/*
 * Stop RDTSC speculation. This is needed when you need to use RDTSC
 * (or get_cycles or vread that possibly accesses the TSC) in a defined
 * code region.
 *
 * (Could use a three-way alternative() for this if there was one.)
 */
static inline void rdtsc_barrier(void)
{
        alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
        alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
}

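/*
 * Usage sketch (illustration only): fencing a TSC read on both sides keeps
 * the CPU from speculating it into the surrounding code.
 *
 *	rdtsc_barrier();
 *	cycles = get_cycles();
 *	rdtsc_barrier();
 */
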
#endif /* _ASM_X86_SYSTEM_H */