#ifndef _ASM_X86_SYSTEM_H_
#define _ASM_X86_SYSTEM_H_

#include <asm/asm.h>
#include <asm/segment.h>
#include <asm/cpufeature.h>
#include <asm/cmpxchg.h>

#include <linux/kernel.h>
#include <linux/irqflags.h>

/* entries in ARCH_DLINFO: */
#ifdef CONFIG_IA32_EMULATION
# define AT_VECTOR_SIZE_ARCH 2
#else
# define AT_VECTOR_SIZE_ARCH 1
#endif

#ifdef CONFIG_X86_32

struct task_struct; /* one of the stranger aspects of C forward declarations */
extern struct task_struct *FASTCALL(__switch_to(struct task_struct *prev,
						struct task_struct *next));

/*
 * Saving eflags is important. It not only switches IOPL between tasks,
 * it also protects other tasks from NT leaking through sysenter etc.
 */
#define switch_to(prev, next, last) do {				\
	unsigned long esi, edi;						\
	asm volatile("pushfl\n\t"		/* Save flags */	\
		     "pushl %%ebp\n\t"					\
		     "movl %%esp,%0\n\t"	/* save ESP */		\
		     "movl %5,%%esp\n\t"	/* restore ESP */	\
		     "movl $1f,%1\n\t"		/* save EIP */		\
		     "pushl %6\n\t"		/* restore EIP */	\
		     "jmp __switch_to\n"				\
		     "1:\t"						\
		     "popl %%ebp\n\t"					\
		     "popfl"						\
		     :"=m" (prev->thread.sp), "=m" (prev->thread.ip),	\
		      "=a" (last), "=S" (esi), "=D" (edi)		\
		     :"m" (next->thread.sp), "m" (next->thread.ip),	\
		      "2" (prev), "d" (next));				\
} while (0)
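/*
 * Illustrative usage (not part of this header): the scheduler's
 * context_switch() is expected to invoke this roughly as
 *
 *	switch_to(prev, next, prev);
 *
 * so that, once the new task's stack is live, 'last' names the task we
 * actually switched away from rather than a possibly stale 'prev'.
 */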

/*
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
#else
#define __SAVE(reg, offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
#define __RESTORE(reg, offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"

/* frame pointer must be last for get_wchan */
#define SAVE_CONTEXT    "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"

#define __EXTRA_CLOBBER \
	, "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
	  "r12", "r13", "r14", "r15"

/* Save and restore flags to clear a potentially leaking NT flag */
#define switch_to(prev, next, last) \
	asm volatile(SAVE_CONTEXT \
	     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \
	     "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \
	     "call __switch_to\n\t" \
	     ".globl thread_return\n" \
	     "thread_return:\n\t" \
	     "movq %%gs:%P[pda_pcurrent],%%rsi\n\t" \
	     "movq %P[thread_info](%%rsi),%%r8\n\t" \
	     LOCK_PREFIX "btr %[tif_fork],%P[ti_flags](%%r8)\n\t" \
	     "movq %%rax,%%rdi\n\t" \
	     "jc ret_from_fork\n\t" \
	     RESTORE_CONTEXT \
	     : "=a" (last) \
	     : [next] "S" (next), [prev] "D" (prev), \
	       [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
	       [ti_flags] "i" (offsetof(struct thread_info, flags)), \
	       [tif_fork] "i" (TIF_FORK), \
	       [thread_info] "i" (offsetof(struct task_struct, stack)), \
	       [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent)) \
	     : "memory", "cc" __EXTRA_CLOBBER)
#endif

#ifdef __KERNEL__
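/*
 * _set_base() and _set_limit() below poke a new base address or segment
 * limit directly into an 8-byte descriptor-table entry in memory: the base
 * is scattered across descriptor bytes 2, 3, 4 and 7, while the limit goes
 * into bytes 0-1 plus the low nibble of byte 6 (the upper nibble, which
 * holds the descriptor flags, is preserved).
 */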
#define _set_base(addr, base) do { unsigned long __pr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
	"rorl $16,%%edx\n\t" \
	"movb %%dl,%2\n\t" \
	"movb %%dh,%3" \
	:"=&d" (__pr) \
	:"m" (*((addr)+2)), \
	 "m" (*((addr)+4)), \
	 "m" (*((addr)+7)), \
	 "0" (base) \
	); } while (0)

#define _set_limit(addr, limit) do { unsigned long __lr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
	"rorl $16,%%edx\n\t" \
	"movb %2,%%dh\n\t" \
	"andb $0xf0,%%dh\n\t" \
	"orb %%dh,%%dl\n\t" \
	"movb %%dl,%2" \
	:"=&d" (__lr) \
	:"m" (*(addr)), \
	 "m" (*((addr)+6)), \
	 "0" (limit) \
	); } while (0)

#define set_base(ldt, base) _set_base(((char *)&(ldt)) , (base))
#define set_limit(ldt, limit) _set_limit(((char *)&(ldt)) , ((limit)-1))

extern void load_gs_index(unsigned);

/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong.
 */
#define loadsegment(seg, value) \
	asm volatile("\n" \
		     "1:\t" \
		     "movl %k0,%%" #seg "\n" \
		     "2:\n" \
		     ".section .fixup,\"ax\"\n" \
		     "3:\t" \
		     "movl %k1, %%" #seg "\n\t" \
		     "jmp 2b\n" \
		     ".previous\n" \
		     ".section __ex_table,\"a\"\n\t" \
		     _ASM_ALIGN "\n\t" \
		     _ASM_PTR " 1b,3b\n" \
		     ".previous" \
		     : :"r" (value), "r" (0))
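/*
 * Illustrative only (the 'sel' variable is hypothetical):
 *
 *	unsigned short sel = new_fs_selector;	(some selector value)
 *	loadsegment(fs, sel);
 *
 * If the selector turns out to be invalid, the faulting move is fixed up
 * through the exception table above and %fs is loaded with 0 instead.
 */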


/*
 * Save a segment register away
 */
#define savesegment(seg, value) \
	asm volatile("mov %%" #seg ",%0":"=rm" (value))
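/*
 * Illustrative only: the 32-bit context-switch code saves the outgoing
 * task's %gs along the lines of
 *
 *	savesegment(gs, prev->gs);
 *
 * i.e. the macro simply stores the current selector into 'value'.
 */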

static inline unsigned long get_limit(unsigned long segment)
{
	unsigned long __limit;
	__asm__("lsll %1,%0"
		:"=r" (__limit):"r" (segment));
	return __limit+1;
}

static inline void native_clts(void)
{
	asm volatile ("clts");
}

/*
 * Volatile isn't enough to prevent the compiler from reordering the
 * read/write functions for the control registers and messing everything up.
 * A memory clobber would solve the problem, but would prevent reordering of
 * all loads and stores around it, which can hurt performance. The solution
 * is to use a variable and mimic reads and writes to it to enforce
 * serialization.
 */
static unsigned long __force_order;

static inline unsigned long native_read_cr0(void)
{
	unsigned long val;
	asm volatile("mov %%cr0,%0\n\t" :"=r" (val), "=m" (__force_order));
	return val;
}

static inline void native_write_cr0(unsigned long val)
{
	asm volatile("mov %0,%%cr0": :"r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr2(void)
{
	unsigned long val;
	asm volatile("mov %%cr2,%0\n\t" :"=r" (val), "=m" (__force_order));
	return val;
}

static inline void native_write_cr2(unsigned long val)
{
	asm volatile("mov %0,%%cr2": :"r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr3(void)
{
	unsigned long val;
	asm volatile("mov %%cr3,%0\n\t" :"=r" (val), "=m" (__force_order));
	return val;
}

static inline void native_write_cr3(unsigned long val)
{
	asm volatile("mov %0,%%cr3": :"r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr4(void)
{
	unsigned long val;
	asm volatile("mov %%cr4,%0\n\t" :"=r" (val), "=m" (__force_order));
	return val;
}

static inline unsigned long native_read_cr4_safe(void)
{
	unsigned long val;
	/* This could fault if %cr4 does not exist. On x86_64, %cr4 always
	 * exists, so it will never fault. */
#ifdef CONFIG_X86_32
	asm volatile("1: mov %%cr4, %0		\n"
		     "2:			\n"
		     ".section __ex_table,\"a\"	\n"
		     ".long 1b,2b		\n"
		     ".previous			\n"
		     : "=r" (val), "=m" (__force_order) : "0" (0));
#else
	val = native_read_cr4();
#endif
	return val;
}

static inline void native_write_cr4(unsigned long val)
{
	asm volatile("mov %0,%%cr4": :"r" (val), "m" (__force_order));
}

static inline void native_wbinvd(void)
{
	asm volatile("wbinvd": : :"memory");
}
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define read_cr0()	(native_read_cr0())
#define write_cr0(x)	(native_write_cr0(x))
#define read_cr2()	(native_read_cr2())
#define write_cr2(x)	(native_write_cr2(x))
#define read_cr3()	(native_read_cr3())
#define write_cr3(x)	(native_write_cr3(x))
#define read_cr4()	(native_read_cr4())
#define read_cr4_safe()	(native_read_cr4_safe())
#define write_cr4(x)	(native_write_cr4(x))
#define wbinvd()	(native_wbinvd())

#ifdef CONFIG_X86_64

static inline unsigned long read_cr8(void)
{
	unsigned long cr8;
	asm volatile("movq %%cr8,%0" : "=r" (cr8));
	return cr8;
}

static inline void write_cr8(unsigned long val)
{
	asm volatile("movq %0,%%cr8" :: "r" (val) : "memory");
}

#endif

/* Clear the 'TS' bit */
#define clts() (native_clts())

#endif /* CONFIG_PARAVIRT */

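/*
 * stts() sets CR0.TS (bit 3, hence the constant 8) so that the next
 * FPU/MMX/SSE instruction raises a device-not-available fault, which the
 * kernel uses to restore FPU state lazily.
 */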
#define stts() write_cr0(8 | read_cr0())

#endif /* __KERNEL__ */

static inline void clflush(void *__p)
{
	asm volatile("clflush %0" : "+m" (*(char __force *)__p));
}
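/*
 * Usage note (guidance, not enforced here): clflush only flushes the cache
 * line containing the given byte and is weakly ordered against ordinary
 * stores, so callers flushing a range typically bracket the clflush loop
 * with mb().
 */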

#define nop() __asm__ __volatile__ ("nop")

void disable_hlt(void);
void enable_hlt(void);

extern int es7000_plat;
void cpu_idle_wait(void);

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

void default_idle(void);

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */
#ifdef CONFIG_X86_32
/*
 * For now, "wmb()" doesn't actually do anything, as all
 * Intel CPUs follow what Intel calls a *Processor Order*,
 * in which all writes are seen in the program order even
 * outside the CPU.
 *
 * I expect future Intel CPUs to have a weaker ordering,
 * but I'd also expect them to finally get their act together
 * and add some real memory barriers if so.
 *
 * Some non-Intel clones support out-of-order stores. wmb() ceases to be a
 * nop for these.
 */
#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
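/*
 * Background note: alternative() emits the locked add and, at boot, patches
 * in the fence instruction when the CPU advertises the named feature flag
 * (X86_FEATURE_XMM2 for SSE2's mfence/lfence, X86_FEATURE_XMM for SSE's
 * sfence), so pre-SSE CPUs keep the serializing locked add.
 */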
#else
#define mb()	asm volatile("mfence":::"memory")
#define rmb()	asm volatile("lfence":::"memory")
#define wmb()	asm volatile("sfence" ::: "memory")
#endif

/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than is
 * rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 **/

#define read_barrier_depends()	do { } while (0)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#ifdef CONFIG_X86_PPRO_FENCE
# define smp_rmb()	rmb()
#else
# define smp_rmb()	barrier()
#endif
#ifdef CONFIG_X86_OOSTORE
# define smp_wmb()	wmb()
#else
# define smp_wmb()	barrier()
#endif
#define smp_read_barrier_depends()	read_barrier_depends()
#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while (0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif
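/*
 * Illustrative pairing (a sketch, not code used elsewhere in this header):
 * a producer/consumer using the SMP barriers above.
 *
 *	CPU 0				CPU 1
 *
 *	data = 42;			while (!flag)
 *	smp_wmb();				cpu_relax();
 *	flag = 1;			smp_rmb();
 *					assert(data == 42);
 *
 * smp_wmb() keeps the data store visible before the flag store, and
 * smp_rmb() keeps the flag read ordered before the data read.
 */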


#endif