/*
 * include/asm-i386/processor.h
 *
 * Copyright (C) 1994 Linus Torvalds
 */

#ifndef __ASM_I386_PROCESSOR_H
#define __ASM_I386_PROCESSOR_H

#include <asm/vm86.h>
#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/types.h>
#include <asm/sigcontext.h>
#include <asm/cpufeature.h>
#include <asm/msr.h>
#include <asm/system.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <asm/percpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <asm/processor-flags.h>

/* flag for disabling the tsc */
extern int tsc_disable;

struct desc_struct {
	unsigned long a, b;
};

#define desc_empty(desc) \
		(!((desc)->a | (desc)->b))

#define desc_equal(desc1, desc2) \
		(((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
#define current_text_addr() ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; })
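
/*
 * Example (illustrative sketch): current_text_addr() evaluates to the
 * address of the instruction that follows its expansion, so a caller
 * can record roughly where it is executing:
 *
 *	void *pc = current_text_addr();
 */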

/*
 * CPU type and hardware bug flags. Kept separately for each CPU.
 * Members of this structure are referenced in head.S, so think twice
 * before touching them. [mj]
 */

struct cpuinfo_x86 {
	__u8 x86;		/* CPU family */
	__u8 x86_vendor;	/* CPU vendor */
	__u8 x86_model;
	__u8 x86_mask;
	char wp_works_ok;	/* It doesn't on 386s */
	char hlt_works_ok;	/* Problems on some 486DX4s and old 386s */
	char hard_math;
	char rfu;
	int cpuid_level;	/* Maximum supported CPUID level, -1=no CPUID */
	unsigned long x86_capability[NCAPINTS];
	char x86_vendor_id[16];
	char x86_model_id[64];
	int x86_cache_size;	/* in KB - valid for CPUs which support this call */
	int x86_cache_alignment;	/* In bytes */
	char fdiv_bug;
	char f00f_bug;
	char coma_bug;
	char pad0;
	int x86_power;
	unsigned long loops_per_jiffy;
#ifdef CONFIG_SMP
	cpumask_t llc_shared_map;	/* cpus sharing the last level cache */
#endif
	unsigned char x86_max_cores;	/* cpuid returned max cores value */
	unsigned char apicid;
	unsigned short x86_clflush_size;
#ifdef CONFIG_SMP
	unsigned char booted_cores;	/* number of cores as seen by OS */
	__u8 phys_proc_id;		/* Physical processor id. */
	__u8 cpu_core_id;		/* Core id */
#endif
} __attribute__((__aligned__(SMP_CACHE_BYTES)));

#define X86_VENDOR_INTEL 0
#define X86_VENDOR_CYRIX 1
#define X86_VENDOR_AMD 2
#define X86_VENDOR_UMC 3
#define X86_VENDOR_NEXGEN 4
#define X86_VENDOR_CENTAUR 5
#define X86_VENDOR_RISE 6
#define X86_VENDOR_TRANSMETA 7
#define X86_VENDOR_NSC 8
#define X86_VENDOR_NUM 9
#define X86_VENDOR_UNKNOWN 0xff

/*
 * capabilities of CPUs
 */

extern struct cpuinfo_x86 boot_cpu_data;
extern struct cpuinfo_x86 new_cpu_data;
extern struct tss_struct doublefault_tss;
DECLARE_PER_CPU(struct tss_struct, init_tss);

#ifdef CONFIG_SMP
extern struct cpuinfo_x86 cpu_data[];
#define current_cpu_data cpu_data[smp_processor_id()]
#else
#define cpu_data (&boot_cpu_data)
#define current_cpu_data boot_cpu_data
#endif

extern int cpu_llc_id[NR_CPUS];
extern char ignore_fpu_irq;

void __init cpu_detect(struct cpuinfo_x86 *c);

extern void identify_boot_cpu(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern unsigned short num_cache_leaves;

#ifdef CONFIG_X86_HT
extern void detect_ht(struct cpuinfo_x86 *c);
#else
static inline void detect_ht(struct cpuinfo_x86 *c) {}
#endif

static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
				unsigned int *ecx, unsigned int *edx)
{
	/* ecx is often an input as well as an output. */
	__asm__("cpuid"
		: "=a" (*eax),
		  "=b" (*ebx),
		  "=c" (*ecx),
		  "=d" (*edx)
		: "0" (*eax), "2" (*ecx));
}
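
/*
 * Example (illustrative sketch): querying the maximum basic CPUID leaf.
 * All four registers are in/out parameters, so initialize every one;
 * %ecx in particular is an input on some leaves.
 *
 *	unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
 *	native_cpuid(&eax, &ebx, &ecx, &edx);
 *	... eax now holds the highest supported basic leaf ...
 */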

#define load_cr3(pgdir) write_cr3(__pa(pgdir))

/*
 * Intel CPU features in CR4
 */
#define X86_CR4_VME		0x0001	/* enable vm86 extensions */
#define X86_CR4_PVI		0x0002	/* virtual interrupts flag enable */
#define X86_CR4_TSD		0x0004	/* disable time stamp at ipl 3 */
#define X86_CR4_DE		0x0008	/* enable debugging extensions */
#define X86_CR4_PSE		0x0010	/* enable page size extensions */
#define X86_CR4_PAE		0x0020	/* enable physical address extensions */
#define X86_CR4_MCE		0x0040	/* Machine check enable */
#define X86_CR4_PGE		0x0080	/* enable global pages */
#define X86_CR4_PCE		0x0100	/* enable performance counters at ipl 3 */
#define X86_CR4_OSFXSR		0x0200	/* enable fast FPU save and restore */
#define X86_CR4_OSXMMEXCPT	0x0400	/* enable unmasked SSE exceptions */

/*
 * Save the cr4 feature set we're using (i.e. Pentium 4MB enable and
 * PPro Global page enable), so that any CPUs that boot up after us
 * can get the correct flags.
 */
extern unsigned long mmu_cr4_features;

static inline void set_in_cr4(unsigned long mask)
{
	unsigned cr4;
	mmu_cr4_features |= mask;
	cr4 = read_cr4();
	cr4 |= mask;
	write_cr4(cr4);
}

static inline void clear_in_cr4(unsigned long mask)
{
	unsigned cr4;
	mmu_cr4_features &= ~mask;
	cr4 = read_cr4();
	cr4 &= ~mask;
	write_cr4(cr4);
}
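
/*
 * Example (illustrative sketch): making RDTSC privileged by setting
 * CR4.TSD, so user space (ipl 3) traps on it, and undoing it again:
 *
 *	set_in_cr4(X86_CR4_TSD);
 *	...
 *	clear_in_cr4(X86_CR4_TSD);
 *
 * Because mmu_cr4_features is updated as a side effect, CPUs that boot
 * later pick up the same setting.
 */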

/*
 * NSC/Cyrix CPU configuration register indexes
 */

#define CX86_PCR0 0x20
#define CX86_GCR  0xb8
#define CX86_CCR0 0xc0
#define CX86_CCR1 0xc1
#define CX86_CCR2 0xc2
#define CX86_CCR3 0xc3
#define CX86_CCR4 0xe8
#define CX86_CCR5 0xe9
#define CX86_CCR6 0xea
#define CX86_CCR7 0xeb
#define CX86_PCR1 0xf0
#define CX86_DIR0 0xfe
#define CX86_DIR1 0xff
#define CX86_ARR_BASE 0xc4
#define CX86_RCR_BASE 0xdc

/*
 * NSC/Cyrix CPU indexed register access macros
 */

#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })

#define setCx86(reg, data) do { \
	outb((reg), 0x22); \
	outb((data), 0x23); \
} while (0)
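
/*
 * Example (illustrative sketch of the usual Cyrix idiom): the higher
 * configuration registers are only visible while MAPEN (the top four
 * bits of CCR3) is set to 1, so flip it around the access and restore
 * CCR3 afterwards:
 *
 *	unsigned char ccr3 = getCx86(CX86_CCR3);
 *	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);	(set MAPEN = 1)
 *	... read or write CX86_CCR4..CX86_CCR7 here ...
 *	setCx86(CX86_CCR3, ccr3);			(restore MAPEN)
 */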

/* Stop speculative execution */
static inline void sync_core(void)
{
	int tmp;
	asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx", "ecx", "edx", "memory");
}

static inline void __monitor(const void *eax, unsigned long ecx,
			     unsigned long edx)
{
	/* "monitor %eax,%ecx,%edx;" */
	asm volatile(
		".byte 0x0f,0x01,0xc8;"
		: :"a" (eax), "c" (ecx), "d"(edx));
}

static inline void __mwait(unsigned long eax, unsigned long ecx)
{
	/* "mwait %eax,%ecx;" */
	asm volatile(
		".byte 0x0f,0x01,0xc9;"
		: :"a" (eax), "c" (ecx));
}
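
/*
 * Example (illustrative sketch, 'trigger' is a placeholder variable):
 * the usual MONITOR/MWAIT pattern.  Arm the monitor on an address,
 * re-check the wakeup condition to close the race, then MWAIT until
 * the monitored line is written or an interrupt arrives:
 *
 *	__monitor(&trigger, 0, 0);
 *	if (!trigger)
 *		__mwait(0, 0);
 */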

extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);

/* From the system description table in the BIOS. Mostly for MCA use, but
   others may find it useful. */
extern unsigned int machine_id;
extern unsigned int machine_submodel_id;
extern unsigned int BIOS_revision;
extern unsigned int mca_pentium_flag;

/* Boot loader type from the setup header */
extern int bootloader_type;

/*
 * User space process size: 3GB (default).
 */
#define TASK_SIZE	(PAGE_OFFSET)

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))

#define HAVE_ARCH_PICK_MMAP_LAYOUT

/*
 * Size of io_bitmap.
 */
#define IO_BITMAP_BITS			65536
#define IO_BITMAP_BYTES			(IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS			(IO_BITMAP_BYTES/sizeof(long))
#define IO_BITMAP_OFFSET		offsetof(struct tss_struct, io_bitmap)
#define INVALID_IO_BITMAP_OFFSET	0x8000
#define INVALID_IO_BITMAP_OFFSET_LAZY	0x9000
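
/*
 * Worked sizes, as a sanity check of the constants above: 65536 ports
 * at one bit each give 65536/8 = 8192 bytes, i.e. 8192/4 = 2048 longs
 * on i386.  The tss_struct below allocates IO_BITMAP_LONGS + 1 longs
 * so that the extra byte the CPU reads past the end of the bitmap
 * stays within the segment limit.
 */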

struct i387_fsave_struct {
	long cwd;
	long swd;
	long twd;
	long fip;
	long fcs;
	long foo;
	long fos;
	long st_space[20];	/* 8*10 bytes for each FP-reg = 80 bytes */
	long status;		/* software status information */
};

struct i387_fxsave_struct {
	unsigned short cwd;
	unsigned short swd;
	unsigned short twd;
	unsigned short fop;
	long fip;
	long fcs;
	long foo;
	long fos;
	long mxcsr;
	long mxcsr_mask;
	long st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
	long xmm_space[32];	/* 8*16 bytes for each XMM-reg = 128 bytes */
	long padding[56];
} __attribute__((aligned(16)));

struct i387_soft_struct {
	long cwd;
	long swd;
	long twd;
	long fip;
	long fcs;
	long foo;
	long fos;
	long st_space[20];	/* 8*10 bytes for each FP-reg = 80 bytes */
	unsigned char ftop, changed, lookahead, no_update, rm, alimit;
	struct info *info;
	unsigned long entry_eip;
};

union i387_union {
	struct i387_fsave_struct fsave;
	struct i387_fxsave_struct fxsave;
	struct i387_soft_struct soft;
};

typedef struct {
	unsigned long seg;
} mm_segment_t;

struct thread_struct;

struct tss_struct {
	unsigned short back_link, __blh;
	unsigned long esp0;
	unsigned short ss0, __ss0h;
	unsigned long esp1;
	unsigned short ss1, __ss1h;	/* ss1 is used to cache MSR_IA32_SYSENTER_CS */
	unsigned long esp2;
	unsigned short ss2, __ss2h;
	unsigned long __cr3;
	unsigned long eip;
	unsigned long eflags;
	unsigned long eax, ecx, edx, ebx;
	unsigned long esp;
	unsigned long ebp;
	unsigned long esi;
	unsigned long edi;
	unsigned short es, __esh;
	unsigned short cs, __csh;
	unsigned short ss, __ssh;
	unsigned short ds, __dsh;
	unsigned short fs, __fsh;
	unsigned short gs, __gsh;
	unsigned short ldt, __ldth;
	unsigned short trace, io_bitmap_base;
	/*
	 * The extra 1 is there because the CPU will access an
	 * additional byte beyond the end of the IO permission
	 * bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
	/*
	 * Cache the current maximum and the last task that used the bitmap:
	 */
	unsigned long io_bitmap_max;
	struct thread_struct *io_bitmap_owner;
	/*
	 * pads the TSS to be cacheline-aligned (size is 0x100)
	 */
	unsigned long __cacheline_filler[35];
	/*
	 * .. and then another 0x100 bytes for the emergency kernel stack
	 */
	unsigned long stack[64];
} __attribute__((packed));

#define ARCH_MIN_TASKALIGN	16

struct thread_struct {
/* cached TLS descriptors. */
	struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
	unsigned long esp0;
	unsigned long sysenter_cs;
	unsigned long eip;
	unsigned long esp;
	unsigned long fs;
	unsigned long gs;
/* Hardware debugging registers */
	unsigned long debugreg[8];	/* %db0-7 debug registers */
/* fault info */
	unsigned long cr2, trap_no, error_code;
/* floating point info */
	union i387_union i387;
/* virtual 86 mode info */
	struct vm86_struct __user *vm86_info;
	unsigned long screen_bitmap;
	unsigned long v86flags, v86mask, saved_esp0;
	unsigned int saved_fs, saved_gs;
/* IO permissions */
	unsigned long *io_bitmap_ptr;
	unsigned long iopl;
/* max allowed port in the bitmap, in bytes: */
	unsigned long io_bitmap_max;
};

#define INIT_THREAD { \
	.esp0 = sizeof(init_stack) + (long)&init_stack, \
	.vm86_info = NULL, \
	.sysenter_cs = __KERNEL_CS, \
	.io_bitmap_ptr = NULL, \
	.fs = __KERNEL_PDA, \
}

/*
 * Note that the .io_bitmap member must be extra-big. This is because
 * the CPU will access an additional byte beyond the end of the IO
 * permission bitmap. The extra byte must be all 1 bits, and must
 * be within the limit.
 */
#define INIT_TSS { \
	.esp0 = sizeof(init_stack) + (long)&init_stack, \
	.ss0 = __KERNEL_DS, \
	.ss1 = __KERNEL_CS, \
	.io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
	.io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 }, \
}

#define start_thread(regs, new_eip, new_esp) do {	\
	__asm__("movl %0,%%gs": :"r" (0));		\
	regs->xfs = 0;					\
	set_fs(USER_DS);				\
	regs->xds = __USER_DS;				\
	regs->xes = __USER_DS;				\
	regs->xss = __USER_DS;				\
	regs->xcs = __USER_CS;				\
	regs->eip = new_eip;				\
	regs->esp = new_esp;				\
} while (0)

/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

/* Prepare to copy thread state - unlazy all lazy state */
extern void prepare_to_copy(struct task_struct *tsk);

/*
 * Create a kernel thread without removing it from tasklists.
 */
extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

extern unsigned long thread_saved_pc(struct task_struct *tsk);
void show_trace(struct task_struct *task, struct pt_regs *regs, unsigned long *stack);

unsigned long get_wchan(struct task_struct *p);

#define THREAD_SIZE_LONGS	(THREAD_SIZE/sizeof(unsigned long))
#define KSTK_TOP(info)						\
({								\
	unsigned long *__ptr = (unsigned long *)(info);		\
	(unsigned long)(&__ptr[THREAD_SIZE_LONGS]);		\
})

/*
 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
 * This is necessary to guarantee that the entire "struct pt_regs"
 * is accessible even if the CPU hasn't stored the SS/ESP registers
 * on the stack (interrupt gate does not save these registers
 * when switching to the same priv ring).
 * Therefore beware: accessing the xss/esp fields of the
 * "struct pt_regs" is possible, but they may contain completely
 * wrong values.
 */
#define task_pt_regs(task)						\
({									\
	struct pt_regs *__regs__;					\
	__regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
	__regs__ - 1;							\
})

#define KSTK_EIP(task) (task_pt_regs(task)->eip)
#define KSTK_ESP(task) (task_pt_regs(task)->esp)


struct microcode_header {
	unsigned int hdrver;
	unsigned int rev;
	unsigned int date;
	unsigned int sig;
	unsigned int cksum;
	unsigned int ldrver;
	unsigned int pf;
	unsigned int datasize;
	unsigned int totalsize;
	unsigned int reserved[3];
};

struct microcode {
	struct microcode_header hdr;
	unsigned int bits[0];
};

typedef struct microcode microcode_t;
typedef struct microcode_header microcode_header_t;

/* microcode format is extended from Prescott processors */
struct extended_signature {
	unsigned int sig;
	unsigned int pf;
	unsigned int cksum;
};

struct extended_sigtable {
	unsigned int count;
	unsigned int cksum;
	unsigned int reserved[3];
	struct extended_signature sigs[0];
};

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
	__asm__ __volatile__("rep;nop": : :"memory");
}

#define cpu_relax()	rep_nop()
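
/*
 * Example (illustrative sketch, 'done' is a placeholder flag): a
 * polling loop that stays friendly to the sibling hyperthread and
 * saves a little power while spinning:
 *
 *	while (!done)
 *		cpu_relax();
 */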

static inline void native_load_esp0(struct tss_struct *tss, struct thread_struct *thread)
{
	tss->esp0 = thread->esp0;
	/* This can only happen when SEP is enabled, no need to test "SEP"arately */
	if (unlikely(tss->ss1 != thread->sysenter_cs)) {
		tss->ss1 = thread->sysenter_cs;
		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
	}
}

static inline unsigned long native_get_debugreg(int regno)
{
	unsigned long val = 0;	/* Damn you, gcc! */

	switch (regno) {
	case 0:
		asm("movl %%db0, %0" :"=r" (val)); break;
	case 1:
		asm("movl %%db1, %0" :"=r" (val)); break;
	case 2:
		asm("movl %%db2, %0" :"=r" (val)); break;
	case 3:
		asm("movl %%db3, %0" :"=r" (val)); break;
	case 6:
		asm("movl %%db6, %0" :"=r" (val)); break;
	case 7:
		asm("movl %%db7, %0" :"=r" (val)); break;
	default:
		BUG();
	}
	return val;
}

static inline void native_set_debugreg(int regno, unsigned long value)
{
	switch (regno) {
	case 0:
		asm("movl %0,%%db0" : /* no output */ :"r" (value));
		break;
	case 1:
		asm("movl %0,%%db1" : /* no output */ :"r" (value));
		break;
	case 2:
		asm("movl %0,%%db2" : /* no output */ :"r" (value));
		break;
	case 3:
		asm("movl %0,%%db3" : /* no output */ :"r" (value));
		break;
	case 6:
		asm("movl %0,%%db6" : /* no output */ :"r" (value));
		break;
	case 7:
		asm("movl %0,%%db7" : /* no output */ :"r" (value));
		break;
	default:
		BUG();
	}
}

/*
 * Set IOPL bits in EFLAGS from given mask
 */
static inline void native_set_iopl_mask(unsigned mask)
{
	unsigned int reg;
	__asm__ __volatile__ ("pushfl;"
			      "popl %0;"
			      "andl %1, %0;"
			      "orl %2, %0;"
			      "pushl %0;"
			      "popfl"
				: "=&r" (reg)
				: "i" (~X86_EFLAGS_IOPL), "r" (mask));
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define paravirt_enabled() 0
#define __cpuid native_cpuid

static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread)
{
	native_load_esp0(tss, thread);
}

/*
 * These special macros can be used to get or set a debugging register
 */
#define get_debugreg(var, register) \
	(var) = native_get_debugreg(register)
#define set_debugreg(value, register) \
	native_set_debugreg(register, value)
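
/*
 * Example (illustrative sketch; a real breakpoint also needs DR0 and
 * the R/W0 and LEN0 fields set up): reading DR7 and setting its L0
 * bit (bit 0), the local enable for hardware breakpoint 0:
 *
 *	unsigned long dr7;
 *	get_debugreg(dr7, 7);
 *	set_debugreg(dr7 | 0x1, 7);
 */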

#define set_iopl_mask native_set_iopl_mask
#endif /* CONFIG_PARAVIRT */

/*
 * Generic CPUID function.
 * Clear %ecx since some CPUs (Cyrix MII) do not set or clear %ecx,
 * resulting in stale register contents being returned.
 */
static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = 0;
	__cpuid(eax, ebx, ecx, edx);
}

/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
			       int *edx)
{
	*eax = op;
	*ecx = count;
	__cpuid(eax, ebx, ecx, edx);
}
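
/*
 * Example (illustrative sketch): CPUID leaf 4 (deterministic cache
 * parameters) takes the cache level index in %ecx, which is exactly
 * what cpuid_count() provides:
 *
 *	int eax, ebx, ecx, edx;
 *	cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
 *	... (eax & 0x1f) is the type of cache level 0, 0 = no more ...
 */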

/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);
	return eax;
}
static inline unsigned int cpuid_ebx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);
	return ebx;
}
static inline unsigned int cpuid_ecx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);
	return ecx;
}
static inline unsigned int cpuid_edx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);
	return edx;
}

/* generic versions from gas */
#define GENERIC_NOP1	".byte 0x90\n"
#define GENERIC_NOP2	".byte 0x89,0xf6\n"
#define GENERIC_NOP3	".byte 0x8d,0x76,0x00\n"
#define GENERIC_NOP4	".byte 0x8d,0x74,0x26,0x00\n"
#define GENERIC_NOP5	GENERIC_NOP1 GENERIC_NOP4
#define GENERIC_NOP6	".byte 0x8d,0xb6,0x00,0x00,0x00,0x00\n"
#define GENERIC_NOP7	".byte 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00\n"
#define GENERIC_NOP8	GENERIC_NOP1 GENERIC_NOP7

/* Opteron nops */
#define K8_NOP1 GENERIC_NOP1
#define K8_NOP2	".byte 0x66,0x90\n"
#define K8_NOP3	".byte 0x66,0x66,0x90\n"
#define K8_NOP4	".byte 0x66,0x66,0x66,0x90\n"
#define K8_NOP5	K8_NOP3 K8_NOP2
#define K8_NOP6	K8_NOP3 K8_NOP3
#define K8_NOP7	K8_NOP4 K8_NOP3
#define K8_NOP8	K8_NOP4 K8_NOP4

/* K7 nops */
/* uses eax dependencies (arbitrary choice) */
#define K7_NOP1 GENERIC_NOP1
#define K7_NOP2	".byte 0x8b,0xc0\n"
#define K7_NOP3	".byte 0x8d,0x04,0x20\n"
#define K7_NOP4	".byte 0x8d,0x44,0x20,0x00\n"
#define K7_NOP5	K7_NOP4 ASM_NOP1
#define K7_NOP6	".byte 0x8d,0x80,0,0,0,0\n"
#define K7_NOP7	".byte 0x8d,0x04,0x05,0,0,0,0\n"
#define K7_NOP8	K7_NOP7 ASM_NOP1

#ifdef CONFIG_MK8
#define ASM_NOP1 K8_NOP1
#define ASM_NOP2 K8_NOP2
#define ASM_NOP3 K8_NOP3
#define ASM_NOP4 K8_NOP4
#define ASM_NOP5 K8_NOP5
#define ASM_NOP6 K8_NOP6
#define ASM_NOP7 K8_NOP7
#define ASM_NOP8 K8_NOP8
#elif defined(CONFIG_MK7)
#define ASM_NOP1 K7_NOP1
#define ASM_NOP2 K7_NOP2
#define ASM_NOP3 K7_NOP3
#define ASM_NOP4 K7_NOP4
#define ASM_NOP5 K7_NOP5
#define ASM_NOP6 K7_NOP6
#define ASM_NOP7 K7_NOP7
#define ASM_NOP8 K7_NOP8
#else
#define ASM_NOP1 GENERIC_NOP1
#define ASM_NOP2 GENERIC_NOP2
#define ASM_NOP3 GENERIC_NOP3
#define ASM_NOP4 GENERIC_NOP4
#define ASM_NOP5 GENERIC_NOP5
#define ASM_NOP6 GENERIC_NOP6
#define ASM_NOP7 GENERIC_NOP7
#define ASM_NOP8 GENERIC_NOP8
#endif

#define ASM_NOP_MAX 8

/* Prefetch instructions for Pentium III and AMD Athlon */
/* It's not worth caring about 3dnow! prefetches for the K6,
   because they are microcoded there and very slow.
   However, we don't currently do prefetches for pre-XP Athlons;
   that should be fixed. */
#define ARCH_HAS_PREFETCH
static inline void prefetch(const void *x)
{
	alternative_input(ASM_NOP4,
			  "prefetchnta (%1)",
			  X86_FEATURE_XMM,
			  "r" (x));
}

#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

/* 3dnow! prefetch to get an exclusive cache line. Useful for
   spinlocks to avoid one state transition in the cache coherency protocol. */
static inline void prefetchw(const void *x)
{
	alternative_input(ASM_NOP4,
			  "prefetchw (%1)",
			  X86_FEATURE_3DNOW,
			  "r" (x));
}
#define spin_lock_prefetch(x)	prefetchw(x)
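
/*
 * Example (illustrative sketch, 'head' and do_something() are
 * placeholders): hiding memory latency while walking a linked list by
 * prefetching the next node before working on the current one:
 *
 *	for (p = head; p != NULL; p = p->next) {
 *		prefetch(p->next);
 *		do_something(p);
 *	}
 */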

extern void select_idle_routine(const struct cpuinfo_x86 *c);

#define cache_line_size() (boot_cpu_data.x86_cache_alignment)

extern unsigned long boot_option_idle_override;
extern void enable_sep_cpu(void);
extern int sysenter_setup(void);

extern void cpu_set_gdt(int);
extern void cpu_init(void);

#endif /* __ASM_I386_PROCESSOR_H */