/*
 * include/asm-x86_64/processor.h
 *
 * Copyright (C) 1994 Linus Torvalds
 */

#ifndef __ASM_X86_64_PROCESSOR_H
#define __ASM_X86_64_PROCESSOR_H

#include <asm/segment.h>
#include <asm/page.h>
#include <asm/types.h>
#include <asm/sigcontext.h>
#include <asm/cpufeature.h>
#include <linux/config.h>
#include <linux/threads.h>
#include <asm/msr.h>
#include <asm/current.h>
#include <asm/system.h>
#include <asm/mmsegment.h>
#include <asm/percpu.h>
#include <linux/personality.h>

#define TF_MASK		0x00000100
#define IF_MASK		0x00000200
#define IOPL_MASK	0x00003000
#define NT_MASK		0x00004000
#define VM_MASK		0x00020000
#define AC_MASK		0x00040000
#define VIF_MASK	0x00080000	/* virtual interrupt flag */
#define VIP_MASK	0x00100000	/* virtual interrupt pending */
#define ID_MASK		0x00200000

#define desc_empty(desc) \
		(!((desc)->a | (desc)->b))

#define desc_equal(desc1, desc2) \
		(((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
#define current_text_addr() ({ void *pc; asm volatile("leaq 1f(%%rip),%0\n1:":"=r"(pc)); pc; })
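
/*
 * Illustrative sketch (not part of the original header): the macro
 * above takes the RIP-relative address of a local label, so it can be
 * used from any context to record roughly where the caller is
 * executing, e.g. for ad-hoc debug output. `where_am_i' is a
 * hypothetical helper, not a kernel function.
 */
#if 0
static inline void where_am_i(void)
{
	void *pc = current_text_addr();
	printk("executing near %p\n", pc);
}
#endif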

/*
 * CPU type and hardware bug flags. Kept separately for each CPU.
 */

struct cpuinfo_x86 {
	__u8	x86;		/* CPU family */
	__u8	x86_vendor;	/* CPU vendor */
	__u8	x86_model;
	__u8	x86_mask;
	int	cpuid_level;	/* Maximum supported CPUID level, -1=no CPUID */
	__u32	x86_capability[NCAPINTS];
	char	x86_vendor_id[16];
	char	x86_model_id[64];
	int	x86_cache_size;	/* in KB */
	int	x86_clflush_size;
	int	x86_cache_alignment;
	int	x86_tlbsize;	/* number of 4K pages in DTLB/ITLB combined */
	__u8	x86_virt_bits, x86_phys_bits;
	__u8	x86_max_cores;	/* cpuid returned max cores value */
	__u32	x86_power;
	__u32	extended_cpuid_level;	/* Max extended CPUID function supported */
	unsigned long loops_per_jiffy;
	__u8	apicid;
	__u8	booted_cores;	/* number of cores as seen by OS */
} ____cacheline_aligned;

#define X86_VENDOR_INTEL	0
#define X86_VENDOR_CYRIX	1
#define X86_VENDOR_AMD		2
#define X86_VENDOR_UMC		3
#define X86_VENDOR_NEXGEN	4
#define X86_VENDOR_CENTAUR	5
#define X86_VENDOR_RISE		6
#define X86_VENDOR_TRANSMETA	7
#define X86_VENDOR_NUM		8
#define X86_VENDOR_UNKNOWN	0xff

#ifdef CONFIG_SMP
extern struct cpuinfo_x86 cpu_data[];
#define current_cpu_data cpu_data[smp_processor_id()]
#else
#define cpu_data (&boot_cpu_data)
#define current_cpu_data boot_cpu_data
#endif

extern char ignore_irq13;

extern void identify_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);

/*
 * EFLAGS bits
 */
#define X86_EFLAGS_CF	0x00000001 /* Carry Flag */
#define X86_EFLAGS_PF	0x00000004 /* Parity Flag */
#define X86_EFLAGS_AF	0x00000010 /* Auxiliary carry Flag */
#define X86_EFLAGS_ZF	0x00000040 /* Zero Flag */
#define X86_EFLAGS_SF	0x00000080 /* Sign Flag */
#define X86_EFLAGS_TF	0x00000100 /* Trap Flag */
#define X86_EFLAGS_IF	0x00000200 /* Interrupt Flag */
#define X86_EFLAGS_DF	0x00000400 /* Direction Flag */
#define X86_EFLAGS_OF	0x00000800 /* Overflow Flag */
#define X86_EFLAGS_IOPL	0x00003000 /* IOPL mask */
#define X86_EFLAGS_NT	0x00004000 /* Nested Task */
#define X86_EFLAGS_RF	0x00010000 /* Resume Flag */
#define X86_EFLAGS_VM	0x00020000 /* Virtual Mode */
#define X86_EFLAGS_AC	0x00040000 /* Alignment Check */
#define X86_EFLAGS_VIF	0x00080000 /* Virtual Interrupt Flag */
#define X86_EFLAGS_VIP	0x00100000 /* Virtual Interrupt Pending */
#define X86_EFLAGS_ID	0x00200000 /* CPUID detection flag */

/*
 * Intel CPU features in CR4
 */
#define X86_CR4_VME		0x0001	/* enable vm86 extensions */
#define X86_CR4_PVI		0x0002	/* virtual interrupts flag enable */
#define X86_CR4_TSD		0x0004	/* disable time stamp at ipl 3 */
#define X86_CR4_DE		0x0008	/* enable debugging extensions */
#define X86_CR4_PSE		0x0010	/* enable page size extensions */
#define X86_CR4_PAE		0x0020	/* enable physical address extensions */
#define X86_CR4_MCE		0x0040	/* Machine check enable */
#define X86_CR4_PGE		0x0080	/* enable global pages */
#define X86_CR4_PCE		0x0100	/* enable performance counters at ipl 3 */
#define X86_CR4_OSFXSR		0x0200	/* enable fast FPU save and restore */
#define X86_CR4_OSXMMEXCPT	0x0400	/* enable unmasked SSE exceptions */

/*
 * Save the cr4 feature set we're using (i.e. Pentium 4MB enable and
 * PPro Global page enable), so that any CPUs that boot up after us
 * can get the correct flags.
 */
extern unsigned long mmu_cr4_features;

static inline void set_in_cr4(unsigned long mask)
{
	mmu_cr4_features |= mask;
	__asm__("movq %%cr4,%%rax\n\t"
		"orq %0,%%rax\n\t"
		"movq %%rax,%%cr4\n"
		: : "irg" (mask)
		: "ax");
}

static inline void clear_in_cr4(unsigned long mask)
{
	mmu_cr4_features &= ~mask;
	__asm__("movq %%cr4,%%rax\n\t"
		"andq %0,%%rax\n\t"
		"movq %%rax,%%cr4\n"
		: : "irg" (~mask)
		: "ax");
}
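
/*
 * Illustrative sketch (not from the original header): a typical caller
 * during CPU setup enables a CR4 feature like this, assuming the
 * corresponding CPUID capability has already been detected. The helper
 * name is hypothetical.
 */
#if 0
static void enable_fxsr_example(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_FXSR))
		set_in_cr4(X86_CR4_OSFXSR);	/* fast FXSAVE/FXRSTOR */
}
#endif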

/*
 * User space process size: 47 bits minus one guard page.
 */
#define TASK_SIZE64	(0x800000000000UL - 4096)

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? 0xc0000000 : 0xFFFFe000)

#define TASK_SIZE		(test_thread_flag(TIF_IA32) ? IA32_PAGE_OFFSET : TASK_SIZE64)
#define TASK_SIZE_OF(child)	((test_tsk_thread_flag(child, TIF_IA32)) ? IA32_PAGE_OFFSET : TASK_SIZE64)

#define TASK_UNMAPPED_BASE	PAGE_ALIGN(TASK_SIZE/3)
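
/*
 * Worked example (editorial note, not in the original): 0x800000000000
 * is 2^47, so TASK_SIZE64 = 2^47 - 4096, i.e. user space ends one 4K
 * guard page below the 47-bit boundary. For a 64-bit task,
 * TASK_UNMAPPED_BASE = PAGE_ALIGN(TASK_SIZE/3), roughly 0x2AAAAAAAB000,
 * about a third of the way into the address space.
 */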

/*
 * Size of io_bitmap.
 */
#define IO_BITMAP_BITS		65536
#define IO_BITMAP_BYTES		(IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS		(IO_BITMAP_BYTES/sizeof(long))
#define IO_BITMAP_OFFSET	offsetof(struct tss_struct, io_bitmap)
#define INVALID_IO_BITMAP_OFFSET 0x8000
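
/*
 * Worked example (editorial note, not in the original): one bit per
 * I/O port gives 65536 bits = 8192 bytes = 1024 64-bit longs, so the
 * bitmap covers the full 16-bit port space 0..0xffff.
 */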

struct i387_fxsave_struct {
	u16	cwd;
	u16	swd;
	u16	twd;
	u16	fop;
	u64	rip;
	u64	rdp;
	u32	mxcsr;
	u32	mxcsr_mask;
	u32	st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
	u32	xmm_space[64];	/* 16*16 bytes for each XMM-reg = 256 bytes */
	u32	padding[24];
} __attribute__ ((aligned (16)));

union i387_union {
	struct i387_fxsave_struct	fxsave;
};

struct tss_struct {
	u32	reserved1;
	u64	rsp0;
	u64	rsp1;
	u64	rsp2;
	u64	reserved2;
	u64	ist[7];
	u32	reserved3;
	u32	reserved4;
	u16	reserved5;
	u16	io_bitmap_base;
	/*
	 * The extra 1 is there because the CPU will access an
	 * additional byte beyond the end of the IO permission
	 * bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit. Thus we have:
	 *
	 * 8192 bytes, the bitmap itself, for ports 0..0xffff
	 * 8 bytes, for an extra "long" of ~0UL
	 */
	unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
} __attribute__((packed)) ____cacheline_aligned;

extern struct cpuinfo_x86 boot_cpu_data;
DECLARE_PER_CPU(struct tss_struct, init_tss);

#define ARCH_MIN_TASKALIGN	16

struct thread_struct {
	unsigned long	rsp0;
	unsigned long	rsp;
	unsigned long	userrsp;	/* Copy from PDA */
	unsigned long	fs;
	unsigned long	gs;
	unsigned short	es, ds, fsindex, gsindex;
/* Hardware debugging registers */
	unsigned long	debugreg0;
	unsigned long	debugreg1;
	unsigned long	debugreg2;
	unsigned long	debugreg3;
	unsigned long	debugreg6;
	unsigned long	debugreg7;
/* fault info */
	unsigned long	cr2, trap_no, error_code;
/* floating point info */
	union i387_union	i387 __attribute__((aligned(16)));
/* IO permissions. The bitmap could be moved into the GDT, which would
   make the context switch faster for the limited number of tasks that
   use ioperm. -AK */
	int		ioperm;
	unsigned long	*io_bitmap_ptr;
	unsigned	io_bitmap_max;
/* cached TLS descriptors. */
	u64		tls_array[GDT_ENTRY_TLS_ENTRIES];
} __attribute__((aligned(16)));

#define INIT_THREAD { \
	.rsp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}

#define INIT_TSS { \
	.rsp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}

#define INIT_MMAP \
{ &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL }

#define STACKFAULT_STACK 1
#define DOUBLEFAULT_STACK 2
#define NMI_STACK 3
#define DEBUG_STACK 4
#define MCE_STACK 5
#define N_EXCEPTION_STACKS 5	/* hw limit: 7 */

#define start_thread(regs, new_rip, new_rsp) do { \
	asm volatile("movl %0,%%fs; movl %0,%%es; movl %0,%%ds" : : "r" (0)); \
	load_gs_index(0); \
	(regs)->rip = (new_rip); \
	(regs)->rsp = (new_rsp); \
	write_pda(oldrsp, (new_rsp)); \
	(regs)->cs = __USER_CS; \
	(regs)->ss = __USER_DS; \
	(regs)->eflags = 0x200; /* X86_EFLAGS_IF: start with interrupts enabled */ \
	set_fs(USER_DS); \
} while (0)

#define get_debugreg(var, register) \
	__asm__("movq %%db" #register ", %0" \
		: "=r" (var))
#define set_debugreg(value, register) \
	__asm__("movq %0,%%db" #register \
		: /* no output */ \
		: "r" (value))
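
/*
 * Illustrative sketch (not from the original header): the register
 * number is pasted into the mnemonic at preprocessing time, so it must
 * be a literal. Restoring a task's hardware breakpoints might look
 * like this (hypothetical helper, not a kernel function):
 */
#if 0
static void load_debug_registers_example(struct thread_struct *t)
{
	set_debugreg(t->debugreg0, 0);	/* breakpoint address 0 */
	set_debugreg(t->debugreg6, 6);	/* debug status */
	set_debugreg(t->debugreg7, 7);	/* control: enables and lengths */
}
#endif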

struct task_struct;
struct mm_struct;

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

/* Prepare to copy thread state - unlazy all lazy state */
extern void prepare_to_copy(struct task_struct *tsk);

/*
 * Create a kernel thread without removing it from tasklists.
 */
extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

/*
 * Return saved PC of a blocked thread.
 * What is this good for? It will always be the scheduler or ret_from_fork.
 */
#define thread_saved_pc(t) (*(unsigned long *)((t)->thread.rsp - 8))

extern unsigned long get_wchan(struct task_struct *p);
#define KSTK_EIP(tsk) \
	(((struct pt_regs *)(tsk->thread.rsp0 - sizeof(struct pt_regs)))->rip)
#define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */

struct microcode_header {
	unsigned int	hdrver;
	unsigned int	rev;
	unsigned int	date;
	unsigned int	sig;
	unsigned int	cksum;
	unsigned int	ldrver;
	unsigned int	pf;
	unsigned int	datasize;
	unsigned int	totalsize;
	unsigned int	reserved[3];
};

struct microcode {
	struct microcode_header	hdr;
	unsigned int		bits[0];
};

typedef struct microcode	microcode_t;
typedef struct microcode_header	microcode_header_t;

/* microcode format is extended from Prescott processors */
struct extended_signature {
	unsigned int	sig;
	unsigned int	pf;
	unsigned int	cksum;
};

struct extended_sigtable {
	unsigned int			count;
	unsigned int			cksum;
	unsigned int			reserved[3];
	struct extended_signature	sigs[0];
};
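
/*
 * Illustrative sketch (not from the original header): `bits' and `sigs'
 * are zero-length flexible arrays, so an update is parsed by walking
 * past the fixed 48-byte header. Assuming the Intel convention that a
 * datasize of 0 means 2000 bytes of data, the extended signature table,
 * when present, starts right after the data area:
 */
#if 0
static int ext_table_offset_example(const struct microcode *mc)
{
	unsigned int data = mc->hdr.datasize ? mc->hdr.datasize : 2000;

	if (mc->hdr.totalsize <= sizeof(struct microcode_header) + data)
		return -1;		/* no extended signature table */
	return sizeof(struct microcode_header) + data;
}
#endif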

/* '6' because it used to be for P6 only (but now covers Pentium 4 as well) */
#define MICROCODE_IOCFREE	_IO('6',0)

#define ASM_NOP1 K8_NOP1
#define ASM_NOP2 K8_NOP2
#define ASM_NOP3 K8_NOP3
#define ASM_NOP4 K8_NOP4
#define ASM_NOP5 K8_NOP5
#define ASM_NOP6 K8_NOP6
#define ASM_NOP7 K8_NOP7
#define ASM_NOP8 K8_NOP8

/* Opteron nops */
#define K8_NOP1 ".byte 0x90\n"
#define K8_NOP2 ".byte 0x66,0x90\n"
#define K8_NOP3 ".byte 0x66,0x66,0x90\n"
#define K8_NOP4 ".byte 0x66,0x66,0x66,0x90\n"
#define K8_NOP5 K8_NOP3 K8_NOP2
#define K8_NOP6 K8_NOP3 K8_NOP3
#define K8_NOP7 K8_NOP4 K8_NOP3
#define K8_NOP8 K8_NOP4 K8_NOP4

#define ASM_NOP_MAX 8
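
/*
 * Editorial note (not in the original): each 0x66 byte is an
 * operand-size prefix, so K8_NOP3 is "66 66 90" -- a single 3-byte
 * instruction. Longer pads are built by concatenating the short forms,
 * e.g. K8_NOP5 = K8_NOP3 K8_NOP2, i.e. two instructions totalling
 * five bytes.
 */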

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
	__asm__ __volatile__("rep;nop" : : : "memory");
}
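
/*
 * Illustrative sketch (not from the original header): a typical
 * busy-wait loop, assuming `flag' is set by another CPU. Callers
 * normally spell this cpu_relax(), defined below.
 */
#if 0
static void wait_for_flag_example(volatile int *flag)
{
	while (!*flag)
		rep_nop();	/* PAUSE: be polite to the sibling thread */
}
#endif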

/* Stop speculative execution */
static inline void sync_core(void)
{
	int tmp;
	asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx", "ecx", "edx", "memory");
}

#define cpu_has_fpu 1

#define ARCH_HAS_PREFETCH
static inline void prefetch(void *x)
{
	asm volatile("prefetcht0 %0" : : "m" (*(unsigned long *)x));
}

#define ARCH_HAS_PREFETCHW 1
static inline void prefetchw(void *x)
{
	alternative_input("prefetcht0 (%1)",
			  "prefetchw (%1)",
			  X86_FEATURE_3DNOW,
			  "r" (x));
}
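
/*
 * Illustrative sketch (not from the original header): prefetching the
 * next node while processing the current one hides memory latency in a
 * pointer-chasing loop. Note prefetchw() is patched at boot (via the
 * alternatives mechanism) into a write-intent prefetch on
 * 3DNow!-capable CPUs, so it is the right hint for data about to be
 * modified. The struct and function here are hypothetical.
 */
#if 0
struct node { struct node *next; long payload; };

static long sum_list_example(struct node *n)
{
	long sum = 0;

	while (n) {
		if (n->next)
			prefetch(n->next);	/* warm the next node */
		sum += n->payload;
		n = n->next;
	}
	return sum;
}
#endif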

#define ARCH_HAS_SPINLOCK_PREFETCH 1

#define spin_lock_prefetch(x)	prefetchw(x)

#define cpu_relax()	rep_nop()

/*
 * NSC/Cyrix CPU configuration register indexes
 */
#define CX86_CCR0 0xc0
#define CX86_CCR1 0xc1
#define CX86_CCR2 0xc2
#define CX86_CCR3 0xc3
#define CX86_CCR4 0xe8
#define CX86_CCR5 0xe9
#define CX86_CCR6 0xea
#define CX86_CCR7 0xeb
#define CX86_DIR0 0xfe
#define CX86_DIR1 0xff
#define CX86_ARR_BASE 0xc4
#define CX86_RCR_BASE 0xdc

/*
 * NSC/Cyrix CPU indexed register access macros
 */

#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })

#define setCx86(reg, data) do { \
	outb((reg), 0x22); \
	outb((data), 0x23); \
} while (0)
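
/*
 * Illustrative sketch (not from the original header): the Cyrix
 * registers are indexed through I/O ports 0x22/0x23 -- write the index
 * to 0x22, then read or write the data through 0x23. Reading the
 * device ID might look like this (hypothetical helper):
 */
#if 0
static unsigned char cyrix_device_id_example(void)
{
	return getCx86(CX86_DIR0);	/* Device Identification Register 0 */
}
#endif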

static inline void serialize_cpu(void)
{
	__asm__ __volatile__("cpuid" : : : "ax", "bx", "cx", "dx");
}

static inline void __monitor(const void *eax, unsigned long ecx,
			     unsigned long edx)
{
	/* "monitor %eax,%ecx,%edx;" */
	asm volatile(
		".byte 0x0f,0x01,0xc8;"
		: : "a" (eax), "c" (ecx), "d" (edx));
}

static inline void __mwait(unsigned long eax, unsigned long ecx)
{
	/* "mwait %eax,%ecx;" */
	asm volatile(
		".byte 0x0f,0x01,0xc9;"
		: : "a" (eax), "c" (ecx));
}
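
/*
 * Illustrative sketch (not from the original header): MONITOR/MWAIT
 * underpins the mwait idle loop -- arm the monitor on the thread flags
 * (so a wakeup that sets TIF_NEED_RESCHED ends the wait), re-check for
 * work, then MWAIT until the monitored line is written. A simplified
 * version of the idea:
 */
#if 0
static void mwait_idle_example(void)
{
	while (!need_resched()) {
		__monitor(&current_thread_info()->flags, 0, 0);
		if (need_resched())
			break;	/* raced: work arrived after arming */
		__mwait(0, 0);
	}
}
#endif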

/*
 * Recover the current task from %rsp: kernel stacks are THREAD_SIZE
 * aligned, so masking the stack pointer with CURRENT_MASK yields the
 * thread_info at the base of the stack.
 */
#define stack_current() \
({								\
	struct thread_info *ti;					\
	asm("andq %%rsp,%0; " : "=r" (ti) : "0" (CURRENT_MASK)); \
	ti->task;						\
})

#define cache_line_size() (boot_cpu_data.x86_cache_alignment)

extern unsigned long boot_option_idle_override;
/* Boot loader type from the setup header */
extern int bootloader_type;

#endif /* __ASM_X86_64_PROCESSOR_H */