| Glauber de Oliveira Costa | c758ecf | 2008-01-30 13:31:03 +0100 | [diff] [blame] | 1 | #ifndef __ASM_X86_PROCESSOR_H | 
|  | 2 | #define __ASM_X86_PROCESSOR_H | 
|  | 3 |  | 
| Glauber de Oliveira Costa | 053de04 | 2008-01-30 13:31:27 +0100 | [diff] [blame] | 4 | #include <asm/processor-flags.h> | 
|  | 5 |  | 
| Glauber de Oliveira Costa | 683e025 | 2008-01-30 13:31:27 +0100 | [diff] [blame] | 6 | /* Forward declaration, a strange C thing */ | 
|  | 7 | struct task_struct; | 
|  | 8 | struct mm_struct; | 
|  | 9 |  | 
| Glauber de Oliveira Costa | 2f66dcc | 2008-01-30 13:31:57 +0100 | [diff] [blame] | 10 | #include <asm/vm86.h> | 
|  | 11 | #include <asm/math_emu.h> | 
|  | 12 | #include <asm/segment.h> | 
|  | 13 | #include <asm/page.h> | 
|  | 14 | #include <asm/types.h> | 
|  | 15 | #include <asm/sigcontext.h> | 
|  | 16 | #include <asm/current.h> | 
|  | 17 | #include <asm/cpufeature.h> | 
|  | 18 | #include <asm/system.h> | 
| Glauber de Oliveira Costa | c72dcf8 | 2008-01-30 13:31:27 +0100 | [diff] [blame] | 19 | #include <asm/page.h> | 
| Glauber de Oliveira Costa | ca241c7 | 2008-01-30 13:31:31 +0100 | [diff] [blame] | 20 | #include <asm/percpu.h> | 
| Glauber de Oliveira Costa | 2f66dcc | 2008-01-30 13:31:57 +0100 | [diff] [blame] | 21 | #include <asm/msr.h> | 
|  | 22 | #include <asm/desc_defs.h> | 
|  | 23 | #include <linux/personality.h> | 
| Glauber de Oliveira Costa | 5300db8 | 2008-01-30 13:31:33 +0100 | [diff] [blame] | 24 | #include <linux/cpumask.h> | 
|  | 25 | #include <linux/cache.h> | 
| Glauber de Oliveira Costa | 2f66dcc | 2008-01-30 13:31:57 +0100 | [diff] [blame] | 26 | #include <linux/threads.h> | 
|  | 27 | #include <linux/init.h> | 
| Glauber de Oliveira Costa | c72dcf8 | 2008-01-30 13:31:27 +0100 | [diff] [blame] | 28 |  | 
| Glauber de Oliveira Costa | 0ccb8ac | 2008-01-30 13:31:27 +0100 | [diff] [blame] | 29 | /* | 
|  | 30 | * Default implementation of macro that returns current | 
|  | 31 | * instruction pointer ("program counter"). | 
|  | 32 | */ | 
static inline void *current_text_addr(void)
{
	void *pc;
	/*
	 * Load the address of local label "1" (the location immediately
	 * after the mov itself) into pc; volatile keeps the asm from
	 * being hoisted or merged.
	 */
	asm volatile("mov $1f,%0\n1:":"=r" (pc));
	return pc;
}
|  | 39 |  | 
#ifdef CONFIG_X86_VSMP
/*
 * vSMP: align task_struct/mm_struct to the internode cacheline,
 * presumably to avoid cross-node cacheline sharing — see the
 * INTERNODE_CACHE_SHIFT definition for the actual size.
 */
#define ARCH_MIN_TASKALIGN	(1 << INTERNODE_CACHE_SHIFT)
#define ARCH_MIN_MMSTRUCT_ALIGN	(1 << INTERNODE_CACHE_SHIFT)
#else
#define ARCH_MIN_TASKALIGN	16
#define ARCH_MIN_MMSTRUCT_ALIGN	0
#endif
|  | 47 |  | 
| Glauber de Oliveira Costa | 5300db8 | 2008-01-30 13:31:33 +0100 | [diff] [blame] | 48 | /* | 
|  | 49 | *  CPU type and hardware bug flags. Kept separately for each CPU. | 
|  | 50 | *  Members of this structure are referenced in head.S, so think twice | 
|  | 51 | *  before touching them. [mj] | 
|  | 52 | */ | 
|  | 53 |  | 
struct cpuinfo_x86 {
	__u8	x86;		/* CPU family */
	__u8	x86_vendor;	/* CPU vendor (X86_VENDOR_* below) */
	__u8	x86_model;
	__u8	x86_mask;	/* NOTE(review): presumably the stepping — confirm */
#ifdef CONFIG_X86_32
	/* 32-bit-only errata/feature flags, probed at boot: */
	char	wp_works_ok;	/* It doesn't on 386's */
	char	hlt_works_ok;	/* Problems on some 486Dx4's and old 386's */
	char	hard_math;
	char	rfu;
	char	fdiv_bug;
	char	f00f_bug;
	char	coma_bug;
	char	pad0;
#else
	/* number of 4K pages in DTLB/ITLB combined(in pages)*/
	int     x86_tlbsize;
	__u8    x86_virt_bits, x86_phys_bits;
	/* cpuid returned core id bits */
	__u8    x86_coreid_bits;
	/* Max extended CPUID function supported */
	__u32   extended_cpuid_level;
#endif
	int	cpuid_level;	/* Maximum supported CPUID level, -1=no CPUID */
	__u32	x86_capability[NCAPINTS];	/* feature-flag bit words */
	char	x86_vendor_id[16];
	char	x86_model_id[64];
	int 	x86_cache_size;  /* in KB - valid for CPUS which support this
				    call  */
	int 	x86_cache_alignment;	/* In bytes */
	int	x86_power;
	unsigned long loops_per_jiffy;
#ifdef CONFIG_SMP
	cpumask_t llc_shared_map;	/* cpus sharing the last level cache */
#endif
	unsigned char x86_max_cores;	/* cpuid returned max cores value */
	unsigned char apicid;
	unsigned short x86_clflush_size;	/* CLFLUSH line size, bytes */
#ifdef CONFIG_SMP
	unsigned char booted_cores;	/* number of cores as seen by OS */
	__u8 phys_proc_id; 		/* Physical processor id. */
	__u8 cpu_core_id;  		/* Core id */
	__u8 cpu_index;			/* index into per_cpu list */
#endif
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
|  | 99 |  | 
/*
 * Values stored in cpuinfo_x86.x86_vendor.  Note: 6 is not
 * assigned in this table.
 */
#define X86_VENDOR_INTEL 0
#define X86_VENDOR_CYRIX 1
#define X86_VENDOR_AMD 2
#define X86_VENDOR_UMC 3
#define X86_VENDOR_NEXGEN 4
#define X86_VENDOR_CENTAUR 5
#define X86_VENDOR_TRANSMETA 7
#define X86_VENDOR_NSC 8
#define X86_VENDOR_NUM 9
#define X86_VENDOR_UNKNOWN 0xff
|  | 110 |  | 
| Glauber de Oliveira Costa | 1a53905 | 2008-01-30 13:31:39 +0100 | [diff] [blame] | 111 | /* | 
|  | 112 | * capabilities of CPUs | 
|  | 113 | */ | 
| Glauber de Oliveira Costa | 5300db8 | 2008-01-30 13:31:33 +0100 | [diff] [blame] | 114 | extern struct cpuinfo_x86 boot_cpu_data; | 
| Glauber de Oliveira Costa | 1a53905 | 2008-01-30 13:31:39 +0100 | [diff] [blame] | 115 | extern struct cpuinfo_x86 new_cpu_data; | 
|  | 116 | extern struct tss_struct doublefault_tss; | 
| Glauber de Oliveira Costa | 5300db8 | 2008-01-30 13:31:33 +0100 | [diff] [blame] | 117 |  | 
|  | 118 | #ifdef CONFIG_SMP | 
|  | 119 | DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info); | 
|  | 120 | #define cpu_data(cpu)		per_cpu(cpu_info, cpu) | 
|  | 121 | #define current_cpu_data	cpu_data(smp_processor_id()) | 
|  | 122 | #else | 
|  | 123 | #define cpu_data(cpu)		boot_cpu_data | 
|  | 124 | #define current_cpu_data	boot_cpu_data | 
|  | 125 | #endif | 
|  | 126 |  | 
| Glauber de Oliveira Costa | 1a53905 | 2008-01-30 13:31:39 +0100 | [diff] [blame] | 127 | void cpu_detect(struct cpuinfo_x86 *c); | 
|  | 128 |  | 
|  | 129 | extern void identify_cpu(struct cpuinfo_x86 *); | 
|  | 130 | extern void identify_boot_cpu(void); | 
|  | 131 | extern void identify_secondary_cpu(struct cpuinfo_x86 *); | 
| Glauber de Oliveira Costa | 5300db8 | 2008-01-30 13:31:33 +0100 | [diff] [blame] | 132 | extern void print_cpu_info(struct cpuinfo_x86 *); | 
|  | 133 | extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c); | 
|  | 134 | extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c); | 
|  | 135 | extern unsigned short num_cache_leaves; | 
|  | 136 |  | 
#if defined(CONFIG_X86_HT) || defined(CONFIG_X86_64)
extern void detect_ht(struct cpuinfo_x86 *c);
#else
/* No hyper-threading support configured: detection is a no-op. */
static inline void detect_ht(struct cpuinfo_x86 *c) {}
#endif
|  | 142 |  | 
/*
 * Raw CPUID: *eax and *ecx are the inputs (leaf/subleaf), and all
 * four registers are written back through the pointers.
 */
static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
				unsigned int *ecx, unsigned int *edx)
{
	/* ecx is often an input as well as an output. */
	__asm__("cpuid"
		: "=a" (*eax),
		  "=b" (*ebx),
		  "=c" (*ecx),
		  "=d" (*edx)
		: "0" (*eax), "2" (*ecx));
}
|  | 154 |  | 
| Glauber de Oliveira Costa | c72dcf8 | 2008-01-30 13:31:27 +0100 | [diff] [blame] | 155 | static inline void load_cr3(pgd_t *pgdir) | 
|  | 156 | { | 
|  | 157 | write_cr3(__pa(pgdir)); | 
|  | 158 | } | 
| Glauber de Oliveira Costa | c758ecf | 2008-01-30 13:31:03 +0100 | [diff] [blame] | 159 |  | 
#ifdef CONFIG_X86_32
/* This is the TSS defined by the hardware. */
/* The __*h members pad each 16-bit field out to 32 bits (struct is packed). */
struct x86_hw_tss {
	unsigned short	back_link, __blh;
	unsigned long	sp0;
	unsigned short	ss0, __ss0h;
	unsigned long	sp1;
	unsigned short	ss1, __ss1h;	/* ss1 caches MSR_IA32_SYSENTER_CS */
	unsigned long	sp2;
	unsigned short	ss2, __ss2h;
	unsigned long	__cr3;
	unsigned long	ip;
	unsigned long	flags;
	unsigned long	ax, cx, dx, bx;
	unsigned long	sp, bp, si, di;
	unsigned short	es, __esh;
	unsigned short	cs, __csh;
	unsigned short	ss, __ssh;
	unsigned short	ds, __dsh;
	unsigned short	fs, __fsh;
	unsigned short	gs, __gsh;
	unsigned short	ldt, __ldth;
	unsigned short	trace, io_bitmap_base;
} __attribute__((packed));
#else
/* 64-bit hardware TSS: only stack pointers, ISTs and the I/O bitmap base. */
struct x86_hw_tss {
	u32 reserved1;
	u64 sp0;
	u64 sp1;
	u64 sp2;
	u64 reserved2;
	u64 ist[7];		/* Interrupt Stack Table pointers */
	u32 reserved3;
	u32 reserved4;
	u16 reserved5;
	u16 io_bitmap_base;
} __attribute__((packed)) ____cacheline_aligned;
#endif
|  | 198 |  | 
/*
 * Size of io_bitmap: one bit per I/O port.
 */
#define IO_BITMAP_BITS  65536
#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
#define IO_BITMAP_OFFSET offsetof(struct tss_struct, io_bitmap)
/* Offsets past the TSS limit: bitmap absent / lazily installed. */
#define INVALID_IO_BITMAP_OFFSET 0x8000
#define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000

struct tss_struct {
	struct x86_hw_tss x86_tss;	/* the hardware-visible portion */

	/*
	 * The extra 1 is there because the CPU will access an
	 * additional byte beyond the end of the IO permission
	 * bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	unsigned long	io_bitmap[IO_BITMAP_LONGS + 1];
	/*
	 * Cache the current maximum and the last task that used the bitmap:
	 */
	unsigned long io_bitmap_max;
	struct thread_struct *io_bitmap_owner;
	/*
	 * pads the TSS to be cacheline-aligned (size is 0x100)
	 */
	unsigned long __cacheline_filler[35];
	/*
	 * .. and then another 0x100 bytes for emergency kernel stack
	 */
	unsigned long stack[64];
} __attribute__((packed));

/* One TSS per CPU. */
DECLARE_PER_CPU(struct tss_struct, init_tss);
|  | 235 |  | 
/* Save the original ist values for checking stack pointers during debugging */
/* (mirrors the seven IST slots of the 64-bit struct x86_hw_tss above). */
struct orig_ist {
	unsigned long ist[7];
};
|  | 240 |  | 
#define	MXCSR_DEFAULT		0x1f80	/* default MXCSR value */

/* Legacy FNSAVE/FRSTOR layout (x87 only, no SSE state). */
struct i387_fsave_struct {
	u32	cwd;		/* control word */
	u32	swd;		/* status word */
	u32	twd;		/* tag word */
	u32	fip;
	u32	fcs;
	u32	foo;
	u32	fos;
	u32	st_space[20];	/* 8*10 bytes for each FP-reg = 80 bytes */
	u32	status;		/* software status information */
};

/* FXSAVE/FXRSTOR layout (adds SSE state); hardware requires 16-byte alignment. */
struct i387_fxsave_struct {
	u16	cwd;
	u16	swd;
	u16	twd;
	u16	fop;
	union {
		/* 64-bit form: full instruction/data pointers */
		struct {
			u64	rip;
			u64	rdp;
		};
		/* 32-bit form: segment:offset pairs */
		struct {
			u32	fip;
			u32	fcs;
			u32	foo;
			u32	fos;
		};
	};
	u32	mxcsr;
	u32	mxcsr_mask;
	u32	st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
	u32	xmm_space[64];	/* 16*16 bytes for each XMM-reg = 256 bytes */
	u32	padding[24];
} __attribute__((aligned(16)));

/* State used when the FPU is emulated in software (cf. <asm/math_emu.h>). */
struct i387_soft_struct {
	u32	cwd;
	u32	swd;
	u32	twd;
	u32	fip;
	u32	fcs;
	u32	foo;
	u32	fos;
	u32	st_space[20];	/* 8*10 bytes for each FP-reg = 80 bytes */
	u8	ftop, changed, lookahead, no_update, rm, alimit;
	struct info	*info;
	u32	entry_eip;
};

/* FPU state is saved in exactly one of the three layouts above. */
union i387_union {
	struct i387_fsave_struct	fsave;
	struct i387_fxsave_struct	fxsave;
	struct i387_soft_struct 	soft;
};
|  | 298 |  | 
|  | 299 | #ifdef CONFIG_X86_32 | 
| Glauber de Oliveira Costa | 2f66dcc | 2008-01-30 13:31:57 +0100 | [diff] [blame] | 300 | /* | 
|  | 301 | * the following now lives in the per cpu area: | 
|  | 302 | * extern	int cpu_llc_id[NR_CPUS]; | 
|  | 303 | */ | 
|  | 304 | DECLARE_PER_CPU(u8, cpu_llc_id); | 
| Roland McGrath | 99f8ecd | 2008-01-30 13:31:48 +0100 | [diff] [blame] | 305 | #else | 
| Glauber de Oliveira Costa | 2f66dcc | 2008-01-30 13:31:57 +0100 | [diff] [blame] | 306 | DECLARE_PER_CPU(struct orig_ist, orig_ist); | 
| Thomas Gleixner | 96a388d | 2007-10-11 11:20:03 +0200 | [diff] [blame] | 307 | #endif | 
| Glauber de Oliveira Costa | c758ecf | 2008-01-30 13:31:03 +0100 | [diff] [blame] | 308 |  | 
| Glauber de Oliveira Costa | 683e025 | 2008-01-30 13:31:27 +0100 | [diff] [blame] | 309 | extern void print_cpu_info(struct cpuinfo_x86 *); | 
|  | 310 | extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c); | 
|  | 311 | extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c); | 
|  | 312 | extern unsigned short num_cache_leaves; | 
|  | 313 |  | 
| Glauber de Oliveira Costa | cb38d37 | 2008-01-30 13:31:31 +0100 | [diff] [blame] | 314 | struct thread_struct { | 
|  | 315 | /* cached TLS descriptors. */ | 
|  | 316 | struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES]; | 
|  | 317 | unsigned long	sp0; | 
|  | 318 | unsigned long	sp; | 
|  | 319 | #ifdef CONFIG_X86_32 | 
|  | 320 | unsigned long	sysenter_cs; | 
|  | 321 | #else | 
|  | 322 | unsigned long 	usersp;	/* Copy from PDA */ | 
|  | 323 | unsigned short	es, ds, fsindex, gsindex; | 
|  | 324 | #endif | 
|  | 325 | unsigned long	ip; | 
|  | 326 | unsigned long	fs; | 
|  | 327 | unsigned long	gs; | 
|  | 328 | /* Hardware debugging registers */ | 
|  | 329 | unsigned long	debugreg0; | 
|  | 330 | unsigned long	debugreg1; | 
|  | 331 | unsigned long	debugreg2; | 
|  | 332 | unsigned long	debugreg3; | 
|  | 333 | unsigned long	debugreg6; | 
|  | 334 | unsigned long	debugreg7; | 
|  | 335 | /* fault info */ | 
|  | 336 | unsigned long	cr2, trap_no, error_code; | 
|  | 337 | /* floating point info */ | 
|  | 338 | union i387_union	i387 __attribute__((aligned(16)));; | 
|  | 339 | #ifdef CONFIG_X86_32 | 
|  | 340 | /* virtual 86 mode info */ | 
|  | 341 | struct vm86_struct __user *vm86_info; | 
|  | 342 | unsigned long		screen_bitmap; | 
|  | 343 | unsigned long		v86flags, v86mask, saved_sp0; | 
|  | 344 | unsigned int		saved_fs, saved_gs; | 
|  | 345 | #endif | 
|  | 346 | /* IO permissions */ | 
|  | 347 | unsigned long	*io_bitmap_ptr; | 
|  | 348 | unsigned long	iopl; | 
|  | 349 | /* max allowed port in the bitmap, in bytes: */ | 
|  | 350 | unsigned io_bitmap_max; | 
|  | 351 | /* MSR_IA32_DEBUGCTLMSR value to switch in if TIF_DEBUGCTLMSR is set.  */ | 
|  | 352 | unsigned long	debugctlmsr; | 
|  | 353 | /* Debug Store - if not 0 points to a DS Save Area configuration; | 
|  | 354 | *               goes into MSR_IA32_DS_AREA */ | 
|  | 355 | unsigned long	ds_area_msr; | 
|  | 356 | }; | 
|  | 357 |  | 
/*
 * Read hardware debug register db{0-3,6,7}.  The register number must
 * be a valid one (4 and 5 do not exist); anything else is a kernel bug.
 */
static inline unsigned long native_get_debugreg(int regno)
{
	unsigned long val = 0; 	/* Damn you, gcc! */

	switch (regno) {
	case 0:
		asm("mov %%db0, %0" :"=r" (val)); break;
	case 1:
		asm("mov %%db1, %0" :"=r" (val)); break;
	case 2:
		asm("mov %%db2, %0" :"=r" (val)); break;
	case 3:
		asm("mov %%db3, %0" :"=r" (val)); break;
	case 6:
		asm("mov %%db6, %0" :"=r" (val)); break;
	case 7:
		asm("mov %%db7, %0" :"=r" (val)); break;
	default:
		BUG();
	}
	return val;
}

/*
 * Write hardware debug register db{0-3,6,7}; counterpart of
 * native_get_debugreg() above.
 */
static inline void native_set_debugreg(int regno, unsigned long value)
{
	switch (regno) {
	case 0:
		asm("mov %0,%%db0"	: /* no output */ :"r" (value));
		break;
	case 1:
		asm("mov %0,%%db1"	: /* no output */ :"r" (value));
		break;
	case 2:
		asm("mov %0,%%db2"	: /* no output */ :"r" (value));
		break;
	case 3:
		asm("mov %0,%%db3"	: /* no output */ :"r" (value));
		break;
	case 6:
		asm("mov %0,%%db6"	: /* no output */ :"r" (value));
		break;
	case 7:
		asm("mov %0,%%db7"	: /* no output */ :"r" (value));
		break;
	default:
		BUG();
	}
}
|  | 406 |  | 
| Glauber de Oliveira Costa | 62d7d7e | 2008-01-30 13:31:27 +0100 | [diff] [blame] | 407 | /* | 
|  | 408 | * Set IOPL bits in EFLAGS from given mask | 
|  | 409 | */ | 
/*
 * Set IOPL bits in EFLAGS from given mask
 * (read EFLAGS, clear the old IOPL field, OR in the new mask,
 * write EFLAGS back; compiles to a no-op when !CONFIG_X86_32).
 */
static inline void native_set_iopl_mask(unsigned mask)
{
#ifdef CONFIG_X86_32
	unsigned int reg;
	__asm__ __volatile__ ("pushfl;"
			      "popl %0;"
			      "andl %1, %0;"
			      "orl %2, %0;"
			      "pushl %0;"
			      "popfl"
				: "=&r" (reg)
				: "i" (~X86_EFLAGS_IOPL), "r" (mask));
#endif
}
|  | 424 |  | 
/*
 * Publish the task's kernel stack pointer (thread->sp0) in the TSS,
 * and on 32-bit keep the cached SYSENTER code segment in sync.
 */
static inline void native_load_sp0(struct tss_struct *tss,
				   struct thread_struct *thread)
{
	tss->x86_tss.sp0 = thread->sp0;
#ifdef CONFIG_X86_32
	/* Only happens when SEP is enabled, no need to test "SEP"arately */
	if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
		tss->x86_tss.ss1 = thread->sysenter_cs;
		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
	}
#endif
}
| Glauber de Oliveira Costa | 1b46cbe | 2008-01-30 13:31:27 +0100 | [diff] [blame] | 437 |  | 
/*
 * SWAPGS (64-bit only; empty on 32-bit).  The "memory" clobber keeps
 * accesses from being reordered across the GS base switch.
 */
static inline void native_swapgs(void)
{
#ifdef CONFIG_X86_64
	asm volatile("swapgs" ::: "memory");
#endif
}
|  | 444 |  | 
| Glauber de Oliveira Costa | 7818a1e | 2008-01-30 13:31:31 +0100 | [diff] [blame] | 445 | #ifdef CONFIG_PARAVIRT | 
|  | 446 | #include <asm/paravirt.h> | 
|  | 447 | #else | 
| Glauber de Oliveira Costa | c758ecf | 2008-01-30 13:31:03 +0100 | [diff] [blame] | 448 | #define __cpuid native_cpuid | 
| Glauber de Oliveira Costa | 1b46cbe | 2008-01-30 13:31:27 +0100 | [diff] [blame] | 449 | #define paravirt_enabled() 0 | 
|  | 450 |  | 
|  | 451 | /* | 
|  | 452 | * These special macros can be used to get or set a debugging register | 
|  | 453 | */ | 
|  | 454 | #define get_debugreg(var, register)				\ | 
|  | 455 | (var) = native_get_debugreg(register) | 
|  | 456 | #define set_debugreg(value, register)				\ | 
|  | 457 | native_set_debugreg(register, value) | 
|  | 458 |  | 
/* Non-paravirt build: load_sp0 goes straight to the native version. */
static inline void load_sp0(struct tss_struct *tss,
			    struct thread_struct *thread)
{
	native_load_sp0(tss, thread);
}
|  | 464 |  | 
| Glauber de Oliveira Costa | 62d7d7e | 2008-01-30 13:31:27 +0100 | [diff] [blame] | 465 | #define set_iopl_mask native_set_iopl_mask | 
| Glauber de Oliveira Costa | e801f86 | 2008-01-30 13:32:08 +0100 | [diff] [blame] | 466 | #define SWAPGS	swapgs | 
| Glauber de Oliveira Costa | 1b46cbe | 2008-01-30 13:31:27 +0100 | [diff] [blame] | 467 | #endif /* CONFIG_PARAVIRT */ | 
|  | 468 |  | 
|  | 469 | /* | 
|  | 470 | * Save the cr4 feature set we're using (ie | 
|  | 471 | * Pentium 4MB enable and PPro Global page | 
|  | 472 | * enable), so that any CPU's that boot up | 
|  | 473 | * after us can get the correct flags. | 
|  | 474 | */ | 
|  | 475 | extern unsigned long mmu_cr4_features; | 
|  | 476 |  | 
|  | 477 | static inline void set_in_cr4(unsigned long mask) | 
|  | 478 | { | 
|  | 479 | unsigned cr4; | 
|  | 480 | mmu_cr4_features |= mask; | 
|  | 481 | cr4 = read_cr4(); | 
|  | 482 | cr4 |= mask; | 
|  | 483 | write_cr4(cr4); | 
|  | 484 | } | 
|  | 485 |  | 
|  | 486 | static inline void clear_in_cr4(unsigned long mask) | 
|  | 487 | { | 
|  | 488 | unsigned cr4; | 
|  | 489 | mmu_cr4_features &= ~mask; | 
|  | 490 | cr4 = read_cr4(); | 
|  | 491 | cr4 &= ~mask; | 
|  | 492 | write_cr4(cr4); | 
|  | 493 | } | 
|  | 494 |  | 
/* On-disk/in-memory header of a CPU microcode update blob. */
struct microcode_header {
	unsigned int hdrver;
	unsigned int rev;
	unsigned int date;
	unsigned int sig;
	unsigned int cksum;
	unsigned int ldrver;
	unsigned int pf;
	unsigned int datasize;
	unsigned int totalsize;
	unsigned int reserved[3];
};

struct microcode {
	struct microcode_header hdr;
	/*
	 * C99 flexible array member for the trailing payload (was the
	 * GNU bits[0] idiom; layout and sizeof are unchanged).
	 */
	unsigned int bits[];
};

typedef struct microcode microcode_t;
typedef struct microcode_header microcode_header_t;
|  | 515 |  | 
|  | 516 | /* microcode format is extended from prescott processors */ | 
/* microcode format is extended from prescott processors */
struct extended_signature {
	unsigned int sig;
	unsigned int pf;
	unsigned int cksum;
};

struct extended_sigtable {
	unsigned int count;	/* number of entries in sigs[] */
	unsigned int cksum;
	unsigned int reserved[3];
	/*
	 * C99 flexible array member (was the GNU sigs[0] idiom;
	 * layout and sizeof are unchanged).
	 */
	struct extended_signature sigs[];
};
|  | 529 |  | 
/*
 * Address-limit cookie.  NOTE(review): presumably consumed by the
 * get_fs()/set_fs() uaccess machinery — not visible in this header.
 */
typedef struct {
	unsigned long seg;
} mm_segment_t;
|  | 533 |  | 
|  | 534 |  | 
| Glauber de Oliveira Costa | 683e025 | 2008-01-30 13:31:27 +0100 | [diff] [blame] | 535 | /* | 
|  | 536 | * create a kernel thread without removing it from tasklists | 
|  | 537 | */ | 
|  | 538 | extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); | 
|  | 539 |  | 
|  | 540 | /* Free all resources held by a thread. */ | 
|  | 541 | extern void release_thread(struct task_struct *); | 
|  | 542 |  | 
|  | 543 | /* Prepare to copy thread state - unlazy all lazy status */ | 
|  | 544 | extern void prepare_to_copy(struct task_struct *tsk); | 
|  | 545 |  | 
|  | 546 | unsigned long get_wchan(struct task_struct *p); | 
| Glauber de Oliveira Costa | c758ecf | 2008-01-30 13:31:03 +0100 | [diff] [blame] | 547 |  | 
|  | 548 | /* | 
|  | 549 | * Generic CPUID function | 
|  | 550 | * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx | 
|  | 551 | * resulting in stale register contents being returned. | 
|  | 552 | */ | 
/*
 * Generic CPUID function.
 * %ecx is cleared up front because some CPUs (Cyrix MII) do not set
 * or clear it, which would hand back stale register contents.
 */
static inline void cpuid(unsigned int op,
			 unsigned int *eax, unsigned int *ebx,
			 unsigned int *ecx, unsigned int *edx)
{
	*ecx = 0;
	*eax = op;
	__cpuid(eax, ebx, ecx, edx);
}

/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(unsigned int op, int count,
			       unsigned int *eax, unsigned int *ebx,
			       unsigned int *ecx, unsigned int *edx)
{
	*ecx = count;
	*eax = op;
	__cpuid(eax, ebx, ecx, edx);
}
|  | 571 |  | 
/*
 * CPUID functions returning a single datum.  Each queries leaf 'op'
 * and extracts one of the four result registers.
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
	unsigned int regs[4];

	cpuid(op, &regs[0], &regs[1], &regs[2], &regs[3]);
	return regs[0];
}
static inline unsigned int cpuid_ebx(unsigned int op)
{
	unsigned int regs[4];

	cpuid(op, &regs[0], &regs[1], &regs[2], &regs[3]);
	return regs[1];
}
static inline unsigned int cpuid_ecx(unsigned int op)
{
	unsigned int regs[4];

	cpuid(op, &regs[0], &regs[1], &regs[2], &regs[3]);
	return regs[2];
}
static inline unsigned int cpuid_edx(unsigned int op)
{
	unsigned int regs[4];

	cpuid(op, &regs[0], &regs[1], &regs[2], &regs[3]);
	return regs[3];
}
|  | 603 |  | 
/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
	__asm__ __volatile__("rep;nop": : :"memory");
}

/* Stop speculative execution */
static inline void sync_core(void)
{
	int tmp;
	/* CPUID serializes the instruction stream; leaf 1 is used here. */
	asm volatile("cpuid" : "=a" (tmp) : "0" (1)
		     : "ebx", "ecx", "edx", "memory");
}

/* Standard busy-wait relaxation hook, maps to PAUSE via rep_nop(). */
#define cpu_relax()   rep_nop()
|  | 619 |  | 
/*
 * MONITOR/MWAIT support.  The instructions are emitted as raw opcode
 * bytes so the header also assembles on toolchains without them.
 */
static inline void __monitor(const void *eax, unsigned long ecx,
			     unsigned long edx)
{
	/* "monitor %eax,%ecx,%edx;" */
	asm volatile(
		".byte 0x0f,0x01,0xc8;"
		: :"a" (eax), "c" (ecx), "d"(edx));
}

static inline void __mwait(unsigned long eax, unsigned long ecx)
{
	/* "mwait %eax,%ecx;" */
	asm volatile(
		".byte 0x0f,0x01,0xc9;"
		: :"a" (eax), "c" (ecx));
}

/* As __mwait(), but re-enables interrupts immediately beforehand. */
static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
{
	/* "mwait %eax,%ecx;" */
	asm volatile(
		"sti; .byte 0x0f,0x01,0xc9;"
		: :"a" (eax), "c" (ecx));
}
|  | 644 |  | 
|  | 645 | extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx); | 
|  | 646 |  | 
|  | 647 | extern int force_mwait; | 
|  | 648 |  | 
|  | 649 | extern void select_idle_routine(const struct cpuinfo_x86 *c); | 
|  | 650 |  | 
|  | 651 | extern unsigned long boot_option_idle_override; | 
|  | 652 |  | 
| Glauber de Oliveira Costa | 1a53905 | 2008-01-30 13:31:39 +0100 | [diff] [blame] | 653 | extern void enable_sep_cpu(void); | 
|  | 654 | extern int sysenter_setup(void); | 
|  | 655 |  | 
|  | 656 | /* Defined in head.S */ | 
|  | 657 | extern struct desc_ptr early_gdt_descr; | 
|  | 658 |  | 
|  | 659 | extern void cpu_set_gdt(int); | 
|  | 660 | extern void switch_to_new_gdt(void); | 
|  | 661 | extern void cpu_init(void); | 
|  | 662 | extern void init_gdt(int cpu); | 
|  | 663 |  | 
|  | 664 | /* from system description table in BIOS.  Mostly for MCA use, but | 
|  | 665 | * others may find it useful. */ | 
|  | 666 | extern unsigned int machine_id; | 
|  | 667 | extern unsigned int machine_submodel_id; | 
|  | 668 | extern unsigned int BIOS_revision; | 
|  | 669 | extern unsigned int mca_pentium_flag; | 
|  | 670 |  | 
| Glauber de Oliveira Costa | 683e025 | 2008-01-30 13:31:27 +0100 | [diff] [blame] | 671 | /* Boot loader type from the setup header */ | 
|  | 672 | extern int bootloader_type; | 
| Glauber de Oliveira Costa | 1a53905 | 2008-01-30 13:31:39 +0100 | [diff] [blame] | 673 |  | 
|  | 674 | extern char ignore_fpu_irq; | 
| Glauber de Oliveira Costa | 683e025 | 2008-01-30 13:31:27 +0100 | [diff] [blame] | 675 | #define cache_line_size() (boot_cpu_data.x86_cache_alignment) | 
|  | 676 |  | 
| Glauber de Oliveira Costa | ea5e359 | 2008-01-30 13:31:40 +0100 | [diff] [blame] | 677 | /* generic versions from gas */ | 
|  | 678 | #define GENERIC_NOP1	".byte 0x90\n" | 
|  | 679 | #define GENERIC_NOP2    	".byte 0x89,0xf6\n" | 
|  | 680 | #define GENERIC_NOP3        ".byte 0x8d,0x76,0x00\n" | 
|  | 681 | #define GENERIC_NOP4        ".byte 0x8d,0x74,0x26,0x00\n" | 
|  | 682 | #define GENERIC_NOP5        GENERIC_NOP1 GENERIC_NOP4 | 
|  | 683 | #define GENERIC_NOP6	".byte 0x8d,0xb6,0x00,0x00,0x00,0x00\n" | 
|  | 684 | #define GENERIC_NOP7	".byte 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00\n" | 
|  | 685 | #define GENERIC_NOP8	GENERIC_NOP1 GENERIC_NOP7 | 
|  | 686 |  | 
|  | 687 | /* Opteron nops */ | 
|  | 688 | #define K8_NOP1 GENERIC_NOP1 | 
|  | 689 | #define K8_NOP2	".byte 0x66,0x90\n" | 
|  | 690 | #define K8_NOP3	".byte 0x66,0x66,0x90\n" | 
|  | 691 | #define K8_NOP4	".byte 0x66,0x66,0x66,0x90\n" | 
|  | 692 | #define K8_NOP5	K8_NOP3 K8_NOP2 | 
|  | 693 | #define K8_NOP6	K8_NOP3 K8_NOP3 | 
|  | 694 | #define K8_NOP7	K8_NOP4 K8_NOP3 | 
|  | 695 | #define K8_NOP8	K8_NOP4 K8_NOP4 | 
|  | 696 |  | 
|  | 697 | /* K7 nops */ | 
|  | 698 | /* uses eax dependencies (arbitary choice) */ | 
|  | 699 | #define K7_NOP1  GENERIC_NOP1 | 
|  | 700 | #define K7_NOP2	".byte 0x8b,0xc0\n" | 
|  | 701 | #define K7_NOP3	".byte 0x8d,0x04,0x20\n" | 
|  | 702 | #define K7_NOP4	".byte 0x8d,0x44,0x20,0x00\n" | 
|  | 703 | #define K7_NOP5	K7_NOP4 ASM_NOP1 | 
|  | 704 | #define K7_NOP6	".byte 0x8d,0x80,0,0,0,0\n" | 
|  | 705 | #define K7_NOP7        ".byte 0x8D,0x04,0x05,0,0,0,0\n" | 
|  | 706 | #define K7_NOP8        K7_NOP7 ASM_NOP1 | 
|  | 707 |  | 
/* P6 nops */
/* uses eax dependencies (Intel-recommended choice) */
/* 0f 1f /0 is the Intel-documented multi-byte NOP ("nopl"). */
#define P6_NOP1	GENERIC_NOP1
#define P6_NOP2	".byte 0x66,0x90\n"			/* 66 nop */
#define P6_NOP3	".byte 0x0f,0x1f,0x00\n"		/* nopl (%eax) */
#define P6_NOP4	".byte 0x0f,0x1f,0x40,0\n"		/* nopl 0x00(%eax) */
#define P6_NOP5	".byte 0x0f,0x1f,0x44,0x00,0\n"		/* nopl 0x00(%eax,%eax,1) */
#define P6_NOP6	".byte 0x66,0x0f,0x1f,0x44,0x00,0\n"	/* 66 nopl 0x00(%eax,%eax,1) */
#define P6_NOP7	".byte 0x0f,0x1f,0x80,0,0,0,0\n"	/* nopl 0x00000000(%eax) */
#define P6_NOP8	".byte 0x0f,0x1f,0x84,0x00,0,0,0,0\n"	/* nopl 0x00000000(%eax,%eax,1) */
|  | 718 |  | 
/*
 * Select the ASM_NOPn family used for padding/alternatives, based on
 * the CPU the kernel is tuned for at build time.  The referenced
 * K7/P6/K8/GENERIC tables expand lazily, so definition order is safe.
 *
 * NOTE: the Pentium 4 tuning symbol is CONFIG_MPENTIUM4 (see
 * arch/x86/Kconfig.cpu); the previous "CONFIG_PENTIUM4" spelling never
 * matched anything, so P4 builds silently fell through to the next case.
 */
#ifdef CONFIG_MK7
#define ASM_NOP1 K7_NOP1
#define ASM_NOP2 K7_NOP2
#define ASM_NOP3 K7_NOP3
#define ASM_NOP4 K7_NOP4
#define ASM_NOP5 K7_NOP5
#define ASM_NOP6 K7_NOP6
#define ASM_NOP7 K7_NOP7
#define ASM_NOP8 K7_NOP8
#elif defined(CONFIG_M686) || defined(CONFIG_MPENTIUMII) || \
      defined(CONFIG_MPENTIUMIII) || defined(CONFIG_MPENTIUMM) || \
      defined(CONFIG_MCORE2) || defined(CONFIG_MPENTIUM4) || \
      defined(CONFIG_MPSC)
#define ASM_NOP1 P6_NOP1
#define ASM_NOP2 P6_NOP2
#define ASM_NOP3 P6_NOP3
#define ASM_NOP4 P6_NOP4
#define ASM_NOP5 P6_NOP5
#define ASM_NOP6 P6_NOP6
#define ASM_NOP7 P6_NOP7
#define ASM_NOP8 P6_NOP8
#elif defined(CONFIG_MK8) || defined(CONFIG_X86_64)
#define ASM_NOP1 K8_NOP1
#define ASM_NOP2 K8_NOP2
#define ASM_NOP3 K8_NOP3
#define ASM_NOP4 K8_NOP4
#define ASM_NOP5 K8_NOP5
#define ASM_NOP6 K8_NOP6
#define ASM_NOP7 K8_NOP7
#define ASM_NOP8 K8_NOP8
#else
#define ASM_NOP1 GENERIC_NOP1
#define ASM_NOP2 GENERIC_NOP2
#define ASM_NOP3 GENERIC_NOP3
#define ASM_NOP4 GENERIC_NOP4
#define ASM_NOP5 GENERIC_NOP5
#define ASM_NOP6 GENERIC_NOP6
#define ASM_NOP7 GENERIC_NOP7
#define ASM_NOP8 GENERIC_NOP8
#endif

/* Longest NOP the tables above provide, in bytes. */
#define ASM_NOP_MAX 8
|  | 761 |  | 
| Glauber de Oliveira Costa | 683e025 | 2008-01-30 13:31:27 +0100 | [diff] [blame] | 762 | #define HAVE_ARCH_PICK_MMAP_LAYOUT 1 | 
|  | 763 | #define ARCH_HAS_PREFETCHW | 
|  | 764 | #define ARCH_HAS_SPINLOCK_PREFETCH | 
|  | 765 |  | 
| Glauber de Oliveira Costa | ae2e15e | 2008-01-30 13:31:40 +0100 | [diff] [blame] | 766 | #ifdef CONFIG_X86_32 | 
|  | 767 | #define BASE_PREFETCH	ASM_NOP4 | 
|  | 768 | #define ARCH_HAS_PREFETCH | 
|  | 769 | #else | 
|  | 770 | #define BASE_PREFETCH	"prefetcht0 (%1)" | 
|  | 771 | #endif | 
|  | 772 |  | 
/* Prefetch instructions for Pentium III and AMD Athlon */
/* It's not worth caring about 3dnow! prefetches for the K6
   because they are microcoded there and very slow.
   However we don't do prefetches for pre-XP Athlons currently.
   That should be fixed. */
/*
 * prefetch() - hint that *x will soon be read.
 * Patched via the alternatives mechanism: prefetchnta when the CPU has
 * SSE (X86_FEATURE_XMM), otherwise BASE_PREFETCH (NOP or prefetcht0).
 */
static inline void prefetch(const void *x)
{
	alternative_input(BASE_PREFETCH,
			  "prefetchnta (%1)",
			  X86_FEATURE_XMM,
			  "r" (x));
}
|  | 785 |  | 
/* 3dnow! prefetch to get an exclusive cache line. Useful for
   spinlocks to avoid one state transition in the cache coherency protocol. */
/*
 * prefetchw() - hint that *x will soon be written.
 * Uses prefetchw when the CPU has 3dnow! (X86_FEATURE_3DNOW),
 * otherwise the BASE_PREFETCH fallback.
 */
static inline void prefetchw(const void *x)
{
	alternative_input(BASE_PREFETCH,
			  "prefetchw (%1)",
			  X86_FEATURE_3DNOW,
			  "r" (x));
}

/* Prefetch a lock for ownership before the upcoming write (see above). */
#define spin_lock_prefetch(x)	prefetchw(x)
| Glauber de Oliveira Costa | 2f66dcc | 2008-01-30 13:31:57 +0100 | [diff] [blame] | 797 | #ifdef CONFIG_X86_32 | 
|  | 798 | /* | 
|  | 799 | * User space process size: 3GB (default). | 
|  | 800 | */ | 
|  | 801 | #define TASK_SIZE	(PAGE_OFFSET) | 
|  | 802 |  | 
|  | 803 | #define INIT_THREAD  {							\ | 
|  | 804 | .sp0 = sizeof(init_stack) + (long)&init_stack,			\ | 
|  | 805 | .vm86_info = NULL,						\ | 
|  | 806 | .sysenter_cs = __KERNEL_CS,					\ | 
|  | 807 | .io_bitmap_ptr = NULL,						\ | 
|  | 808 | .fs = __KERNEL_PERCPU,						\ | 
|  | 809 | } | 
|  | 810 |  | 
|  | 811 | /* | 
|  | 812 | * Note that the .io_bitmap member must be extra-big. This is because | 
|  | 813 | * the CPU will access an additional byte beyond the end of the IO | 
|  | 814 | * permission bitmap. The extra byte must be all 1 bits, and must | 
|  | 815 | * be within the limit. | 
|  | 816 | */ | 
|  | 817 | #define INIT_TSS  {							\ | 
|  | 818 | .x86_tss = {							\ | 
|  | 819 | .sp0		= sizeof(init_stack) + (long)&init_stack, \ | 
|  | 820 | .ss0		= __KERNEL_DS,				\ | 
|  | 821 | .ss1		= __KERNEL_CS,				\ | 
|  | 822 | .io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,		\ | 
|  | 823 | },								\ | 
|  | 824 | .io_bitmap	= { [0 ... IO_BITMAP_LONGS] = ~0 },		\ | 
|  | 825 | } | 
|  | 826 |  | 
/*
 * Set up the user-space register state for exec() (32-bit): clear %gs
 * and the fs selector, load the user data/code segments, and point the
 * register frame at the new entry point and user stack.
 *
 * All macro parameters are parenthesized so expression arguments expand
 * safely, matching the 64-bit variant of this macro.
 */
#define start_thread(regs, new_eip, new_esp) do {		\
	__asm__("movl %0,%%gs": :"r" (0));			\
	(regs)->fs = 0;						\
	set_fs(USER_DS);					\
	(regs)->ds = __USER_DS;					\
	(regs)->es = __USER_DS;					\
	(regs)->ss = __USER_DS;					\
	(regs)->cs = __USER_CS;					\
	(regs)->ip = (new_eip);					\
	(regs)->sp = (new_esp);					\
} while (0)
|  | 838 |  | 
|  | 839 |  | 
/* Return the saved PC of a blocked thread (32-bit: out-of-line function;
 * the 64-bit build defines a macro of the same name instead). */
extern unsigned long thread_saved_pc(struct task_struct *tsk);

/* Number of unsigned longs that fit in one kernel thread stack. */
#define THREAD_SIZE_LONGS      (THREAD_SIZE/sizeof(unsigned long))
/* One-past-the-end address of the kernel stack whose base is 'info'. */
#define KSTK_TOP(info)                                                 \
({                                                                     \
	unsigned long *__ptr = (unsigned long *)(info);                 \
	(unsigned long)(&__ptr[THREAD_SIZE_LONGS]);                     \
})
|  | 848 |  | 
|  | 849 | /* | 
|  | 850 | * The below -8 is to reserve 8 bytes on top of the ring0 stack. | 
|  | 851 | * This is necessary to guarantee that the entire "struct pt_regs" | 
|  | 852 | * is accessable even if the CPU haven't stored the SS/ESP registers | 
|  | 853 | * on the stack (interrupt gate does not save these registers | 
|  | 854 | * when switching to the same priv ring). | 
|  | 855 | * Therefore beware: accessing the ss/esp fields of the | 
|  | 856 | * "struct pt_regs" is possible, but they may contain the | 
|  | 857 | * completely wrong values. | 
|  | 858 | */ | 
|  | 859 | #define task_pt_regs(task)                                             \ | 
|  | 860 | ({                                                                     \ | 
|  | 861 | struct pt_regs *__regs__;                                       \ | 
|  | 862 | __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \ | 
|  | 863 | __regs__ - 1;                                                   \ | 
|  | 864 | }) | 
|  | 865 |  | 
|  | 866 | #define KSTK_ESP(task) (task_pt_regs(task)->sp) | 
|  | 867 |  | 
#else
/*
 * User space process size. 47bits minus one guard page.
 */
#define TASK_SIZE64	(0x800000000000UL - 4096)

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 * 32-bit compat tasks see 3GB with ADDR_LIMIT_3GB, else just under 4GB.
 */
#define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
			0xc0000000 : 0xFFFFe000)

/* Effective user address-space limit: compat (TIF_IA32) tasks get the
 * IA32 limit, native 64-bit tasks get TASK_SIZE64. */
#define TASK_SIZE 		(test_thread_flag(TIF_IA32) ? \
			IA32_PAGE_OFFSET : TASK_SIZE64)
#define TASK_SIZE_OF(child) 	((test_tsk_thread_flag(child, TIF_IA32)) ? \
				IA32_PAGE_OFFSET : TASK_SIZE64)

/* Boot task's initial thread_struct: ring-0 stack top past init_stack. */
#define INIT_THREAD  { \
	.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}

/* Boot TSS: only the ring-0 stack pointer needs initializing here. */
#define INIT_TSS  { \
	.x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}

/*
 * Set up the user-space register state for exec() (64-bit).
 * 0x200 is X86_EFLAGS_IF: the new task starts with interrupts enabled.
 * NOTE(review): write_pda(oldrsp, ...) stashes the user stack pointer in
 * the per-cpu PDA — presumably consumed on the syscall return path;
 * confirm against the pda definition.
 */
#define start_thread(regs, new_rip, new_rsp) do { 			     \
	asm volatile("movl %0,%%fs; movl %0,%%es; movl %0,%%ds": :"r" (0));  \
	load_gs_index(0);						     \
	(regs)->ip = (new_rip);						     \
	(regs)->sp = (new_rsp);						     \
	write_pda(oldrsp, (new_rsp));					     \
	(regs)->cs = __USER_CS;						     \
	(regs)->ss = __USER_DS;						     \
	(regs)->flags = 0x200;						     \
	set_fs(USER_DS);						     \
} while (0)

/*
 * Return saved PC of a blocked thread.
 * What is this good for? it will be always the scheduler or ret_from_fork.
 */
#define thread_saved_pc(t) (*(unsigned long *)((t)->thread.sp - 8))

/* On 64-bit the pt_regs frame sits directly below the ring-0 stack top. */
#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.sp0 - 1)
#define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */
#endif /* CONFIG_X86_64 */
|  | 914 |  | 
| Glauber de Oliveira Costa | 683e025 | 2008-01-30 13:31:27 +0100 | [diff] [blame] | 915 | /* This decides where the kernel will search for a free chunk of vm | 
|  | 916 | * space during mmap's. | 
|  | 917 | */ | 
|  | 918 | #define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3)) | 
|  | 919 |  | 
|  | 920 | #define KSTK_EIP(task) (task_pt_regs(task)->ip) | 
|  | 921 |  | 
| Glauber de Oliveira Costa | c758ecf | 2008-01-30 13:31:03 +0100 | [diff] [blame] | 922 | #endif |