#ifndef _ASM_X86_SYSTEM_H_
#define _ASM_X86_SYSTEM_H_

#include <asm/asm.h>

#ifdef CONFIG_X86_32
# include "system_32.h"
#else
# include "system_64.h"
#endif

#ifdef __KERNEL__
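/*
 * _set_base() and _set_limit() patch the base and limit fields of an
 * 8-byte segment descriptor in place.  The hardware scatters both
 * fields across the descriptor: limit bits 0-15 live in bytes 0-1,
 * base bits 0-15 in bytes 2-3, base bits 16-23 in byte 4, limit bits
 * 16-19 in the low nibble of byte 6 (the high nibble holds the flags
 * and must be preserved), and base bits 24-31 in byte 7.
 */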
#define _set_base(addr, base) do { unsigned long __pr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
	"rorl $16,%%edx\n\t" \
	"movb %%dl,%2\n\t" \
	"movb %%dh,%3" \
	:"=&d" (__pr) \
	:"m" (*((addr)+2)), \
	 "m" (*((addr)+4)), \
	 "m" (*((addr)+7)), \
	 "0" (base) \
	); } while (0)

#define _set_limit(addr, limit) do { unsigned long __lr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
	"rorl $16,%%edx\n\t" \
	"movb %2,%%dh\n\t" \
	"andb $0xf0,%%dh\n\t" \
	"orb %%dh,%%dl\n\t" \
	"movb %%dl,%2" \
	:"=&d" (__lr) \
	:"m" (*(addr)), \
	 "m" (*((addr)+6)), \
	 "0" (limit) \
	); } while (0)

#define set_base(ldt, base) _set_base(((char *)&(ldt)) , (base))
#define set_limit(ldt, limit) _set_limit(((char *)&(ldt)) , ((limit)-1))
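
/*
 * Illustrative use (the names "desc", "buf" and "size" are examples,
 * not callers in this tree): pointing a byte-granular LDT entry at a
 * buffer.  set_limit() takes the size in bytes and stores size - 1,
 * which is the form the hardware expects:
 *
 *	set_base(desc, (unsigned long)buf);
 *	set_limit(desc, size);
 */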

/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong.
 */
#define loadsegment(seg, value)			\
	asm volatile("\n"			\
		"1:\t"				\
		"movl %k0,%%" #seg "\n"		\
		"2:\n"				\
		".section .fixup,\"ax\"\n"	\
		"3:\t"				\
		"movl %k1, %%" #seg "\n\t"	\
		"jmp 2b\n"			\
		".previous\n"			\
		".section __ex_table,\"a\"\n\t"	\
		_ASM_ALIGN "\n\t"		\
		_ASM_PTR " 1b,3b\n"		\
		".previous"			\
		: :"r" (value), "r" (0))
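
/*
 * If the selector turns out to be bogus, the faulting "movl" above is
 * caught via the __ex_table fixup and the register is silently loaded
 * with 0 (the null selector) instead.  A typical (illustrative) use is
 * reloading a user segment on context switch:
 *
 *	loadsegment(gs, next->gs);
 */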

/*
 * Save a segment register away
 */
#define savesegment(seg, value) \
	asm volatile("mov %%" #seg ",%0":"=rm" (value))
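
/*
 * Reading a segment register can never fault, so unlike loadsegment()
 * no fixup is needed; e.g. (illustrative) savesegment(gs, prev->gs);
 */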
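/*
 * get_limit() queries the CPU for the limit of the descriptor selected
 * by "segment" with the "lsl" (load segment limit) instruction, which
 * yields the limit in bytes (already scaled if the segment is
 * page-granular), and returns the segment's size, i.e. limit + 1.
 */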
static inline unsigned long get_limit(unsigned long segment)
{
	unsigned long __limit;
	__asm__("lsll %1,%0"
		:"=r" (__limit):"r" (segment));
	return __limit+1;
}
#endif /* __KERNEL__ */

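/*
 * clflush() evicts the cache line containing the byte at __p from all
 * levels of the cache hierarchy.  The "+m" constraint marks that
 * location as both read and written, so gcc will neither reorder the
 * flush against accesses to *__p nor optimize it away.
 */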
static inline void clflush(void *__p)
{
	asm volatile("clflush %0" : "+m" (*(char __force *)__p));
}

#define nop() __asm__ __volatile__ ("nop")

void disable_hlt(void);
void enable_hlt(void);

extern int es7000_plat;
void cpu_idle_wait(void);

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

void default_idle(void);

#endif