#ifndef __ASM_X86_XSAVE_H
#define __ASM_X86_XSAVE_H

#include <linux/types.h>
#include <asm/processor.h>
#include <asm/i387.h>

#define XSTATE_FP	0x1
#define XSTATE_SSE	0x2

#define XSTATE_FPSSE	(XSTATE_FP | XSTATE_SSE)

#define FXSAVE_SIZE	512

/*
 * These are the features that the OS can handle currently.
 */
#define XCNTXT_MASK	(XSTATE_FP | XSTATE_SSE)

#ifdef CONFIG_X86_64
#define REX_PREFIX	"0x48, "
#else
#define REX_PREFIX
#endif
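
/*
 * Note: the XSAVE/XRSTOR instructions below are emitted as raw .byte
 * sequences, presumably because assemblers of this vintage do not yet
 * know the mnemonics:
 *   0x0f,0xae /4 (modrm 0x27) = xsave  (%edi/%rdi)
 *   0x0f,0xae /5 (modrm 0x2f) = xrstor (%edi/%rdi)
 * On 64-bit kernels REX_PREFIX prepends 0x48 (REX.W), selecting the
 * 64-bit form of the instruction.  The "D" constraints keep the buffer
 * pointer in %rdi, a legacy register that needs no extra REX bits.
 */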

extern unsigned int xstate_size;
extern u64 pcntxt_mask;
extern struct xsave_struct *init_xstate_buf;

extern void xsave_cntxt_init(void);
extern void xsave_init(void);
extern int init_fpu(struct task_struct *child);
extern int check_for_xstate(struct i387_fxsave_struct __user *buf,
			    void __user *fpstate,
			    struct _fpx_sw_bytes *sw);

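/*
 * Restore extended state from a kernel buffer.  Label 1 is the XRSTOR
 * instruction itself; if it faults, the exception-table entry
 * (_ASM_EXTABLE(1b, 3b)) diverts execution to the fixup at label 3,
 * which records the failure in err and resumes at label 2.
 * EDX:EAX = -1:-1 asks the CPU to restore every state component
 * enabled in XCR0.
 */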
static inline int xrstor_checking(struct xsave_struct *fx)
{
	int err;

	asm volatile("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n\t"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
		     "3:  movl $-1,%[err]\n"
		     "    jmp  2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : [err] "=r" (err)
		     : "D" (fx), "m" (*fx), "a" (-1), "d" (-1), "0" (0)
		     : "memory");

	return err;
}

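/*
 * Save the current extended state to a user-space buffer, using the
 * same fixup pattern as above to report a fault as -1.  On failure the
 * buffer is cleared with __clear_user() so the caller never sees a
 * partially written image; if even the clearing faults, -EFAULT is
 * returned instead.
 */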
static inline int xsave_user(struct xsave_struct __user *buf)
{
	int err;
	__asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
			     "2:\n"
			     ".section .fixup,\"ax\"\n"
			     "3:  movl $-1,%[err]\n"
			     "    jmp  2b\n"
			     ".previous\n"
			     ".section __ex_table,\"a\"\n"
			     _ASM_ALIGN "\n"
			     _ASM_PTR "1b,3b\n"
			     ".previous"
			     : [err] "=r" (err)
			     : "D" (buf), "a" (-1), "d" (-1), "0" (0)
			     : "memory");
	if (unlikely(err) && __clear_user(buf, xstate_size))
		err = -EFAULT;
	/* No need to clear here because the caller clears USED_MATH */
	return err;
}

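/*
 * Restore extended state from a user-space buffer (typically a signal
 * frame).  Only the state components named in @mask are restored; the
 * 64-bit mask is split into EAX (low half) and EDX (high half) as the
 * instruction expects.  A fault is reported as -1 via the fixup entry.
 */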
static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
{
	int err;
	struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
	u32 lmask = mask;
	u32 hmask = mask >> 32;

	__asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
			     "2:\n"
			     ".section .fixup,\"ax\"\n"
			     "3:  movl $-1,%[err]\n"
			     "    jmp  2b\n"
			     ".previous\n"
			     ".section __ex_table,\"a\"\n"
			     _ASM_ALIGN "\n"
			     _ASM_PTR "1b,3b\n"
			     ".previous"
			     : [err] "=r" (err)
			     : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
			     : "memory");	/* memory required? */
	return err;
}

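/*
 * Restore the state components named in @mask from a kernel buffer.
 * Unlike the *_user variants above, no exception fixup is provided;
 * callers pass trusted in-kernel buffers.
 */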
static inline void xrstor_state(struct xsave_struct *fx, u64 mask)
{
	u32 lmask = mask;
	u32 hmask = mask >> 32;

	asm volatile(".byte " REX_PREFIX "0x0f,0xae,0x2f\n\t"
		     : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
		     : "memory");
}

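/*
 * Save all enabled state components of @tsk into tsk->thread.xstate->xsave.
 * EDX:EAX = -1:-1 requests every component; the CPU saves whatever is
 * enabled in XCR0.
 */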
static inline void xsave(struct task_struct *tsk)
{
	/*
	 * The opcode is emitted as raw bytes with a fixed REX prefix, so
	 * the memory operand must not require extended registers; work
	 * around that by forcing the compiler to select an addressing
	 * mode that doesn't need them.
	 */
	__asm__ __volatile__(".byte " REX_PREFIX "0x0f,0xae,0x27"
			     : : "D" (&(tsk->thread.xstate->xsave)),
			         "a" (-1), "d" (-1) : "memory");
}
#endif /* __ASM_X86_XSAVE_H */