blob: ab2c507b58b65d9909116878b611faf78c592474 [file] [log] [blame]
Suresh Siddhadc1e35c2008-07-29 10:29:19 -07001#ifndef __ASM_X86_XSAVE_H
2#define __ASM_X86_XSAVE_H
3
H. Peter Anvin6152e4b2008-07-29 17:23:16 -07004#include <linux/types.h>
Suresh Siddhadc1e35c2008-07-29 10:29:19 -07005#include <asm/processor.h>
Suresh Siddhadc1e35c2008-07-29 10:29:19 -07006
Liu, Jinsong56c103e2014-02-21 17:39:02 +00007/* Bit 63 of XCR0 is reserved for future expansion */
8#define XSTATE_EXTEND_MASK (~(XSTATE_FPSSE | (1ULL << 63)))
Suresh Siddhadc1e35c2008-07-29 10:29:19 -07009
Ingo Molnar677b98b2015-04-28 09:40:26 +020010#define XSTATE_CPUID 0x0000000d
11
Suresh Siddhadc1e35c2008-07-29 10:29:19 -070012#define FXSAVE_SIZE 512
13
Sheng Yang2d5b5a62010-06-13 17:29:39 +080014#define XSAVE_HDR_SIZE 64
15#define XSAVE_HDR_OFFSET FXSAVE_SIZE
16
17#define XSAVE_YMM_SIZE 256
18#define XSAVE_YMM_OFFSET (XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET)
Sheng Yang5ee481d2010-05-17 17:22:23 +080019
Qiaowei Rene7d820a2013-12-05 17:15:34 +080020/* Supported features which support lazy state saving */
Fenghua Yuc2bc11f2014-02-20 13:24:51 -080021#define XSTATE_LAZY (XSTATE_FP | XSTATE_SSE | XSTATE_YMM \
22 | XSTATE_OPMASK | XSTATE_ZMM_Hi256 | XSTATE_Hi16_ZMM)
Qiaowei Rene7d820a2013-12-05 17:15:34 +080023
24/* Supported features which require eager state saving */
25#define XSTATE_EAGER (XSTATE_BNDREGS | XSTATE_BNDCSR)
26
27/* All currently supported features */
28#define XCNTXT_MASK (XSTATE_LAZY | XSTATE_EAGER)
Suresh Siddhadc1e35c2008-07-29 10:29:19 -070029
Suresh Siddhab359e8a2008-07-29 10:29:20 -070030#ifdef CONFIG_X86_64
31#define REX_PREFIX "0x48, "
32#else
33#define REX_PREFIX
34#endif
35
H. Peter Anvin6152e4b2008-07-29 17:23:16 -070036extern unsigned int xstate_size;
Ingo Molnar614df7f2015-04-24 09:20:33 +020037extern u64 xfeatures_mask;
Suresh Siddha5b3efd52010-02-11 11:50:59 -080038extern u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
Ingo Molnar3e5e1262015-04-25 05:08:17 +020039extern struct xsave_struct init_xstate_ctx;
Suresh Siddhadc1e35c2008-07-29 10:29:19 -070040
Suresh Siddha5b3efd52010-02-11 11:50:59 -080041extern void update_regset_xstate_info(unsigned int size, u64 xstate_mask);
Suresh Siddhadc1e35c2008-07-29 10:29:19 -070042
/* These macros all use (%edi)/(%rdi) as the single memory argument. */
/* Hand-assembled opcodes: older binutils may not know these mnemonics. */
#define XSAVE		".byte " REX_PREFIX "0x0f,0xae,0x27"
#define XSAVEOPT	".byte " REX_PREFIX "0x0f,0xae,0x37"
#define XSAVES		".byte " REX_PREFIX "0x0f,0xc7,0x2f"
#define XRSTOR		".byte " REX_PREFIX "0x0f,0xae,0x2f"
#define XRSTORS		".byte " REX_PREFIX "0x0f,0xc7,0x1f"

/*
 * Exception fixup for the xstate instructions above.  Expands to a .fixup
 * fragment that sets the local variable 'err' to -1 if the instruction at
 * label 1: faults, then resumes at label 2:.  Must be placed in the asm
 * output-operand position; the using asm statement must define labels
 * 1: (the instruction) and 2: (the resume point).
 */
#define xstate_fault	".section .fixup,\"ax\"\n"	\
			"3:  movl $-1,%[err]\n"		\
			"    jmp  2b\n"			\
			".previous\n"			\
			_ASM_EXTABLE(1b, 3b)		\
			: [err] "=r" (err)
56
/*
 * Save the full processor xstate into the xsave area @fx.
 *
 * This function is called only during boot time when x86 caps are not set
 * up and alternative can not be used yet; the XSAVES vs. XSAVE choice is
 * therefore made with an explicit boot_cpu_has() test instead of a
 * patched-in alternative.
 *
 * Returns 0 on success, -1 if the save instruction faulted (set by the
 * xstate_fault fixup).
 */
static inline int xsave_state_booting(struct xsave_struct *fx)
{
	u64 mask = -1;			/* request all state components */
	u32 lmask = mask;		/* low 32 bits of mask -> EAX */
	u32 hmask = mask >> 32;		/* high 32 bits of mask -> EDX */
	int err = 0;

	WARN_ON(system_state != SYSTEM_BOOTING);

	if (boot_cpu_has(X86_FEATURE_XSAVES))
		/* XSAVES: compacted-format save (see xsave_state() comment) */
		asm volatile("1:"XSAVES"\n\t"
			"2:\n\t"
			xstate_fault
			: "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
			:   "memory");
	else
		asm volatile("1:"XSAVE"\n\t"
			"2:\n\t"
			xstate_fault
			: "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
			:   "memory");
	return err;
}
84
/*
 * Restore processor xstate from the xsave area @fx, limited to the
 * components selected by @mask.
 *
 * This function is called only during boot time when x86 caps are not set
 * up and alternative can not be used yet, so XRSTORS vs. XRSTOR is chosen
 * with an explicit boot_cpu_has() test.
 *
 * Returns 0 on success, -1 if the restore instruction faulted.
 */
static inline int xrstor_state_booting(struct xsave_struct *fx, u64 mask)
{
	u32 lmask = mask;		/* low 32 bits of mask -> EAX */
	u32 hmask = mask >> 32;		/* high 32 bits of mask -> EDX */
	int err = 0;

	WARN_ON(system_state != SYSTEM_BOOTING);

	if (boot_cpu_has(X86_FEATURE_XSAVES))
		asm volatile("1:"XRSTORS"\n\t"
			"2:\n\t"
			xstate_fault
			: "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
			:   "memory");
	else
		asm volatile("1:"XRSTOR"\n\t"
			"2:\n\t"
			xstate_fault
			: "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
			:   "memory");
	return err;
}
111
/*
 * Save processor xstate to xsave area.
 *
 * Saves all state components (mask = -1) into @fx using the best
 * available save instruction, selected at patch time via alternatives.
 * Returns 0 on success, -1 if the instruction faulted.
 */
static inline int xsave_state(struct xsave_struct *fx)
{
	u64 mask = -1;			/* request all state components */
	u32 lmask = mask;		/* low 32 bits of mask -> EAX */
	u32 hmask = mask >> 32;		/* high 32 bits of mask -> EDX */
	int err = 0;

	/* Alternatives must be applied before this fast path is valid. */
	WARN_ON(!alternatives_patched);

	/*
	 * If xsaves is enabled, xsaves replaces xsaveopt because
	 * it supports compact format and supervisor states in addition to
	 * modified optimization in xsaveopt.
	 *
	 * Otherwise, if xsaveopt is enabled, xsaveopt replaces xsave
	 * because xsaveopt supports modified optimization which is not
	 * supported by xsave.
	 *
	 * If none of xsaves and xsaveopt is enabled, use xsave.
	 */
	alternative_input_2(
		"1:"XSAVE,
		XSAVEOPT,
		X86_FEATURE_XSAVEOPT,
		XSAVES,
		X86_FEATURE_XSAVES,
		[fx] "D" (fx), "a" (lmask), "d" (hmask) :
		"memory");
	/*
	 * NOTE(review): this second asm statement defines the 2: resume
	 * label, and xstate_fault's _ASM_EXTABLE references 1b from the
	 * alternative above — the fixup only works because the two asm
	 * statements are emitted back to back.  Keep them adjacent.
	 */
	asm volatile("2:\n\t"
		     xstate_fault
		     : "0" (0)
		     : "memory");

	return err;
}
150
/*
 * Restore processor xstate from xsave area.
 *
 * Restores the components selected by @mask from @fx, using XRSTORS when
 * the CPU supports it (patched in via alternatives) and XRSTOR otherwise.
 * Returns 0 on success, -1 if the restore instruction faulted.
 */
static inline int xrstor_state(struct xsave_struct *fx, u64 mask)
{
	int err = 0;
	u32 lmask = mask;		/* low 32 bits of mask -> EAX */
	u32 hmask = mask >> 32;		/* high 32 bits of mask -> EDX */

	/*
	 * Use xrstors to restore context if it is enabled. xrstors supports
	 * compacted format of xsave area which is not supported by xrstor.
	 */
	alternative_input(
		"1: " XRSTOR,
		XRSTORS,
		X86_FEATURE_XSAVES,
		"D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
		: "memory");

	/*
	 * NOTE(review): as in xsave_state(), this trailing asm supplies the
	 * 2: resume label and the fixup for label 1b in the alternative
	 * above; the two asm statements must remain adjacent.
	 */
	asm volatile("2:\n"
		     xstate_fault
		     : "0" (0)
		     : "memory");

	return err;
}
178
Fenghua Yuf9de3142014-05-29 11:12:37 -0700179/*
Fenghua Yuf9de3142014-05-29 11:12:37 -0700180 * Restore xstate context for new process during context switch.
181 */
Suresh Siddha0ca5bd0d2012-07-24 16:05:28 -0700182static inline int fpu_xrstor_checking(struct xsave_struct *fx)
Suresh Siddhab359e8a2008-07-29 10:29:20 -0700183{
Fenghua Yuf9de3142014-05-29 11:12:37 -0700184 return xrstor_state(fx, -1);
Suresh Siddhab359e8a2008-07-29 10:29:20 -0700185}
186
/*
 * Save xstate to user space xsave area.
 *
 * We don't use modified optimization because xrstor/xrstors might track
 * a different application.
 *
 * We don't use compacted format xsave area for
 * backward compatibility for old applications which don't understand
 * compacted format of xsave area.
 *
 * Returns 0 on success, -EFAULT if clearing the header failed, or -1 if
 * the XSAVE instruction itself faulted.
 */
static inline int xsave_user(struct xsave_struct __user *buf)
{
	int err;

	/*
	 * Clear the xsave header first, so that reserved fields are
	 * initialized to zero.
	 */
	err = __clear_user(&buf->header, sizeof(buf->header));
	if (unlikely(err))
		return -EFAULT;

	/* STAC/CLAC bracket the user-space access (SMAP). */
	__asm__ __volatile__(ASM_STAC "\n"
			     "1:"XSAVE"\n"
			     "2: " ASM_CLAC "\n"
			     xstate_fault
			     : "D" (buf), "a" (-1), "d" (-1), "0" (0)
			     : "memory");
	return err;
}
217
/*
 * Restore xstate from user space xsave area.
 *
 * Restores the components selected by @mask from the user buffer @buf.
 * Uses plain XRSTOR (standard format; see xsave_user() for why the
 * compacted format is not used for user buffers).
 * Returns 0 on success, -1 if XRSTOR faulted.
 */
static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
{
	int err = 0;
	/* Strip __user for the asm operand; access is STAC/CLAC bracketed. */
	struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
	u32 lmask = mask;		/* low 32 bits of mask -> EAX */
	u32 hmask = mask >> 32;		/* high 32 bits of mask -> EDX */

	__asm__ __volatile__(ASM_STAC "\n"
			     "1:"XRSTOR"\n"
			     "2: " ASM_CLAC "\n"
			     xstate_fault
			     : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
			     : "memory");	/* memory required? */
	return err;
}
236
Fenghua Yu7496d642014-05-29 11:12:44 -0700237void *get_xsave_addr(struct xsave_struct *xsave, int xstate);
238void setup_xstate_comp(void);
239
Suresh Siddhadc1e35c2008-07-29 10:29:19 -0700240#endif