#ifndef _ASM_X86_MSR_H
#define _ASM_X86_MSR_H

#include "msr-index.h"

#ifndef __ASSEMBLY__

#include <asm/asm.h>
#include <asm/errno.h>
#include <asm/cpumask.h>
#include <uapi/asm/msr.h>

struct msr {
	union {
		struct {
			u32 l;
			u32 h;
		};
		u64 q;
	};
};
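
/*
 * Illustrative note (not in the original header): the anonymous union above
 * lets callers view one MSR value either as the raw 64-bit quantity or as
 * its low/high 32-bit halves, e.g.:
 *
 *	struct msr m;
 *
 *	m.q = 0x200000001ULL;	// now m.l == 0x1 and m.h == 0x2
 */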

struct msr_info {
	u32 msr_no;
	struct msr reg;
	struct msr *msrs;
	int err;
};

struct msr_regs_info {
	u32 *regs;
	int err;
};

struct saved_msr {
	bool valid;
	struct msr_info info;
};

struct saved_msrs {
	unsigned int num;
	struct saved_msr *array;
};

static inline unsigned long long native_read_tscp(unsigned int *aux)
{
	unsigned long low, high;
	asm volatile(".byte 0x0f,0x01,0xf9"
		     : "=a" (low), "=d" (high), "=c" (*aux));
	return low | ((u64)high << 32);
}

/*
 * Both i386 and x86_64 return the 64-bit value in edx:eax, but gcc's "A"
 * constraint has different meanings. For i386, "A" means exactly
 * edx:eax, while for x86_64 it doesn't mean rdx:rax or edx:eax. Instead,
 * it means rax *or* rdx.
 */
#ifdef CONFIG_X86_64
/* Using 64-bit values saves one instruction clearing the high half of low */
#define DECLARE_ARGS(val, low, high)	unsigned long low, high
#define EAX_EDX_VAL(val, low, high)	((low) | (high) << 32)
#define EAX_EDX_RET(val, low, high)	"=a" (low), "=d" (high)
#else
#define DECLARE_ARGS(val, low, high)	unsigned long long val
#define EAX_EDX_VAL(val, low, high)	(val)
#define EAX_EDX_RET(val, low, high)	"=A" (val)
#endif
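
/*
 * Sketch of how the helpers above combine (illustrative, assuming a 64-bit
 * build): rdmsr/rdtsc/rdpmc return the low half in %eax and the high half
 * in %edx, so
 *
 *	DECLARE_ARGS(val, low, high);	declares: unsigned long low, high;
 *	EAX_EDX_RET(val, low, high)	emits:    "=a" (low), "=d" (high)
 *	EAX_EDX_VAL(val, low, high)	yields:   ((low) | (high) << 32)
 *
 * On 32-bit builds a single unsigned long long and the "A" constraint
 * cover both registers instead.
 */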

#ifdef CONFIG_TRACEPOINTS
/*
 * Be very careful with includes. This header is prone to include loops.
 */
#include <asm/atomic.h>
#include <linux/tracepoint-defs.h>

extern struct tracepoint __tracepoint_read_msr;
extern struct tracepoint __tracepoint_write_msr;
extern struct tracepoint __tracepoint_rdpmc;
#define msr_tracepoint_active(t) static_key_false(&(t).key)
extern void do_trace_write_msr(unsigned msr, u64 val, int failed);
extern void do_trace_read_msr(unsigned msr, u64 val, int failed);
extern void do_trace_rdpmc(unsigned msr, u64 val, int failed);
#else
#define msr_tracepoint_active(t) false
static inline void do_trace_write_msr(unsigned msr, u64 val, int failed) {}
static inline void do_trace_read_msr(unsigned msr, u64 val, int failed) {}
static inline void do_trace_rdpmc(unsigned msr, u64 val, int failed) {}
#endif

static inline unsigned long long native_read_msr(unsigned int msr)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("rdmsr" : EAX_EDX_RET(val, low, high) : "c" (msr));
	if (msr_tracepoint_active(__tracepoint_read_msr))
		do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), 0);
	return EAX_EDX_VAL(val, low, high);
}

static inline unsigned long long native_read_msr_safe(unsigned int msr,
						      int *err)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("2: rdmsr ; xor %[err],%[err]\n"
		     "1:\n\t"
		     ".section .fixup,\"ax\"\n\t"
		     "3: mov %[fault],%[err] ; jmp 1b\n\t"
		     ".previous\n\t"
		     _ASM_EXTABLE(2b, 3b)
		     : [err] "=r" (*err), EAX_EDX_RET(val, low, high)
		     : "c" (msr), [fault] "i" (-EIO));
	if (msr_tracepoint_active(__tracepoint_read_msr))
		do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), *err);
	return EAX_EDX_VAL(val, low, high);
}

static inline void native_write_msr(unsigned int msr,
				    unsigned low, unsigned high)
{
	asm volatile("wrmsr" : : "c" (msr), "a"(low), "d" (high) : "memory");
	if (msr_tracepoint_active(__tracepoint_write_msr))
		do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
}

/* Can be uninlined because referenced by paravirt */
notrace static inline int native_write_msr_safe(unsigned int msr,
						unsigned low, unsigned high)
{
	int err;
	asm volatile("2: wrmsr ; xor %[err],%[err]\n"
		     "1:\n\t"
		     ".section .fixup,\"ax\"\n\t"
		     "3: mov %[fault],%[err] ; jmp 1b\n\t"
		     ".previous\n\t"
		     _ASM_EXTABLE(2b, 3b)
		     : [err] "=a" (err)
		     : "c" (msr), "0" (low), "d" (high),
		       [fault] "i" (-EIO)
		     : "memory");
	if (msr_tracepoint_active(__tracepoint_write_msr))
		do_trace_write_msr(msr, ((u64)high << 32 | low), err);
	return err;
}

extern int rdmsr_safe_regs(u32 regs[8]);
extern int wrmsr_safe_regs(u32 regs[8]);

/**
 * rdtsc() - returns the current TSC without ordering constraints
 *
 * rdtsc() returns the result of RDTSC as a 64-bit integer.  The
 * only ordering constraint it supplies is the ordering implied by
 * "asm volatile": it will put the RDTSC in the place you expect.  The
 * CPU can and will speculatively execute that RDTSC, though, so the
 * results can be non-monotonic if compared on different CPUs.
 */
static __always_inline unsigned long long rdtsc(void)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("rdtsc" : EAX_EDX_RET(val, low, high));

	return EAX_EDX_VAL(val, low, high);
}
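
/*
 * Hypothetical call site (illustrative, not part of the original header)
 * showing why the caveat above matters: raw rdtsc() deltas are only
 * meaningful when both reads happen on the same CPU.
 *
 *	u64 t0, t1;
 *
 *	t0 = rdtsc();
 *	some_short_operation();		// stand-in for the measured code
 *	t1 = rdtsc();			// t1 - t0: rough same-CPU cycle count
 */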

/**
 * rdtsc_ordered() - read the current TSC in program order
 *
 * rdtsc_ordered() returns the result of RDTSC as a 64-bit integer.
 * It is ordered like a load to a global in-memory counter.  It should
 * be impossible to observe non-monotonic rdtsc() behavior
 * across multiple CPUs as long as the TSC is synced.
 */
static __always_inline unsigned long long rdtsc_ordered(void)
{
	/*
	 * The RDTSC instruction is not ordered relative to memory
	 * access.  The Intel SDM and the AMD APM are both vague on this
	 * point, but empirically an RDTSC instruction can be
	 * speculatively executed before prior loads.  An RDTSC
	 * immediately after an appropriate barrier appears to be
	 * ordered as a normal load, that is, it provides the same
	 * ordering guarantees as reading from a global memory location
	 * that some other imaginary CPU is updating continuously with a
	 * time stamp.
	 */
	alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC,
		      "lfence", X86_FEATURE_LFENCE_RDTSC);
	return rdtsc();
}

/* Deprecated, keep it for a cycle for easier merging: */
#define rdtscll(now)	do { (now) = rdtsc_ordered(); } while (0)

static inline unsigned long long native_read_pmc(int counter)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("rdpmc" : EAX_EDX_RET(val, low, high) : "c" (counter));
	if (msr_tracepoint_active(__tracepoint_rdpmc))
		do_trace_rdpmc(counter, EAX_EDX_VAL(val, low, high), 0);
	return EAX_EDX_VAL(val, low, high);
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#include <linux/errno.h>
/*
 * Access to machine-specific registers (available on 586 and better only).
 * Note: the rd* operations modify the parameters directly (without using
 * pointer indirection); this allows gcc to optimize better.
 */

#define rdmsr(msr, low, high)					\
do {								\
	u64 __val = native_read_msr((msr));			\
	(void)((low) = (u32)__val);				\
	(void)((high) = (u32)(__val >> 32));			\
} while (0)

static inline void wrmsr(unsigned msr, unsigned low, unsigned high)
{
	native_write_msr(msr, low, high);
}
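
/*
 * Illustrative usage (hypothetical call site, not part of the original
 * header): the rd* macros assign to their arguments directly, so no '&'
 * is needed.
 *
 *	u32 lo, hi;
 *
 *	rdmsr(MSR_IA32_TSC, lo, hi);	// lo/hi written by the macro
 *	wrmsr(MSR_IA32_TSC, lo, hi);
 */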
228
Joe Perchesabb0ade2008-03-23 01:02:51 -0700229#define rdmsrl(msr, val) \
230 ((val) = native_read_msr((msr)))
Thomas Gleixnerbe7baf82007-10-23 22:37:24 +0200231
Andy Lutomirski47edb652015-07-23 12:14:40 -0700232static inline void wrmsrl(unsigned msr, u64 val)
233{
Borislav Petkov679bcea2015-11-23 11:12:26 +0100234 native_write_msr(msr, (u32)(val & 0xffffffffULL), (u32)(val >> 32));
Andy Lutomirski47edb652015-07-23 12:14:40 -0700235}
Thomas Gleixnerbe7baf82007-10-23 22:37:24 +0200236
237/* wrmsr with exception handling */
Glauber de Oliveira Costac9dcda52008-01-30 13:31:07 +0100238static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
Thomas Gleixnerbe7baf82007-10-23 22:37:24 +0200239{
Glauber de Oliveira Costac9dcda52008-01-30 13:31:07 +0100240 return native_write_msr_safe(msr, low, high);
Thomas Gleixnerbe7baf82007-10-23 22:37:24 +0200241}
242
H. Peter Anvin060feb62012-04-19 17:07:34 -0700243/* rdmsr with exception handling */
Borislav Petkov1423bed2013-03-04 21:16:19 +0100244#define rdmsr_safe(msr, low, high) \
Joe Perchesabb0ade2008-03-23 01:02:51 -0700245({ \
246 int __err; \
247 u64 __val = native_read_msr_safe((msr), &__err); \
Borislav Petkov1423bed2013-03-04 21:16:19 +0100248 (*low) = (u32)__val; \
249 (*high) = (u32)(__val >> 32); \
Joe Perchesabb0ade2008-03-23 01:02:51 -0700250 __err; \
251})

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
	int err;

	*p = native_read_msr_safe(msr, &err);
	return err;
}
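
/*
 * Illustrative error-handling pattern for the _safe variants (hypothetical
 * call site): they return non-zero instead of faulting when the MSR does
 * not exist on the running CPU.
 *
 *	u64 val;
 *
 *	if (rdmsrl_safe(msr_no, &val))
 *		return -EIO;
 */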

#define rdpmc(counter, low, high)			\
do {							\
	u64 _l = native_read_pmc((counter));		\
	(low)  = (u32)_l;				\
	(high) = (u32)(_l >> 32);			\
} while (0)

#define rdpmcl(counter, val) ((val) = native_read_pmc(counter))

#endif	/* !CONFIG_PARAVIRT */

/*
 * 64-bit version of wrmsr_safe():
 */
static inline int wrmsrl_safe(u32 msr, u64 val)
{
	return wrmsr_safe(msr, (u32)val, (u32)(val >> 32));
}

#define write_tsc(low, high) wrmsr(MSR_IA32_TSC, (low), (high))

#define write_rdtscp_aux(val) wrmsr(MSR_TSC_AUX, (val), 0)

struct msr *msrs_alloc(void);
void msrs_free(struct msr *msrs);
int msr_set_bit(u32 msr, u8 bit);
int msr_clear_bit(u32 msr, u8 bit);

#ifdef CONFIG_SMP
int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
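
/*
 * Illustrative only (hypothetical call site): in the SMP build the
 * *_on_cpu() helpers perform the access on the given CPU via a cross-CPU
 * function call; the UP stubs below simply ignore the cpu argument.
 *
 *	u64 val;
 *
 *	if (rdmsrl_safe_on_cpu(cpu, msr_no, &val))
 *		pr_warn("CPU%u: MSR %#x unreadable\n", cpu, msr_no);
 */
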
#else  /*  CONFIG_SMP  */
static inline int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	rdmsr(msr_no, *l, *h);
	return 0;
}
static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	wrmsr(msr_no, l, h);
	return 0;
}
static inline int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
	rdmsrl(msr_no, *q);
	return 0;
}
static inline int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
	wrmsrl(msr_no, q);
	return 0;
}
static inline void rdmsr_on_cpus(const struct cpumask *m, u32 msr_no,
				 struct msr *msrs)
{
	rdmsr_on_cpu(0, msr_no, &(msrs[0].l), &(msrs[0].h));
}
static inline void wrmsr_on_cpus(const struct cpumask *m, u32 msr_no,
				 struct msr *msrs)
{
	wrmsr_on_cpu(0, msr_no, msrs[0].l, msrs[0].h);
}
static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no,
				    u32 *l, u32 *h)
{
	return rdmsr_safe(msr_no, l, h);
}
static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	return wrmsr_safe(msr_no, l, h);
}
static inline int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
	return rdmsrl_safe(msr_no, q);
}
static inline int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
	return wrmsrl_safe(msr_no, q);
}
static inline int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
	return rdmsr_safe_regs(regs);
}
static inline int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
	return wrmsr_safe_regs(regs);
}
#endif  /* CONFIG_SMP */
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_MSR_H */