Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1 | #ifndef X86_64_MSR_H |
| 2 | #define X86_64_MSR_H 1 |
| 3 | |
H. Peter Anvin | 4bc5aa9 | 2007-05-02 19:27:12 +0200 | [diff] [blame] | 4 | #include <asm/msr-index.h> |
| 5 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6 | #ifndef __ASSEMBLY__ |
Rudolf Marek | 4e9baad | 2007-05-08 17:22:01 +0200 | [diff] [blame] | 7 | #include <linux/errno.h> |
/*
 * Access to machine-specific registers (MSRs).  The underlying
 * instructions exist on the 586 and better; every x86-64 CPU has them.
 * Note: the rd* operations modify the parameters directly (without using
 * pointer indirection); this allows gcc to optimize better.
 */
| 13 | |
/*
 * rdmsr(msr, val1, val2): read MSR 'msr'; the low 32 bits (EAX) land in
 * val1 and the high 32 bits (EDX) in val2.  The arguments are assigned
 * directly (no pointer indirection), so both must be lvalues.
 * A faulting RDMSR is NOT caught here -- see rdmsr_safe() below.
 */
#define rdmsr(msr,val1,val2) \
       __asm__ __volatile__("rdmsr" \
			    : "=a" (val1), "=d" (val2) \
			    : "c" (msr))


/*
 * rdmsrl(msr, val): read MSR 'msr' as one 64-bit value into 'val'.
 * a__/b__ are unsigned long (64-bit on x86-64), so the EDX half can be
 * shifted left by 32 without truncation before being OR-ed in.
 */
#define rdmsrl(msr,val) do { unsigned long a__,b__; \
       __asm__ __volatile__("rdmsr" \
			    : "=a" (a__), "=d" (b__) \
			    : "c" (msr)); \
       val = a__ | (b__<<32); \
} while(0)

/*
 * wrmsr(msr, val1, val2): write val1 (low 32 bits, EAX) and val2
 * (high 32 bits, EDX) to MSR 'msr'.  No exception handling -- a bad
 * MSR write faults; use wrmsr_safe() when that can happen.
 */
#define wrmsr(msr,val1,val2) \
     __asm__ __volatile__("wrmsr" \
			  : /* no outputs */ \
			  : "c" (msr), "a" (val1), "d" (val2))

/* wrmsrl(msr, val): 64-bit convenience wrapper splitting 'val' for wrmsr(). */
#define wrmsrl(msr,val) wrmsr(msr,(__u32)((__u64)(val)),((__u64)(val))>>32)
| 33 | |
/* wrmsr with exception handling */
/*
 * wrmsr_safe(msr, a, b): like wrmsr(), but a faulting WRMSR is caught
 * via the kernel exception table.  Evaluates to 0 on success and
 * -EFAULT if the write faulted: the __ex_table entry maps the wrmsr at
 * local label 2 to the fixup at label 3, which loads -EFAULT into the
 * result register and jumps back to label 1 (just after the xorl that
 * would have cleared the return value on success).
 */
#define wrmsr_safe(msr,a,b) ({ int ret__;			\
	asm volatile("2: wrmsr ; xorl %0,%0\n"			\
		     "1:\n\t"					\
		     ".section .fixup,\"ax\"\n\t"		\
		     "3: movl %4,%0 ; jmp 1b\n\t"		\
		     ".previous\n\t"				\
		     ".section __ex_table,\"a\"\n"		\
		     " .align 8\n\t"				\
		     " .quad 2b,3b\n\t"				\
		     ".previous"				\
		     : "=a" (ret__)				\
		     : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT)); \
	ret__; })

/* checking_wrmsrl(msr, val): 64-bit wrapper around wrmsr_safe(). */
#define checking_wrmsrl(msr,val) wrmsr_safe(msr,(u32)(val),(u32)((val)>>32))
| 50 | |
/*
 * rdmsr_safe(msr, a, b): read MSR 'msr' with exception handling; the
 * low/high 32-bit halves are stored through pointers a and b.
 * Evaluates to 0 on success and -EIO if the RDMSR faulted (the fixup
 * at label 3 loads -EIO and jumps past the instruction to label 2).
 * The "=&bDS" constraint puts the error code in ebx, edi or esi
 * (early-clobbered), keeping it out of eax/ecx/edx which the rdmsr
 * instruction itself uses.
 */
#define rdmsr_safe(msr,a,b) \
	({ int ret__;						\
	  asm volatile ("1: rdmsr\n"				\
			"2:\n"					\
			".section .fixup,\"ax\"\n"		\
			"3: movl %4,%0\n"			\
			" jmp 2b\n"				\
			".previous\n"				\
			".section __ex_table,\"a\"\n"		\
			" .align 8\n"				\
			" .quad 1b,3b\n"			\
			".previous":"=&bDS" (ret__), "=a"(*(a)), "=d"(*(b))\
			:"c"(msr), "i"(-EIO), "0"(0)); \
	  ret__; })
| 65 | |
/* rdtsc(low, high): read the time stamp counter into low (EAX) / high (EDX). */
#define rdtsc(low,high) \
     __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))

/* rdtscl(low): read only the low 32 bits of the TSC; edx is clobbered. */
#define rdtscl(low) \
     __asm__ __volatile__ ("rdtsc" : "=a" (low) : : "edx")

/*
 * rdtscp(low, high, aux): RDTSCP -- TSC in low/high plus the TSC_AUX
 * value in aux (ECX).  Emitted as raw opcode bytes (0f 01 f9),
 * presumably for assemblers that do not yet know the mnemonic.
 */
#define rdtscp(low,high,aux) \
	asm volatile (".byte 0x0f,0x01,0xf9" : "=a" (low), "=d" (high), "=c" (aux))

/* rdtscll(val): read the TSC as a single 64-bit value into val. */
#define rdtscll(val) do { \
     unsigned int __a,__d; \
     asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \
     (val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \
} while(0)

/* rdtscpll(val, aux): 64-bit RDTSCP; TSC into val, TSC_AUX into aux. */
#define rdtscpll(val, aux) do { \
	unsigned long __a, __d; \
	asm volatile (".byte 0x0f,0x01,0xf9" : "=a" (__a), "=d" (__d), "=c" (aux)); \
	(val) = (__d << 32) | __a; \
} while (0)

/* write_tsc(val1, val2): write the TSC via MSR 0x10 (low/high halves). */
#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)

/* write_rdtscp_aux(val): set the TSC_AUX MSR (0xc0000103) returned by rdtscp. */
#define write_rdtscp_aux(val) wrmsr(0xc0000103, val, 0)

/*
 * rdpmc(counter, low, high): read performance-monitoring counter
 * 'counter' (selected via ECX) into low (EAX) / high (EDX).
 */
#define rdpmc(counter,low,high) \
     __asm__ __volatile__("rdpmc" \
			  : "=a" (low), "=d" (high) \
			  : "c" (counter))
| 95 | |
/*
 * cpuid(): execute CPUID with leaf 'op' in EAX and store all four
 * result registers through the given pointers.  The asm is not
 * volatile: for a fixed 'op' the result is treated as pure, so gcc
 * may merge repeated calls.
 */
static inline void cpuid(int op, unsigned int *eax, unsigned int *ebx,
			 unsigned int *ecx, unsigned int *edx)
{
	__asm__("cpuid"
		: "=a" (*eax),
		  "=b" (*ebx),
		  "=c" (*ecx),
		  "=d" (*edx)
		: "0" (op));
}
| 106 | |
/* Some CPUID calls want 'count' to be placed in ecx */
/*
 * cpuid_count(): like cpuid(), but additionally loads 'count' (the
 * sub-leaf index) into ECX before executing CPUID.
 * NOTE(review): takes int* where cpuid() takes unsigned int* -- the
 * mismatch looks accidental, but unifying it would change the
 * prototype callers see; confirm before touching.
 */
static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
		int *edx)
{
	__asm__("cpuid"
		: "=a" (*eax),
		  "=b" (*ebx),
		  "=c" (*ecx),
		  "=d" (*edx)
		: "0" (op), "c" (count));
}
| 118 | |
| 119 | /* |
| 120 | * CPUID functions returning a single datum |
| 121 | */ |
/*
 * cpuid_eax(): run CPUID leaf 'op' and return only the EAX result;
 * the other three result registers are declared as clobbers.
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
	unsigned int eax;

	__asm__("cpuid"
		: "=a" (eax)
		: "0" (op)
		: "bx", "cx", "dx");
	return eax;
}
/*
 * cpuid_ebx(): run CPUID leaf 'op' and return only the EBX result.
 * EAX must still be an output (CPUID overwrites it); cx/dx are clobbers.
 */
static inline unsigned int cpuid_ebx(unsigned int op)
{
	unsigned int eax, ebx;

	__asm__("cpuid"
		: "=a" (eax), "=b" (ebx)
		: "0" (op)
		: "cx", "dx" );
	return ebx;
}
/*
 * cpuid_ecx(): run CPUID leaf 'op' and return only the ECX result.
 * EAX must still be an output (CPUID overwrites it); bx/dx are clobbers.
 */
static inline unsigned int cpuid_ecx(unsigned int op)
{
	unsigned int eax, ecx;

	__asm__("cpuid"
		: "=a" (eax), "=c" (ecx)
		: "0" (op)
		: "bx", "dx" );
	return ecx;
}
/*
 * cpuid_edx(): run CPUID leaf 'op' and return only the EDX result.
 * EAX must still be an output (CPUID overwrites it); bx/cx are clobbers.
 */
static inline unsigned int cpuid_edx(unsigned int op)
{
	unsigned int eax, edx;

	__asm__("cpuid"
		: "=a" (eax), "=d" (edx)
		: "0" (op)
		: "bx", "cx");
	return edx;
}
| 162 | |
#ifdef CONFIG_SMP
/*
 * Cross-CPU MSR access: perform the rd/wrmsr on a specific CPU.
 * The _safe variants return 0 on success or a negative error code
 * (see rdmsr_safe/wrmsr_safe above).  Defined out of line elsewhere
 * for the SMP case.
 */
void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
#else /* CONFIG_SMP */
/*
 * UP fallbacks: with a single CPU the 'cpu' argument is ignored and
 * the access runs directly on the current (only) processor.
 */
static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	rdmsr(msr_no, *l, *h);
}
static inline void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	wrmsr(msr_no, l, h);
}
static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	return rdmsr_safe(msr_no, l, h);
}
static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	return wrmsr_safe(msr_no, l, h);
}
#endif /* CONFIG_SMP */
| 186 | #endif /* __ASSEMBLY__ */ |
| 187 | #endif /* X86_64_MSR_H */ |