Andres Salomon | 2272b0e | 2007-03-06 01:42:05 -0800 | [diff] [blame] | 1 | /* |
Thomas Gleixner | 2f0798a | 2007-10-12 23:04:23 +0200 | [diff] [blame] | 2 | * x86 TSC related functions |
Andres Salomon | 2272b0e | 2007-03-06 01:42:05 -0800 | [diff] [blame] | 3 | */ |
Thomas Gleixner | 2f0798a | 2007-10-12 23:04:23 +0200 | [diff] [blame] | 4 | #ifndef _ASM_X86_TSC_H |
| 5 | #define _ASM_X86_TSC_H |
Andres Salomon | 2272b0e | 2007-03-06 01:42:05 -0800 | [diff] [blame] | 6 | |
| 7 | #include <asm/processor.h> |
| 8 | |
Thomas Gleixner | 2f0798a | 2007-10-12 23:04:23 +0200 | [diff] [blame] | 9 | #define NS_SCALE 10 /* 2^10, carefully chosen */ |
#define US_SCALE 32 /* 2^32, arbitrarily chosen */
| 11 | |
Andres Salomon | 2272b0e | 2007-03-06 01:42:05 -0800 | [diff] [blame] | 12 | /* |
| 13 | * Standard way to access the cycle counter. |
| 14 | */ |
| 15 | typedef unsigned long long cycles_t; |
| 16 | |
| 17 | extern unsigned int cpu_khz; |
| 18 | extern unsigned int tsc_khz; |
Glauber de Oliveira Costa | 73018a6 | 2008-01-30 13:31:26 +0100 | [diff] [blame^] | 19 | /* flag for disabling the tsc */ |
| 20 | extern int tsc_disable; |
| 21 | |
| 22 | extern void disable_TSC(void); |
Andres Salomon | 2272b0e | 2007-03-06 01:42:05 -0800 | [diff] [blame] | 23 | |
| 24 | static inline cycles_t get_cycles(void) |
| 25 | { |
| 26 | unsigned long long ret = 0; |
| 27 | |
| 28 | #ifndef CONFIG_X86_TSC |
| 29 | if (!cpu_has_tsc) |
| 30 | return 0; |
| 31 | #endif |
| 32 | |
| 33 | #if defined(CONFIG_X86_GENERIC) || defined(CONFIG_X86_TSC) |
| 34 | rdtscll(ret); |
| 35 | #endif |
| 36 | return ret; |
| 37 | } |
| 38 | |
/*
 * Like get_cycles, but make sure the CPU is synchronized.
 *
 * Returns the TSC value read via RDTSCP when the CPU supports it,
 * or 0 after serializing with CPUID — in the 0 case the caller is
 * expected to perform the actual TSC read itself (see get_cycles_sync
 * and vget_cycles_sync below).
 */
static __always_inline cycles_t __get_cycles_sync(void)
{
	unsigned long long ret;
	unsigned eax, edx;

	/*
	 * Use RDTSCP if possible; it is guaranteed to be synchronous
	 * and doesn't cause a VMEXIT on Hypervisors
	 */
	/* 0x0f,0x01,0xf9 is the RDTSCP opcode; patched in at boot when
	 * X86_FEATURE_RDTSCP is set, otherwise left as a 3-byte NOP
	 * (which leaves eax/edx as the 0 inputs below). */
	alternative_io(ASM_NOP3, ".byte 0x0f,0x01,0xf9", X86_FEATURE_RDTSCP,
		       ASM_OUTPUT2("=a" (eax), "=d" (edx)),
		       "a" (0U), "d" (0U) : "ecx", "memory");
	ret = (((unsigned long long)edx) << 32) | ((unsigned long long)eax);
	/* Non-zero means RDTSCP actually ran; hand the value back.
	 * NOTE(review): a genuine TSC reading of 0 would be treated as
	 * "no RDTSCP" and fall through — presumably acceptable since it
	 * only occurs momentarily at counter start. */
	if (ret)
		return ret;

	/*
	 * Don't do an additional sync on CPUs where we know
	 * RDTSC is already synchronous:
	 */
	/* CPUID (leaf 1) is used purely as a serializing instruction;
	 * it is patched out (NOP) when X86_FEATURE_SYNC_RDTSC is set. */
	alternative_io("cpuid", ASM_NOP2, X86_FEATURE_SYNC_RDTSC,
		       "=a" (eax), "0" (1) : "ebx","ecx","edx","memory");

	/* 0 signals the caller to do the TSC read after this barrier */
	return 0;
}
| 65 | |
| 66 | static __always_inline cycles_t get_cycles_sync(void) |
| 67 | { |
| 68 | unsigned long long ret; |
| 69 | ret = __get_cycles_sync(); |
| 70 | if (!ret) |
| 71 | rdtscll(ret); |
Andres Salomon | 2272b0e | 2007-03-06 01:42:05 -0800 | [diff] [blame] | 72 | return ret; |
| 73 | } |
| 74 | |
Glauber de Oliveira Costa | 4e87173 | 2008-01-30 13:31:03 +0100 | [diff] [blame] | 75 | #ifdef CONFIG_PARAVIRT |
| 76 | /* |
| 77 | * For paravirt guests, some functionalities are executed through function |
| 78 | * pointers in the various pvops structures. |
| 79 | * These function pointers exist inside the kernel and can not |
| 80 | * be accessed by user space. To avoid this, we make a copy of the |
| 81 | * get_cycles_sync (called in kernel) but force the use of native_read_tsc. |
| 82 | * Ideally, the guest should set up it's own clock and vread |
| 83 | */ |
| 84 | static __always_inline long long vget_cycles_sync(void) |
| 85 | { |
| 86 | unsigned long long ret; |
| 87 | ret = __get_cycles_sync(); |
| 88 | if (!ret) |
| 89 | ret = native_read_tsc(); |
| 90 | return ret; |
| 91 | } |
| 92 | #else |
| 93 | # define vget_cycles_sync() get_cycles_sync() |
| 94 | #endif |
| 95 | |
Andres Salomon | 2272b0e | 2007-03-06 01:42:05 -0800 | [diff] [blame] | 96 | extern void tsc_init(void); |
john stultz | 5a90cf2 | 2007-05-02 19:27:08 +0200 | [diff] [blame] | 97 | extern void mark_tsc_unstable(char *reason); |
Andres Salomon | 2272b0e | 2007-03-06 01:42:05 -0800 | [diff] [blame] | 98 | extern int unsynchronized_tsc(void); |
| 99 | extern void init_tsc_clocksource(void); |
Rusty Russell | d7e28ff | 2007-07-19 01:49:23 -0700 | [diff] [blame] | 100 | int check_tsc_unstable(void); |
Andres Salomon | 2272b0e | 2007-03-06 01:42:05 -0800 | [diff] [blame] | 101 | |
| 102 | /* |
| 103 | * Boot-time check whether the TSCs are synchronized across |
| 104 | * all CPUs/cores: |
| 105 | */ |
| 106 | extern void check_tsc_sync_source(int cpu); |
| 107 | extern void check_tsc_sync_target(void); |
| 108 | |
Thomas Gleixner | d371698 | 2007-10-12 23:04:06 +0200 | [diff] [blame] | 109 | extern void tsc_calibrate(void); |
Thomas Gleixner | 80ca9c9 | 2008-01-30 13:30:18 +0100 | [diff] [blame] | 110 | extern int notsc_setup(char *); |
Thomas Gleixner | d371698 | 2007-10-12 23:04:06 +0200 | [diff] [blame] | 111 | |
Andres Salomon | 2272b0e | 2007-03-06 01:42:05 -0800 | [diff] [blame] | 112 | #endif |