blob: 20ca9c4d46867c7cbdf7228951e818101350159b [file] [log] [blame]
H. Peter Anvin1965aae2008-10-22 22:26:29 -07001#ifndef _ASM_X86_TIMER_H
2#define _ASM_X86_TIMER_H
Linus Torvalds1da177e2005-04-16 15:20:36 -07003#include <linux/init.h>
Shaohua Lic3c433e2005-09-03 15:57:07 -07004#include <linux/pm.h>
Guillaume Chazarain53d517c2008-01-30 13:30:06 +01005#include <linux/percpu.h>
Ingo Molnar8e6dafd2009-02-23 00:34:39 +01006#include <linux/interrupt.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -07007
Linus Torvalds1da177e2005-04-16 15:20:36 -07008#define TICK_SIZE (tick_nsec / 1000)
Zachary Amsden6cb9a832007-03-05 00:30:35 -08009
Zachary Amsden6cb9a832007-03-05 00:30:35 -080010unsigned long long native_sched_clock(void);
Alok Katariae93ef942008-07-01 11:43:36 -070011unsigned long native_calibrate_tsc(void);
Zachary Amsden6cb9a832007-03-05 00:30:35 -080012
Jaswinder Singhcc038492008-07-21 21:52:51 +053013#ifdef CONFIG_X86_32
Linus Torvalds1da177e2005-04-16 15:20:36 -070014extern int timer_ack;
Ingo Molnar8e6dafd2009-02-23 00:34:39 +010015extern irqreturn_t timer_interrupt(int irq, void *dev_id);
Jaswinder Singhcc038492008-07-21 21:52:51 +053016#endif /* CONFIG_X86_32 */
Stephen Rothwell25c1a412009-03-30 11:10:27 +110017extern int recalibrate_cpu_khz(void);
Jaswinder Singhcc038492008-07-21 21:52:51 +053018
19extern int no_timer_check;
Linus Torvalds1da177e2005-04-16 15:20:36 -070020
Zachary Amsden6cb9a832007-03-05 00:30:35 -080021#ifndef CONFIG_PARAVIRT
Alok Katariae93ef942008-07-01 11:43:36 -070022#define calibrate_tsc() native_calibrate_tsc()
Zachary Amsden6cb9a832007-03-05 00:30:35 -080023#endif
24
/* Accelerators for sched_clock()
 * convert from cycles(64bits) => nanoseconds (64bits)
 * basic equation:
 *		ns = cycles / (freq / ns_per_sec)
 *		ns = cycles * (ns_per_sec / freq)
 *		ns = cycles * (10^9 / (cpu_khz * 10^3))
 *		ns = cycles * (10^6 / cpu_khz)
 *
 *	Then we use scaling math (suggested by george@mvista.com) to get:
 *		ns = cycles * (10^6 * SC / cpu_khz) / SC
 *		ns = cycles * cyc2ns_scale / SC
 *
 *	And since SC is a constant power of two, we can convert the div
 *	into a shift.
 *
 *  We can use khz divisor instead of mhz to keep a better precision, since
 *  cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
 *  (mathieu.desnoyers@polymtl.ca)
 *
 *			-johnstul@us.ibm.com "math is hard, lets go shopping!"
 */
Guillaume Chazarain53d517c2008-01-30 13:30:06 +010046
47DECLARE_PER_CPU(unsigned long, cyc2ns);
Peter Zijlstra84599f82009-06-16 12:34:17 -070048DECLARE_PER_CPU(unsigned long long, cyc2ns_offset);
Jeremy Fitzhardinge688340e2007-07-17 18:37:04 -070049
50#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
51
Guillaume Chazarain53d517c2008-01-30 13:30:06 +010052static inline unsigned long long __cycles_2_ns(unsigned long long cyc)
Jeremy Fitzhardinge688340e2007-07-17 18:37:04 -070053{
Peter Zijlstra84599f82009-06-16 12:34:17 -070054 int cpu = smp_processor_id();
55 unsigned long long ns = per_cpu(cyc2ns_offset, cpu);
56 ns += cyc * per_cpu(cyc2ns, cpu) >> CYC2NS_SCALE_FACTOR;
57 return ns;
Jeremy Fitzhardinge688340e2007-07-17 18:37:04 -070058}
59
/*
 * Convert a cycle count to nanoseconds.
 *
 * Interrupts are disabled across the conversion so the per-CPU
 * scale/offset pair read by __cycles_2_ns() belongs to a single,
 * stable CPU.
 */
static inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
	unsigned long irq_flags;
	unsigned long long ret;

	local_irq_save(irq_flags);
	ret = __cycles_2_ns(cyc);
	local_irq_restore(irq_flags);
	return ret;
}
Jeremy Fitzhardinge688340e2007-07-17 18:37:04 -070071
H. Peter Anvin1965aae2008-10-22 22:26:29 -070072#endif /* _ASM_X86_TIMER_H */