blob: 10a78c037910e80f74263636997dc1bdefc819a8 [file] [log] [blame]
H. Peter Anvin1965aae2008-10-22 22:26:29 -07001#ifndef _ASM_X86_TIMER_H
2#define _ASM_X86_TIMER_H
Linus Torvalds1da177e2005-04-16 15:20:36 -07003#include <linux/init.h>
Shaohua Lic3c433e2005-09-03 15:57:07 -07004#include <linux/pm.h>
Guillaume Chazarain53d517c2008-01-30 13:30:06 +01005#include <linux/percpu.h>
Ingo Molnar8e6dafd2009-02-23 00:34:39 +01006#include <linux/interrupt.h>
Peter Zijlstra5dd12c212013-11-29 18:04:39 +01007#include <linux/math64.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -07008
Linus Torvalds1da177e2005-04-16 15:20:36 -07009#define TICK_SIZE (tick_nsec / 1000)
Zachary Amsden6cb9a832007-03-05 00:30:35 -080010
Zachary Amsden6cb9a832007-03-05 00:30:35 -080011unsigned long long native_sched_clock(void);
Stephen Rothwell25c1a412009-03-30 11:10:27 +110012extern int recalibrate_cpu_khz(void);
Jaswinder Singhcc038492008-07-21 21:52:51 +053013
14extern int no_timer_check;
Linus Torvalds1da177e2005-04-16 15:20:36 -070015
/* Accelerators for sched_clock()
 * convert from cycles(64bits) => nanoseconds (64bits)
 * basic equation:
 * ns = cycles / (freq / ns_per_sec)
 * ns = cycles * (ns_per_sec / freq)
 * ns = cycles * (10^9 / (cpu_khz * 10^3))
 * ns = cycles * (10^6 / cpu_khz)
 *
 * Then we use scaling math (suggested by george@mvista.com) to get:
 * ns = cycles * (10^6 * SC / cpu_khz) / SC
 * ns = cycles * cyc2ns_scale / SC
 *
 * And since SC is a constant power of two, we can convert the div
 * into a shift.
 *
 * We can use khz divisor instead of mhz to keep a better precision, since
 * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
 * (mathieu.desnoyers@polymtl.ca)
 *
 * -johnstul@us.ibm.com "math is hard, lets go shopping!"
 *
 * In:
 *
 * ns = cycles * cyc2ns_scale / SC
 *
 * Although we may still have enough bits to store the value of ns,
 * in some cases, we may not have enough bits to store cycles * cyc2ns_scale,
 * leading to an incorrect result.
 *
 * To avoid this, we can decompose 'cycles' into quotient and remainder
 * of division by SC. Then,
 *
 * ns = (quot * SC + rem) * cyc2ns_scale / SC
 *    = quot * cyc2ns_scale + (rem * cyc2ns_scale) / SC
 *
 * - sqazi@google.com
 */
Guillaume Chazarain53d517c2008-01-30 13:30:06 +010053
54DECLARE_PER_CPU(unsigned long, cyc2ns);
Peter Zijlstra84599f82009-06-16 12:34:17 -070055DECLARE_PER_CPU(unsigned long long, cyc2ns_offset);
Jeremy Fitzhardinge688340e2007-07-17 18:37:04 -070056
57#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
58
Guillaume Chazarain53d517c2008-01-30 13:30:06 +010059static inline unsigned long long __cycles_2_ns(unsigned long long cyc)
Jeremy Fitzhardinge688340e2007-07-17 18:37:04 -070060{
Peter Zijlstra5dd12c212013-11-29 18:04:39 +010061 unsigned long long ns = this_cpu_read(cyc2ns_offset);
62 ns += mul_u64_u32_shr(cyc, this_cpu_read(cyc2ns), CYC2NS_SCALE_FACTOR);
Peter Zijlstra84599f82009-06-16 12:34:17 -070063 return ns;
Jeremy Fitzhardinge688340e2007-07-17 18:37:04 -070064}
65
/*
 * Interrupt-safe wrapper around __cycles_2_ns(): disables local
 * interrupts so the per-cpu cyc2ns/cyc2ns_offset pair cannot be
 * updated underneath the conversion.
 */
static inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
	unsigned long irq_flags;
	unsigned long long ret;

	local_irq_save(irq_flags);
	ret = __cycles_2_ns(cyc);
	local_irq_restore(irq_flags);

	return ret;
}
Jeremy Fitzhardinge688340e2007-07-17 18:37:04 -070077
H. Peter Anvin1965aae2008-10-22 22:26:29 -070078#endif /* _ASM_X86_TIMER_H */