/* linux/include/linux/clocksource.h
 *
 * This file contains the structure definitions for clocksources.
 *
 * If you are not a clocksource, or timekeeping code, you should
 * not be including this file!
 */
#ifndef _LINUX_CLOCKSOURCE_H
#define _LINUX_CLOCKSOURCE_H

#include <linux/types.h>
#include <linux/timex.h>
#include <linux/time.h>
#include <linux/list.h>
#include <linux/cache.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <asm/div64.h>
#include <asm/io.h>

/* clocksource cycle base type */
typedef u64 cycle_t;
struct clocksource;

/**
 * struct cyclecounter - hardware abstraction for a free running counter
 *	Provides completely state-free accessors to the underlying hardware.
 *	Depending on which hardware it reads, the cycle counter may wrap
 *	around quickly. Locking rules (if necessary) have to be defined
 *	by the implementor and user of specific instances of this API.
 *
 * @read:		returns the current cycle value
 * @mask:		bitmask for two's complement
 *			subtraction of non 64 bit counters,
 *			see CLOCKSOURCE_MASK() helper macro
 * @mult:		cycle to nanosecond multiplier
 * @shift:		cycle to nanosecond divisor (power of two)
 */
struct cyclecounter {
	cycle_t (*read)(const struct cyclecounter *cc);
	cycle_t mask;
	u32 mult;
	u32 shift;
};
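
/*
 * Illustrative sketch (not part of this header): a minimal cyclecounter
 * backed by a hypothetical free running 32 bit MMIO register. The "foo_"
 * names are assumptions for the example; mult = 1000 with shift = 0
 * scales a 1 MHz counter to nanoseconds.
 *
 *	static cycle_t foo_cc_read(const struct cyclecounter *cc)
 *	{
 *		return (cycle_t)readl(foo_timer_base + FOO_COUNT);
 *	}
 *
 *	static struct cyclecounter foo_cc = {
 *		.read	= foo_cc_read,
 *		.mask	= CLOCKSOURCE_MASK(32),
 *		.mult	= 1000,
 *		.shift	= 0,
 *	};
 */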

/**
 * struct timecounter - layer above a &struct cyclecounter which counts nanoseconds
 *	Contains the state needed by timecounter_read() to detect
 *	cycle counter wrap around. Initialize with
 *	timecounter_init(). Also used to convert cycle counts into the
 *	corresponding nanosecond counts with timecounter_cyc2time(). Users
 *	of this code are responsible for initializing the underlying
 *	cycle counter hardware, locking issues and reading the time
 *	more often than the cycle counter wraps around. The nanosecond
 *	counter will only wrap around after ~585 years.
 *
 * @cc:			the cycle counter used by this instance
 * @cycle_last:		most recent cycle counter value seen by
 *			timecounter_read()
 * @nsec:		continuously increasing count
 */
struct timecounter {
	const struct cyclecounter *cc;
	cycle_t cycle_last;
	u64 nsec;
};

/**
 * cyclecounter_cyc2ns - converts cycle counter cycles to nanoseconds
 * @cc:		Pointer to cycle counter.
 * @cycles:	Cycles
 *
 * XXX - This could use some mult_lxl_ll() asm optimization. Same code
 * as in cyc2ns, but with unsigned result.
 */
static inline u64 cyclecounter_cyc2ns(const struct cyclecounter *cc,
				      cycle_t cycles)
{
	u64 ret = (u64)cycles;
	ret = (ret * cc->mult) >> cc->shift;
	return ret;
}
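
/*
 * Worked example: with the 1 MHz scaling above (mult = 1000, shift = 0),
 * 3 cycles convert to (3 * 1000) >> 0 = 3000 ns. A larger shift trades
 * 64 bit overflow headroom for conversion precision.
 */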

/**
 * timecounter_init - initialize a time counter
 * @tc:			Pointer to time counter which is to be initialized/reset
 * @cc:			A cycle counter, ready to be used.
 * @start_tstamp:	Arbitrary initial time stamp.
 *
 * After this call the current cycle register (roughly) corresponds to
 * the initial time stamp. Every call to timecounter_read() increments
 * the time stamp counter by the number of elapsed nanoseconds.
 */
extern void timecounter_init(struct timecounter *tc,
			     const struct cyclecounter *cc,
			     u64 start_tstamp);
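
/*
 * Usage sketch, assuming the hypothetical "foo_cc" cyclecounter from the
 * example above and the current wall clock time as the starting stamp:
 *
 *	struct timecounter tc;
 *	u64 now_ns;
 *
 *	timecounter_init(&tc, &foo_cc, ktime_to_ns(ktime_get_real()));
 *	...
 *	now_ns = timecounter_read(&tc);
 */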

/**
 * timecounter_read - return nanoseconds elapsed since timecounter_init()
 *                    plus the initial time stamp
 * @tc:		Pointer to time counter.
 *
 * In other words, keeps track of time since the same epoch as
 * the function which generated the initial time stamp.
 */
extern u64 timecounter_read(struct timecounter *tc);

/**
 * timecounter_cyc2time - convert a cycle counter to the same
 *                        time base as values returned by
 *                        timecounter_read()
 * @tc:			Pointer to time counter.
 * @cycle_tstamp:	a value returned by tc->cc->read()
 *
 * Cycle counts are converted correctly as long as they
 * fall into the interval [-1/2 max cycle count, +1/2 max cycle count],
 * with "max cycle count" == tc->cc->mask+1.
 *
 * This allows conversion of cycle counter values which were generated
 * in the past.
 */
extern u64 timecounter_cyc2time(struct timecounter *tc,
				cycle_t cycle_tstamp);
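
/*
 * For instance, a driver that latches a hardware cycle stamp in an
 * interrupt handler can convert it to the timecounter's time base later:
 *
 *	u64 ns = timecounter_cyc2time(&tc, hw_cycle_stamp);
 *
 * provided timecounter_read() is called more often than once per half
 * wrap of the underlying counter ("hw_cycle_stamp" is hypothetical).
 */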

/**
 * struct clocksource - hardware abstraction for a free running counter
 *	Provides mostly state-free accessors to the underlying hardware.
 *	This is the structure used for system time.
 *
 * @name:		ptr to clocksource name
 * @list:		list head for registration
 * @rating:		rating value for selection (higher is better)
 *			To avoid rating inflation the following
 *			list should give you a guide as to how
 *			to assign your clocksource a rating
 *			1-99: Unfit for real use
 *				Only available for bootup and testing purposes.
 *			100-199: Base level usability.
 *				Functional for real use, but not desired.
 *			200-299: Good.
 *				A correct and usable clocksource.
 *			300-399: Desired.
 *				A reasonably fast and accurate clocksource.
 *			400-499: Perfect
 *				The ideal clocksource. A must-use where
 *				available.
 * @read:		returns a cycle value, passes clocksource as argument
 * @enable:		optional function to enable the clocksource
 * @disable:		optional function to disable the clocksource
 * @mask:		bitmask for two's complement
 *			subtraction of non 64 bit counters
 * @mult:		cycle to nanosecond multiplier
 * @shift:		cycle to nanosecond divisor (power of two)
 * @max_idle_ns:	max idle time permitted by the clocksource (nsecs)
 * @flags:		flags describing special properties
 * @vread:		vsyscall based read
 * @suspend:		suspend function for the clocksource, if necessary
 * @resume:		resume function for the clocksource, if necessary
 */
struct clocksource {
	/*
	 * First part of structure is read mostly
	 */
	char *name;
	struct list_head list;
	int rating;
	cycle_t (*read)(struct clocksource *cs);
	int (*enable)(struct clocksource *cs);
	void (*disable)(struct clocksource *cs);
	cycle_t mask;
	u32 mult;
	u32 shift;
	u64 max_idle_ns;
	unsigned long flags;
	cycle_t (*vread)(void);
	void (*suspend)(struct clocksource *cs);
	void (*resume)(struct clocksource *cs);
#ifdef CONFIG_IA64
	void *fsys_mmio;	/* used by fsyscall asm code */
#define CLKSRC_FSYS_MMIO_SET(mmio, addr)	((mmio) = (addr))
#else
#define CLKSRC_FSYS_MMIO_SET(mmio, addr)	do { } while (0)
#endif

	/*
	 * Second part is written at each timer interrupt
	 * Keep it in a different cache line to dirty no
	 * more than one cache line.
	 */
	cycle_t cycle_last ____cacheline_aligned_in_smp;

#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
	/* Watchdog related data, used by the framework */
	struct list_head wd_list;
	cycle_t wd_last;
#endif
};
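
/*
 * Illustrative sketch of a driver-defined clocksource (the "foo_" names
 * are hypothetical, not from an in-tree driver):
 *
 *	static cycle_t foo_cs_read(struct clocksource *cs)
 *	{
 *		return (cycle_t)readl(foo_timer_base + FOO_COUNT);
 *	}
 *
 *	static struct clocksource foo_cs = {
 *		.name	= "foo",
 *		.rating	= 200,
 *		.read	= foo_cs_read,
 *		.mask	= CLOCKSOURCE_MASK(32),
 *		.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
 *	};
 *
 * mult/shift can be left to the core by registering with
 * clocksource_register_hz()/clocksource_register_khz(), declared below.
 */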

/*
 * Clock source flags bits:
 */
#define CLOCK_SOURCE_IS_CONTINUOUS		0x01
#define CLOCK_SOURCE_MUST_VERIFY		0x02

#define CLOCK_SOURCE_WATCHDOG			0x10
#define CLOCK_SOURCE_VALID_FOR_HRES		0x20
#define CLOCK_SOURCE_UNSTABLE			0x40

/* simplify initialization of mask field */
#define CLOCKSOURCE_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1)
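/* e.g. CLOCKSOURCE_MASK(32) == 0xffffffff; CLOCKSOURCE_MASK(64) == all ones */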

/**
 * clocksource_khz2mult - calculates mult from khz and shift
 * @khz:		Clocksource frequency in KHz
 * @shift_constant:	Clocksource shift factor
 *
 * Helper function that converts a khz counter frequency to a clocksource
 * multiplier, given the clocksource shift value
 */
static inline u32 clocksource_khz2mult(u32 khz, u32 shift_constant)
{
	/*  khz = cyc/(Million ns)
	 *  mult/2^shift  = ns/cyc
	 *  mult = ns/cyc * 2^shift
	 *  mult = 1Million/khz * 2^shift
	 *  mult = 1000000 * 2^shift / khz
	 *  mult = (1000000<<shift) / khz
	 */
	u64 tmp = ((u64)1000000) << shift_constant;

	tmp += khz/2; /* round for do_div */
	do_div(tmp, khz);

	return (u32)tmp;
}
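
/*
 * Worked example: a 1000 kHz (1 MHz) source with shift = 16 gives
 * mult = (1000000 << 16) / 1000 = 65536000, so one cycle converts to
 * (1 * 65536000) >> 16 = 1000 ns, as expected for a 1 MHz counter.
 */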

/**
 * clocksource_hz2mult - calculates mult from hz and shift
 * @hz:			Clocksource frequency in Hz
 * @shift_constant:	Clocksource shift factor
 *
 * Helper function that converts a hz counter
 * frequency to a clocksource multiplier, given the
 * clocksource shift value
 */
static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant)
{
	/*  hz = cyc/(Billion ns)
	 *  mult/2^shift  = ns/cyc
	 *  mult = ns/cyc * 2^shift
	 *  mult = 1Billion/hz * 2^shift
	 *  mult = 1000000000 * 2^shift / hz
	 *  mult = (1000000000<<shift) / hz
	 */
	u64 tmp = ((u64)1000000000) << shift_constant;

	tmp += hz/2; /* round for do_div */
	do_div(tmp, hz);

	return (u32)tmp;
}

/**
 * clocksource_cyc2ns - converts clocksource cycles to nanoseconds
 *
 * Converts cycles to nanoseconds, using the given mult and shift.
 *
 * XXX - This could use some mult_lxl_ll() asm optimization
 */
static inline s64 clocksource_cyc2ns(cycle_t cycles, u32 mult, u32 shift)
{
	return ((u64) cycles * mult) >> shift;
}


extern int clocksource_register(struct clocksource*);
extern void clocksource_unregister(struct clocksource*);
extern void clocksource_touch_watchdog(void);
extern struct clocksource* clocksource_get_next(void);
extern void clocksource_change_rating(struct clocksource *cs, int rating);
extern void clocksource_suspend(void);
extern void clocksource_resume(void);
extern struct clocksource * __init __weak clocksource_default_clock(void);
extern void clocksource_mark_unstable(struct clocksource *cs);

extern void
clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 minsec);
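
/*
 * Usage sketch: derive a mult/shift pair for converting a 19.2 MHz counter
 * to nanoseconds, guaranteeing at least 600 seconds of conversion range
 * (frequency and range are example values):
 *
 *	u32 mult, shift;
 *
 *	clocks_calc_mult_shift(&mult, &shift, 19200000, NSEC_PER_SEC, 600);
 */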

/*
 * Don't call __clocksource_register_scale directly, use
 * clocksource_register_hz/khz
 */
extern int
__clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq);
extern void
__clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq);

static inline int clocksource_register_hz(struct clocksource *cs, u32 hz)
{
	return __clocksource_register_scale(cs, 1, hz);
}

static inline int clocksource_register_khz(struct clocksource *cs, u32 khz)
{
	return __clocksource_register_scale(cs, 1000, khz);
}
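
/*
 * Typical registration from a driver's init path (sketch; "foo_cs" is the
 * hypothetical clocksource from the example above, clocked at 24 MHz):
 *
 *	clocksource_register_hz(&foo_cs, 24000000);
 *
 * The core computes mult/shift from the frequency, so drivers registering
 * this way need not fill those fields themselves.
 */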

static inline void __clocksource_updatefreq_hz(struct clocksource *cs, u32 hz)
{
	__clocksource_updatefreq_scale(cs, 1, hz);
}

static inline void __clocksource_updatefreq_khz(struct clocksource *cs, u32 khz)
{
	__clocksource_updatefreq_scale(cs, 1000, khz);
}
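
/*
 * A clocksource whose rate is only known after runtime calibration can
 * rescale itself later, e.g. __clocksource_updatefreq_hz(&foo_cs, measured_hz);
 * ("measured_hz" is a placeholder for a driver-calibrated value).
 */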

static inline void
clocksource_calc_mult_shift(struct clocksource *cs, u32 freq, u32 minsec)
{
	return clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
				      NSEC_PER_SEC, minsec);
}

#ifdef CONFIG_GENERIC_TIME_VSYSCALL
extern void
update_vsyscall(struct timespec *ts, struct timespec *wtm,
			struct clocksource *c, u32 mult);
extern void update_vsyscall_tz(void);
#else
static inline void
update_vsyscall(struct timespec *ts, struct timespec *wtm,
			struct clocksource *c, u32 mult)
{
}

static inline void update_vsyscall_tz(void)
{
}
#endif

extern void timekeeping_notify(struct clocksource *clock);

#endif /* _LINUX_CLOCKSOURCE_H */