/* linux/include/linux/clocksource.h
 *
 * This file contains the structure definitions for clocksources.
 *
 * If you are not a clocksource, or timekeeping code, you should
 * not be including this file!
 */
#ifndef _LINUX_CLOCKSOURCE_H
#define _LINUX_CLOCKSOURCE_H

#include <linux/types.h>
#include <linux/timex.h>
#include <linux/time.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <asm/div64.h>
#include <asm/io.h>

/* clocksource cycle base type */
typedef u64 cycle_t;
struct clocksource;

/**
 * struct clocksource - hardware abstraction for a free running counter
 *	Provides mostly state-free accessors to the underlying hardware.
 *
 * @name:		ptr to clocksource name
 * @list:		list head for registration
 * @rating:		rating value for selection (higher is better)
 *			To avoid rating inflation the following
 *			list should give you a guide as to how
 *			to assign your clocksource a rating
 *			1-99: Unfit for real use
 *				Only available for bootup and testing purposes.
 *			100-199: Base level usability.
 *				Functional for real use, but not desired.
 *			200-299: Good.
 *				A correct and usable clocksource.
 *			300-399: Desired.
 *				A reasonably fast and accurate clocksource.
 *			400-499: Perfect
 *				The ideal clocksource. A must-use where
 *				available.
 * @read:		returns a cycle value
 * @mask:		bitmask for two's complement
 *			subtraction of non 64 bit counters
 * @mult:		cycle to nanosecond multiplier
 * @shift:		cycle to nanosecond divisor (power of two)
 * @flags:		flags describing special properties
 * @vread:		vsyscall based read
 * @cycle_interval:	Used internally by timekeeping core, please ignore.
 * @xtime_interval:	Used internally by timekeeping core, please ignore.
 */
struct clocksource {
	char *name;
	struct list_head list;
	int rating;
	cycle_t (*read)(void);
	cycle_t mask;
	u32 mult;
	u32 shift;
	unsigned long flags;
	cycle_t (*vread)(void);

	/* timekeeping specific data, ignore */
	cycle_t cycle_last, cycle_interval;
	u64 xtime_nsec, xtime_interval;
	s64 error;

#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
	/* Watchdog related data, used by the framework */
	struct list_head wd_list;
	cycle_t wd_last;
#endif
};

/*
 * Clock source flags bits:
 */
#define CLOCK_SOURCE_IS_CONTINUOUS		0x01
#define CLOCK_SOURCE_MUST_VERIFY		0x02

#define CLOCK_SOURCE_WATCHDOG			0x10
#define CLOCK_SOURCE_VALID_FOR_HRES		0x20

/* simplify initialization of mask field */
#define CLOCKSOURCE_MASK(bits) (cycle_t)(bits<64 ? ((1ULL<<bits)-1) : -1)
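
/*
 * For example, a driver wrapping a 32-bit free running counter would use
 * CLOCKSOURCE_MASK(32), which evaluates to 0xffffffff, so that cycle deltas
 * computed as "(now - last) & mask" wrap correctly at 32 bits.
 */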

/**
 * clocksource_khz2mult - calculates mult from khz and shift
 * @khz:		Clocksource frequency in KHz
 * @shift_constant:	Clocksource shift factor
 *
 * Helper function that converts a khz counter frequency to a clocksource
 * multiplier, given the clocksource shift value
 */
static inline u32 clocksource_khz2mult(u32 khz, u32 shift_constant)
{
	/*  khz = cyc/(Million ns)
	 *  mult/2^shift  = ns/cyc
	 *  mult = ns/cyc * 2^shift
	 *  mult = 1Million/khz * 2^shift
	 *  mult = 1000000 * 2^shift / khz
	 *  mult = (1000000<<shift) / khz
	 */
	u64 tmp = ((u64)1000000) << shift_constant;

	tmp += khz/2; /* round for do_div */
	do_div(tmp, khz);

	return (u32)tmp;
}
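
/*
 * Worked example (illustrative, values chosen for round numbers): for a
 * hypothetical 1 MHz counter (khz = 1000) and shift_constant = 20,
 *
 *	mult = (1000000 << 20) / 1000 = 1048576000
 *
 * so cyc2ns() below yields (cycles * 1048576000) >> 20 = cycles * 1000,
 * i.e. 1000 ns per cycle, as expected for a 1 MHz clock.
 */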

/**
 * clocksource_hz2mult - calculates mult from hz and shift
 * @hz:			Clocksource frequency in Hz
 * @shift_constant:	Clocksource shift factor
 *
 * Helper function that converts a hz counter
 * frequency to a clocksource multiplier, given the
 * clocksource shift value
 */
static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant)
{
	/*  hz = cyc/(Billion ns)
	 *  mult/2^shift  = ns/cyc
	 *  mult = ns/cyc * 2^shift
	 *  mult = 1Billion/hz * 2^shift
	 *  mult = 1000000000 * 2^shift / hz
	 *  mult = (1000000000<<shift) / hz
	 */
	u64 tmp = ((u64)1000000000) << shift_constant;

	tmp += hz/2; /* round for do_div */
	do_div(tmp, hz);

	return (u32)tmp;
}
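
/*
 * Worked example (illustrative): for a hypothetical 1 GHz counter
 * (hz = 1000000000) and shift_constant = 22,
 *
 *	mult = (1000000000 << 22) / 1000000000 = 1 << 22 = 4194304
 *
 * which converts one cycle to exactly one nanosecond once shifted back
 * down by 22 bits in cyc2ns().
 */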

/**
 * clocksource_read: - Access the clocksource's current cycle value
 * @cs:		pointer to clocksource being read
 *
 * Uses the clocksource to return the current cycle_t value
 */
static inline cycle_t clocksource_read(struct clocksource *cs)
{
	return cs->read();
}

/**
 * cyc2ns - converts clocksource cycles to nanoseconds
 * @cs:		Pointer to clocksource
 * @cycles:	Cycles
 *
 * Uses the clocksource and ntp adjustment to convert cycle_ts to nanoseconds.
 *
 * XXX - This could use some mult_lxl_ll() asm optimization
 */
static inline s64 cyc2ns(struct clocksource *cs, cycle_t cycles)
{
	u64 ret = (u64)cycles;
	ret = (ret * cs->mult) >> cs->shift;
	return ret;
}
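
/*
 * Example (using the hypothetical 1 MHz clocksource from above, i.e.
 * mult = 1048576000, shift = 20): cyc2ns(cs, 5) computes
 *
 *	(5 * 1048576000) >> 20 = 5242880000 >> 20 = 5000
 *
 * i.e. five cycles of a 1 MHz counter correspond to 5000 ns.
 */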

/**
 * clocksource_calculate_interval - Calculates a clocksource interval struct
 *
 * @c:		Pointer to clocksource.
 * @length_nsec: Desired interval length in nanoseconds.
 *
 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
 * pair and interval request.
 *
 * Unless you're the timekeeping code, you should not be using this!
 */
static inline void clocksource_calculate_interval(struct clocksource *c,
						  unsigned long length_nsec)
{
	u64 tmp;

	/* XXX - All of this could use a whole lot of optimization */
	tmp = length_nsec;
	tmp <<= c->shift;
	tmp += c->mult/2;
	do_div(tmp, c->mult);

	c->cycle_interval = (cycle_t)tmp;
	if (c->cycle_interval == 0)
		c->cycle_interval = 1;

	c->xtime_interval = (u64)c->cycle_interval * c->mult;
}
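
/*
 * Example (again with the hypothetical 1 MHz clocksource, mult = 1048576000,
 * shift = 20): requesting length_nsec = 1000000 (1 ms) gives
 *
 *	cycle_interval = ((1000000 << 20) + mult/2) / mult = 1000 cycles
 *	xtime_interval = 1000 * 1048576000 = 1000000 << 20
 *
 * i.e. 1000 counter cycles per interval, and the same 1 ms expressed in
 * shifted (mult/shift) nanosecond units.
 */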


/* used to install a new clocksource */
extern int clocksource_register(struct clocksource*);
extern struct clocksource* clocksource_get_next(void);
extern void clocksource_change_rating(struct clocksource *cs, int rating);
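
/*
 * Typical registration sequence (a minimal sketch; the "foo" names, counter
 * register and rate are hypothetical, not part of this header):
 *
 *	static cycle_t foo_read(void)
 *	{
 *		return (cycle_t)readl(foo_counter_base);
 *	}
 *
 *	static struct clocksource clocksource_foo = {
 *		.name	= "foo",
 *		.rating	= 200,
 *		.read	= foo_read,
 *		.mask	= CLOCKSOURCE_MASK(32),
 *		.shift	= 20,
 *		.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
 *	};
 *
 *	static int __init foo_clocksource_init(void)
 *	{
 *		clocksource_foo.mult = clocksource_hz2mult(foo_rate_hz,
 *						clocksource_foo.shift);
 *		return clocksource_register(&clocksource_foo);
 *	}
 *
 * The timekeeping core then selects among registered clocksources, normally
 * preferring the highest-rated one.
 */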

#ifdef CONFIG_GENERIC_TIME_VSYSCALL
extern void update_vsyscall(struct timespec *ts, struct clocksource *c);
#else
static inline void update_vsyscall(struct timespec *ts, struct clocksource *c)
{
}
#endif

#endif /* _LINUX_CLOCKSOURCE_H */