/* linux/include/linux/clocksource.h
 *
 * This file contains the structure definitions for clocksources.
 *
 * If you are not a clocksource, or timekeeping code, you should
 * not be including this file!
 */
#ifndef _LINUX_CLOCKSOURCE_H
#define _LINUX_CLOCKSOURCE_H

#include <linux/types.h>
#include <linux/timex.h>
#include <linux/time.h>
#include <linux/list.h>
#include <linux/cache.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <asm/div64.h>
#include <asm/io.h>

/* clocksource cycle base type */
typedef u64 cycle_t;
struct clocksource;

#ifdef CONFIG_ARCH_CLOCKSOURCE_DATA
#include <asm/clocksource.h>
#endif

/**
 * struct cyclecounter - hardware abstraction for a free running counter
 *	Provides completely state-free accessors to the underlying hardware.
 *	Depending on which hardware it reads, the cycle counter may wrap
 *	around quickly. Locking rules (if necessary) have to be defined
 *	by the implementor and user of specific instances of this API.
 *
 * @read:	returns the current cycle value
 * @mask:	bitmask for two's complement
 *		subtraction of non 64 bit counters,
 *		see CLOCKSOURCE_MASK() helper macro
 * @mult:	cycle to nanosecond multiplier
 * @shift:	cycle to nanosecond divisor (power of two)
 */
struct cyclecounter {
	cycle_t (*read)(const struct cyclecounter *cc);
	cycle_t mask;
	u32 mult;
	u32 shift;
};
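
/*
 * Example (editor's sketch, not part of the original header): a minimal
 * cyclecounter backed by a hypothetical 32-bit free-running MMIO counter.
 * timer_base and all constants below are illustrative assumptions.
 *
 *	static void __iomem *timer_base;	// hypothetical counter register
 *
 *	static cycle_t example_cc_read(const struct cyclecounter *cc)
 *	{
 *		return (cycle_t)readl(timer_base);
 *	}
 *
 *	static struct cyclecounter example_cc = {
 *		.read	= example_cc_read,
 *		.mask	= CLOCKSOURCE_MASK(32),
 *		// for a 1 MHz counter; see clocksource_khz2mult() below
 *		.mult	= 1000 << 20,
 *		.shift	= 20,
 *	};
 */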

/**
 * struct timecounter - layer above a &struct cyclecounter which counts nanoseconds
 *	Contains the state needed by timecounter_read() to detect
 *	cycle counter wrap around. Initialize with
 *	timecounter_init(). Also used to convert cycle counts into the
 *	corresponding nanosecond counts with timecounter_cyc2time(). Users
 *	of this code are responsible for initializing the underlying
 *	cycle counter hardware, locking issues and reading the time
 *	more often than the cycle counter wraps around. The nanosecond
 *	counter will only wrap around after ~585 years.
 *
 * @cc:		the cycle counter used by this instance
 * @cycle_last:	most recent cycle counter value seen by
 *		timecounter_read()
 * @nsec:	continuously increasing count
 */
struct timecounter {
	const struct cyclecounter *cc;
	cycle_t cycle_last;
	u64 nsec;
};

/**
 * cyclecounter_cyc2ns - converts cycle counter cycles to nanoseconds
 * @cc:		Pointer to cycle counter.
 * @cycles:	Cycles
 *
 * XXX - This could use some mult_lxl_ll() asm optimization. Same code
 * as in cyc2ns, but with unsigned result.
 */
static inline u64 cyclecounter_cyc2ns(const struct cyclecounter *cc,
				      cycle_t cycles)
{
	u64 ret = (u64)cycles;
	ret = (ret * cc->mult) >> cc->shift;
	return ret;
}

/**
 * timecounter_init - initialize a time counter
 * @tc:			Pointer to time counter which is to be initialized/reset
 * @cc:			A cycle counter, ready to be used.
 * @start_tstamp:	Arbitrary initial time stamp.
 *
 * After this call the current cycle register (roughly) corresponds to
 * the initial time stamp. Every call to timecounter_read() increments
 * the time stamp counter by the number of elapsed nanoseconds.
 */
extern void timecounter_init(struct timecounter *tc,
			     const struct cyclecounter *cc,
			     u64 start_tstamp);

/**
 * timecounter_read - return nanoseconds elapsed since timecounter_init()
 *                    plus the initial time stamp
 * @tc:		Pointer to time counter.
 *
 * In other words, keeps track of time since the same epoch as
 * the function which generated the initial time stamp.
 */
extern u64 timecounter_read(struct timecounter *tc);

/**
 * timecounter_cyc2time - convert a cycle counter to the same
 *                        time base as values returned by
 *                        timecounter_read()
 * @tc:			Pointer to time counter.
 * @cycle_tstamp:	a value returned by tc->cc->read()
 *
 * Cycle counts are converted correctly as long as they
 * fall into the interval [-1/2 max cycle count, +1/2 max cycle count],
 * with "max cycle count" == tc->cc->mask+1.
 *
 * This allows conversion of cycle counter values which were generated
 * in the past.
 */
extern u64 timecounter_cyc2time(struct timecounter *tc,
				cycle_t cycle_tstamp);

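/*
 * Example (editor's sketch, not part of the original header): driving a
 * timecounter from the example_cc cyclecounter above. hw_cycle_stamp
 * stands in for a raw cycle value captured elsewhere (e.g. by hardware
 * timestamping). Callers must invoke timecounter_read() more often than
 * the underlying counter wraps.
 *
 *	static struct timecounter example_tc;
 *
 *	// once at setup, anchor the nanosecond count at an arbitrary epoch
 *	timecounter_init(&example_tc, &example_cc,
 *			 ktime_to_ns(ktime_get_real()));
 *
 *	// later: monotonically increasing nanoseconds since that epoch
 *	u64 now_ns = timecounter_read(&example_tc);
 *
 *	// convert a raw cycle stamp to the same time base
 *	u64 stamp_ns = timecounter_cyc2time(&example_tc, hw_cycle_stamp);
 */
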
/**
 * struct clocksource - hardware abstraction for a free running counter
 *	Provides mostly state-free accessors to the underlying hardware.
 *	This is the structure used for system time.
 *
 * @name:		ptr to clocksource name
 * @list:		list head for registration
 * @rating:		rating value for selection (higher is better)
 *			To avoid rating inflation the following
 *			list should give you a guide as to how
 *			to assign your clocksource a rating
 *			1-99: Unfit for real use
 *				Only available for bootup and testing purposes.
 *			100-199: Base level usability.
 *				Functional for real use, but not desired.
 *			200-299: Good.
 *				A correct and usable clocksource.
 *			300-399: Desired.
 *				A reasonably fast and accurate clocksource.
 *			400-499: Perfect
 *				The ideal clocksource. A must-use where
 *				available.
 * @read:		returns a cycle value, passes clocksource as argument
 * @enable:		optional function to enable the clocksource
 * @disable:		optional function to disable the clocksource
 * @mask:		bitmask for two's complement
 *			subtraction of non 64 bit counters
 * @mult:		cycle to nanosecond multiplier
 * @shift:		cycle to nanosecond divisor (power of two)
 * @max_idle_ns:	max idle time permitted by the clocksource (nsecs)
 * @maxadj:		maximum adjustment value to mult (~11%)
 * @flags:		flags describing special properties
 * @archdata:		arch-specific data
 * @suspend:		suspend function for the clocksource, if necessary
 * @resume:		resume function for the clocksource, if necessary
 * @cycle_last:		most recent cycle counter value seen by ::read()
 */
struct clocksource {
	/*
	 * Hotpath data, fits in a single cache line when the
	 * clocksource itself is cacheline aligned.
	 */
	cycle_t (*read)(struct clocksource *cs);
	cycle_t cycle_last;
	cycle_t mask;
	u32 mult;
	u32 shift;
	u64 max_idle_ns;
	u32 maxadj;
#ifdef CONFIG_ARCH_CLOCKSOURCE_DATA
	struct arch_clocksource_data archdata;
#endif

	const char *name;
	struct list_head list;
	int rating;
	int (*enable)(struct clocksource *cs);
	void (*disable)(struct clocksource *cs);
	unsigned long flags;
	void (*suspend)(struct clocksource *cs);
	void (*resume)(struct clocksource *cs);

	/* private: */
#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
	/* Watchdog related data, used by the framework */
	struct list_head wd_list;
	cycle_t cs_last;
	cycle_t wd_last;
#endif
} ____cacheline_aligned;

/*
 * Clock source flags bits:
 */
#define CLOCK_SOURCE_IS_CONTINUOUS	0x01
#define CLOCK_SOURCE_MUST_VERIFY	0x02

#define CLOCK_SOURCE_WATCHDOG		0x10
#define CLOCK_SOURCE_VALID_FOR_HRES	0x20
#define CLOCK_SOURCE_UNSTABLE		0x40

/* simplify initialization of mask field */
#define CLOCKSOURCE_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1)
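
/*
 * Editor's note: CLOCKSOURCE_MASK(32) evaluates to 0xffffffff, so a
 * wrapping 32-bit counter is handled correctly by the two's complement
 * subtraction (now - last) & mask in the timekeeping core, while
 * CLOCKSOURCE_MASK(64) yields all ones without performing a shift by
 * 64 bits, which would be undefined behaviour.
 */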

/**
 * clocksource_khz2mult - calculates mult from khz and shift
 * @khz:		Clocksource frequency in KHz
 * @shift_constant:	Clocksource shift factor
 *
 * Helper function that converts a khz counter frequency to a clocksource
 * multiplier, given the clocksource shift value.
 */
static inline u32 clocksource_khz2mult(u32 khz, u32 shift_constant)
{
	/*  khz = cyc/(Million ns)
	 *  mult/2^shift  = ns/cyc
	 *  mult = ns/cyc * 2^shift
	 *  mult = 1Million/khz * 2^shift
	 *  mult = 1000000 * 2^shift / khz
	 *  mult = (1000000<<shift) / khz
	 */
	u64 tmp = ((u64)1000000) << shift_constant;

	tmp += khz/2; /* round for do_div */
	do_div(tmp, khz);

	return (u32)tmp;
}
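
/*
 * Worked example (editor's note): for a 1 MHz counter (khz = 1000) and
 * shift = 20, mult = (1000000 << 20) / 1000 = 1000 << 20 = 0x3e800000.
 * Each cycle then contributes mult >> shift = 1000 ns, as expected for
 * a counter that ticks once per microsecond.
 */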

/**
 * clocksource_hz2mult - calculates mult from hz and shift
 * @hz:			Clocksource frequency in Hz
 * @shift_constant:	Clocksource shift factor
 *
 * Helper function that converts a hz counter
 * frequency to a clocksource multiplier, given the
 * clocksource shift value.
 */
static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant)
{
	/*  hz = cyc/(Billion ns)
	 *  mult/2^shift  = ns/cyc
	 *  mult = ns/cyc * 2^shift
	 *  mult = 1Billion/hz * 2^shift
	 *  mult = 1000000000 * 2^shift / hz
	 *  mult = (1000000000<<shift) / hz
	 */
	u64 tmp = ((u64)1000000000) << shift_constant;

	tmp += hz/2; /* round for do_div */
	do_div(tmp, hz);

	return (u32)tmp;
}

/**
 * clocksource_cyc2ns - converts clocksource cycles to nanoseconds
 * @cycles:	cycles
 * @mult:	cycle to nanosecond multiplier
 * @shift:	cycle to nanosecond divisor (power of two)
 *
 * Converts cycles to nanoseconds, using the given mult and shift.
 *
 * XXX - This could use some mult_lxl_ll() asm optimization
 */
static inline s64 clocksource_cyc2ns(cycle_t cycles, u32 mult, u32 shift)
{
	return ((u64) cycles * mult) >> shift;
}
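
/*
 * Example (editor's note): continuing the 1 MHz case above, with
 * mult = clocksource_khz2mult(1000, 20) and shift = 20,
 * clocksource_cyc2ns(5000, mult, 20) returns 5000000 ns, i.e. 5 ms for
 * 5000 cycles of a 1 MHz counter. Note that the intermediate product
 * cycles * mult is evaluated in 64 bits, which bounds how large a cycle
 * delta can be converted in a single call.
 */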


extern int clocksource_register(struct clocksource*);
extern void clocksource_unregister(struct clocksource*);
extern void clocksource_touch_watchdog(void);
extern struct clocksource* clocksource_get_next(void);
extern void clocksource_change_rating(struct clocksource *cs, int rating);
extern void clocksource_suspend(void);
extern void clocksource_resume(void);
extern struct clocksource * __init __weak clocksource_default_clock(void);
extern void clocksource_mark_unstable(struct clocksource *cs);

extern void
clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 minsec);

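/*
 * Example (editor's sketch): computing a mult/shift pair for converting
 * a hypothetical 19.2 MHz counter to nanoseconds, accurate over a
 * conversion range of at least 600 seconds:
 *
 *	u32 mult, shift;
 *
 *	clocks_calc_mult_shift(&mult, &shift, 19200000, NSEC_PER_SEC, 600);
 */
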
/*
 * Don't call __clocksource_register_scale directly, use
 * clocksource_register_hz/khz
 */
extern int
__clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq);
extern void
__clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq);

static inline int clocksource_register_hz(struct clocksource *cs, u32 hz)
{
	return __clocksource_register_scale(cs, 1, hz);
}

static inline int clocksource_register_khz(struct clocksource *cs, u32 khz)
{
	return __clocksource_register_scale(cs, 1000, khz);
}
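
/*
 * Example (editor's sketch, not part of the original header): registering
 * a hypothetical 32-bit, 19.2 MHz free-running counter. The core derives
 * mult, shift, maxadj and max_idle_ns from the frequency passed to
 * clocksource_register_hz().
 *
 *	static cycle_t example_cs_read(struct clocksource *cs)
 *	{
 *		return (cycle_t)readl(timer_base);	// timer_base as above
 *	}
 *
 *	static struct clocksource example_cs = {
 *		.name	= "example-timer",
 *		.rating	= 300,	// reasonably fast and accurate
 *		.read	= example_cs_read,
 *		.mask	= CLOCKSOURCE_MASK(32),
 *		.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
 *	};
 *
 *	static int __init example_cs_init(void)
 *	{
 *		return clocksource_register_hz(&example_cs, 19200000);
 *	}
 */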

static inline void __clocksource_updatefreq_hz(struct clocksource *cs, u32 hz)
{
	__clocksource_updatefreq_scale(cs, 1, hz);
}

static inline void __clocksource_updatefreq_khz(struct clocksource *cs, u32 khz)
{
	__clocksource_updatefreq_scale(cs, 1000, khz);
}


extern void timekeeping_notify(struct clocksource *clock);

extern cycle_t clocksource_mmio_readl_up(struct clocksource *);
extern cycle_t clocksource_mmio_readl_down(struct clocksource *);
extern cycle_t clocksource_mmio_readw_up(struct clocksource *);
extern cycle_t clocksource_mmio_readw_down(struct clocksource *);

extern int clocksource_mmio_init(void __iomem *, const char *,
	unsigned long, int, unsigned, cycle_t (*)(struct clocksource *));

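/*
 * Example (editor's sketch): for simple memory-mapped up/down counters
 * the registration above collapses to a single call. Here a 32-bit
 * up-counter at 19.2 MHz is registered with rating 300 (all values
 * illustrative):
 *
 *	clocksource_mmio_init(timer_base, "example-timer", 19200000, 300,
 *			      32, clocksource_mmio_readl_up);
 */
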
extern int clocksource_i8253_init(void);

#ifdef CONFIG_CLKSRC_OF
extern void clocksource_of_init(void);

#define CLOCKSOURCE_OF_DECLARE(name, compat, fn)			\
	static const struct of_device_id __clksrc_of_table_##name	\
		__used __section(__clksrc_of_table)			\
		 = { .compatible = compat, .data = fn };
#endif
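
/*
 * Example (editor's sketch): a devicetree driver declares its init hook
 * once at file scope; clocksource_of_init() then walks the generated
 * table and invokes the hook for each matching DT node. The compatible
 * string is illustrative, and the no-argument hook signature assumes
 * the initial clksrc-of implementation.
 *
 *	static void __init example_timer_of_init(void)
 *	{
 *		// locate the node, map registers, then register, e.g.
 *		// via clocksource_mmio_init() as shown above
 *	}
 *	CLOCKSOURCE_OF_DECLARE(example, "vendor,example-timer",
 *			       example_timer_of_init);
 */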

#endif /* _LINUX_CLOCKSOURCE_H */