blob: 1219be4fb42e8f2a86e9e1268dfe7958fde8263c [file] [log] [blame]
john stultz734efb42006-06-26 00:25:05 -07001/* linux/include/linux/clocksource.h
2 *
3 * This file contains the structure definitions for clocksources.
4 *
5 * If you are not a clocksource, or timekeeping code, you should
6 * not be including this file!
7 */
8#ifndef _LINUX_CLOCKSOURCE_H
9#define _LINUX_CLOCKSOURCE_H
10
11#include <linux/types.h>
12#include <linux/timex.h>
13#include <linux/time.h>
14#include <linux/list.h>
Eric Dumazet329c8d82007-05-08 00:27:57 -070015#include <linux/cache.h>
Thomas Gleixner5d8b34f2007-02-16 01:27:43 -080016#include <linux/timer.h>
john stultz734efb42006-06-26 00:25:05 -070017#include <asm/div64.h>
18#include <asm/io.h>
19
/* clocksource cycle base type */
typedef u64 cycle_t;
struct clocksource;	/* forward declaration for the accessor helpers below */
john stultz734efb42006-06-26 00:25:05 -070023
/**
 * struct cyclecounter - hardware abstraction for a free running counter
 *	Provides completely state-free accessors to the underlying hardware.
 *	Depending on which hardware it reads, the cycle counter may wrap
 *	around quickly. Locking rules (if necessary) have to be defined
 *	by the implementor and user of specific instances of this API.
 *
 * @read:		returns the current cycle value
 * @mask:		bitmask for two's complement
 *			subtraction of non 64 bit counters,
 *			see CLOCKSOURCE_MASK() helper macro
 * @mult:		cycle to nanosecond multiplier
 * @shift:		cycle to nanosecond divisor (power of two)
 *
 * Conversion convention: ns = (cycles * @mult) >> @shift,
 * see cyclecounter_cyc2ns().
 */
struct cyclecounter {
	cycle_t (*read)(const struct cyclecounter *cc);
	cycle_t mask;
	u32 mult;
	u32 shift;
};
44
/**
 * struct timecounter - layer above a %struct cyclecounter which counts nanoseconds
 *	Contains the state needed by timecounter_read() to detect
 *	cycle counter wrap around. Initialize with
 *	timecounter_init(). Also used to convert cycle counts into the
 *	corresponding nanosecond counts with timecounter_cyc2time(). Users
 *	of this code are responsible for initializing the underlying
 *	cycle counter hardware, locking issues and reading the time
 *	more often than the cycle counter wraps around. The nanosecond
 *	counter will only wrap around after ~585 years (2^64 ns).
 *
 * @cc:			the cycle counter used by this instance
 * @cycle_last:		most recent cycle counter value seen by
 *			timecounter_read()
 * @nsec:		continuously increasing count
 */
struct timecounter {
	const struct cyclecounter *cc;
	cycle_t cycle_last;
	u64 nsec;
};
66
67/**
68 * cyclecounter_cyc2ns - converts cycle counter cycles to nanoseconds
69 * @tc: Pointer to cycle counter.
70 * @cycles: Cycles
71 *
72 * XXX - This could use some mult_lxl_ll() asm optimization. Same code
73 * as in cyc2ns, but with unsigned result.
74 */
75static inline u64 cyclecounter_cyc2ns(const struct cyclecounter *cc,
76 cycle_t cycles)
77{
78 u64 ret = (u64)cycles;
79 ret = (ret * cc->mult) >> cc->shift;
80 return ret;
81}
82
/**
 * timecounter_init - initialize a time counter
 * @tc:			Pointer to time counter which is to be initialized/reset
 * @cc:			A cycle counter, ready to be used.
 * @start_tstamp:	Arbitrary initial time stamp.
 *
 * After this call the current cycle register (roughly) corresponds to
 * the initial time stamp. Every call to timecounter_read() increments
 * the time stamp counter by the number of elapsed nanoseconds.
 */
extern void timecounter_init(struct timecounter *tc,
			     const struct cyclecounter *cc,
			     u64 start_tstamp);

/**
 * timecounter_read - return nanoseconds elapsed since timecounter_init()
 *                    plus the initial time stamp
 * @tc:		Pointer to time counter.
 *
 * In other words, keeps track of time since the same epoch as
 * the function which generated the initial time stamp.
 */
extern u64 timecounter_read(struct timecounter *tc);

/**
 * timecounter_cyc2time - convert a cycle counter to same
 *                        time base as values returned by
 *                        timecounter_read()
 * @tc:		Pointer to time counter.
 * @cycle:	a value returned by tc->cc->read()
 *
 * Cycle counts are converted correctly as long as they
 * fall into the interval [-1/2 max cycle count, +1/2 max cycle count],
 * with "max cycle count" == tc->cc->mask+1.
 *
 * This allows conversion of cycle counter values which were generated
 * in the past.
 */
extern u64 timecounter_cyc2time(struct timecounter *tc,
				cycle_t cycle_tstamp);
123
/**
 * struct clocksource - hardware abstraction for a free running counter
 *	Provides mostly state-free accessors to the underlying hardware.
 *	This is the structure used for system time.
 *
 * @name:		ptr to clocksource name
 * @list:		list head for registration
 * @rating:		rating value for selection (higher is better)
 *			To avoid rating inflation the following
 *			list should give you a guide as to how
 *			to assign your clocksource a rating
 *			1-99: Unfit for real use
 *				Only available for bootup and testing purposes.
 *			100-199: Base level usability.
 *				Functional for real use, but not desired.
 *			200-299: Good.
 *				A correct and usable clocksource.
 *			300-399: Desired.
 *				A reasonably fast and accurate clocksource.
 *			400-499: Perfect
 *				The ideal clocksource. A must-use where
 *				available.
 * @read:		returns a cycle value, passes clocksource as argument
 * @enable:		optional function to enable the clocksource
 * @disable:		optional function to disable the clocksource
 * @mask:		bitmask for two's complement
 *			subtraction of non 64 bit counters
 * @mult:		cycle to nanosecond multiplier (adjusted by NTP)
 * @mult_orig:		cycle to nanosecond multiplier (unadjusted by NTP)
 * @shift:		cycle to nanosecond divisor (power of two)
 * @flags:		flags describing special properties
 * @vread:		vsyscall based read
 * @resume:		resume function for the clocksource, if necessary
 * @fsys_mmio:		IA64 only, MMIO address used by fsyscall asm code
 * @cycle_interval:	Used internally by timekeeping core, please ignore.
 * @xtime_interval:	Used internally by timekeeping core, please ignore.
 * @raw_interval:	Used internally by timekeeping core, please ignore.
 * @cycle_last:		Used internally by timekeeping core, please ignore.
 * @xtime_nsec:		Used internally by timekeeping core, please ignore.
 * @error:		Used internally by timekeeping core, please ignore.
 * @raw_time:		Used internally by timekeeping core, please ignore.
 */
struct clocksource {
	/*
	 * First part of structure is read mostly
	 */
	char *name;
	struct list_head list;
	int rating;
	cycle_t (*read)(struct clocksource *cs);
	int (*enable)(struct clocksource *cs);
	void (*disable)(struct clocksource *cs);
	cycle_t mask;
	u32 mult;
	u32 mult_orig;
	u32 shift;
	unsigned long flags;
	cycle_t (*vread)(void);
	void (*resume)(void);
#ifdef CONFIG_IA64
	void *fsys_mmio;        /* used by fsyscall asm code */
#define CLKSRC_FSYS_MMIO_SET(mmio, addr)      ((mmio) = (addr))
#else
#define CLKSRC_FSYS_MMIO_SET(mmio, addr)      do { } while (0)
#endif

	/* timekeeping specific data, ignore */
	cycle_t cycle_interval;
	u64	xtime_interval;
	u32	raw_interval;
	/*
	 * Second part is written at each timer interrupt
	 * Keep it in a different cache line to dirty no
	 * more than one cache line.
	 */
	cycle_t cycle_last ____cacheline_aligned_in_smp;
	u64 xtime_nsec;
	s64 error;
	struct timespec raw_time;

#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
	/* Watchdog related data, used by the framework */
	struct list_head wd_list;
	cycle_t wd_last;
#endif
};
204
extern struct clocksource *clock;	/* current clocksource */

/*
 * Clock source flags bits:
 */
/* Set by the driver at registration time: */
#define CLOCK_SOURCE_IS_CONTINUOUS		0x01
#define CLOCK_SOURCE_MUST_VERIFY		0x02

/* Managed by the watchdog / timekeeping framework: */
#define CLOCK_SOURCE_WATCHDOG			0x10
#define CLOCK_SOURCE_VALID_FOR_HRES		0x20

/* simplify initialization of mask field; all-ones for 64 bit counters */
#define CLOCKSOURCE_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1)
john stultz734efb42006-06-26 00:25:05 -0700218
219/**
220 * clocksource_khz2mult - calculates mult from khz and shift
221 * @khz: Clocksource frequency in KHz
222 * @shift_constant: Clocksource shift factor
223 *
224 * Helper functions that converts a khz counter frequency to a timsource
225 * multiplier, given the clocksource shift value
226 */
227static inline u32 clocksource_khz2mult(u32 khz, u32 shift_constant)
228{
229 /* khz = cyc/(Million ns)
230 * mult/2^shift = ns/cyc
231 * mult = ns/cyc * 2^shift
232 * mult = 1Million/khz * 2^shift
233 * mult = 1000000 * 2^shift / khz
234 * mult = (1000000<<shift) / khz
235 */
236 u64 tmp = ((u64)1000000) << shift_constant;
237
238 tmp += khz/2; /* round for do_div */
239 do_div(tmp, khz);
240
241 return (u32)tmp;
242}
243
244/**
245 * clocksource_hz2mult - calculates mult from hz and shift
246 * @hz: Clocksource frequency in Hz
247 * @shift_constant: Clocksource shift factor
248 *
249 * Helper functions that converts a hz counter
250 * frequency to a timsource multiplier, given the
251 * clocksource shift value
252 */
253static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant)
254{
255 /* hz = cyc/(Billion ns)
256 * mult/2^shift = ns/cyc
257 * mult = ns/cyc * 2^shift
258 * mult = 1Billion/hz * 2^shift
259 * mult = 1000000000 * 2^shift / hz
260 * mult = (1000000000<<shift) / hz
261 */
262 u64 tmp = ((u64)1000000000) << shift_constant;
263
264 tmp += hz/2; /* round for do_div */
265 do_div(tmp, hz);
266
267 return (u32)tmp;
268}
269
270/**
john stultza2752542006-06-26 00:25:14 -0700271 * clocksource_read: - Access the clocksource's current cycle value
john stultz734efb42006-06-26 00:25:05 -0700272 * @cs: pointer to clocksource being read
273 *
274 * Uses the clocksource to return the current cycle_t value
275 */
john stultza2752542006-06-26 00:25:14 -0700276static inline cycle_t clocksource_read(struct clocksource *cs)
john stultz734efb42006-06-26 00:25:05 -0700277{
Magnus Damm8e196082009-04-21 12:24:00 -0700278 return cs->read(cs);
john stultz734efb42006-06-26 00:25:05 -0700279}
280
281/**
Magnus Damm4614e6a2009-04-21 12:24:02 -0700282 * clocksource_enable: - enable clocksource
283 * @cs: pointer to clocksource
284 *
285 * Enables the specified clocksource. The clocksource callback
286 * function should start up the hardware and setup mult and field
287 * members of struct clocksource to reflect hardware capabilities.
288 */
289static inline int clocksource_enable(struct clocksource *cs)
290{
Magnus Damma25cbd02009-05-01 14:45:46 +0900291 int ret = 0;
292
293 if (cs->enable)
294 ret = cs->enable(cs);
295
Magnus Dammc7121842009-07-28 14:09:55 -0700296 /*
297 * The frequency may have changed while the clocksource
298 * was disabled. If so the code in ->enable() must update
299 * the mult value to reflect the new frequency. Make sure
300 * mult_orig follows this change.
301 */
Magnus Damma25cbd02009-05-01 14:45:46 +0900302 cs->mult_orig = cs->mult;
303
304 return ret;
Magnus Damm4614e6a2009-04-21 12:24:02 -0700305}
306
/**
 * clocksource_disable: - disable clocksource
 * @cs:		pointer to clocksource
 *
 * Disables the specified clocksource. The clocksource callback
 * function should power down the now unused hardware block to
 * save power.
 */
static inline void clocksource_disable(struct clocksource *cs)
{
	/*
	 * Save mult_orig in mult so clocksource_enable() can
	 * restore the value regardless if ->enable() updates
	 * the value of mult or not.
	 *
	 * NOTE: this must happen before the ->disable() callback
	 * runs, so the pairing with clocksource_enable() holds.
	 */
	cs->mult = cs->mult_orig;

	if (cs->disable)
		cs->disable(cs);
}
327
328/**
john stultz734efb42006-06-26 00:25:05 -0700329 * cyc2ns - converts clocksource cycles to nanoseconds
330 * @cs: Pointer to clocksource
331 * @cycles: Cycles
332 *
333 * Uses the clocksource and ntp ajdustment to convert cycle_ts to nanoseconds.
334 *
335 * XXX - This could use some mult_lxl_ll() asm optimization
336 */
337static inline s64 cyc2ns(struct clocksource *cs, cycle_t cycles)
338{
339 u64 ret = (u64)cycles;
340 ret = (ret * cs->mult) >> cs->shift;
341 return ret;
342}
343
/**
 * clocksource_calculate_interval - Calculates a clocksource interval struct
 *
 * @c:		Pointer to clocksource.
 * @length_nsec: Desired interval length in nanoseconds.
 *
 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
 * pair and interval request.
 *
 * Unless you're the timekeeping code, you should not be using this!
 */
static inline void clocksource_calculate_interval(struct clocksource *c,
						  unsigned long length_nsec)
{
	u64 tmp;

	/* Do the ns -> cycle conversion first, using original mult */
	tmp = length_nsec;
	tmp <<= c->shift;
	tmp += c->mult_orig/2;		/* round for do_div */
	do_div(tmp, c->mult_orig);

	/* Never allow a zero-length cycle interval. */
	c->cycle_interval = (cycle_t)tmp;
	if (c->cycle_interval == 0)
		c->cycle_interval = 1;

	/* Go back from cycles -> shifted ns, this time use ntp adjusted mult */
	c->xtime_interval = (u64)c->cycle_interval * c->mult;
	/* raw_interval deliberately uses the NTP-unadjusted mult_orig. */
	c->raw_interval = ((u64)c->cycle_interval * c->mult_orig) >> c->shift;
}
374
375
/* used to install a new clocksource */
extern int clocksource_register(struct clocksource*);
extern void clocksource_unregister(struct clocksource*);
extern void clocksource_touch_watchdog(void);
extern struct clocksource* clocksource_get_next(void);
extern void clocksource_change_rating(struct clocksource *cs, int rating);
extern void clocksource_resume(void);

#ifdef CONFIG_GENERIC_TIME_VSYSCALL
extern void update_vsyscall(struct timespec *ts, struct clocksource *c);
extern void update_vsyscall_tz(void);
#else
/* No vsyscall support: provide empty stubs so callers need no #ifdefs. */
static inline void update_vsyscall(struct timespec *ts, struct clocksource *c)
{
}

static inline void update_vsyscall_tz(void)
{
}
#endif

#endif /* _LINUX_CLOCKSOURCE_H */