John Stultz | d7b4202 | 2012-09-04 15:12:07 -0400 | [diff] [blame] | 1 | /* |
| 2 | * You SHOULD NOT be including this unless you're vsyscall |
| 3 | * handling code or timekeeping internal code! |
| 4 | */ |
| 5 | |
| 6 | #ifndef _LINUX_TIMEKEEPER_INTERNAL_H |
| 7 | #define _LINUX_TIMEKEEPER_INTERNAL_H |
| 8 | |
| 9 | #include <linux/clocksource.h> |
| 10 | #include <linux/jiffies.h> |
| 11 | #include <linux/time.h> |
| 12 | |
/**
 * struct tk_read_base - base structure for timekeeping readout
 * @clock:	Current clocksource used for timekeeping.
 * @read:	Read function of @clock
 * @mask:	Bitmask for two's complement subtraction of non 64bit clocks
 * @cycle_last: @clock cycle value at last update
 * @mult:	(NTP adjusted) multiplier for scaled math conversion
 * @shift:	Shift value for scaled math conversion
 * @xtime_nsec:	Shifted (fractional) nano seconds offset for readout
 * @base:	ktime_t (nanoseconds) base time for readout
 *
 * This struct has size 56 bytes on 64 bit. Together with a seqcount it
 * occupies a single 64byte cache line, so keep the field order and sizes
 * as they are.
 *
 * The struct is separate from struct timekeeper as it is also used
 * for the fast NMI safe accessors.
 */
struct tk_read_base {
	struct clocksource	*clock;
	u64			(*read)(struct clocksource *cs);
	u64			mask;
	u64			cycle_last;
	u32			mult;
	u32			shift;
	u64			xtime_nsec;
	ktime_t			base;
};
| 40 | |
/**
 * struct timekeeper - Structure holding internal timekeeping values.
 * @tkr_mono:		The readout base structure for CLOCK_MONOTONIC
 * @tkr_raw:		The readout base structure for CLOCK_MONOTONIC_RAW
 * @xtime_sec:		Current CLOCK_REALTIME time in seconds
 * @ktime_sec:		Current CLOCK_MONOTONIC time in seconds
 * @wall_to_monotonic:	CLOCK_REALTIME to CLOCK_MONOTONIC offset
 * @offs_real:		Offset clock monotonic -> clock realtime
 * @offs_boot:		Offset clock monotonic -> clock boottime
 * @offs_tai:		Offset clock monotonic -> clock tai
 * @tai_offset:		The current UTC to TAI offset in seconds
 * @clock_was_set_seq:	The sequence number of clock was set events
 * @cs_was_changed_seq:	The sequence number of clocksource change events
 * @next_leap_ktime:	CLOCK_MONOTONIC time value of a pending leap-second
 * @raw_time:		Monotonic raw base time in timespec64 format
 * @cycle_interval:	Number of clock cycles in one NTP interval
 * @xtime_interval:	Number of clock shifted nano seconds in one NTP
 *			interval.
 * @xtime_remainder:	Shifted nano seconds left over when rounding
 *			@cycle_interval
 * @raw_interval:	Raw nano seconds accumulated per NTP interval.
 * @ntp_tick:		The ntp_tick_length() value currently being used.
 *			Cached so one consistent tick length is applied for
 *			an entire tick even if ntp_tick_length() changes
 *			mid-tick.
 * @ntp_error:		Difference between accumulated time and NTP time in ntp
 *			shifted nano seconds.
 * @ntp_error_shift:	Shift conversion between clock shifted nano seconds and
 *			ntp shifted nano seconds.
 * @ntp_err_mult:	Multiplier applied when correcting for the accumulated
 *			@ntp_error (semantics defined in timekeeping.c — see
 *			its users there)
 * @last_warning:	Warning ratelimiter (DEBUG_TIMEKEEPING)
 * @underflow_seen:	Underflow warning flag (DEBUG_TIMEKEEPING)
 * @overflow_seen:	Overflow warning flag (DEBUG_TIMEKEEPING)
 *
 * Note: For timespec(64) based interfaces wall_to_monotonic is what
 * we need to add to xtime (or xtime corrected for sub jiffie times)
 * to get to monotonic time. Monotonic is pegged at zero at system
 * boot time, so wall_to_monotonic will be negative, however, we will
 * ALWAYS keep the tv_nsec part positive so we can use the usual
 * normalization.
 *
 * wall_to_monotonic is moved after resume from suspend for the
 * monotonic time not to jump. We need to add total_sleep_time to
 * wall_to_monotonic to get the real boot based time offset.
 *
 * wall_to_monotonic is no longer the boot time, getboottime must be
 * used instead.
 */
struct timekeeper {
	struct tk_read_base	tkr_mono;
	struct tk_read_base	tkr_raw;
	u64			xtime_sec;
	unsigned long		ktime_sec;
	struct timespec64	wall_to_monotonic;
	ktime_t			offs_real;
	ktime_t			offs_boot;
	ktime_t			offs_tai;
	s32			tai_offset;
	unsigned int		clock_was_set_seq;
	u8			cs_was_changed_seq;
	ktime_t			next_leap_ktime;
	struct timespec64	raw_time;

	/* The following members are for timekeeping internal use */
	u64			cycle_interval;
	u64			xtime_interval;
	s64			xtime_remainder;
	u32			raw_interval;
	/* The ntp_tick_length() value currently being used.
	 * This cached copy ensures we consistently apply the tick
	 * length for an entire tick, as ntp_tick_length may change
	 * mid-tick, and we don't want to apply that new value to
	 * the tick in progress.
	 */
	u64			ntp_tick;
	/* Difference between accumulated time and NTP time in ntp
	 * shifted nano seconds. */
	s64			ntp_error;
	u32			ntp_error_shift;
	u32			ntp_err_mult;
#ifdef CONFIG_DEBUG_TIMEKEEPING
	long			last_warning;
	/*
	 * These simple flag variables are managed
	 * without locks, which is racy, but they are
	 * ok since we don't really care about being
	 * super precise about how many events were
	 * seen, just that a problem was observed.
	 */
	int			underflow_seen;
	int			overflow_seen;
#endif
};
John Stultz | 189374a | 2012-09-04 15:27:48 -0400 | [diff] [blame] | 129 | |
#ifdef CONFIG_GENERIC_TIME_VSYSCALL

/* Architecture hook: propagate timekeeper state to the vsyscall/vDSO data. */
extern void update_vsyscall(struct timekeeper *tk);
extern void update_vsyscall_tz(void);

#elif defined(CONFIG_GENERIC_TIME_VSYSCALL_OLD)

/*
 * Legacy vsyscall interface: the architecture receives the raw values
 * instead of the timekeeper itself.
 */
extern void update_vsyscall_old(struct timespec *ts, struct timespec *wtm,
				struct clocksource *c, u32 mult,
				u64 cycle_last);
extern void update_vsyscall_tz(void);

#else

/* No vsyscall support configured: provide no-op stubs. */
static inline void update_vsyscall(struct timekeeper *tk)
{
}
static inline void update_vsyscall_tz(void)
{
}
#endif
| 151 | |
John Stultz | d7b4202 | 2012-09-04 15:12:07 -0400 | [diff] [blame] | 152 | #endif /* _LINUX_TIMEKEEPER_INTERNAL_H */ |