blob: dfc80b72c32a3beafd9c59104acc00c09ff3901b [file] [log] [blame]
John Stultzd7b42022012-09-04 15:12:07 -04001/*
2 * You SHOULD NOT be including this unless you're vsyscall
3 * handling code or timekeeping internal code!
4 */
5
6#ifndef _LINUX_TIMEKEEPER_INTERNAL_H
7#define _LINUX_TIMEKEEPER_INTERNAL_H
8
9#include <linux/clocksource.h>
10#include <linux/jiffies.h>
11#include <linux/time.h>
12
/**
 * struct tk_read_base - base structure for timekeeping readout
 * @clock:	Current clocksource used for timekeeping.
 * @mask:	Bitmask for two's complement subtraction of non 64bit clocks
 * @cycle_last: @clock cycle value at last update
 * @mult:	(NTP adjusted) multiplier for scaled math conversion
 * @shift:	Shift value for scaled math conversion
 * @xtime_nsec: Shifted (fractional) nano seconds offset for readout
 * @base:	ktime_t (nanoseconds) base time for readout
 *
 * This struct has size 56 byte on 64 bit. Together with a seqcount it
 * occupies a single 64byte cache line.
 *
 * The struct is separate from struct timekeeper as it is also used
 * for the fast NMI safe accessors.
 */
struct tk_read_base {
	struct clocksource	*clock;
	cycle_t			mask;
	cycle_t			cycle_last;
	u32			mult;
	u32			shift;
	u64			xtime_nsec;
	ktime_t			base;
};
39
/**
 * struct timekeeper - Structure holding internal timekeeping values.
 * @tkr_mono:		The readout base structure for CLOCK_MONOTONIC
 * @tkr_raw:		The readout base structure for CLOCK_MONOTONIC_RAW
 * @xtime_sec:		Current CLOCK_REALTIME time in seconds
 * @ktime_sec:		Current CLOCK_MONOTONIC time in seconds
 * @wall_to_monotonic:	CLOCK_REALTIME to CLOCK_MONOTONIC offset
 * @offs_real:		Offset clock monotonic -> clock realtime
 * @offs_boot:		Offset clock monotonic -> clock boottime
 * @offs_tai:		Offset clock monotonic -> clock tai
 * @tai_offset:		The current UTC to TAI offset in seconds
 * @clock_was_set_seq:	The sequence number of clock was set events
 * @cs_was_changed_seq:	The sequence number of clocksource change events
 * @next_leap_ktime:	CLOCK_MONOTONIC time value of a pending leap-second
 * @raw_sec:		CLOCK_MONOTONIC_RAW  time in seconds
 * @cycle_interval:	Number of clock cycles in one NTP interval
 * @xtime_interval:	Number of clock shifted nano seconds in one NTP
 *			interval.
 * @xtime_remainder:	Shifted nano seconds left over when rounding
 *			@cycle_interval
 * @raw_interval:	Shifted raw nano seconds accumulated per NTP interval.
 * @ntp_tick:		The ntp_tick_length() value currently being used.
 *			This cached copy ensures we consistently apply the
 *			tick length for an entire tick, as ntp_tick_length
 *			may change mid-tick, and we don't want to apply that
 *			new value to the tick in progress.
 * @ntp_error:		Difference between accumulated time and NTP time in ntp
 *			shifted nano seconds.
 * @ntp_error_shift:	Shift conversion between clock shifted nano seconds and
 *			ntp shifted nano seconds.
 * @ntp_err_mult:	NOTE(review): extra multiplier used by the NTP error
 *			correction path; exact semantics live in
 *			kernel/time/timekeeping.c — confirm there.
 * @last_warning:	Warning ratelimiter (DEBUG_TIMEKEEPING)
 * @underflow_seen:	Underflow warning flag (DEBUG_TIMEKEEPING)
 * @overflow_seen:	Overflow warning flag (DEBUG_TIMEKEEPING)
 *
 * Note: For timespec(64) based interfaces wall_to_monotonic is what
 * we need to add to xtime (or xtime corrected for sub jiffie times)
 * to get to monotonic time. Monotonic is pegged at zero at system
 * boot time, so wall_to_monotonic will be negative, however, we will
 * ALWAYS keep the tv_nsec part positive so we can use the usual
 * normalization.
 *
 * wall_to_monotonic is moved after resume from suspend for the
 * monotonic time not to jump. We need to add total_sleep_time to
 * wall_to_monotonic to get the real boot based time offset.
 *
 * wall_to_monotonic is no longer the boot time, getboottime must be
 * used instead.
 */
struct timekeeper {
	struct tk_read_base	tkr_mono;
	struct tk_read_base	tkr_raw;
	u64			xtime_sec;
	unsigned long		ktime_sec;
	struct timespec64	wall_to_monotonic;
	ktime_t			offs_real;
	ktime_t			offs_boot;
	ktime_t			offs_tai;
	s32			tai_offset;
	unsigned int		clock_was_set_seq;
	u8			cs_was_changed_seq;
	ktime_t			next_leap_ktime;
	u64			raw_sec;

	/* The following members are for timekeeping internal use */
	cycle_t			cycle_interval;
	u64			xtime_interval;
	s64			xtime_remainder;
	u64			raw_interval;
	/* The ntp_tick_length() value currently being used.
	 * This cached copy ensures we consistently apply the tick
	 * length for an entire tick, as ntp_tick_length may change
	 * mid-tick, and we don't want to apply that new value to
	 * the tick in progress.
	 */
	u64			ntp_tick;
	/* Difference between accumulated time and NTP time in ntp
	 * shifted nano seconds. */
	s64			ntp_error;
	u32			ntp_error_shift;
	u32			ntp_err_mult;
#ifdef CONFIG_DEBUG_TIMEKEEPING
	long			last_warning;
	/*
	 * These simple flag variables are managed
	 * without locks, which is racy, but they are
	 * ok since we don't really care about being
	 * super precise about how many events were
	 * seen, just that a problem was observed.
	 */
	int			underflow_seen;
	int			overflow_seen;
#endif
};
John Stultz189374a2012-09-04 15:27:48 -0400128
#ifdef CONFIG_GENERIC_TIME_VSYSCALL

/* Arch provides real vsyscall/vDSO update hooks fed from the timekeeper. */
extern void update_vsyscall(struct timekeeper *tk);
extern void update_vsyscall_tz(void);

#elif defined(CONFIG_GENERIC_TIME_VSYSCALL_OLD)

/*
 * Legacy vsyscall interface: instead of the whole timekeeper, the arch
 * hook is handed the individual values it needs (wall time, wall->mono
 * offset, clocksource, mult and last cycle value).
 */
extern void update_vsyscall_old(struct timespec *ts, struct timespec *wtm,
				struct clocksource *c, u32 mult,
				cycle_t cycle_last);
extern void update_vsyscall_tz(void);

#else

/* No vsyscall support configured: the hooks compile away to no-ops. */
static inline void update_vsyscall(struct timekeeper *tk)
{
}
static inline void update_vsyscall_tz(void)
{
}
#endif
150
John Stultzd7b42022012-09-04 15:12:07 -0400151#endif /* _LINUX_TIMEKEEPER_INTERNAL_H */