/*
 * sched_clock.c: support for extending counters to full 64-bit ns counter
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/syscore_ops.h>
#include <linux/hrtimer.h>
#include <linux/sched_clock.h>
#include <linux/seqlock.h>
#include <linux/bitops.h>

/**
 * struct clock_read_data - data required to read from sched_clock
 *
 * @epoch_ns:           sched_clock value at last update
 * @epoch_cyc:          Clock cycle value at last update
 * @sched_clock_mask:   Bitmask for two's complement subtraction of
 *                      non-64bit clocks
 * @read_sched_clock:   Current clock source (or dummy source when suspended)
 * @mult:               Multiplier for scaled math conversion
 * @shift:              Shift value for scaled math conversion
 * @suspended:          Flag to indicate if the clock is suspended (stopped)
 *
 * Care must be taken when updating this structure; it is read by
 * some very hot code paths. It occupies <=48 bytes and, when combined
 * with the seqcount used to synchronize access, comfortably fits into
 * a 64 byte cache line.
 */
struct clock_read_data {
        u64 epoch_ns;
        u64 epoch_cyc;
        u64 sched_clock_mask;
        u64 (*read_sched_clock)(void);
        u32 mult;
        u32 shift;
        bool suspended;
};

/**
 * struct clock_data - all data needed for sched_clock (including
 *                     registration of a new clock source)
 *
 * @seq:                Sequence counter for protecting updates.
 * @read_data:          Data required to read from sched_clock.
 * @wrap_kt:            Duration for which clock can run before wrapping
 * @rate:               Tick rate of the registered clock
 *
 * The ordering of this structure has been chosen to optimize cache
 * performance. In particular seq and read_data (combined) should fit
 * into a single 64 byte cache line.
 */
struct clock_data {
        seqcount_t seq;
        struct clock_read_data read_data;
        ktime_t wrap_kt;
        unsigned long rate;
};

static struct hrtimer sched_clock_timer;
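
/*
 * "irqtime=" boot parameter: the default of -1 enables IRQ time
 * accounting automatically when a clock of at least 1 MHz is
 * registered (see sched_clock_register() below), 0 leaves it
 * disabled and a positive value forces it on.
 */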
static int irqtime = -1;

core_param(irqtime, irqtime, int, 0400);

static u64 notrace jiffy_sched_clock_read(void)
{
        /*
         * We don't need to use get_jiffies_64 on 32-bit arches here
         * because we register with BITS_PER_LONG
         */
        return (u64)(jiffies - INITIAL_JIFFIES);
}

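/*
 * Default, jiffy-based clock used until a real one is registered:
 * with .shift left at zero, each jiffy advances sched_clock() by
 * NSEC_PER_SEC / HZ nanoseconds.
 */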
static struct clock_data cd ____cacheline_aligned = {
        .read_data = { .mult = NSEC_PER_SEC / HZ,
                       .read_sched_clock = jiffy_sched_clock_read, },
};

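/*
 * Scaled fixed-point conversion: ns = cyc * (mult / 2^shift). As an
 * illustrative example (real values come from clocks_calc_mult_shift()
 * below), a 1 MHz counter could use mult = 1000 << 22 and shift = 22,
 * making cyc_to_ns(5, mult, shift) == 5000ns.
 */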
static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
{
        return (cyc * mult) >> shift;
}

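/*
 * Lockless read of the current time: loop on the seqcount until a
 * consistent snapshot of read_data is observed, so this very hot
 * path takes no locks and can race safely with update_sched_clock().
 */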
unsigned long long notrace sched_clock(void)
{
        u64 cyc, res;
        unsigned long seq;
        struct clock_read_data *rd = &cd.read_data;

        do {
                seq = raw_read_seqcount_begin(&cd.seq);

                res = rd->epoch_ns;
                if (!rd->suspended) {
                        cyc = rd->read_sched_clock();
                        cyc = (cyc - rd->epoch_cyc) & rd->sched_clock_mask;
                        res += cyc_to_ns(cyc, rd->mult, rd->shift);
                }
        } while (read_seqcount_retry(&cd.seq, seq));

        return res;
}

/*
 * Atomically update the sched_clock epoch.
 */
static void notrace update_sched_clock(void)
{
        unsigned long flags;
        u64 cyc;
        u64 ns;
        struct clock_read_data *rd = &cd.read_data;

        cyc = rd->read_sched_clock();
        ns = rd->epoch_ns +
             cyc_to_ns((cyc - rd->epoch_cyc) & rd->sched_clock_mask,
                       rd->mult, rd->shift);

        raw_local_irq_save(flags);
        raw_write_seqcount_begin(&cd.seq);
        rd->epoch_ns = ns;
        rd->epoch_cyc = cyc;
        raw_write_seqcount_end(&cd.seq);
        raw_local_irq_restore(flags);
}

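/*
 * Periodically refresh the epoch before the counter can wrap. The
 * hrtimer re-arms itself with cd.wrap_kt, the wrap period computed
 * at registration time.
 */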
static enum hrtimer_restart sched_clock_poll(struct hrtimer *hrt)
{
        update_sched_clock();
        hrtimer_forward_now(hrt, cd.wrap_kt);
        return HRTIMER_RESTART;
}

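/*
 * sched_clock_register() - switch sched_clock() over to a new counter.
 *
 * Must be called with interrupts disabled, and a clock slower than the
 * one already registered is silently rejected. A typical caller is a
 * timer driver's init path, along these purely illustrative lines
 * (my_timer_read, my_timer_base and MY_TIMER_CNT are hypothetical
 * driver names):
 *
 *      static u64 notrace my_timer_read(void)
 *      {
 *              return readl_relaxed(my_timer_base + MY_TIMER_CNT);
 *      }
 *      ...
 *      sched_clock_register(my_timer_read, 32, 24000000);
 *
 * Here 32 is the counter width in bits and 24000000 its rate in Hz.
 */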
void __init sched_clock_register(u64 (*read)(void), int bits,
                                 unsigned long rate)
{
        u64 res, wrap, new_mask, new_epoch, cyc, ns;
        u32 new_mult, new_shift;
        unsigned long r;
        char r_unit;
        struct clock_read_data *rd = &cd.read_data;

        if (cd.rate > rate)
                return;

        WARN_ON(!irqs_disabled());

        /* calculate the mult/shift to convert counter ticks to ns. */
        clocks_calc_mult_shift(&new_mult, &new_shift, rate, NSEC_PER_SEC, 3600);
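        /*
         * The maxsec value of 3600 guarantees the mult/shift pair will
         * convert at least an hour's worth of counter ticks without the
         * 64-bit intermediate product overflowing.
         */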

        new_mask = CLOCKSOURCE_MASK(bits);
        cd.rate = rate;

        /* calculate how many nanosecs until we risk wrapping */
        wrap = clocks_calc_max_nsecs(new_mult, new_shift, 0, new_mask, NULL);
        cd.wrap_kt = ns_to_ktime(wrap);

        /* update epoch for new counter and update epoch_ns from old counter */
        new_epoch = read();
        cyc = rd->read_sched_clock();
        ns = rd->epoch_ns +
             cyc_to_ns((cyc - rd->epoch_cyc) & rd->sched_clock_mask,
                       rd->mult, rd->shift);

        raw_write_seqcount_begin(&cd.seq);
        rd->read_sched_clock = read;
        rd->sched_clock_mask = new_mask;
        rd->mult = new_mult;
        rd->shift = new_shift;
        rd->epoch_cyc = new_epoch;
        rd->epoch_ns = ns;
        raw_write_seqcount_end(&cd.seq);

        r = rate;
        if (r >= 4000000) {
                r /= 1000000;
                r_unit = 'M';
        } else if (r >= 1000) {
                r /= 1000;
                r_unit = 'k';
        } else
                r_unit = ' ';

        /* calculate the ns resolution of this counter */
        res = cyc_to_ns(1ULL, new_mult, new_shift);

        pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lluns\n",
                bits, r, r_unit, res, wrap);

        /* Enable IRQ time accounting if we have a fast enough sched_clock */
        if (irqtime > 0 || (irqtime == -1 && rate >= 1000000))
                enable_sched_clock_irqtime();

        pr_debug("Registered %pF as sched_clock source\n", read);
}

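/*
 * Called once during boot, after the timer drivers have had their
 * chance to call sched_clock_register(): fall back permanently to the
 * jiffy counter if nothing better was provided, then start the
 * wrap-avoidance timer.
 */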
void __init sched_clock_postinit(void)
{
        /*
         * If no sched_clock() function has been provided at this point,
         * make it the final one.
         */
        if (cd.read_data.read_sched_clock == jiffy_sched_clock_read)
                sched_clock_register(jiffy_sched_clock_read, BITS_PER_LONG, HZ);

        update_sched_clock();

        /*
         * Start the timer to keep sched_clock() properly updated and
         * set the initial epoch.
         */
        hrtimer_init(&sched_clock_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        sched_clock_timer.function = sched_clock_poll;
        hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
}

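/*
 * Syscore suspend hook: take a final epoch snapshot, stop the wrap
 * timer and mark the clock suspended, so sched_clock() returns the
 * frozen epoch_ns until resume.
 */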
static int sched_clock_suspend(void)
{
        struct clock_read_data *rd = &cd.read_data;

        update_sched_clock();
        hrtimer_cancel(&sched_clock_timer);
        rd->suspended = true;
        return 0;
}

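/*
 * Syscore resume hook: resync epoch_cyc with the counter's current
 * value so the interval spent suspended is not added to sched_clock(),
 * then restart the wrap timer.
 */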
static void sched_clock_resume(void)
{
        struct clock_read_data *rd = &cd.read_data;

        rd->epoch_cyc = rd->read_sched_clock();
        hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
        rd->suspended = false;
}

static struct syscore_ops sched_clock_ops = {
        .suspend = sched_clock_suspend,
        .resume = sched_clock_resume,
};

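/*
 * Register the suspend/resume hooks above; device_initcall() runs
 * this once during normal driver initialization.
 */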
static int __init sched_clock_syscore_init(void)
{
        register_syscore_ops(&sched_clock_ops);
        return 0;
}
device_initcall(sched_clock_syscore_init);