/*
 * sched_clock.c: support for extending counters to full 64-bit ns counter
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/syscore_ops.h>
#include <linux/hrtimer.h>
#include <linux/sched_clock.h>
#include <linux/seqlock.h>
#include <linux/bitops.h>

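/*
 * State shared between the clock readers and the update path:
 * epoch_cyc/epoch_ns hold the last counter/nanosecond snapshot pair and are
 * protected by the seqcount for lockless readers; mult/shift are the
 * fixed-point factors that convert counter cycles to nanoseconds; wrap_kt is
 * the hrtimer period used to refresh the epoch before the counter wraps;
 * suspended freezes sched_clock() across system suspend.
 */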
struct clock_data {
	ktime_t wrap_kt;
	u64 epoch_ns;
	u64 epoch_cyc;
	seqcount_t seq;
	unsigned long rate;
	u32 mult;
	u32 shift;
	bool suspended;
};

static struct hrtimer sched_clock_timer;
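/*
 * "irqtime" boot parameter: 1 forces IRQ time accounting on, 0 leaves it
 * off, and the default of -1 enables it automatically when the registered
 * clock runs at 1 MHz or better (see sched_clock_register() below).
 */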
static int irqtime = -1;

core_param(irqtime, irqtime, int, 0400);

static struct clock_data cd = {
	.mult	= NSEC_PER_SEC / HZ,
};

static u64 __read_mostly sched_clock_mask;

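/*
 * Fallback read function used until a platform registers a real counter:
 * sched_clock() then simply counts jiffies, giving only HZ resolution.
 */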
static u64 notrace jiffy_sched_clock_read(void)
{
	/*
	 * We don't need to use get_jiffies_64 on 32-bit arches here
	 * because we register with BITS_PER_LONG
	 */
	return (u64)(jiffies - INITIAL_JIFFIES);
}

static u64 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read;

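/*
 * Convert a counter delta to nanoseconds using fixed-point arithmetic:
 * (cyc * mult) >> shift is approximately cyc * NSEC_PER_SEC / rate, with
 * mult and shift chosen by clocks_calc_mult_shift() so the multiplication
 * stays within 64 bits for the requested range (3600 seconds in
 * sched_clock_register()).
 */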
static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
{
	return (cyc * mult) >> shift;
}

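/*
 * sched_clock(): lockless read of the extended counter.  The epoch pair is
 * sampled under the seqcount, the raw counter is read, and the delta since
 * the epoch is masked to the registered counter width and scaled to
 * nanoseconds.  While suspended, the frozen epoch_ns is returned instead.
 */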
unsigned long long notrace sched_clock(void)
{
	u64 epoch_ns;
	u64 epoch_cyc;
	u64 cyc;
	unsigned long seq;

	if (cd.suspended)
		return cd.epoch_ns;

	do {
		seq = raw_read_seqcount_begin(&cd.seq);
		epoch_cyc = cd.epoch_cyc;
		epoch_ns = cd.epoch_ns;
	} while (read_seqcount_retry(&cd.seq, seq));

	cyc = read_sched_clock();
	cyc = (cyc - epoch_cyc) & sched_clock_mask;
	return epoch_ns + cyc_to_ns(cyc, cd.mult, cd.shift);
}

/*
 * Atomically update the sched_clock epoch.
 */
static void notrace update_sched_clock(void)
{
	unsigned long flags;
	u64 cyc;
	u64 ns;

	cyc = read_sched_clock();
	ns = cd.epoch_ns +
		cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask,
			  cd.mult, cd.shift);

	raw_local_irq_save(flags);
	raw_write_seqcount_begin(&cd.seq);
	cd.epoch_ns = ns;
	cd.epoch_cyc = cyc;
	raw_write_seqcount_end(&cd.seq);
	raw_local_irq_restore(flags);
}

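/*
 * hrtimer callback, re-armed every wrap_kt: refreshing the epoch at least
 * once per counter wrap keeps the masked delta in sched_clock() from ever
 * spanning more than one wrap of the underlying counter.
 */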
static enum hrtimer_restart sched_clock_poll(struct hrtimer *hrt)
{
	update_sched_clock();
	hrtimer_forward_now(hrt, cd.wrap_kt);
	return HRTIMER_RESTART;
}

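/*
 * sched_clock_register() switches sched_clock() over to a new counter.
 * A registration slower than the currently installed one is silently
 * ignored, and the caller is expected to have interrupts disabled.
 *
 * Illustrative use from a clocksource driver (timer_base, TIMER_COUNT and
 * my_timer_read below are made-up names for the example):
 *
 *	static void __iomem *timer_base;
 *
 *	static u64 notrace my_timer_read(void)
 *	{
 *		return readl_relaxed(timer_base + TIMER_COUNT);
 *	}
 *
 *	sched_clock_register(my_timer_read, 32, 24000000);
 *
 * which would drive sched_clock() from a 32-bit counter running at 24 MHz.
 */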
void __init sched_clock_register(u64 (*read)(void), int bits,
				 unsigned long rate)
{
	u64 res, wrap, new_mask, new_epoch, cyc, ns;
	u32 new_mult, new_shift;
	ktime_t new_wrap_kt;
	unsigned long r;
	char r_unit;

	if (cd.rate > rate)
		return;

	WARN_ON(!irqs_disabled());

	/* calculate the mult/shift to convert counter ticks to ns. */
	clocks_calc_mult_shift(&new_mult, &new_shift, rate, NSEC_PER_SEC, 3600);

	new_mask = CLOCKSOURCE_MASK(bits);

	/* calculate how many nanosecs until we risk wrapping */
	wrap = clocks_calc_max_nsecs(new_mult, new_shift, 0, new_mask, NULL);
	new_wrap_kt = ns_to_ktime(wrap);

	/* update epoch for new counter and update epoch_ns from old counter */
	new_epoch = read();
	cyc = read_sched_clock();
	ns = cd.epoch_ns + cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask,
			  cd.mult, cd.shift);

	raw_write_seqcount_begin(&cd.seq);
	read_sched_clock = read;
	sched_clock_mask = new_mask;
	cd.rate = rate;
	cd.wrap_kt = new_wrap_kt;
	cd.mult = new_mult;
	cd.shift = new_shift;
	cd.epoch_cyc = new_epoch;
	cd.epoch_ns = ns;
	raw_write_seqcount_end(&cd.seq);

	r = rate;
	if (r >= 4000000) {
		r /= 1000000;
		r_unit = 'M';
	} else if (r >= 1000) {
		r /= 1000;
		r_unit = 'k';
	} else
		r_unit = ' ';

	/* calculate the ns resolution of this counter */
	res = cyc_to_ns(1ULL, new_mult, new_shift);

	pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lluns\n",
		bits, r, r_unit, res, wrap);

	/* Enable IRQ time accounting if we have a fast enough sched_clock */
	if (irqtime > 0 || (irqtime == -1 && rate >= 1000000))
		enable_sched_clock_irqtime();

	pr_debug("Registered %pF as sched_clock source\n", read);
}

void __init sched_clock_postinit(void)
{
	/*
	 * If no sched_clock() function has been provided at this point,
	 * make the jiffy-based fallback the final one.
	 */
	if (read_sched_clock == jiffy_sched_clock_read)
		sched_clock_register(jiffy_sched_clock_read, BITS_PER_LONG, HZ);

	update_sched_clock();

	/*
	 * Start the timer to keep sched_clock() properly updated and
	 * set the initial epoch.
	 */
	hrtimer_init(&sched_clock_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	sched_clock_timer.function = sched_clock_poll;
	hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
}

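/*
 * Suspend/resume hooks: on suspend the epoch is brought up to date and the
 * poll timer cancelled, so sched_clock() returns a frozen epoch_ns until
 * resume re-reads the cycle epoch (the counter may not have run, or may
 * have been reset, while the system was down) and restarts the timer.
 */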
static int sched_clock_suspend(void)
{
	update_sched_clock();
	hrtimer_cancel(&sched_clock_timer);
	cd.suspended = true;
	return 0;
}

static void sched_clock_resume(void)
{
	cd.epoch_cyc = read_sched_clock();
	hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
	cd.suspended = false;
}

static struct syscore_ops sched_clock_ops = {
	.suspend = sched_clock_suspend,
	.resume = sched_clock_resume,
};

static int __init sched_clock_syscore_init(void)
{
	register_syscore_ops(&sched_clock_ops);
	return 0;
}
device_initcall(sched_clock_syscore_init);