/*
 * sched_clock.c: support for extending counters to full 64-bit ns counter
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/syscore_ops.h>
#include <linux/hrtimer.h>
#include <linux/sched_clock.h>
#include <linux/seqlock.h>
#include <linux/bitops.h>

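/*
 * Per-clock bookkeeping: the last sampled (cycle, ns) epoch pair, the
 * mult/shift pair used to scale cycle deltas to nanoseconds, and the
 * hrtimer interval at which the epoch must be refreshed to stay ahead
 * of counter wrap-around.
 */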
struct clock_data {
	ktime_t wrap_kt;
	u64 epoch_ns;
	u64 epoch_cyc;
	seqcount_t seq;
	unsigned long rate;
	u32 mult;
	u32 shift;
	bool suspended;
};

static struct hrtimer sched_clock_timer;
static int irqtime = -1;

core_param(irqtime, irqtime, int, 0400);

static struct clock_data cd = {
	.mult	= NSEC_PER_SEC / HZ,
};

static u64 __read_mostly sched_clock_mask;

static u64 notrace jiffy_sched_clock_read(void)
{
	/*
	 * We don't need to use get_jiffies_64 on 32-bit arches here
	 * because we register with BITS_PER_LONG
	 */
	return (u64)(jiffies - INITIAL_JIFFIES);
}

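/* Legacy path: adapt an old-style 32-bit read function to the 64-bit API. */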
static u32 __read_mostly (*read_sched_clock_32)(void);

static u64 notrace read_sched_clock_32_wrapper(void)
{
	return read_sched_clock_32();
}

static u64 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read;

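/*
 * Fixed-point scaling: ns = (cyc * mult) >> shift, where mult/shift
 * are chosen (via clocks_calc_mult_shift() at registration time) so
 * that this approximates cyc * NSEC_PER_SEC / rate without a 64-bit
 * division on the fast path.
 */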
static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
{
	return (cyc * mult) >> shift;
}

unsigned long long notrace sched_clock(void)
{
	u64 epoch_ns;
	u64 epoch_cyc;
	u64 cyc;
	unsigned long seq;

	if (cd.suspended)
		return cd.epoch_ns;

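	/*
	 * Snapshot the epoch under the seqcount and retry if an update
	 * raced with us, so a consistent (cyc, ns) pair is used below.
	 */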
	do {
		seq = raw_read_seqcount_begin(&cd.seq);
		epoch_cyc = cd.epoch_cyc;
		epoch_ns = cd.epoch_ns;
	} while (read_seqcount_retry(&cd.seq, seq));

	cyc = read_sched_clock();
	cyc = (cyc - epoch_cyc) & sched_clock_mask;
	return epoch_ns + cyc_to_ns(cyc, cd.mult, cd.shift);
}

/*
 * Atomically update the sched_clock epoch.
 */
static void notrace update_sched_clock(void)
{
	unsigned long flags;
	u64 cyc;
	u64 ns;

	cyc = read_sched_clock();
	ns = cd.epoch_ns +
		cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask,
			  cd.mult, cd.shift);

	raw_local_irq_save(flags);
	raw_write_seqcount_begin(&cd.seq);
	cd.epoch_ns = ns;
	cd.epoch_cyc = cyc;
	raw_write_seqcount_end(&cd.seq);
	raw_local_irq_restore(flags);
}

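/*
 * The hrtimer fires shortly before the counter wraps; refreshing the
 * epoch here keeps the masked delta in sched_clock() unambiguous.
 */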
static enum hrtimer_restart sched_clock_poll(struct hrtimer *hrt)
{
	update_sched_clock();
	hrtimer_forward_now(hrt, cd.wrap_kt);
	return HRTIMER_RESTART;
}

void __init sched_clock_register(u64 (*read)(void), int bits,
				 unsigned long rate)
{
	u64 res, wrap, new_mask, new_epoch, cyc, ns;
	u32 new_mult, new_shift;
	ktime_t new_wrap_kt;
	unsigned long r;
	char r_unit;

	if (cd.rate > rate)
		return;

	WARN_ON(!irqs_disabled());

	/* calculate the mult/shift to convert counter ticks to ns. */
	clocks_calc_mult_shift(&new_mult, &new_shift, rate, NSEC_PER_SEC, 3600);

	new_mask = CLOCKSOURCE_MASK(bits);

	/* calculate how many ns until we wrap */
	wrap = clocks_calc_max_nsecs(new_mult, new_shift, 0, new_mask);
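	/* re-arm the update timer an eighth of a wrap period early, as margin */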
	new_wrap_kt = ns_to_ktime(wrap - (wrap >> 3));

	/* update epoch for new counter and update epoch_ns from old counter */
	new_epoch = read();
	cyc = read_sched_clock();
	ns = cd.epoch_ns + cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask,
			  cd.mult, cd.shift);

	raw_write_seqcount_begin(&cd.seq);
	read_sched_clock = read;
	sched_clock_mask = new_mask;
	cd.rate = rate;
	cd.wrap_kt = new_wrap_kt;
	cd.mult = new_mult;
	cd.shift = new_shift;
	cd.epoch_cyc = new_epoch;
	cd.epoch_ns = ns;
	raw_write_seqcount_end(&cd.seq);

	r = rate;
	if (r >= 4000000) {
		r /= 1000000;
		r_unit = 'M';
	} else if (r >= 1000) {
		r /= 1000;
		r_unit = 'k';
	} else
		r_unit = ' ';

	/* calculate the ns resolution of this counter */
	res = cyc_to_ns(1ULL, new_mult, new_shift);

	pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lluns\n",
		bits, r, r_unit, res, wrap);

	/* Enable IRQ time accounting if we have a fast enough sched_clock */
	if (irqtime > 0 || (irqtime == -1 && rate >= 1000000))
		enable_sched_clock_irqtime();

	pr_debug("Registered %pF as sched_clock source\n", read);
}

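/*
 * Example (hypothetical driver; the names and register offset are
 * illustrative only): a timer block with a free-running 32-bit counter
 * at 24 MHz could be hooked up as
 *
 *	static void __iomem *my_timer_base;
 *
 *	static u64 notrace my_timer_read(void)
 *	{
 *		return readl_relaxed(my_timer_base + 0x04);
 *	}
 *
 *	sched_clock_register(my_timer_read, 32, 24000000);
 */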
void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
{
	read_sched_clock_32 = read;
	sched_clock_register(read_sched_clock_32_wrapper, bits, rate);
}

void __init sched_clock_postinit(void)
{
	/*
	 * If no sched_clock function has been provided at that point,
	 * make it the final one.
	 */
	if (read_sched_clock == jiffy_sched_clock_read)
		sched_clock_register(jiffy_sched_clock_read, BITS_PER_LONG, HZ);

	update_sched_clock();

	/*
	 * Start the timer to keep sched_clock() properly updated and
	 * set the initial epoch.
	 */
	hrtimer_init(&sched_clock_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	sched_clock_timer.function = sched_clock_poll;
	hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
}

static int sched_clock_suspend(void)
{
	sched_clock_poll(&sched_clock_timer);
	cd.suspended = true;
	return 0;
}

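/*
 * Reset the cycle epoch to the current counter value so that time
 * spent in suspend (when the counter may have stopped) is not counted.
 */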
static void sched_clock_resume(void)
{
	cd.epoch_cyc = read_sched_clock();
	cd.suspended = false;
}

static struct syscore_ops sched_clock_ops = {
	.suspend = sched_clock_suspend,
	.resume = sched_clock_resume,
};

static int __init sched_clock_syscore_init(void)
{
	register_syscore_ops(&sched_clock_ops);
	return 0;
}
device_initcall(sched_clock_syscore_init);