/*
 * sched_clock.c: support for extending counters to full 64-bit ns counter
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/syscore_ops.h>
#include <linux/timer.h>

#include <asm/sched_clock.h>

struct clock_data {
	u64 epoch_ns;		/* ns value at the last epoch update */
	u32 epoch_cyc;		/* counter value at the last epoch update */
	u32 epoch_cyc_copy;	/* written last; readers retry on mismatch */
	u32 mult;		/* cycle to ns multiplier */
	u32 shift;		/* cycle to ns shift */
	bool suspended;		/* reads return the frozen epoch_ns */
	bool needs_suspend;
};

static void sched_clock_poll(unsigned long wrap_ticks);
static DEFINE_TIMER(sched_clock_timer, sched_clock_poll, 0, 0);

/*
 * irqtime: -1 = auto-enable when the clock rate is at least 1MHz,
 *           0 = disabled, 1 = force-enabled (see setup_sched_clock()).
 */
static int irqtime = -1;

core_param(irqtime, irqtime, int, 0400);

/* Jiffy-based defaults, used until a real clock source is registered. */
static struct clock_data cd = {
	.mult	= NSEC_PER_SEC / HZ,
};

static u32 __read_mostly sched_clock_mask = 0xffffffff;

static u32 notrace jiffy_sched_clock_read(void)
{
	return (u32)(jiffies - INITIAL_JIFFIES);
}

static u32 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read;

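/*
 * For example (illustrative numbers, not from any particular platform):
 * a 32.768kHz counter converts exactly with mult = 4000000000 and
 * shift = 17, since (1 * 4000000000) >> 17 = 30517ns per cycle,
 * i.e. 10^9 / 32768 rounded down.
 */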
static inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift)
{
	return (cyc * mult) >> shift;
}

static unsigned long long cyc_to_sched_clock(u32 cyc, u32 mask)
{
	u64 epoch_ns;
	u32 epoch_cyc;

	/* Time is frozen at the last epoch while we are suspended. */
	if (cd.suspended)
		return cd.epoch_ns;

	/*
	 * Load the epoch_cyc and epoch_ns atomically. We do this by
	 * ensuring that we always write epoch_cyc, epoch_ns and
	 * epoch_cyc_copy in strict order, and read them in strict order.
	 * If epoch_cyc and epoch_cyc_copy are not equal, then we're in
	 * the middle of an update, and we should repeat the load.
	 */
	do {
		epoch_cyc = cd.epoch_cyc;
		smp_rmb();
		epoch_ns = cd.epoch_ns;
		smp_rmb();
	} while (epoch_cyc != cd.epoch_cyc_copy);

	return epoch_ns + cyc_to_ns((cyc - epoch_cyc) & mask, cd.mult, cd.shift);
}

/*
 * Atomically update the sched_clock epoch.
 */
static void notrace update_sched_clock(void)
{
	unsigned long flags;
	u32 cyc;
	u64 ns;

	cyc = read_sched_clock();
	ns = cd.epoch_ns +
		cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask,
			  cd.mult, cd.shift);
	/*
	 * Write epoch_cyc and epoch_ns in a way that the update is
	 * detectable in cyc_to_sched_clock().
	 */
	raw_local_irq_save(flags);
	cd.epoch_cyc = cyc;
	smp_wmb();
	cd.epoch_ns = ns;
	smp_wmb();
	cd.epoch_cyc_copy = cyc;
	raw_local_irq_restore(flags);
}

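/*
 * Periodic refresh: re-arm the timer and fold the cycles elapsed since
 * the last epoch into epoch_ns, so the epoch advances at least once per
 * counter wrap (wrap_ticks is ~90% of the wrap period, computed in
 * setup_sched_clock()).
 */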
static void sched_clock_poll(unsigned long wrap_ticks)
{
	mod_timer(&sched_clock_timer, round_jiffies(jiffies + wrap_ticks));
	update_sched_clock();
}

void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
{
	unsigned long r, w;
	u64 res, wrap;
	char r_unit;

	BUG_ON(bits > 32);
	WARN_ON(!irqs_disabled());
	WARN_ON(read_sched_clock != jiffy_sched_clock_read);
	read_sched_clock = read;
	sched_clock_mask = (u32)((1ULL << bits) - 1);	/* 1ULL: bits may be 32 */

	/* calculate the mult/shift to convert counter ticks to ns. */
	clocks_calc_mult_shift(&cd.mult, &cd.shift, rate, NSEC_PER_SEC, 0);

	r = rate;
	if (r >= 4000000) {
		r /= 1000000;
		r_unit = 'M';
	} else if (r >= 1000) {
		r /= 1000;
		r_unit = 'k';
	} else
		r_unit = ' ';

	/* calculate how many ns until we wrap */
	wrap = cyc_to_ns((1ULL << bits) - 1, cd.mult, cd.shift);
	do_div(wrap, NSEC_PER_MSEC);
	w = wrap;

	/* calculate the ns resolution of this counter */
	res = cyc_to_ns(1ULL, cd.mult, cd.shift);
	pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lums\n",
		bits, r, r_unit, res, w);

	/*
	 * Set the timer period (~90% of the wrap time) to keep
	 * sched_clock() properly updated, and set the initial epoch.
	 */
	sched_clock_timer.data = msecs_to_jiffies(w - (w / 10));
	update_sched_clock();

	/*
	 * Ensure that sched_clock() starts off at 0ns
	 */
	cd.epoch_ns = 0;

	/* Enable IRQ time accounting if we have a fast enough sched_clock */
	if (irqtime > 0 || (irqtime == -1 && rate >= 1000000))
		enable_sched_clock_irqtime();

	pr_debug("Registered %pF as sched_clock source\n", read);
}
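
/*
 * Example registration (hypothetical platform code: my_counter_base,
 * COUNTER_VALUE and my_timer_init are illustrative names, not a real
 * machine).  The read hook should be notrace, since the tracer itself
 * calls sched_clock(), and registration must happen with IRQs still
 * disabled.  A 32-bit counter at 24MHz gives ~41ns resolution and
 * wraps roughly every 179 seconds:
 *
 *	static void __iomem *my_counter_base;
 *
 *	static u32 notrace my_sched_clock_read(void)
 *	{
 *		return readl_relaxed(my_counter_base + COUNTER_VALUE);
 *	}
 *
 *	static void __init my_timer_init(void)
 *	{
 *		setup_sched_clock(my_sched_clock_read, 32, 24000000);
 *	}
 */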

unsigned long long notrace sched_clock(void)
{
	u32 cyc = read_sched_clock();
	return cyc_to_sched_clock(cyc, sched_clock_mask);
}

void __init sched_clock_postinit(void)
{
	/*
	 * If no sched_clock function has been provided by this point,
	 * make it the final one.
	 */
	if (read_sched_clock == jiffy_sched_clock_read)
		setup_sched_clock(jiffy_sched_clock_read, 32, HZ);

	sched_clock_poll(sched_clock_timer.data);
}

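/*
 * Suspend/resume: sched_clock() must not appear to jump across a
 * suspend.  The epoch is refreshed just before suspending, reads
 * return the frozen epoch_ns while cd.suspended is set, and resume
 * resyncs epoch_cyc because the counter may have stopped or been
 * reset while the system was down.
 */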
static int sched_clock_suspend(void)
{
	sched_clock_poll(sched_clock_timer.data);
	cd.suspended = true;
	return 0;
}

static void sched_clock_resume(void)
{
	cd.epoch_cyc = read_sched_clock();
	cd.epoch_cyc_copy = cd.epoch_cyc;
	cd.suspended = false;
}

static struct syscore_ops sched_clock_ops = {
	.suspend = sched_clock_suspend,
	.resume = sched_clock_resume,
};

static int __init sched_clock_syscore_init(void)
{
	register_syscore_ops(&sched_clock_ops);
	return 0;
}
device_initcall(sched_clock_syscore_init);