/*
 * linux/arch/arm/plat-nomadik/timer.c
 *
 * Copyright (C) 2008 STMicroelectronics
 * Copyright (C) 2010 Alessandro Rubini
 * Copyright (C) 2010 Linus Walleij for ST-Ericsson
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2, as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/clockchips.h>
#include <linux/clk.h>
#include <linux/jiffies.h>
#include <linux/err.h>
#include <linux/cnt32_to_63.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <asm/mach/time.h>

#include <plat/mtu.h>

void __iomem *mtu_base; /* Assigned by machine code */

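/*
 * The MTU block provides several 32-bit decrementing timers behind one
 * register window (offsets from <plat/mtu.h>): timer 0 is used below as
 * the free-running clocksource and sched_clock() source, timer 1 as the
 * one-shot clockevent.
 */
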
/*
 * Kernel assumes that sched_clock can be called early
 * but the MTU may not yet be initialized.
 */
static cycle_t nmdk_read_timer_dummy(struct clocksource *cs)
{
	return 0;
}

/* clocksource: MTU decrements, so we negate the value being read. */
static cycle_t nmdk_read_timer(struct clocksource *cs)
{
	return -readl(mtu_base + MTU_VAL(0));
}

static struct clocksource nmdk_clksrc = {
	.name		= "mtu_0",
	.rating		= 200,
	.read		= nmdk_read_timer_dummy,
	.mask		= CLOCKSOURCE_MASK(32),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

/*
 * Override the global weak sched_clock symbol with this
 * local implementation which uses the clocksource to get some
 * better resolution when scheduling the kernel.
 *
 * Because the hardware timer period may be quite short
 * (32.3 secs on the 133 MHz MTU timer selection on ux500)
 * and because cnt32_to_63() needs to be called at least once per
 * half period to work properly, a kernel keepwarm() timer is set up
 * to ensure this requirement is always met.
 *
 * Also the sched_clock timer will wrap around at some point;
 * here we set it to run continuously for a year.
 */
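/*
 * Minimum interval, in seconds, that the mult/shift conversion below
 * must cover without overflowing 64 bits: passed to
 * clocks_calc_mult_shift() so the factors stay valid for at least a year.
 */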
#define SCHED_CLOCK_MIN_WRAP (3600 * 24 * 365)
static struct timer_list cnt32_to_63_keepwarm_timer;
static u32 sched_mult;
static u32 sched_shift;

unsigned long long notrace sched_clock(void)
{
	u64 cycles;

	if (unlikely(!mtu_base))
		return 0;

	cycles = cnt32_to_63(-readl(mtu_base + MTU_VAL(0)));
	/*
	 * sched_mult is guaranteed to be even, so the multiplication
	 * shifts out bit 63
	 */
	return (cycles * sched_mult) >> sched_shift;
}

/* Just kick sched_clock every so often */
static void cnt32_to_63_keepwarm(unsigned long data)
{
	mod_timer(&cnt32_to_63_keepwarm_timer, round_jiffies(jiffies + data));
	(void) sched_clock();
}

/*
 * Set up a timer to keep sched_clock()'s cnt32_to_63 algorithm warm
 * once per half 32-bit timer wrap interval.
 */
static void __init nmdk_sched_clock_init(unsigned long rate)
{
	u32 v;
	unsigned long delta;
	u64 days;

	/* Find the appropriate mult and shift factors */
	clocks_calc_mult_shift(&sched_mult, &sched_shift,
			       rate, NSEC_PER_SEC, SCHED_CLOCK_MIN_WRAP);
	/* We need to multiply by an even number to get rid of bit 63 */
	if (sched_mult & 1)
		sched_mult++;

	/* Let's see what we get, take max counter and scale it */
	days = (0xFFFFFFFFFFFFFFFFLLU * sched_mult) >> sched_shift;
	do_div(days, NSEC_PER_SEC);
	do_div(days, (3600*24));

	pr_info("sched_clock: using %d bits @ %lu Hz wrap in %lu days\n",
		(64 - sched_shift), rate, (unsigned long) days);

	/*
	 * Program a timer to kick us at half 32-bit wraparound
	 * Formula: seconds per wrap = (2^32) / f
	 */
	v = 0xFFFFFFFFUL / rate;
	/* We want half of the wrap time to keep cnt32_to_63 warm */
	v /= 2;
	pr_debug("sched_clock: prescaled timer rate: %lu Hz, "
		 "initialize keepwarm timer every %d seconds\n", rate, v);
	/* Convert seconds to jiffies */
	delta = msecs_to_jiffies(v*1000);
	setup_timer(&cnt32_to_63_keepwarm_timer, cnt32_to_63_keepwarm, delta);
	mod_timer(&cnt32_to_63_keepwarm_timer, round_jiffies(jiffies + delta));
}

/* Clockevent device: use one-shot mode */
static void nmdk_clkevt_mode(enum clock_event_mode mode,
			     struct clock_event_device *dev)
{
	u32 cr;

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		pr_err("%s: periodic mode not supported\n", __func__);
		break;
	case CLOCK_EVT_MODE_ONESHOT:
		/* Load highest value, enable device, enable interrupts */
		cr = readl(mtu_base + MTU_CR(1));
		writel(0, mtu_base + MTU_LR(1));
		writel(cr | MTU_CRn_ENA, mtu_base + MTU_CR(1));
		writel(1 << 1, mtu_base + MTU_IMSC);
		break;
	case CLOCK_EVT_MODE_SHUTDOWN:
	case CLOCK_EVT_MODE_UNUSED:
		/* disable irq */
		writel(0, mtu_base + MTU_IMSC);
		/* disable timer */
		cr = readl(mtu_base + MTU_CR(1));
		cr &= ~MTU_CRn_ENA;
		writel(cr, mtu_base + MTU_CR(1));
		/* load some high default value */
		writel(0xffffffff, mtu_base + MTU_LR(1));
		break;
	case CLOCK_EVT_MODE_RESUME:
		break;
	}
}

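/*
 * Timer 1 runs in one-shot mode: writing a delta to its load register
 * restarts the countdown, and the interrupt fires when it expires.
 */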
static int nmdk_clkevt_next(unsigned long evt, struct clock_event_device *ev)
{
	/* writing the value has immediate effect */
	writel(evt, mtu_base + MTU_LR(1));
	return 0;
}

static struct clock_event_device nmdk_clkevt = {
	.name		= "mtu_1",
	.features	= CLOCK_EVT_FEAT_ONESHOT,
	.rating		= 200,
	.set_mode	= nmdk_clkevt_mode,
	.set_next_event	= nmdk_clkevt_next,
};

/*
 * IRQ Handler for timer 1 of the MTU block.
 */
static irqreturn_t nmdk_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evdev = dev_id;

	writel(1 << 1, mtu_base + MTU_ICR); /* Interrupt clear reg */
	evdev->event_handler(evdev);
	return IRQ_HANDLED;
}

static struct irqaction nmdk_timer_irq = {
	.name		= "Nomadik Timer Tick",
	.flags		= IRQF_DISABLED | IRQF_TIMER,
	.handler	= nmdk_timer_interrupt,
	.dev_id		= &nmdk_clkevt,
};

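/*
 * Machine-level timer setup: enable the MTU clock, then configure
 * timer 0 as the free-running clocksource (and sched_clock source)
 * and timer 1 as the one-shot clockevent.
 */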
void __init nmdk_timer_init(void)
{
	unsigned long rate;
	struct clk *clk0;
	u32 cr = MTU_CRn_32BITS;

	clk0 = clk_get_sys("mtu0", NULL);
	BUG_ON(IS_ERR(clk0));

	clk_enable(clk0);

	/*
	 * Tick rate is 2.4 MHz for Nomadik and 2.4 MHz, 100 MHz or 133 MHz
	 * for ux500.
	 * Use a divide-by-16 counter if the tick rate is more than 32 MHz.
	 * At 32 MHz, the timer (with a 32-bit counter) can be programmed
	 * to wake up at most 127 s ahead in time. Dividing a 2.4 MHz timer
	 * by 16 gives too low a timer resolution.
	 */
	rate = clk_get_rate(clk0);
	if (rate > 32000000) {
		rate /= 16;
		cr |= MTU_CRn_PRESCALE_16;
	} else {
		cr |= MTU_CRn_PRESCALE_1;
	}

	/* Timer 0 is the free running clocksource */
	writel(cr, mtu_base + MTU_CR(0));
	writel(0, mtu_base + MTU_LR(0));
	writel(0, mtu_base + MTU_BGLR(0));
	writel(cr | MTU_CRn_ENA, mtu_base + MTU_CR(0));

	/* Now the clock source is ready */
	nmdk_clksrc.read = nmdk_read_timer;

	if (clocksource_register_hz(&nmdk_clksrc, rate))
		pr_err("timer: failed to initialize clock source %s\n",
		       nmdk_clksrc.name);

	nmdk_sched_clock_init(rate);

	/* Timer 1 is used for events */

	clockevents_calc_mult_shift(&nmdk_clkevt, rate, MTU_MIN_RANGE);

	writel(cr | MTU_CRn_ONESHOT, mtu_base + MTU_CR(1)); /* off, currently */

	nmdk_clkevt.max_delta_ns =
		clockevent_delta2ns(0xffffffff, &nmdk_clkevt);
	nmdk_clkevt.min_delta_ns =
		clockevent_delta2ns(0x00000002, &nmdk_clkevt);
	nmdk_clkevt.cpumask = cpumask_of(0);

	/* Register irq and clockevents */
	setup_irq(IRQ_MTU0, &nmdk_timer_irq);
	clockevents_register_device(&nmdk_clkevt);
}