/*
 * linux/arch/arm/plat-nomadik/timer.c
 *
 * Copyright (C) 2008 STMicroelectronics
 * Copyright (C) 2010 Alessandro Rubini
 * Copyright (C) 2010 Linus Walleij for ST-Ericsson
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2, as
 * published by the Free Software Foundation.
 */
12#include <linux/init.h>
13#include <linux/interrupt.h>
14#include <linux/irq.h>
15#include <linux/io.h>
16#include <linux/clockchips.h>
Linus Walleijba327b12010-05-26 07:38:54 +010017#include <linux/clk.h>
Alessandro Rubini28ad94e2009-07-02 19:06:47 +010018#include <linux/jiffies.h>
Linus Walleijba327b12010-05-26 07:38:54 +010019#include <linux/err.h>
Linus Walleij8fbb97a22010-11-19 10:16:05 +010020#include <linux/cnt32_to_63.h>
21#include <linux/timer.h>
Alessandro Rubini28ad94e2009-07-02 19:06:47 +010022#include <asm/mach/time.h>
Alessandro Rubini28ad94e2009-07-02 19:06:47 +010023
Srinidhi Kasagar59b559d2009-11-12 06:20:54 +010024#include <plat/mtu.h>
Alessandro Rubini28ad94e2009-07-02 19:06:47 +010025
Linus Walleij8fbb97a22010-11-19 10:16:05 +010026void __iomem *mtu_base; /* Assigned by machine code */
Srinidhi Kasagar59b559d2009-11-12 06:20:54 +010027
Linus Walleij2a847512010-05-07 10:03:02 +010028/*
29 * Kernel assumes that sched_clock can be called early
30 * but the MTU may not yet be initialized.
31 */
32static cycle_t nmdk_read_timer_dummy(struct clocksource *cs)
33{
34 return 0;
35}
36
Alessandro Rubinib102c012010-03-05 12:38:51 +010037/* clocksource: MTU decrements, so we negate the value being read. */
Alessandro Rubini28ad94e2009-07-02 19:06:47 +010038static cycle_t nmdk_read_timer(struct clocksource *cs)
39{
Alessandro Rubinib102c012010-03-05 12:38:51 +010040 return -readl(mtu_base + MTU_VAL(0));
Alessandro Rubini28ad94e2009-07-02 19:06:47 +010041}
42
43static struct clocksource nmdk_clksrc = {
44 .name = "mtu_0",
Alessandro Rubinib102c012010-03-05 12:38:51 +010045 .rating = 200,
Linus Walleij2a847512010-05-07 10:03:02 +010046 .read = nmdk_read_timer_dummy,
Alessandro Rubinib102c012010-03-05 12:38:51 +010047 .mask = CLOCKSOURCE_MASK(32),
Alessandro Rubini28ad94e2009-07-02 19:06:47 +010048 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
49};
50
Linus Walleij2a847512010-05-07 10:03:02 +010051/*
52 * Override the global weak sched_clock symbol with this
53 * local implementation which uses the clocksource to get some
Linus Walleij8fbb97a22010-11-19 10:16:05 +010054 * better resolution when scheduling the kernel.
55 *
56 * Because the hardware timer period may be quite short
57 * (32.3 secs on the 133 MHz MTU timer selection on ux500)
58 * and because cnt32_to_63() needs to be called at least once per
59 * half period to work properly, a kernel keepwarm() timer is set up
60 * to ensure this requirement is always met.
61 *
62 * Also the sched_clock timer will wrap around at some point,
63 * here we set it to run continously for a year.
Linus Walleij2a847512010-05-07 10:03:02 +010064 */
Linus Walleij8fbb97a22010-11-19 10:16:05 +010065#define SCHED_CLOCK_MIN_WRAP 3600*24*365
66static struct timer_list cnt32_to_63_keepwarm_timer;
67static u32 sched_mult;
68static u32 sched_shift;
69
Linus Walleij2a847512010-05-07 10:03:02 +010070unsigned long long notrace sched_clock(void)
71{
Linus Walleij8fbb97a22010-11-19 10:16:05 +010072 u64 cycles;
73
74 if (unlikely(!mtu_base))
75 return 0;
76
77 cycles = cnt32_to_63(-readl(mtu_base + MTU_VAL(0)));
78 /*
79 * sched_mult is guaranteed to be even so will
80 * shift out bit 63
81 */
82 return (cycles * sched_mult) >> sched_shift;
83}
84
85/* Just kick sched_clock every so often */
86static void cnt32_to_63_keepwarm(unsigned long data)
87{
88 mod_timer(&cnt32_to_63_keepwarm_timer, round_jiffies(jiffies + data));
89 (void) sched_clock();
90}
91
92/*
93 * Set up a timer to keep sched_clock():s 32_to_63 algorithm warm
94 * once in half a 32bit timer wrap interval.
95 */
96static void __init nmdk_sched_clock_init(unsigned long rate)
97{
98 u32 v;
99 unsigned long delta;
100 u64 days;
101
102 /* Find the apropriate mult and shift factors */
103 clocks_calc_mult_shift(&sched_mult, &sched_shift,
104 rate, NSEC_PER_SEC, SCHED_CLOCK_MIN_WRAP);
105 /* We need to multiply by an even number to get rid of bit 63 */
106 if (sched_mult & 1)
107 sched_mult++;
108
109 /* Let's see what we get, take max counter and scale it */
110 days = (0xFFFFFFFFFFFFFFFFLLU * sched_mult) >> sched_shift;
111 do_div(days, NSEC_PER_SEC);
112 do_div(days, (3600*24));
113
114 pr_info("sched_clock: using %d bits @ %lu Hz wrap in %lu days\n",
115 (64 - sched_shift), rate, (unsigned long) days);
116
117 /*
118 * Program a timer to kick us at half 32bit wraparound
119 * Formula: seconds per wrap = (2^32) / f
120 */
121 v = 0xFFFFFFFFUL / rate;
122 /* We want half of the wrap time to keep cnt32_to_63 warm */
123 v /= 2;
124 pr_debug("sched_clock: prescaled timer rate: %lu Hz, "
125 "initialize keepwarm timer every %d seconds\n", rate, v);
126 /* Convert seconds to jiffies */
127 delta = msecs_to_jiffies(v*1000);
128 setup_timer(&cnt32_to_63_keepwarm_timer, cnt32_to_63_keepwarm, delta);
129 mod_timer(&cnt32_to_63_keepwarm_timer, round_jiffies(jiffies + delta));
Linus Walleij2a847512010-05-07 10:03:02 +0100130}
131
Alessandro Rubinib102c012010-03-05 12:38:51 +0100132/* Clockevent device: use one-shot mode */
Alessandro Rubini28ad94e2009-07-02 19:06:47 +0100133static void nmdk_clkevt_mode(enum clock_event_mode mode,
134 struct clock_event_device *dev)
135{
Alessandro Rubinib102c012010-03-05 12:38:51 +0100136 u32 cr;
137
Alessandro Rubini28ad94e2009-07-02 19:06:47 +0100138 switch (mode) {
139 case CLOCK_EVT_MODE_PERIODIC:
Alessandro Rubinib102c012010-03-05 12:38:51 +0100140 pr_err("%s: periodic mode not supported\n", __func__);
Alessandro Rubini28ad94e2009-07-02 19:06:47 +0100141 break;
142 case CLOCK_EVT_MODE_ONESHOT:
Alessandro Rubinib102c012010-03-05 12:38:51 +0100143 /* Load highest value, enable device, enable interrupts */
144 cr = readl(mtu_base + MTU_CR(1));
145 writel(0, mtu_base + MTU_LR(1));
146 writel(cr | MTU_CRn_ENA, mtu_base + MTU_CR(1));
Linus Walleija0719f52010-09-13 13:40:04 +0100147 writel(1 << 1, mtu_base + MTU_IMSC);
Alessandro Rubinib102c012010-03-05 12:38:51 +0100148 break;
Alessandro Rubini28ad94e2009-07-02 19:06:47 +0100149 case CLOCK_EVT_MODE_SHUTDOWN:
150 case CLOCK_EVT_MODE_UNUSED:
Alessandro Rubinib102c012010-03-05 12:38:51 +0100151 /* disable irq */
152 writel(0, mtu_base + MTU_IMSC);
Linus Walleij29179472010-06-01 08:26:49 +0100153 /* disable timer */
154 cr = readl(mtu_base + MTU_CR(1));
155 cr &= ~MTU_CRn_ENA;
156 writel(cr, mtu_base + MTU_CR(1));
157 /* load some high default value */
158 writel(0xffffffff, mtu_base + MTU_LR(1));
Alessandro Rubini28ad94e2009-07-02 19:06:47 +0100159 break;
160 case CLOCK_EVT_MODE_RESUME:
161 break;
162 }
163}
164
Alessandro Rubinib102c012010-03-05 12:38:51 +0100165static int nmdk_clkevt_next(unsigned long evt, struct clock_event_device *ev)
166{
167 /* writing the value has immediate effect */
168 writel(evt, mtu_base + MTU_LR(1));
169 return 0;
170}
171
Alessandro Rubini28ad94e2009-07-02 19:06:47 +0100172static struct clock_event_device nmdk_clkevt = {
Alessandro Rubinib102c012010-03-05 12:38:51 +0100173 .name = "mtu_1",
174 .features = CLOCK_EVT_FEAT_ONESHOT,
Alessandro Rubinib102c012010-03-05 12:38:51 +0100175 .rating = 200,
Alessandro Rubini28ad94e2009-07-02 19:06:47 +0100176 .set_mode = nmdk_clkevt_mode,
Alessandro Rubinib102c012010-03-05 12:38:51 +0100177 .set_next_event = nmdk_clkevt_next,
Alessandro Rubini28ad94e2009-07-02 19:06:47 +0100178};
179
180/*
Alessandro Rubinib102c012010-03-05 12:38:51 +0100181 * IRQ Handler for timer 1 of the MTU block.
Alessandro Rubini28ad94e2009-07-02 19:06:47 +0100182 */
183static irqreturn_t nmdk_timer_interrupt(int irq, void *dev_id)
184{
Alessandro Rubinib102c012010-03-05 12:38:51 +0100185 struct clock_event_device *evdev = dev_id;
Alessandro Rubini28ad94e2009-07-02 19:06:47 +0100186
Alessandro Rubinib102c012010-03-05 12:38:51 +0100187 writel(1 << 1, mtu_base + MTU_ICR); /* Interrupt clear reg */
188 evdev->event_handler(evdev);
Alessandro Rubini28ad94e2009-07-02 19:06:47 +0100189 return IRQ_HANDLED;
190}
191
Alessandro Rubini28ad94e2009-07-02 19:06:47 +0100192static struct irqaction nmdk_timer_irq = {
193 .name = "Nomadik Timer Tick",
194 .flags = IRQF_DISABLED | IRQF_TIMER,
195 .handler = nmdk_timer_interrupt,
Alessandro Rubinib102c012010-03-05 12:38:51 +0100196 .dev_id = &nmdk_clkevt,
Alessandro Rubini28ad94e2009-07-02 19:06:47 +0100197};
198
Srinidhi Kasagar59b559d2009-11-12 06:20:54 +0100199void __init nmdk_timer_init(void)
Alessandro Rubini28ad94e2009-07-02 19:06:47 +0100200{
Alessandro Rubini28ad94e2009-07-02 19:06:47 +0100201 unsigned long rate;
Linus Walleijba327b12010-05-26 07:38:54 +0100202 struct clk *clk0;
Linus Walleija0719f52010-09-13 13:40:04 +0100203 u32 cr = MTU_CRn_32BITS;
Linus Walleijba327b12010-05-26 07:38:54 +0100204
205 clk0 = clk_get_sys("mtu0", NULL);
206 BUG_ON(IS_ERR(clk0));
207
Linus Walleijba327b12010-05-26 07:38:54 +0100208 clk_enable(clk0);
Alessandro Rubini28ad94e2009-07-02 19:06:47 +0100209
Alessandro Rubinib102c012010-03-05 12:38:51 +0100210 /*
Linus Walleija0719f52010-09-13 13:40:04 +0100211 * Tick rate is 2.4MHz for Nomadik and 2.4Mhz, 100MHz or 133 MHz
212 * for ux500.
213 * Use a divide-by-16 counter if the tick rate is more than 32MHz.
214 * At 32 MHz, the timer (with 32 bit counter) can be programmed
215 * to wake-up at a max 127s a head in time. Dividing a 2.4 MHz timer
216 * with 16 gives too low timer resolution.
Alessandro Rubinib102c012010-03-05 12:38:51 +0100217 */
Linus Walleijba327b12010-05-26 07:38:54 +0100218 rate = clk_get_rate(clk0);
Linus Walleija0719f52010-09-13 13:40:04 +0100219 if (rate > 32000000) {
Alessandro Rubinib102c012010-03-05 12:38:51 +0100220 rate /= 16;
221 cr |= MTU_CRn_PRESCALE_16;
222 } else {
223 cr |= MTU_CRn_PRESCALE_1;
224 }
Linus Walleij29179472010-06-01 08:26:49 +0100225 clocksource_calc_mult_shift(&nmdk_clksrc, rate, MTU_MIN_RANGE);
Alessandro Rubini28ad94e2009-07-02 19:06:47 +0100226
Alessandro Rubinib102c012010-03-05 12:38:51 +0100227 /* Timer 0 is the free running clocksource */
228 writel(cr, mtu_base + MTU_CR(0));
229 writel(0, mtu_base + MTU_LR(0));
230 writel(0, mtu_base + MTU_BGLR(0));
231 writel(cr | MTU_CRn_ENA, mtu_base + MTU_CR(0));
Alessandro Rubini28ad94e2009-07-02 19:06:47 +0100232
Linus Walleij8fbb97a22010-11-19 10:16:05 +0100233 /* Now the clock source is ready */
Linus Walleij2a847512010-05-07 10:03:02 +0100234 nmdk_clksrc.read = nmdk_read_timer;
Alessandro Rubini28ad94e2009-07-02 19:06:47 +0100235
Srinidhi Kasagar59b559d2009-11-12 06:20:54 +0100236 if (clocksource_register(&nmdk_clksrc))
Alessandro Rubinib102c012010-03-05 12:38:51 +0100237 pr_err("timer: failed to initialize clock source %s\n",
238 nmdk_clksrc.name);
239
Linus Walleij8fbb97a22010-11-19 10:16:05 +0100240 nmdk_sched_clock_init(rate);
241
Linus Walleij99f76892010-09-13 13:38:55 +0100242 /* Timer 1 is used for events */
243
Linus Walleij29179472010-06-01 08:26:49 +0100244 clockevents_calc_mult_shift(&nmdk_clkevt, rate, MTU_MIN_RANGE);
245
Alessandro Rubinib102c012010-03-05 12:38:51 +0100246 writel(cr | MTU_CRn_ONESHOT, mtu_base + MTU_CR(1)); /* off, currently */
Linus Walleij29179472010-06-01 08:26:49 +0100247
Alessandro Rubinib102c012010-03-05 12:38:51 +0100248 nmdk_clkevt.max_delta_ns =
249 clockevent_delta2ns(0xffffffff, &nmdk_clkevt);
250 nmdk_clkevt.min_delta_ns =
251 clockevent_delta2ns(0x00000002, &nmdk_clkevt);
252 nmdk_clkevt.cpumask = cpumask_of(0);
Alessandro Rubini28ad94e2009-07-02 19:06:47 +0100253
254 /* Register irq and clockevents */
255 setup_irq(IRQ_MTU0, &nmdk_timer_irq);
Alessandro Rubini28ad94e2009-07-02 19:06:47 +0100256 clockevents_register_device(&nmdk_clkevt);
257}