/*
 * DaVinci timer subsystem
 *
 * Author: Kevin Hilman, MontaVista Software, Inc. <source@mvista.com>
 *
 * 2007 (c) MontaVista Software, Inc. This file is licensed under
 * the terms of the GNU General Public License version 2. This program
 * is licensed "as is" without any warranty of any kind, whether express
 * or implied.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/sched_clock.h>

#include <asm/mach/irq.h>
#include <asm/mach/time.h>

#include <mach/cputype.h>
#include <mach/hardware.h>
#include <mach/time.h>

#include "clock.h"

static struct clock_event_device clockevent_davinci;
static unsigned int davinci_clock_tick_rate;

/*
 * This driver configures the two 64-bit count-up timers as four independent
 * 32-bit count-up timers.  Which 32-bit half serves as the clockevent and
 * which as the clocksource is chosen per SoC through davinci_soc_info
 * (see davinci_timer_init() below).
 */

enum {
	TID_CLOCKEVENT,
	TID_CLOCKSOURCE,
};

/* Timer register offsets */
#define PID12				0x0
#define TIM12				0x10
#define TIM34				0x14
#define PRD12				0x18
#define PRD34				0x1c
#define TCR				0x20
#define TGCR				0x24
#define WDTCR				0x28

/* Offsets of the 8 compare registers */
#define CMP12_0				0x60
#define CMP12_1				0x64
#define CMP12_2				0x68
#define CMP12_3				0x6c
#define CMP12_4				0x70
#define CMP12_5				0x74
#define CMP12_6				0x78
#define CMP12_7				0x7c

/* Timer register bitfields */
#define TCR_ENAMODE_DISABLE		0x0
#define TCR_ENAMODE_ONESHOT		0x1
#define TCR_ENAMODE_PERIODIC		0x2
#define TCR_ENAMODE_MASK		0x3

#define TGCR_TIMMODE_SHIFT		2
#define TGCR_TIMMODE_64BIT_GP		0x0
#define TGCR_TIMMODE_32BIT_UNCHAINED	0x1
#define TGCR_TIMMODE_64BIT_WDOG		0x2
#define TGCR_TIMMODE_32BIT_CHAINED	0x3

#define TGCR_TIM12RS_SHIFT		0
#define TGCR_TIM34RS_SHIFT		1
#define TGCR_RESET			0x0
#define TGCR_UNRESET			0x1
#define TGCR_RESET_MASK			0x3

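/*
 * Watchdog control fields.  The two WDKEY values below are assumed to be
 * the DaVinci watchdog unlock sequence: writing SEQ0 arms the watchdog
 * (pre-active state), writing SEQ1 activates it, and any later write with
 * a wrong key forces an immediate reset (see davinci_watchdog_reset() at
 * the bottom of this file).
 */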
#define WDTCR_WDEN_SHIFT		14
#define WDTCR_WDEN_DISABLE		0x0
#define WDTCR_WDEN_ENABLE		0x1
#define WDTCR_WDKEY_SHIFT		16
#define WDTCR_WDKEY_SEQ0		0xa5c6
#define WDTCR_WDKEY_SEQ1		0xda7e

struct timer_s {
	char *name;
	unsigned int id;
	unsigned long period;
	unsigned long opts;
	unsigned long flags;
	void __iomem *base;
	unsigned long tim_off;
	unsigned long prd_off;
	unsigned long enamode_shift;
	struct irqaction irqaction;
};
static struct timer_s timers[];

/* values for 'opts' field of struct timer_s */
#define TIMER_OPTS_DISABLED		0x01
#define TIMER_OPTS_ONESHOT		0x02
#define TIMER_OPTS_PERIODIC		0x04
#define TIMER_OPTS_STATE_MASK		0x07

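/*
 * When the clockevent and the clocksource share one 32-bit timer half,
 * the timer is left free-running and events are generated from a compare
 * register instead of reprogramming the period; see timer32_config() and
 * davinci_timer_init() below.
 */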
#define TIMER_OPTS_USE_COMPARE		0x80000000
#define USING_COMPARE(t)		((t)->opts & TIMER_OPTS_USE_COMPARE)

static char *id_to_name[] = {
	[T0_BOT]	= "timer0_0",
	[T0_TOP]	= "timer0_1",
	[T1_BOT]	= "timer1_0",
	[T1_TOP]	= "timer1_1",
};

static int timer32_config(struct timer_s *t)
{
	u32 tcr;
	struct davinci_soc_info *soc_info = &davinci_soc_info;

	if (USING_COMPARE(t)) {
		struct davinci_timer_instance *dtip =
				soc_info->timer_info->timers;
		int event_timer = ID_TO_TIMER(timers[TID_CLOCKEVENT].id);

		/*
		 * Next interrupt should be the current time reg value plus
		 * the new period (using 32-bit unsigned addition/wrapping
		 * to 0 on overflow).  This assumes that the clocksource
		 * is setup to count to 2^32-1 before wrapping around to 0.
		 */
		__raw_writel(__raw_readl(t->base + t->tim_off) + t->period,
			     t->base + dtip[event_timer].cmp_off);
	} else {
		tcr = __raw_readl(t->base + TCR);

		/* disable timer */
		tcr &= ~(TCR_ENAMODE_MASK << t->enamode_shift);
		__raw_writel(tcr, t->base + TCR);

		/* reset counter to zero, set new period */
		__raw_writel(0, t->base + t->tim_off);
		__raw_writel(t->period, t->base + t->prd_off);

		/* Set enable mode */
		if (t->opts & TIMER_OPTS_ONESHOT)
			tcr |= TCR_ENAMODE_ONESHOT << t->enamode_shift;
		else if (t->opts & TIMER_OPTS_PERIODIC)
			tcr |= TCR_ENAMODE_PERIODIC << t->enamode_shift;

		__raw_writel(tcr, t->base + TCR);
	}
	return 0;
}

static inline u32 timer32_read(struct timer_s *t)
{
	return __raw_readl(t->base + t->tim_off);
}

static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = &clockevent_davinci;

	evt->event_handler(evt);
	return IRQ_HANDLED;
}

/* called when 32-bit counter wraps */
static irqreturn_t freerun_interrupt(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

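/*
 * The clocksource timer is programmed with period ~0 so it counts the full
 * 32-bit range before wrapping; the compare-register path in timer32_config()
 * relies on this wrap-at-2^32 behaviour.
 */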
static struct timer_s timers[] = {
	[TID_CLOCKEVENT] = {
		.name		= "clockevent",
		.opts		= TIMER_OPTS_DISABLED,
		.irqaction = {
			.flags		= IRQF_TIMER,
			.handler	= timer_interrupt,
		}
	},
	[TID_CLOCKSOURCE] = {
		.name		= "free-run counter",
		.period		= ~0,
		.opts		= TIMER_OPTS_PERIODIC,
		.irqaction = {
			.flags		= IRQF_TIMER,
			.handler	= freerun_interrupt,
		}
	},
};

static void __init timer_init(void)
{
	struct davinci_soc_info *soc_info = &davinci_soc_info;
	struct davinci_timer_instance *dtip = soc_info->timer_info->timers;
	void __iomem *base[2];
	int i;

	/* Global init of each 64-bit timer as a whole */
	for (i = 0; i < 2; i++) {
		u32 tgcr;

		base[i] = ioremap(dtip[i].base, SZ_4K);
		if (WARN_ON(!base[i]))
			continue;

		/* Disabled, Internal clock source */
		__raw_writel(0, base[i] + TCR);

		/* reset both timers, no pre-scaler for timer34 */
		tgcr = 0;
		__raw_writel(tgcr, base[i] + TGCR);

		/* Set both timers to unchained 32-bit */
		tgcr = TGCR_TIMMODE_32BIT_UNCHAINED << TGCR_TIMMODE_SHIFT;
		__raw_writel(tgcr, base[i] + TGCR);

		/* Unreset timers */
		tgcr |= (TGCR_UNRESET << TGCR_TIM12RS_SHIFT) |
			(TGCR_UNRESET << TGCR_TIM34RS_SHIFT);
		__raw_writel(tgcr, base[i] + TGCR);

		/* Init both counters to zero */
		__raw_writel(0, base[i] + TIM12);
		__raw_writel(0, base[i] + TIM34);
	}

	/* Init of each timer as a 32-bit timer */
	for (i = 0; i < ARRAY_SIZE(timers); i++) {
		struct timer_s *t = &timers[i];
		int timer = ID_TO_TIMER(t->id);
		u32 irq;

		t->base = base[timer];
		if (!t->base)
			continue;

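		/*
		 * Each 64-bit timer block carries two independent enable-mode
		 * fields in TCR: shift 6 is assumed to be the ENAMODE field
		 * for the bottom (TIM12) half and shift 22 the one for the
		 * top (TIM34) half, matching TCR_ENAMODE_MASK above.
		 */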
		if (IS_TIMER_BOT(t->id)) {
			t->enamode_shift = 6;
			t->tim_off = TIM12;
			t->prd_off = PRD12;
			irq = dtip[timer].bottom_irq;
		} else {
			t->enamode_shift = 22;
			t->tim_off = TIM34;
			t->prd_off = PRD34;
			irq = dtip[timer].top_irq;
		}

		/* Register interrupt */
		t->irqaction.name = t->name;
		t->irqaction.dev_id = (void *)t;

		if (t->irqaction.handler != NULL) {
			irq = USING_COMPARE(t) ? dtip[i].cmp_irq : irq;
			setup_irq(irq, &t->irqaction);
		}
	}
}

/*
 * clocksource
 */
static cycle_t read_cycles(struct clocksource *cs)
{
	struct timer_s *t = &timers[TID_CLOCKSOURCE];

	return (cycles_t)timer32_read(t);
}

static struct clocksource clocksource_davinci = {
	.rating		= 300,
	.read		= read_cycles,
	.mask		= CLOCKSOURCE_MASK(32),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

/*
 * Override the weak default sched_clock with something more precise.
 */
static u64 notrace davinci_read_sched_clock(void)
{
	return timer32_read(&timers[TID_CLOCKSOURCE]);
}

/*
 * clockevent
 */
static int davinci_set_next_event(unsigned long cycles,
				  struct clock_event_device *evt)
{
	struct timer_s *t = &timers[TID_CLOCKEVENT];

	t->period = cycles;
	timer32_config(t);
	return 0;
}

static void davinci_set_mode(enum clock_event_mode mode,
			     struct clock_event_device *evt)
{
	struct timer_s *t = &timers[TID_CLOCKEVENT];

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
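		/* one jiffy's worth of timer input-clock cycles */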
		t->period = davinci_clock_tick_rate / HZ;
		t->opts &= ~TIMER_OPTS_STATE_MASK;
		t->opts |= TIMER_OPTS_PERIODIC;
		timer32_config(t);
		break;
	case CLOCK_EVT_MODE_ONESHOT:
		t->opts &= ~TIMER_OPTS_STATE_MASK;
		t->opts |= TIMER_OPTS_ONESHOT;
		break;
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		t->opts &= ~TIMER_OPTS_STATE_MASK;
		t->opts |= TIMER_OPTS_DISABLED;
		break;
	case CLOCK_EVT_MODE_RESUME:
		break;
	}
}

static struct clock_event_device clockevent_davinci = {
	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.set_next_event	= davinci_set_next_event,
	.set_mode	= davinci_set_mode,
};

void __init davinci_timer_init(void)
{
	struct clk *timer_clk;
	struct davinci_soc_info *soc_info = &davinci_soc_info;
	unsigned int clockevent_id;
	unsigned int clocksource_id;
	int i;

	clockevent_id = soc_info->timer_info->clockevent_id;
	clocksource_id = soc_info->timer_info->clocksource_id;

	timers[TID_CLOCKEVENT].id = clockevent_id;
	timers[TID_CLOCKSOURCE].id = clocksource_id;

	/*
	 * If using same timer for both clock events & clocksource,
	 * a compare register must be used to generate an event interrupt.
	 * This is equivalent to a oneshot timer only (not periodic).
	 */
	if (clockevent_id == clocksource_id) {
		struct davinci_timer_instance *dtip =
				soc_info->timer_info->timers;
		int event_timer = ID_TO_TIMER(clockevent_id);

		/* Only bottom timers can use compare regs */
		if (IS_TIMER_TOP(clockevent_id))
			pr_warn("%s: Invalid use of system timers.  Results unpredictable.\n",
				__func__);
		else if ((dtip[event_timer].cmp_off == 0)
				|| (dtip[event_timer].cmp_irq == 0))
			pr_warn("%s: Invalid timer instance setup.  Results unpredictable.\n",
				__func__);
		else {
			timers[TID_CLOCKEVENT].opts |= TIMER_OPTS_USE_COMPARE;
			clockevent_davinci.features = CLOCK_EVT_FEAT_ONESHOT;
		}
	}

	timer_clk = clk_get(NULL, "timer0");
	BUG_ON(IS_ERR(timer_clk));
	clk_prepare_enable(timer_clk);

	/* init timer hw */
	timer_init();

	davinci_clock_tick_rate = clk_get_rate(timer_clk);

	/* setup clocksource */
	clocksource_davinci.name = id_to_name[clocksource_id];
	if (clocksource_register_hz(&clocksource_davinci,
				    davinci_clock_tick_rate))
		pr_err("%s: can't register clocksource!\n",
		       clocksource_davinci.name);

	sched_clock_register(davinci_read_sched_clock, 32,
			     davinci_clock_tick_rate);

	/* setup clockevent */
	clockevent_davinci.name = id_to_name[timers[TID_CLOCKEVENT].id];

	clockevent_davinci.cpumask = cpumask_of(0);
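	/*
	 * The delta limits are in timer cycles: at least 1 cycle ahead, and
	 * at most 0xfffffffe since the compare/period registers are 32 bits
	 * wide.
	 */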
	clockevents_config_and_register(&clockevent_davinci,
					davinci_clock_tick_rate, 1, 0xfffffffe);

	for (i = 0; i < ARRAY_SIZE(timers); i++)
		timer32_config(&timers[i]);
}

/* reset board using watchdog timer */
void davinci_watchdog_reset(struct platform_device *pdev)
{
	u32 tgcr, wdtcr;
	void __iomem *base;
	struct clk *wd_clk;

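	/*
	 * Reset sequence: map the watchdog timer instance, switch it to
	 * 64-bit watchdog mode with a zero period, walk it through the
	 * pre-active and active key states, then write an invalid key to
	 * force an immediate reset.
	 */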
	base = ioremap(pdev->resource[0].start, SZ_4K);
	if (WARN_ON(!base))
		return;

	wd_clk = clk_get(&pdev->dev, NULL);
	if (WARN_ON(IS_ERR(wd_clk)))
		return;
	clk_prepare_enable(wd_clk);

	/* disable, internal clock source */
	__raw_writel(0, base + TCR);

	/* reset timer, set mode to 64-bit watchdog, and unreset */
	tgcr = 0;
	__raw_writel(tgcr, base + TGCR);
	tgcr = TGCR_TIMMODE_64BIT_WDOG << TGCR_TIMMODE_SHIFT;
	tgcr |= (TGCR_UNRESET << TGCR_TIM12RS_SHIFT) |
		(TGCR_UNRESET << TGCR_TIM34RS_SHIFT);
	__raw_writel(tgcr, base + TGCR);

	/* clear counter and period regs */
	__raw_writel(0, base + TIM12);
	__raw_writel(0, base + TIM34);
	__raw_writel(0, base + PRD12);
	__raw_writel(0, base + PRD34);

	/* put watchdog in pre-active state */
	wdtcr = __raw_readl(base + WDTCR);
	wdtcr = (WDTCR_WDKEY_SEQ0 << WDTCR_WDKEY_SHIFT) |
		(WDTCR_WDEN_ENABLE << WDTCR_WDEN_SHIFT);
	__raw_writel(wdtcr, base + WDTCR);

	/* put watchdog in active state */
	wdtcr = (WDTCR_WDKEY_SEQ1 << WDTCR_WDKEY_SHIFT) |
		(WDTCR_WDEN_ENABLE << WDTCR_WDEN_SHIFT);
	__raw_writel(wdtcr, base + WDTCR);

	/*
	 * write an invalid value to the WDKEY field to trigger
	 * a watchdog reset
	 */
	wdtcr = 0x00004000;
	__raw_writel(wdtcr, base + WDTCR);
}