/*
 * DaVinci timer subsystem
 *
 * Author: Kevin Hilman, MontaVista Software, Inc. <source@mvista.com>
 *
 * 2007 (c) MontaVista Software, Inc. This file is licensed under
 * the terms of the GNU General Public License version 2. This program
 * is licensed "as is" without any warranty of any kind, whether express
 * or implied.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/sched_clock.h>

#include <asm/mach/irq.h>
#include <asm/mach/time.h>

#include <mach/cputype.h>
#include <mach/hardware.h>
#include <mach/time.h>

#include "clock.h"

static struct clock_event_device clockevent_davinci;
static unsigned int davinci_clock_tick_rate;	/* rate of the "timer0" clock, set in davinci_timer_init() */

/*
 * This driver configures the 2 64-bit count-up timers as 4 independent
 * 32-bit count-up timers, of which one is used as the clock event device
 * and one as the clocksource (they may share a timer via its compare
 * registers):
 */

enum {
	TID_CLOCKEVENT,
	TID_CLOCKSOURCE,
};

/* Timer register offsets */
#define PID12				0x0
#define TIM12				0x10
#define TIM34				0x14
#define PRD12				0x18
#define PRD34				0x1c
#define TCR				0x20
#define TGCR				0x24
#define WDTCR				0x28

/*
 * Offsets of the 8 compare registers.  The compare logic works against
 * TIM12, so only a "bottom" timer half can drive the clockevent from a
 * compare match.
 */
#define CMP12_0				0x60
#define CMP12_1				0x64
#define CMP12_2				0x68
#define CMP12_3				0x6c
#define CMP12_4				0x70
#define CMP12_5				0x74
#define CMP12_6				0x78
#define CMP12_7				0x7c

/* Timer register bitfields */
#define TCR_ENAMODE_DISABLE		0x0
#define TCR_ENAMODE_ONESHOT		0x1
#define TCR_ENAMODE_PERIODIC		0x2
#define TCR_ENAMODE_MASK		0x3

#define TGCR_TIMMODE_SHIFT		2
#define TGCR_TIMMODE_64BIT_GP		0x0
#define TGCR_TIMMODE_32BIT_UNCHAINED	0x1
#define TGCR_TIMMODE_64BIT_WDOG		0x2
#define TGCR_TIMMODE_32BIT_CHAINED	0x3

#define TGCR_TIM12RS_SHIFT		0
#define TGCR_TIM34RS_SHIFT		1
#define TGCR_RESET			0x0
#define TGCR_UNRESET			0x1
#define TGCR_RESET_MASK			0x3

#define WDTCR_WDEN_SHIFT		14
#define WDTCR_WDEN_DISABLE		0x0
#define WDTCR_WDEN_ENABLE		0x1
#define WDTCR_WDKEY_SHIFT		16
#define WDTCR_WDKEY_SEQ0		0xa5c6
#define WDTCR_WDKEY_SEQ1		0xda7e

struct timer_s {
	char *name;
	unsigned int id;		/* one of T0_BOT/T0_TOP/T1_BOT/T1_TOP */
	unsigned long period;		/* period in timer ticks */
	unsigned long opts;		/* TIMER_OPTS_* flags below */
	unsigned long flags;
	void __iomem *base;		/* ioremapped base of the 64-bit timer module */
	unsigned long tim_off;		/* TIM12 or TIM34 */
	unsigned long prd_off;		/* PRD12 or PRD34 */
	unsigned long enamode_shift;	/* position of this half's ENAMODE bits in TCR */
	struct irqaction irqaction;
};

/* Defined below; forward-declared because timer32_config() refers to it. */
static struct timer_s timers[];

/* values for 'opts' field of struct timer_s */
#define TIMER_OPTS_DISABLED		0x01
#define TIMER_OPTS_ONESHOT		0x02
#define TIMER_OPTS_PERIODIC		0x04
#define TIMER_OPTS_STATE_MASK		0x07

#define TIMER_OPTS_USE_COMPARE		0x80000000
#define USING_COMPARE(t)		((t)->opts & TIMER_OPTS_USE_COMPARE)

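/*
 * Human-readable names for the timer ids.  T0_BOT/T0_TOP/T1_BOT/T1_TOP come
 * from <mach/time.h>; each id names one 32-bit half ("bottom" = TIM12,
 * "top" = TIM34) of one of the two 64-bit timer modules.
 */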
static char *id_to_name[] = {
	[T0_BOT]	= "timer0_0",
	[T0_TOP]	= "timer0_1",
	[T1_BOT]	= "timer1_0",
	[T1_TOP]	= "timer1_1",
};

static int timer32_config(struct timer_s *t)
{
	u32 tcr;
	struct davinci_soc_info *soc_info = &davinci_soc_info;

	if (USING_COMPARE(t)) {
		struct davinci_timer_instance *dtip =
				soc_info->timer_info->timers;
		int event_timer = ID_TO_TIMER(timers[TID_CLOCKEVENT].id);

		/*
		 * Next interrupt should be the current time reg value plus
		 * the new period (using 32-bit unsigned addition/wrapping
		 * to 0 on overflow).  This assumes that the clocksource
		 * is set up to count to 2^32-1 before wrapping around to 0.
		 */
		__raw_writel(__raw_readl(t->base + t->tim_off) + t->period,
			t->base + dtip[event_timer].cmp_off);
	} else {
		tcr = __raw_readl(t->base + TCR);

		/* disable timer */
		tcr &= ~(TCR_ENAMODE_MASK << t->enamode_shift);
		__raw_writel(tcr, t->base + TCR);

		/* reset counter to zero, set new period */
		__raw_writel(0, t->base + t->tim_off);
		__raw_writel(t->period, t->base + t->prd_off);

		/* Set enable mode */
		if (t->opts & TIMER_OPTS_ONESHOT)
			tcr |= TCR_ENAMODE_ONESHOT << t->enamode_shift;
		else if (t->opts & TIMER_OPTS_PERIODIC)
			tcr |= TCR_ENAMODE_PERIODIC << t->enamode_shift;

		__raw_writel(tcr, t->base + TCR);
	}
	return 0;
}

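/* Read the current count of one 32-bit timer half. */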
static inline u32 timer32_read(struct timer_s *t)
{
	return __raw_readl(t->base + t->tim_off);
}

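/*
 * Clockevent tick handler: forward each timer interrupt to the registered
 * clockevent event_handler (the kernel tick code).
 */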
static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = &clockevent_davinci;

	evt->event_handler(evt);
	return IRQ_HANDLED;
}

/* called when 32-bit counter wraps */
static irqreturn_t freerun_interrupt(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

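/*
 * Static configuration of the two logical timers.  The clockevent timer
 * starts out disabled; its mode and period are programmed later by the
 * clockevent set_state/set_next_event callbacks.  The clocksource timer
 * runs periodically with the maximum 32-bit period (~0), so it behaves as
 * a free-running counter that simply wraps.
 */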
static struct timer_s timers[] = {
	[TID_CLOCKEVENT] = {
		.name		= "clockevent",
		.opts		= TIMER_OPTS_DISABLED,
		.irqaction = {
			.flags		= IRQF_TIMER,
			.handler	= timer_interrupt,
		}
	},
	[TID_CLOCKSOURCE] = {
		.name		= "free-run counter",
		.period		= ~0,
		.opts		= TIMER_OPTS_PERIODIC,
		.irqaction = {
			.flags		= IRQF_TIMER,
			.handler	= freerun_interrupt,
		}
	},
};

static void __init timer_init(void)
{
	struct davinci_soc_info *soc_info = &davinci_soc_info;
	struct davinci_timer_instance *dtip = soc_info->timer_info->timers;
	void __iomem *base[2];
	int i;

	/* Global init of each 64-bit timer as a whole */
	for (i = 0; i < 2; i++) {
		u32 tgcr;

		base[i] = ioremap(dtip[i].base, SZ_4K);
		if (WARN_ON(!base[i]))
			continue;

		/* Disabled, Internal clock source */
		__raw_writel(0, base[i] + TCR);

		/* reset both timers, no pre-scaler for timer34 */
		tgcr = 0;
		__raw_writel(tgcr, base[i] + TGCR);

		/* Set both timers to unchained 32-bit */
		tgcr = TGCR_TIMMODE_32BIT_UNCHAINED << TGCR_TIMMODE_SHIFT;
		__raw_writel(tgcr, base[i] + TGCR);

		/* Unreset timers */
		tgcr |= (TGCR_UNRESET << TGCR_TIM12RS_SHIFT) |
			(TGCR_UNRESET << TGCR_TIM34RS_SHIFT);
		__raw_writel(tgcr, base[i] + TGCR);

		/* Init both counters to zero */
		__raw_writel(0, base[i] + TIM12);
		__raw_writel(0, base[i] + TIM34);
	}

	/* Init of each timer as a 32-bit timer */
	for (i = 0; i < ARRAY_SIZE(timers); i++) {
		struct timer_s *t = &timers[i];
		int timer = ID_TO_TIMER(t->id);
		u32 irq;

		t->base = base[timer];
		if (!t->base)
			continue;

		if (IS_TIMER_BOT(t->id)) {
			t->enamode_shift = 6;
			t->tim_off = TIM12;
			t->prd_off = PRD12;
			irq = dtip[timer].bottom_irq;
		} else {
			t->enamode_shift = 22;
			t->tim_off = TIM34;
			t->prd_off = PRD34;
			irq = dtip[timer].top_irq;
		}

		/* Register interrupt */
		t->irqaction.name = t->name;
		t->irqaction.dev_id = (void *)t;

		if (t->irqaction.handler != NULL) {
			irq = USING_COMPARE(t) ? dtip[i].cmp_irq : irq;
			setup_irq(irq, &t->irqaction);
		}
	}
}

/*
 * clocksource
 */
static cycle_t read_cycles(struct clocksource *cs)
{
	struct timer_s *t = &timers[TID_CLOCKSOURCE];

	return (cycles_t)timer32_read(t);
}

static struct clocksource clocksource_davinci = {
	.rating		= 300,
	.read		= read_cycles,
	.mask		= CLOCKSOURCE_MASK(32),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

/*
 * Override the weak default sched_clock with something more precise
 */
static u64 notrace davinci_read_sched_clock(void)
{
	return timer32_read(&timers[TID_CLOCKSOURCE]);
}

/*
 * clockevent
 */
static int davinci_set_next_event(unsigned long cycles,
				  struct clock_event_device *evt)
{
	struct timer_s *t = &timers[TID_CLOCKEVENT];

	t->period = cycles;
	timer32_config(t);
	return 0;
}

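/*
 * The set_state callbacks below only record the requested mode in t->opts;
 * the hardware is (re)programmed by the next timer32_config() call.
 * davinci_set_periodic() also reprograms the timer immediately, since no
 * set_next_event() follows in periodic mode.
 */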
static int davinci_shutdown(struct clock_event_device *evt)
{
	struct timer_s *t = &timers[TID_CLOCKEVENT];

	t->opts &= ~TIMER_OPTS_STATE_MASK;
	t->opts |= TIMER_OPTS_DISABLED;
	return 0;
}

static int davinci_set_oneshot(struct clock_event_device *evt)
{
	struct timer_s *t = &timers[TID_CLOCKEVENT];

	t->opts &= ~TIMER_OPTS_STATE_MASK;
	t->opts |= TIMER_OPTS_ONESHOT;
	return 0;
}

static int davinci_set_periodic(struct clock_event_device *evt)
{
	struct timer_s *t = &timers[TID_CLOCKEVENT];

	t->period = davinci_clock_tick_rate / (HZ);
	t->opts &= ~TIMER_OPTS_STATE_MASK;
	t->opts |= TIMER_OPTS_PERIODIC;
	timer32_config(t);
	return 0;
}

static struct clock_event_device clockevent_davinci = {
	.features		= CLOCK_EVT_FEAT_PERIODIC |
				  CLOCK_EVT_FEAT_ONESHOT,
	.set_next_event		= davinci_set_next_event,
	.set_state_shutdown	= davinci_shutdown,
	.set_state_periodic	= davinci_set_periodic,
	.set_state_oneshot	= davinci_set_oneshot,
};

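/*
 * Main timer setup entry point.  In the DaVinci platform code this is
 * typically wired up as the machine descriptor's .init_time hook by the
 * board files.
 */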
void __init davinci_timer_init(void)
{
	struct clk *timer_clk;
	struct davinci_soc_info *soc_info = &davinci_soc_info;
	unsigned int clockevent_id;
	unsigned int clocksource_id;
	int i;

	clockevent_id = soc_info->timer_info->clockevent_id;
	clocksource_id = soc_info->timer_info->clocksource_id;

	timers[TID_CLOCKEVENT].id = clockevent_id;
	timers[TID_CLOCKSOURCE].id = clocksource_id;

	/*
	 * If using the same timer for both clock events and the clocksource,
	 * a compare register must be used to generate an event interrupt.
	 * This is equivalent to a oneshot timer only (not periodic).
	 */
	if (clockevent_id == clocksource_id) {
		struct davinci_timer_instance *dtip =
				soc_info->timer_info->timers;
		int event_timer = ID_TO_TIMER(clockevent_id);

		/* Only bottom timers can use compare regs */
		if (IS_TIMER_TOP(clockevent_id))
			pr_warn("%s: Invalid use of system timers. Results unpredictable.\n",
				__func__);
		else if ((dtip[event_timer].cmp_off == 0)
				|| (dtip[event_timer].cmp_irq == 0))
			pr_warn("%s: Invalid timer instance setup. Results unpredictable.\n",
				__func__);
		else {
			timers[TID_CLOCKEVENT].opts |= TIMER_OPTS_USE_COMPARE;
			clockevent_davinci.features = CLOCK_EVT_FEAT_ONESHOT;
		}
	}

	timer_clk = clk_get(NULL, "timer0");
	BUG_ON(IS_ERR(timer_clk));
	clk_prepare_enable(timer_clk);

	/* init timer hw */
	timer_init();

	davinci_clock_tick_rate = clk_get_rate(timer_clk);

	/* setup clocksource */
	clocksource_davinci.name = id_to_name[clocksource_id];
	if (clocksource_register_hz(&clocksource_davinci,
				    davinci_clock_tick_rate))
		pr_err("%s: can't register clocksource!\n",
		       clocksource_davinci.name);

	sched_clock_register(davinci_read_sched_clock, 32,
			     davinci_clock_tick_rate);

	/* setup clockevent */
	clockevent_davinci.name = id_to_name[timers[TID_CLOCKEVENT].id];

	clockevent_davinci.cpumask = cpumask_of(0);
	clockevents_config_and_register(&clockevent_davinci,
					davinci_clock_tick_rate, 1, 0xfffffffe);

	for (i = 0; i < ARRAY_SIZE(timers); i++)
		timer32_config(&timers[i]);
}

/* reset board using watchdog timer */
void davinci_watchdog_reset(struct platform_device *pdev)
{
	u32 tgcr, wdtcr;
	void __iomem *base;
	struct clk *wd_clk;

	base = ioremap(pdev->resource[0].start, SZ_4K);
	if (WARN_ON(!base))
		return;

	wd_clk = clk_get(&pdev->dev, NULL);
	if (WARN_ON(IS_ERR(wd_clk)))
		return;
	clk_prepare_enable(wd_clk);

	/* disable, internal clock source */
	__raw_writel(0, base + TCR);

	/* reset timer, set mode to 64-bit watchdog, and unreset */
	tgcr = 0;
	__raw_writel(tgcr, base + TGCR);
	tgcr = TGCR_TIMMODE_64BIT_WDOG << TGCR_TIMMODE_SHIFT;
	tgcr |= (TGCR_UNRESET << TGCR_TIM12RS_SHIFT) |
		(TGCR_UNRESET << TGCR_TIM34RS_SHIFT);
	__raw_writel(tgcr, base + TGCR);

	/* clear counter and period regs */
	__raw_writel(0, base + TIM12);
	__raw_writel(0, base + TIM34);
	__raw_writel(0, base + PRD12);
	__raw_writel(0, base + PRD34);

	/* put watchdog in pre-active state */
	wdtcr = __raw_readl(base + WDTCR);
	wdtcr = (WDTCR_WDKEY_SEQ0 << WDTCR_WDKEY_SHIFT) |
		(WDTCR_WDEN_ENABLE << WDTCR_WDEN_SHIFT);
	__raw_writel(wdtcr, base + WDTCR);

	/* put watchdog in active state */
	wdtcr = (WDTCR_WDKEY_SEQ1 << WDTCR_WDKEY_SHIFT) |
		(WDTCR_WDEN_ENABLE << WDTCR_WDEN_SHIFT);
	__raw_writel(wdtcr, base + WDTCR);

	/*
	 * Write an invalid value to the WDKEY field to trigger
	 * a watchdog reset.
	 */
	wdtcr = 0x00004000;
	__raw_writel(wdtcr, base + WDTCR);
}