/*
 * DaVinci timer subsystem
 *
 * Author: Kevin Hilman, MontaVista Software, Inc. <source@mvista.com>
 *
 * 2007 (c) MontaVista Software, Inc. This file is licensed under
 * the terms of the GNU General Public License version 2. This program
 * is licensed "as is" without any warranty of any kind, whether express
 * or implied.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

#include <asm/sched_clock.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>

#include <mach/cputype.h>
#include <mach/hardware.h>
#include <mach/time.h>

#include "clock.h"

static struct clock_event_device clockevent_davinci;
static unsigned int davinci_clock_tick_rate;

/*
 * This driver configures the 2 64-bit count-up timers as 4 independent
 * 32-bit count-up timers used as follows: one 32-bit half serves as the
 * clockevent source and another as the free-running clocksource counter.
 */

enum {
	TID_CLOCKEVENT,
	TID_CLOCKSOURCE,
};

/* Timer register offsets */
#define PID12			0x0
#define TIM12			0x10
#define TIM34			0x14
#define PRD12			0x18
#define PRD34			0x1c
#define TCR			0x20
#define TGCR			0x24
#define WDTCR			0x28

/* Offsets of the 8 compare registers */
#define CMP12_0			0x60
#define CMP12_1			0x64
#define CMP12_2			0x68
#define CMP12_3			0x6c
#define CMP12_4			0x70
#define CMP12_5			0x74
#define CMP12_6			0x78
#define CMP12_7			0x7c

/* Timer register bitfields */
#define TCR_ENAMODE_DISABLE		0x0
#define TCR_ENAMODE_ONESHOT		0x1
#define TCR_ENAMODE_PERIODIC		0x2
#define TCR_ENAMODE_MASK		0x3

#define TGCR_TIMMODE_SHIFT		2
#define TGCR_TIMMODE_64BIT_GP		0x0
#define TGCR_TIMMODE_32BIT_UNCHAINED	0x1
#define TGCR_TIMMODE_64BIT_WDOG		0x2
#define TGCR_TIMMODE_32BIT_CHAINED	0x3

#define TGCR_TIM12RS_SHIFT	0
#define TGCR_TIM34RS_SHIFT	1
#define TGCR_RESET		0x0
#define TGCR_UNRESET		0x1
#define TGCR_RESET_MASK		0x3

#define WDTCR_WDEN_SHIFT	14
#define WDTCR_WDEN_DISABLE	0x0
#define WDTCR_WDEN_ENABLE	0x1
#define WDTCR_WDKEY_SHIFT	16
#define WDTCR_WDKEY_SEQ0	0xa5c6
#define WDTCR_WDKEY_SEQ1	0xda7e
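
/*
 * The WDKEY values are the two-stage key sequence used by
 * davinci_watchdog_reset() below to arm the watchdog.
 */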

struct timer_s {
	char *name;
	unsigned int id;
	unsigned long period;
	unsigned long opts;
	unsigned long flags;
	void __iomem *base;
	unsigned long tim_off;
	unsigned long prd_off;
	unsigned long enamode_shift;
	struct irqaction irqaction;
};
static struct timer_s timers[];

/* values for 'opts' field of struct timer_s */
#define TIMER_OPTS_DISABLED		0x01
#define TIMER_OPTS_ONESHOT		0x02
#define TIMER_OPTS_PERIODIC		0x04
#define TIMER_OPTS_STATE_MASK		0x07

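/*
 * TIMER_OPTS_USE_COMPARE is set on the clockevent timer when it shares a
 * timer half with the clocksource (see davinci_timer_init()); events are then
 * generated from a compare register rather than by reprogramming the period.
 */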
#define TIMER_OPTS_USE_COMPARE		0x80000000
#define USING_COMPARE(t)		((t)->opts & TIMER_OPTS_USE_COMPARE)

static char *id_to_name[] = {
	[T0_BOT]	= "timer0_0",
	[T0_TOP]	= "timer0_1",
	[T1_BOT]	= "timer1_0",
	[T1_TOP]	= "timer1_1",
};

static int timer32_config(struct timer_s *t)
{
	u32 tcr;
	struct davinci_soc_info *soc_info = &davinci_soc_info;

	if (USING_COMPARE(t)) {
		struct davinci_timer_instance *dtip =
				soc_info->timer_info->timers;
		int event_timer = ID_TO_TIMER(timers[TID_CLOCKEVENT].id);

		/*
		 * Next interrupt should be the current time reg value plus
		 * the new period (using 32-bit unsigned addition/wrapping
		 * to 0 on overflow). This assumes that the clocksource
		 * is set up to count to 2^32-1 before wrapping around to 0.
		 */
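		/*
		 * For example, with the counter at 0xfffffff0 and a period of
		 * 0x20, the compare register is programmed to 0x10, which the
		 * free-running counter reaches shortly after wrapping to 0.
		 */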
		__raw_writel(__raw_readl(t->base + t->tim_off) + t->period,
			t->base + dtip[event_timer].cmp_off);
	} else {
		tcr = __raw_readl(t->base + TCR);

		/* disable timer */
		tcr &= ~(TCR_ENAMODE_MASK << t->enamode_shift);
		__raw_writel(tcr, t->base + TCR);

		/* reset counter to zero, set new period */
		__raw_writel(0, t->base + t->tim_off);
		__raw_writel(t->period, t->base + t->prd_off);

		/* Set enable mode */
		if (t->opts & TIMER_OPTS_ONESHOT)
			tcr |= TCR_ENAMODE_ONESHOT << t->enamode_shift;
		else if (t->opts & TIMER_OPTS_PERIODIC)
			tcr |= TCR_ENAMODE_PERIODIC << t->enamode_shift;

		__raw_writel(tcr, t->base + TCR);
	}
	return 0;
}

static inline u32 timer32_read(struct timer_s *t)
{
	return __raw_readl(t->base + t->tim_off);
}

static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = &clockevent_davinci;

	evt->event_handler(evt);
	return IRQ_HANDLED;
}

/* called when 32-bit counter wraps */
static irqreturn_t freerun_interrupt(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

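/*
 * Note that the clocksource timer below is "periodic" with the maximum
 * 32-bit period (~0), so in practice it free-runs from 0 to 0xffffffff,
 * matching CLOCKSOURCE_MASK(32) and the compare-register assumption in
 * timer32_config().
 */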
static struct timer_s timers[] = {
	[TID_CLOCKEVENT] = {
		.name      = "clockevent",
		.opts      = TIMER_OPTS_DISABLED,
		.irqaction = {
			.flags   = IRQF_DISABLED | IRQF_TIMER,
			.handler = timer_interrupt,
		}
	},
	[TID_CLOCKSOURCE] = {
		.name      = "free-run counter",
		.period    = ~0,
		.opts      = TIMER_OPTS_PERIODIC,
		.irqaction = {
			.flags   = IRQF_DISABLED | IRQF_TIMER,
			.handler = freerun_interrupt,
		}
	},
};

static void __init timer_init(void)
{
	struct davinci_soc_info *soc_info = &davinci_soc_info;
	struct davinci_timer_instance *dtip = soc_info->timer_info->timers;
	void __iomem *base[2];
	int i;

	/* Global init of each 64-bit timer as a whole */
	for (i = 0; i < 2; i++) {
		u32 tgcr;

		base[i] = ioremap(dtip[i].base, SZ_4K);
		if (WARN_ON(!base[i]))
			continue;

		/* Disabled, Internal clock source */
		__raw_writel(0, base[i] + TCR);

		/* reset both timers, no pre-scaler for timer34 */
		tgcr = 0;
		__raw_writel(tgcr, base[i] + TGCR);

		/* Set both timers to unchained 32-bit */
		tgcr = TGCR_TIMMODE_32BIT_UNCHAINED << TGCR_TIMMODE_SHIFT;
		__raw_writel(tgcr, base[i] + TGCR);

		/* Unreset timers */
		tgcr |= (TGCR_UNRESET << TGCR_TIM12RS_SHIFT) |
			(TGCR_UNRESET << TGCR_TIM34RS_SHIFT);
		__raw_writel(tgcr, base[i] + TGCR);

		/* Init both counters to zero */
		__raw_writel(0, base[i] + TIM12);
		__raw_writel(0, base[i] + TIM34);
	}

	/* Init of each timer as a 32-bit timer */
	for (i = 0; i < ARRAY_SIZE(timers); i++) {
		struct timer_s *t = &timers[i];
		int timer = ID_TO_TIMER(t->id);
		u32 irq;

		t->base = base[timer];
		if (!t->base)
			continue;

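		/*
		 * enamode_shift selects the TCR enable-mode field for the
		 * bottom (TIM12) or top (TIM34) half of the selected timer.
		 */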
		if (IS_TIMER_BOT(t->id)) {
			t->enamode_shift = 6;
			t->tim_off = TIM12;
			t->prd_off = PRD12;
			irq = dtip[timer].bottom_irq;
		} else {
			t->enamode_shift = 22;
			t->tim_off = TIM34;
			t->prd_off = PRD34;
			irq = dtip[timer].top_irq;
		}

		/* Register interrupt */
		t->irqaction.name = t->name;
		t->irqaction.dev_id = (void *)t;

		if (t->irqaction.handler != NULL) {
			irq = USING_COMPARE(t) ? dtip[i].cmp_irq : irq;
			setup_irq(irq, &t->irqaction);
		}
	}
}

/*
 * clocksource
 */
static cycle_t read_cycles(struct clocksource *cs)
{
	struct timer_s *t = &timers[TID_CLOCKSOURCE];

	return (cycles_t)timer32_read(t);
}

static struct clocksource clocksource_davinci = {
	.rating		= 300,
	.read		= read_cycles,
	.mask		= CLOCKSOURCE_MASK(32),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

/*
 * Override the weak default sched_clock with something more precise
 */
static u32 notrace davinci_read_sched_clock(void)
{
	return timer32_read(&timers[TID_CLOCKSOURCE]);
}

/*
 * clockevent
 */
static int davinci_set_next_event(unsigned long cycles,
				  struct clock_event_device *evt)
{
	struct timer_s *t = &timers[TID_CLOCKEVENT];

	t->period = cycles;
	timer32_config(t);
	return 0;
}

static void davinci_set_mode(enum clock_event_mode mode,
			     struct clock_event_device *evt)
{
	struct timer_s *t = &timers[TID_CLOCKEVENT];

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		t->period = davinci_clock_tick_rate / (HZ);
		t->opts &= ~TIMER_OPTS_STATE_MASK;
		t->opts |= TIMER_OPTS_PERIODIC;
		timer32_config(t);
		break;
	case CLOCK_EVT_MODE_ONESHOT:
		t->opts &= ~TIMER_OPTS_STATE_MASK;
		t->opts |= TIMER_OPTS_ONESHOT;
		break;
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		t->opts &= ~TIMER_OPTS_STATE_MASK;
		t->opts |= TIMER_OPTS_DISABLED;
		break;
	case CLOCK_EVT_MODE_RESUME:
		break;
	}
}

static struct clock_event_device clockevent_davinci = {
	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.shift		= 32,
	.set_next_event	= davinci_set_next_event,
	.set_mode	= davinci_set_mode,
};


static void __init davinci_timer_init(void)
{
	struct clk *timer_clk;
	struct davinci_soc_info *soc_info = &davinci_soc_info;
	unsigned int clockevent_id;
	unsigned int clocksource_id;
	static char err[] __initdata = KERN_ERR
		"%s: can't register clocksource!\n";
	int i;

	clockevent_id = soc_info->timer_info->clockevent_id;
	clocksource_id = soc_info->timer_info->clocksource_id;

	timers[TID_CLOCKEVENT].id = clockevent_id;
	timers[TID_CLOCKSOURCE].id = clocksource_id;

	/*
	 * If using the same timer for both clock events & clocksource,
	 * a compare register must be used to generate an event interrupt.
	 * This is equivalent to a oneshot timer only (not periodic).
	 */
	if (clockevent_id == clocksource_id) {
		struct davinci_timer_instance *dtip =
				soc_info->timer_info->timers;
		int event_timer = ID_TO_TIMER(clockevent_id);

		/* Only bottom timers can use compare regs */
		if (IS_TIMER_TOP(clockevent_id))
			pr_warning("davinci_timer_init: Invalid use"
				" of system timers. Results unpredictable.\n");
		else if ((dtip[event_timer].cmp_off == 0)
				|| (dtip[event_timer].cmp_irq == 0))
			pr_warning("davinci_timer_init: Invalid timer instance"
				" setup. Results unpredictable.\n");
		else {
			timers[TID_CLOCKEVENT].opts |= TIMER_OPTS_USE_COMPARE;
			clockevent_davinci.features = CLOCK_EVT_FEAT_ONESHOT;
		}
	}

	timer_clk = clk_get(NULL, "timer0");
	BUG_ON(IS_ERR(timer_clk));
	clk_prepare_enable(timer_clk);

	/* init timer hw */
	timer_init();

	davinci_clock_tick_rate = clk_get_rate(timer_clk);

	/* setup clocksource */
	clocksource_davinci.name = id_to_name[clocksource_id];
	if (clocksource_register_hz(&clocksource_davinci,
				    davinci_clock_tick_rate))
		printk(err, clocksource_davinci.name);

	setup_sched_clock(davinci_read_sched_clock, 32,
			  davinci_clock_tick_rate);

	/* setup clockevent */
	clockevent_davinci.name = id_to_name[timers[TID_CLOCKEVENT].id];
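	/*
	 * mult/shift are used by the clockevents core to convert a nanosecond
	 * delta into timer ticks, roughly ticks = (ns * mult) >> shift, with
	 * mult = (rate << shift) / NSEC_PER_SEC as computed by div_sc() below.
	 */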
	clockevent_davinci.mult = div_sc(davinci_clock_tick_rate, NSEC_PER_SEC,
					 clockevent_davinci.shift);
	clockevent_davinci.max_delta_ns =
		clockevent_delta2ns(0xfffffffe, &clockevent_davinci);
	clockevent_davinci.min_delta_ns = 50000; /* 50 usec */

	clockevent_davinci.cpumask = cpumask_of(0);
	clockevents_register_device(&clockevent_davinci);

	for (i = 0; i < ARRAY_SIZE(timers); i++)
		timer32_config(&timers[i]);
}

struct sys_timer davinci_timer = {
	.init	= davinci_timer_init,
};


/* reset board using watchdog timer */
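/*
 * Sequence below: disable the timer, configure it as a 64-bit watchdog,
 * clear the count and period registers, arm the watchdog with the
 * WDKEY_SEQ0/WDKEY_SEQ1 key writes, then write an invalid key to force an
 * immediate reset.
 */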
void davinci_watchdog_reset(struct platform_device *pdev)
{
	u32 tgcr, wdtcr;
	void __iomem *base;
	struct clk *wd_clk;

	base = ioremap(pdev->resource[0].start, SZ_4K);
	if (WARN_ON(!base))
		return;

	wd_clk = clk_get(&pdev->dev, NULL);
	if (WARN_ON(IS_ERR(wd_clk)))
		return;
	clk_prepare_enable(wd_clk);

	/* disable, internal clock source */
	__raw_writel(0, base + TCR);

	/* reset timer, set mode to 64-bit watchdog, and unreset */
	tgcr = 0;
	__raw_writel(tgcr, base + TGCR);
	tgcr = TGCR_TIMMODE_64BIT_WDOG << TGCR_TIMMODE_SHIFT;
	tgcr |= (TGCR_UNRESET << TGCR_TIM12RS_SHIFT) |
		(TGCR_UNRESET << TGCR_TIM34RS_SHIFT);
	__raw_writel(tgcr, base + TGCR);

	/* clear counter and period regs */
	__raw_writel(0, base + TIM12);
	__raw_writel(0, base + TIM34);
	__raw_writel(0, base + PRD12);
	__raw_writel(0, base + PRD34);

	/* put watchdog in pre-active state */
	wdtcr = __raw_readl(base + WDTCR);
	wdtcr = (WDTCR_WDKEY_SEQ0 << WDTCR_WDKEY_SHIFT) |
		(WDTCR_WDEN_ENABLE << WDTCR_WDEN_SHIFT);
	__raw_writel(wdtcr, base + WDTCR);

	/* put watchdog in active state */
	wdtcr = (WDTCR_WDKEY_SEQ1 << WDTCR_WDKEY_SHIFT) |
		(WDTCR_WDEN_ENABLE << WDTCR_WDEN_SHIFT);
	__raw_writel(wdtcr, base + WDTCR);

462 /* write an invalid value to the WDKEY field to trigger
463 * a watchdog reset */
464 wdtcr = 0x00004000;
Kevin Hilmanf5c122d2009-04-14 07:04:16 -0500465 __raw_writel(wdtcr, base + WDTCR);
Kevin Hilman7c6337e2007-04-30 19:37:19 +0100466}