/*
 * SuperH Timer Support - CMT
 *
 * Copyright (C) 2008 Magnus Damm
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/sh_timer.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

struct sh_cmt_priv {
	void __iomem *mapbase;
	struct clk *clk;
	unsigned long width; /* 16 or 32 bit version of hardware block */
	unsigned long overflow_bit;
	unsigned long clear_bits;
	struct irqaction irqaction;
	struct platform_device *pdev;

	unsigned long flags;
	unsigned long match_value;
	unsigned long next_match_value;
	unsigned long max_match_value;
	unsigned long rate;
	raw_spinlock_t lock;
	struct clock_event_device ced;
	struct clocksource cs;
	unsigned long total_cycles;
	bool cs_enabled;

	/* callbacks for CMCNT and CMCOR access */
	unsigned long (*read_count)(void __iomem *base, unsigned long offs);
	void (*write_count)(void __iomem *base, unsigned long offs,
			    unsigned long value);
};

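/*
 * The CMT hardware comes in 16-bit and 32-bit register width variants.
 * The helpers below hide the difference: the register index is scaled
 * by the access size (<< 1 for 16-bit registers, << 2 for 32-bit ones),
 * and the matching pair is hooked up via read_count/write_count.
 */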
static unsigned long sh_cmt_read16(void __iomem *base, unsigned long offs)
{
	return ioread16(base + (offs << 1));
}

static unsigned long sh_cmt_read32(void __iomem *base, unsigned long offs)
{
	return ioread32(base + (offs << 2));
}

static void sh_cmt_write16(void __iomem *base, unsigned long offs,
			   unsigned long value)
{
	iowrite16(value, base + (offs << 1));
}

static void sh_cmt_write32(void __iomem *base, unsigned long offs,
			   unsigned long value)
{
	iowrite32(value, base + (offs << 2));
}

#define CMCSR 0 /* channel register */
#define CMCNT 1 /* channel register */
#define CMCOR 2 /* channel register */

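/*
 * CMSTR, the shared start/stop register, lives at the base of the CMT
 * block rather than inside the per-channel register window; it is
 * reached by subtracting the platform-provided channel_offset from
 * mapbase.  The per-channel registers go through the width-specific
 * read_count/write_count callbacks.
 */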
static inline unsigned long sh_cmt_read_cmstr(struct sh_cmt_priv *p)
{
	struct sh_timer_config *cfg = p->pdev->dev.platform_data;

	return sh_cmt_read16(p->mapbase - cfg->channel_offset, 0);
}

static inline unsigned long sh_cmt_read_cmcsr(struct sh_cmt_priv *p)
{
	return sh_cmt_read16(p->mapbase, CMCSR);
}

static inline unsigned long sh_cmt_read_cmcnt(struct sh_cmt_priv *p)
{
	return p->read_count(p->mapbase, CMCNT);
}

static inline void sh_cmt_write_cmstr(struct sh_cmt_priv *p,
				      unsigned long value)
{
	struct sh_timer_config *cfg = p->pdev->dev.platform_data;

	sh_cmt_write16(p->mapbase - cfg->channel_offset, 0, value);
}

static inline void sh_cmt_write_cmcsr(struct sh_cmt_priv *p,
				      unsigned long value)
{
	sh_cmt_write16(p->mapbase, CMCSR, value);
}

static inline void sh_cmt_write_cmcnt(struct sh_cmt_priv *p,
				      unsigned long value)
{
	p->write_count(p->mapbase, CMCNT, value);
}

static inline void sh_cmt_write_cmcor(struct sh_cmt_priv *p,
				      unsigned long value)
{
	p->write_count(p->mapbase, CMCOR, value);
}

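/*
 * CMCNT is read three times with the overflow flag sampled before and
 * after; the loop below retries until the flag is stable and the three
 * values are not spread across a counter wrap, then the middle read is
 * returned together with the overflow state.
 */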
static unsigned long sh_cmt_get_counter(struct sh_cmt_priv *p,
					int *has_wrapped)
{
	unsigned long v1, v2, v3;
	int o1, o2;

	o1 = sh_cmt_read_cmcsr(p) & p->overflow_bit;

	/* Make sure the timer value is stable. Stolen from acpi_pm.c */
	do {
		o2 = o1;
		v1 = sh_cmt_read_cmcnt(p);
		v2 = sh_cmt_read_cmcnt(p);
		v3 = sh_cmt_read_cmcnt(p);
		o1 = sh_cmt_read_cmcsr(p) & p->overflow_bit;
	} while (unlikely((o1 != o2) || (v1 > v2 && v1 < v3)
			  || (v2 > v3 && v2 < v1) || (v3 > v1 && v3 < v2)));

	*has_wrapped = o1;
	return v2;
}

static DEFINE_RAW_SPINLOCK(sh_cmt_lock);

static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start)
{
	struct sh_timer_config *cfg = p->pdev->dev.platform_data;
	unsigned long flags, value;

	/* the start/stop register is shared by multiple timer channels */
	raw_spin_lock_irqsave(&sh_cmt_lock, flags);
	value = sh_cmt_read_cmstr(p);

	if (start)
		value |= 1 << cfg->timer_bit;
	else
		value &= ~(1 << cfg->timer_bit);

	sh_cmt_write_cmstr(p, value);
	raw_spin_unlock_irqrestore(&sh_cmt_lock, flags);
}

static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
{
	int k, ret;

	pm_runtime_get_sync(&p->pdev->dev);
	dev_pm_syscore_device(&p->pdev->dev, true);

	/* enable clock */
	ret = clk_enable(p->clk);
	if (ret) {
		dev_err(&p->pdev->dev, "cannot enable clock\n");
		goto err0;
	}

	/* make sure channel is disabled */
	sh_cmt_start_stop_ch(p, 0);

	/* configure channel, periodic mode and maximum timeout */
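	/*
	 * Note on the magic CMCSR values (bit layouts differ between CMT
	 * variants): on the 16-bit block, 0x43 sets CMIE (compare match
	 * interrupt enable) with CKS = 3 (input clock / 512); 0x01a4 is
	 * assumed to likewise enable compare match interrupts and select
	 * the /8 input clock on the 32-bit block, matching the rates
	 * computed below.
	 */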
	if (p->width == 16) {
		*rate = clk_get_rate(p->clk) / 512;
		sh_cmt_write_cmcsr(p, 0x43);
	} else {
		*rate = clk_get_rate(p->clk) / 8;
		sh_cmt_write_cmcsr(p, 0x01a4);
	}

	sh_cmt_write_cmcor(p, 0xffffffff);
	sh_cmt_write_cmcnt(p, 0);

	/*
	 * According to the sh73a0 user's manual, as CMCNT can be operated
	 * only by the RCLK (Pseudo 32 kHz), there is one restriction on
	 * modifying the CMCNT register: two RCLK cycles must pass before
	 * the register can be read or before any modification of the value
	 * it holds is reflected in the LSI's actual operation.
	 *
	 * While at it, we're supposed to clear out the CMCNT as of this
	 * moment, so make sure it's processed properly here.  This will
	 * take RCLKx2 at maximum.
	 */
	for (k = 0; k < 100; k++) {
		if (!sh_cmt_read_cmcnt(p))
			break;
		udelay(1);
	}

	if (sh_cmt_read_cmcnt(p)) {
		dev_err(&p->pdev->dev, "cannot clear CMCNT\n");
		ret = -ETIMEDOUT;
		goto err1;
	}

	/* enable channel */
	sh_cmt_start_stop_ch(p, 1);
	return 0;
 err1:
	/* stop clock */
	clk_disable(p->clk);

 err0:
	return ret;
}

static void sh_cmt_disable(struct sh_cmt_priv *p)
{
	/* disable channel */
	sh_cmt_start_stop_ch(p, 0);

	/* disable interrupts in CMT block */
	sh_cmt_write_cmcsr(p, 0);

	/* stop clock */
	clk_disable(p->clk);

	dev_pm_syscore_device(&p->pdev->dev, false);
	pm_runtime_put(&p->pdev->dev);
}

/* private flags */
#define FLAG_CLOCKEVENT (1 << 0)
#define FLAG_CLOCKSOURCE (1 << 1)
#define FLAG_REPROGRAM (1 << 2)
#define FLAG_SKIPEVENT (1 << 3)
#define FLAG_IRQCONTEXT (1 << 4)

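/*
 * Program a new match value while racing against the running counter:
 * write CMCOR, re-read the counter, and use the overflow flag to decide
 * whether a wrap was caused by the old or the new match value.  If the
 * counter has already passed the new match without wrapping, the event
 * was programmed too close; back off with an exponentially growing
 * delay and retry.
 */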
static void sh_cmt_clock_event_program_verify(struct sh_cmt_priv *p,
					      int absolute)
{
	unsigned long new_match;
	unsigned long value = p->next_match_value;
	unsigned long delay = 0;
	unsigned long now = 0;
	int has_wrapped;

	now = sh_cmt_get_counter(p, &has_wrapped);
	p->flags |= FLAG_REPROGRAM; /* force reprogram */

	if (has_wrapped) {
		/* we're competing with the interrupt handler.
		 * -> let the interrupt handler reprogram the timer.
		 * -> interrupt number two handles the event.
		 */
		p->flags |= FLAG_SKIPEVENT;
		return;
	}

	if (absolute)
		now = 0;

	do {
		/* reprogram the timer hardware,
		 * but don't save the new match value yet.
		 */
		new_match = now + value + delay;
		if (new_match > p->max_match_value)
			new_match = p->max_match_value;

		sh_cmt_write_cmcor(p, new_match);

		now = sh_cmt_get_counter(p, &has_wrapped);
		if (has_wrapped && (new_match > p->match_value)) {
			/* we are changing to a greater match value,
			 * so this wrap must be caused by the counter
			 * matching the old value.
			 * -> first interrupt reprograms the timer.
			 * -> interrupt number two handles the event.
			 */
			p->flags |= FLAG_SKIPEVENT;
			break;
		}

		if (has_wrapped) {
			/* we are changing to a smaller match value,
			 * so the wrap must be caused by the counter
			 * matching the new value.
			 * -> save programmed match value.
			 * -> let isr handle the event.
			 */
			p->match_value = new_match;
			break;
		}

		/* be safe: verify hardware settings */
		if (now < new_match) {
			/* timer value is below match value, all good.
			 * this makes sure we won't miss any match events.
			 * -> save programmed match value.
			 * -> let isr handle the event.
			 */
			p->match_value = new_match;
			break;
		}

		/* the counter has reached a value greater
		 * than our new match value, and since the
		 * has_wrapped flag isn't set we must have
		 * programmed an event too close.
		 * -> increase delay and retry.
		 */
		if (delay)
			delay <<= 1;
		else
			delay = 1;

		if (!delay)
			dev_warn(&p->pdev->dev, "too long delay\n");

	} while (delay);
}

static void __sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta)
{
	if (delta > p->max_match_value)
		dev_warn(&p->pdev->dev, "delta out of range\n");

	p->next_match_value = delta;
	sh_cmt_clock_event_program_verify(p, 0);
}

static void sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&p->lock, flags);
	__sh_cmt_set_next(p, delta);
	raw_spin_unlock_irqrestore(&p->lock, flags);
}

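/*
 * One interrupt handler serves both uses of the channel: it
 * acknowledges the match in CMCSR, accumulates total_cycles for the
 * clocksource, delivers the clockevent, and reprograms the match value
 * when a reprogram is pending.
 */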
static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
{
	struct sh_cmt_priv *p = dev_id;

	/* clear flags */
	sh_cmt_write_cmcsr(p, sh_cmt_read_cmcsr(p) & p->clear_bits);

	/* update the clock source counter to begin with, if enabled.
	 * the wrap flag should be cleared by the timer specific
	 * isr before we end up here.
	 */
	if (p->flags & FLAG_CLOCKSOURCE)
		p->total_cycles += p->match_value + 1;

	if (!(p->flags & FLAG_REPROGRAM))
		p->next_match_value = p->max_match_value;

	p->flags |= FLAG_IRQCONTEXT;

	if (p->flags & FLAG_CLOCKEVENT) {
		if (!(p->flags & FLAG_SKIPEVENT)) {
			if (p->ced.mode == CLOCK_EVT_MODE_ONESHOT) {
				p->next_match_value = p->max_match_value;
				p->flags |= FLAG_REPROGRAM;
			}

			p->ced.event_handler(&p->ced);
		}
	}

	p->flags &= ~FLAG_SKIPEVENT;

	if (p->flags & FLAG_REPROGRAM) {
		p->flags &= ~FLAG_REPROGRAM;
		sh_cmt_clock_event_program_verify(p, 1);

		if (p->flags & FLAG_CLOCKEVENT)
			if ((p->ced.mode == CLOCK_EVT_MODE_SHUTDOWN)
			    || (p->match_value == p->next_match_value))
				p->flags &= ~FLAG_REPROGRAM;
	}

	p->flags &= ~FLAG_IRQCONTEXT;

	return IRQ_HANDLED;
}

static int sh_cmt_start(struct sh_cmt_priv *p, unsigned long flag)
{
	int ret = 0;
	unsigned long flags;

	raw_spin_lock_irqsave(&p->lock, flags);

	if (!(p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
		ret = sh_cmt_enable(p, &p->rate);

	if (ret)
		goto out;
	p->flags |= flag;

	/* setup timeout if no clockevent */
	if ((flag == FLAG_CLOCKSOURCE) && (!(p->flags & FLAG_CLOCKEVENT)))
		__sh_cmt_set_next(p, p->max_match_value);
 out:
	raw_spin_unlock_irqrestore(&p->lock, flags);

	return ret;
}

static void sh_cmt_stop(struct sh_cmt_priv *p, unsigned long flag)
{
	unsigned long flags;
	unsigned long f;

	raw_spin_lock_irqsave(&p->lock, flags);

	f = p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE);
	p->flags &= ~flag;

	if (f && !(p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
		sh_cmt_disable(p);

	/* adjust the timeout to maximum if only clocksource left */
	if ((flag == FLAG_CLOCKEVENT) && (p->flags & FLAG_CLOCKSOURCE))
		__sh_cmt_set_next(p, p->max_match_value);

	raw_spin_unlock_irqrestore(&p->lock, flags);
}

static struct sh_cmt_priv *cs_to_sh_cmt(struct clocksource *cs)
{
	return container_of(cs, struct sh_cmt_priv, cs);
}

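/*
 * The clocksource value is the cycle count accumulated by the interrupt
 * handler plus the current raw counter; if a wrap is pending but its
 * interrupt has not run yet, one full match period is added by hand.
 */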
static cycle_t sh_cmt_clocksource_read(struct clocksource *cs)
{
	struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
	unsigned long flags, raw;
	unsigned long value;
	int has_wrapped;

	raw_spin_lock_irqsave(&p->lock, flags);
	value = p->total_cycles;
	raw = sh_cmt_get_counter(p, &has_wrapped);

	if (unlikely(has_wrapped))
		raw += p->match_value + 1;
	raw_spin_unlock_irqrestore(&p->lock, flags);

	return value + raw;
}

static int sh_cmt_clocksource_enable(struct clocksource *cs)
{
	int ret;
	struct sh_cmt_priv *p = cs_to_sh_cmt(cs);

	WARN_ON(p->cs_enabled);

	p->total_cycles = 0;

	ret = sh_cmt_start(p, FLAG_CLOCKSOURCE);
	if (!ret) {
		__clocksource_updatefreq_hz(cs, p->rate);
		p->cs_enabled = true;
	}
	return ret;
}

static void sh_cmt_clocksource_disable(struct clocksource *cs)
{
	struct sh_cmt_priv *p = cs_to_sh_cmt(cs);

	WARN_ON(!p->cs_enabled);

	sh_cmt_stop(p, FLAG_CLOCKSOURCE);
	p->cs_enabled = false;
}

static void sh_cmt_clocksource_suspend(struct clocksource *cs)
{
	struct sh_cmt_priv *p = cs_to_sh_cmt(cs);

	sh_cmt_stop(p, FLAG_CLOCKSOURCE);
	pm_genpd_syscore_poweroff(&p->pdev->dev);
}

static void sh_cmt_clocksource_resume(struct clocksource *cs)
{
	struct sh_cmt_priv *p = cs_to_sh_cmt(cs);

	pm_genpd_syscore_poweron(&p->pdev->dev);
	sh_cmt_start(p, FLAG_CLOCKSOURCE);
}

static int sh_cmt_register_clocksource(struct sh_cmt_priv *p,
				       char *name, unsigned long rating)
{
	struct clocksource *cs = &p->cs;

	cs->name = name;
	cs->rating = rating;
	cs->read = sh_cmt_clocksource_read;
	cs->enable = sh_cmt_clocksource_enable;
	cs->disable = sh_cmt_clocksource_disable;
	cs->suspend = sh_cmt_clocksource_suspend;
	cs->resume = sh_cmt_clocksource_resume;
	cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8);
	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;

	dev_info(&p->pdev->dev, "used as clock source\n");

	/* Register with dummy 1 Hz value, gets updated in ->enable() */
	clocksource_register_hz(cs, 1);
	return 0;
}

static struct sh_cmt_priv *ced_to_sh_cmt(struct clock_event_device *ced)
{
	return container_of(ced, struct sh_cmt_priv, ced);
}

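/*
 * mult/shift pick a fixed-point scale so that the clockevents core can
 * convert nanoseconds to counter cycles as cycles = ns * mult >> shift;
 * div_sc() derives mult from the counter rate for the chosen shift, and
 * clockevent_delta2ns() converts the counter limits back to nanoseconds.
 */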
static void sh_cmt_clock_event_start(struct sh_cmt_priv *p, int periodic)
{
	struct clock_event_device *ced = &p->ced;

	sh_cmt_start(p, FLAG_CLOCKEVENT);

	/* TODO: calculate good shift from rate and counter bit width */

	ced->shift = 32;
	ced->mult = div_sc(p->rate, NSEC_PER_SEC, ced->shift);
	ced->max_delta_ns = clockevent_delta2ns(p->max_match_value, ced);
	ced->min_delta_ns = clockevent_delta2ns(0x1f, ced);

	if (periodic)
		sh_cmt_set_next(p, ((p->rate + HZ/2) / HZ) - 1);
	else
		sh_cmt_set_next(p, p->max_match_value);
}

static void sh_cmt_clock_event_mode(enum clock_event_mode mode,
				    struct clock_event_device *ced)
{
	struct sh_cmt_priv *p = ced_to_sh_cmt(ced);

	/* deal with old setting first */
	switch (ced->mode) {
	case CLOCK_EVT_MODE_PERIODIC:
	case CLOCK_EVT_MODE_ONESHOT:
		sh_cmt_stop(p, FLAG_CLOCKEVENT);
		break;
	default:
		break;
	}

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		dev_info(&p->pdev->dev, "used for periodic clock events\n");
		sh_cmt_clock_event_start(p, 1);
		break;
	case CLOCK_EVT_MODE_ONESHOT:
		dev_info(&p->pdev->dev, "used for oneshot clock events\n");
		sh_cmt_clock_event_start(p, 0);
		break;
	case CLOCK_EVT_MODE_SHUTDOWN:
	case CLOCK_EVT_MODE_UNUSED:
		sh_cmt_stop(p, FLAG_CLOCKEVENT);
		break;
	default:
		break;
	}
}

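/*
 * The match interrupt fires when CMCNT reaches CMCOR and the counter
 * restarts from zero, so a delta of N cycles is programmed as N - 1.
 * When called from the interrupt handler itself, only the pending match
 * value is updated; the isr reprograms the hardware on its way out.
 */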
static int sh_cmt_clock_event_next(unsigned long delta,
				   struct clock_event_device *ced)
{
	struct sh_cmt_priv *p = ced_to_sh_cmt(ced);

	BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT);
	if (likely(p->flags & FLAG_IRQCONTEXT))
		p->next_match_value = delta - 1;
	else
		sh_cmt_set_next(p, delta - 1);

	return 0;
}

static void sh_cmt_clock_event_suspend(struct clock_event_device *ced)
{
	pm_genpd_syscore_poweroff(&ced_to_sh_cmt(ced)->pdev->dev);
}

static void sh_cmt_clock_event_resume(struct clock_event_device *ced)
{
	pm_genpd_syscore_poweron(&ced_to_sh_cmt(ced)->pdev->dev);
}

static void sh_cmt_register_clockevent(struct sh_cmt_priv *p,
				       char *name, unsigned long rating)
{
	struct clock_event_device *ced = &p->ced;

	memset(ced, 0, sizeof(*ced));

	ced->name = name;
	ced->features = CLOCK_EVT_FEAT_PERIODIC;
	ced->features |= CLOCK_EVT_FEAT_ONESHOT;
	ced->rating = rating;
	ced->cpumask = cpumask_of(0);
	ced->set_next_event = sh_cmt_clock_event_next;
	ced->set_mode = sh_cmt_clock_event_mode;
	ced->suspend = sh_cmt_clock_event_suspend;
	ced->resume = sh_cmt_clock_event_resume;

	dev_info(&p->pdev->dev, "used for clock events\n");
	clockevents_register_device(ced);
}

static int sh_cmt_register(struct sh_cmt_priv *p, char *name,
			   unsigned long clockevent_rating,
			   unsigned long clocksource_rating)
{
	if (clockevent_rating)
		sh_cmt_register_clockevent(p, name, clockevent_rating);

	if (clocksource_rating)
		sh_cmt_register_clocksource(p, name, clocksource_rating);

	return 0;
}

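/*
 * Per-channel setup: map the channel's register window, fetch clock and
 * irq from the platform resources, pick 16-bit or 32-bit register
 * access based on the size of the memory resource, and register the
 * channel as clockevent and/or clocksource according to the platform
 * data ratings.
 */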
static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
{
	struct sh_timer_config *cfg = pdev->dev.platform_data;
	struct resource *res;
	int irq, ret;

	ret = -ENXIO;

	memset(p, 0, sizeof(*p));
	p->pdev = pdev;

	if (!cfg) {
		dev_err(&p->pdev->dev, "missing platform data\n");
		goto err0;
	}

	res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&p->pdev->dev, "failed to get I/O memory\n");
		goto err0;
	}

	irq = platform_get_irq(p->pdev, 0);
	if (irq < 0) {
		dev_err(&p->pdev->dev, "failed to get irq\n");
		goto err0;
	}

	/* map memory, let mapbase point to our channel */
	p->mapbase = ioremap_nocache(res->start, resource_size(res));
	if (p->mapbase == NULL) {
		dev_err(&p->pdev->dev, "failed to remap I/O memory\n");
		goto err0;
	}

	/* request irq using setup_irq() (too early for request_irq()) */
	p->irqaction.name = dev_name(&p->pdev->dev);
	p->irqaction.handler = sh_cmt_interrupt;
	p->irqaction.dev_id = p;
	p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER |
			     IRQF_IRQPOLL | IRQF_NOBALANCING;

	/* get hold of clock */
	p->clk = clk_get(&p->pdev->dev, "cmt_fck");
	if (IS_ERR(p->clk)) {
		dev_err(&p->pdev->dev, "cannot get clock\n");
		ret = PTR_ERR(p->clk);
		goto err1;
	}

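	/*
	 * A 6-byte memory resource covers exactly three 16-bit registers
	 * (CMCSR, CMCNT, CMCOR), identifying a 16-bit channel; anything
	 * else is treated as the 32-bit variant.  The overflow and clear
	 * bits differ accordingly.
	 */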
	if (resource_size(res) == 6) {
		p->width = 16;
		p->read_count = sh_cmt_read16;
		p->write_count = sh_cmt_write16;
		p->overflow_bit = 0x80;
		p->clear_bits = ~0x80;
	} else {
		p->width = 32;
		p->read_count = sh_cmt_read32;
		p->write_count = sh_cmt_write32;
		p->overflow_bit = 0x8000;
		p->clear_bits = ~0xc000;
	}

	if (p->width == (sizeof(p->max_match_value) * 8))
		p->max_match_value = ~0;
	else
		p->max_match_value = (1 << p->width) - 1;

	p->match_value = p->max_match_value;
	raw_spin_lock_init(&p->lock);

	ret = sh_cmt_register(p, (char *)dev_name(&p->pdev->dev),
			      cfg->clockevent_rating,
			      cfg->clocksource_rating);
	if (ret) {
		dev_err(&p->pdev->dev, "registration failed\n");
		goto err2;
	}
	p->cs_enabled = false;

	ret = setup_irq(irq, &p->irqaction);
	if (ret) {
		dev_err(&p->pdev->dev, "failed to request irq %d\n", irq);
		goto err2;
	}

	platform_set_drvdata(pdev, p);

	return 0;
err2:
	clk_put(p->clk);

err1:
	iounmap(p->mapbase);
err0:
	return ret;
}

static int sh_cmt_probe(struct platform_device *pdev)
{
	struct sh_cmt_priv *p = platform_get_drvdata(pdev);
	struct sh_timer_config *cfg = pdev->dev.platform_data;
	int ret;

	if (!is_early_platform_device(pdev)) {
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
	}

	if (p) {
		dev_info(&pdev->dev, "kept as earlytimer\n");
		goto out;
	}

	p = kmalloc(sizeof(*p), GFP_KERNEL);
	if (p == NULL) {
		dev_err(&pdev->dev, "failed to allocate driver data\n");
		return -ENOMEM;
	}

	ret = sh_cmt_setup(p, pdev);
	if (ret) {
		kfree(p);
		pm_runtime_idle(&pdev->dev);
		return ret;
	}
	if (is_early_platform_device(pdev))
		return 0;

 out:
	if (cfg->clockevent_rating || cfg->clocksource_rating)
		pm_runtime_irq_safe(&pdev->dev);
	else
		pm_runtime_idle(&pdev->dev);

	return 0;
}

static int sh_cmt_remove(struct platform_device *pdev)
{
	return -EBUSY; /* cannot unregister clockevent and clocksource */
}

static struct platform_driver sh_cmt_device_driver = {
	.probe		= sh_cmt_probe,
	.remove		= sh_cmt_remove,
	.driver		= {
		.name	= "sh_cmt",
	}
};

static int __init sh_cmt_init(void)
{
	return platform_driver_register(&sh_cmt_device_driver);
}

static void __exit sh_cmt_exit(void)
{
	platform_driver_unregister(&sh_cmt_device_driver);
}

early_platform_init("earlytimer", &sh_cmt_device_driver);
module_init(sh_cmt_init);
module_exit(sh_cmt_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH CMT Timer Driver");
MODULE_LICENSE("GPL v2");