blob: eefacc3ac4f2bbd86de82d9e14d31cf86a788942 [file] [log] [blame]
Magnus Damm3fb1b6a2009-01-22 09:55:59 +00001/*
2 * SuperH Timer Support - CMT
3 *
4 * Copyright (C) 2008 Magnus Damm
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20#include <linux/init.h>
Magnus Damm3fb1b6a2009-01-22 09:55:59 +000021#include <linux/platform_device.h>
22#include <linux/spinlock.h>
23#include <linux/interrupt.h>
24#include <linux/ioport.h>
25#include <linux/io.h>
26#include <linux/clk.h>
27#include <linux/irq.h>
28#include <linux/err.h>
Magnus Damm3f7e5e22011-07-13 07:59:48 +000029#include <linux/delay.h>
Magnus Damm3fb1b6a2009-01-22 09:55:59 +000030#include <linux/clocksource.h>
31#include <linux/clockchips.h>
Paul Mundt46a12f72009-05-03 17:57:17 +090032#include <linux/sh_timer.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090033#include <linux/slab.h>
Paul Gortmaker7deeab52011-07-03 13:36:22 -040034#include <linux/module.h>
Rafael J. Wysocki615a4452012-03-13 22:40:06 +010035#include <linux/pm_domain.h>
Rafael J. Wysockibad81382012-08-06 01:48:57 +020036#include <linux/pm_runtime.h>
Magnus Damm3fb1b6a2009-01-22 09:55:59 +000037
38struct sh_cmt_priv {
39 void __iomem *mapbase;
40 struct clk *clk;
41 unsigned long width; /* 16 or 32 bit version of hardware block */
42 unsigned long overflow_bit;
43 unsigned long clear_bits;
44 struct irqaction irqaction;
45 struct platform_device *pdev;
46
47 unsigned long flags;
48 unsigned long match_value;
49 unsigned long next_match_value;
50 unsigned long max_match_value;
51 unsigned long rate;
Paul Mundt7d0c3992012-05-25 13:36:43 +090052 raw_spinlock_t lock;
Magnus Damm3fb1b6a2009-01-22 09:55:59 +000053 struct clock_event_device ced;
Magnus Damm19bdc9d2009-04-17 05:26:31 +000054 struct clocksource cs;
Magnus Damm3fb1b6a2009-01-22 09:55:59 +000055 unsigned long total_cycles;
Rafael J. Wysockibad81382012-08-06 01:48:57 +020056 bool cs_enabled;
Magnus Damm3fb1b6a2009-01-22 09:55:59 +000057};
58
Magnus Damm587acb32012-12-14 14:54:10 +090059static inline unsigned long sh_cmt_read16(void __iomem *base,
60 unsigned long offs)
61{
62 return ioread16(base + (offs << 1));
63}
Magnus Damm3fb1b6a2009-01-22 09:55:59 +000064
Magnus Damm587acb32012-12-14 14:54:10 +090065static inline void sh_cmt_write16(void __iomem *base, unsigned long offs,
66 unsigned long value)
67{
68 iowrite16(value, base + (offs << 1));
69}
70
/*
 * Per-channel register indices; sh_cmt_read()/sh_cmt_write() scale them
 * by the register width (2 or 4 bytes) of the hardware block.
 */
#define CMCSR 0 /* channel register */
#define CMCNT 1 /* channel register */
#define CMCOR 2 /* channel register */
74
75static inline unsigned long sh_cmt_read(struct sh_cmt_priv *p, int reg_nr)
76{
Magnus Damm3fb1b6a2009-01-22 09:55:59 +000077 void __iomem *base = p->mapbase;
Magnus Damm587acb32012-12-14 14:54:10 +090078 unsigned long offs = reg_nr;
Magnus Damm3fb1b6a2009-01-22 09:55:59 +000079
Magnus Damm587acb32012-12-14 14:54:10 +090080 if (p->width == 16) {
Magnus Damm3fb1b6a2009-01-22 09:55:59 +000081 offs <<= 1;
Magnus Damm587acb32012-12-14 14:54:10 +090082 return ioread16(base + offs);
83 } else {
Magnus Damm3fb1b6a2009-01-22 09:55:59 +000084 offs <<= 2;
Magnus Damm587acb32012-12-14 14:54:10 +090085 return ioread32(base + offs);
Magnus Damm3fb1b6a2009-01-22 09:55:59 +000086 }
Magnus Damm3fb1b6a2009-01-22 09:55:59 +000087}
88
Magnus Damm1b56b962012-12-14 14:54:00 +090089static inline unsigned long sh_cmt_read_cmstr(struct sh_cmt_priv *p)
90{
Magnus Damm587acb32012-12-14 14:54:10 +090091 struct sh_timer_config *cfg = p->pdev->dev.platform_data;
92
93 return sh_cmt_read16(p->mapbase - cfg->channel_offset, 0);
Magnus Damm1b56b962012-12-14 14:54:00 +090094}
95
96static inline unsigned long sh_cmt_read_cmcsr(struct sh_cmt_priv *p)
97{
Magnus Damm587acb32012-12-14 14:54:10 +090098 return sh_cmt_read16(p->mapbase, CMCSR);
Magnus Damm1b56b962012-12-14 14:54:00 +090099}
100
101static inline unsigned long sh_cmt_read_cmcnt(struct sh_cmt_priv *p)
102{
103 return sh_cmt_read(p, CMCNT);
104}
105
Magnus Damm3fb1b6a2009-01-22 09:55:59 +0000106static inline void sh_cmt_write(struct sh_cmt_priv *p, int reg_nr,
107 unsigned long value)
108{
Magnus Damm3fb1b6a2009-01-22 09:55:59 +0000109 void __iomem *base = p->mapbase;
Magnus Damm587acb32012-12-14 14:54:10 +0900110 unsigned long offs = reg_nr;
Magnus Damm3fb1b6a2009-01-22 09:55:59 +0000111
Magnus Damm587acb32012-12-14 14:54:10 +0900112 if (p->width == 16) {
Magnus Damm3fb1b6a2009-01-22 09:55:59 +0000113 offs <<= 1;
Magnus Damm587acb32012-12-14 14:54:10 +0900114 iowrite16(value, base + offs);
115 } else {
Magnus Damm3fb1b6a2009-01-22 09:55:59 +0000116 offs <<= 2;
Magnus Damm587acb32012-12-14 14:54:10 +0900117 iowrite32(value, base + offs);
Magnus Damm3fb1b6a2009-01-22 09:55:59 +0000118 }
Magnus Damm3fb1b6a2009-01-22 09:55:59 +0000119}
120
Magnus Damm1b56b962012-12-14 14:54:00 +0900121static inline void sh_cmt_write_cmstr(struct sh_cmt_priv *p,
122 unsigned long value)
123{
Magnus Damm587acb32012-12-14 14:54:10 +0900124 struct sh_timer_config *cfg = p->pdev->dev.platform_data;
125
126 sh_cmt_write16(p->mapbase - cfg->channel_offset, 0, value);
Magnus Damm1b56b962012-12-14 14:54:00 +0900127}
128
129static inline void sh_cmt_write_cmcsr(struct sh_cmt_priv *p,
130 unsigned long value)
131{
Magnus Damm587acb32012-12-14 14:54:10 +0900132 sh_cmt_write16(p->mapbase, CMCSR, value);
Magnus Damm1b56b962012-12-14 14:54:00 +0900133}
134
135static inline void sh_cmt_write_cmcnt(struct sh_cmt_priv *p,
136 unsigned long value)
137{
138 sh_cmt_write(p, CMCNT, value);
139}
140
141static inline void sh_cmt_write_cmcor(struct sh_cmt_priv *p,
142 unsigned long value)
143{
144 sh_cmt_write(p, CMCOR, value);
145}
146
/*
 * Read a stable snapshot of the running counter.
 *
 * CMCNT is read three times and the middle value is returned only once
 * the readings are monotonic and the overflow flag did not change
 * across the sequence; otherwise the counter was caught mid-update (or
 * wrapped) and we retry. *has_wrapped is set to the (non-zero) overflow
 * bit observed with the returned value.
 */
static unsigned long sh_cmt_get_counter(struct sh_cmt_priv *p,
					int *has_wrapped)
{
	unsigned long v1, v2, v3;
	int o1, o2;

	o1 = sh_cmt_read_cmcsr(p) & p->overflow_bit;

	/* Make sure the timer value is stable. Stolen from acpi_pm.c */
	do {
		o2 = o1;
		v1 = sh_cmt_read_cmcnt(p);
		v2 = sh_cmt_read_cmcnt(p);
		v3 = sh_cmt_read_cmcnt(p);
		o1 = sh_cmt_read_cmcsr(p) & p->overflow_bit;
	} while (unlikely((o1 != o2) || (v1 > v2 && v1 < v3)
			  || (v2 > v3 && v2 < v1) || (v3 > v1 && v3 < v2)));

	*has_wrapped = o1;
	return v2;
}
168
/* serializes read-modify-write of the CMSTR register shared by all channels */
static DEFINE_RAW_SPINLOCK(sh_cmt_lock);
Magnus Damm3fb1b6a2009-01-22 09:55:59 +0000170
/*
 * Start (start != 0) or stop this channel's counter by toggling its bit
 * in the shared CMSTR register. The read-modify-write is done under
 * sh_cmt_lock because other channels of the same block share CMSTR.
 */
static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start)
{
	struct sh_timer_config *cfg = p->pdev->dev.platform_data;
	unsigned long flags, value;

	/* start stop register shared by multiple timer channels */
	raw_spin_lock_irqsave(&sh_cmt_lock, flags);
	value = sh_cmt_read_cmstr(p);

	if (start)
		value |= 1 << cfg->timer_bit;
	else
		value &= ~(1 << cfg->timer_bit);

	sh_cmt_write_cmstr(p, value);
	raw_spin_unlock_irqrestore(&sh_cmt_lock, flags);
}
188
/*
 * Power up and start the channel: take a runtime-PM reference, enable
 * the function clock, program periodic mode with the maximum timeout
 * and start the counter. On success *rate is set to the counter's
 * input frequency in Hz. Returns 0 or a negative error code; on error
 * the clock/PM state is unwound.
 */
static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
{
	int k, ret;

	/* mark the device as irq-safe/syscore so it stays usable in timers */
	pm_runtime_get_sync(&p->pdev->dev);
	dev_pm_syscore_device(&p->pdev->dev, true);

	/* enable clock */
	ret = clk_enable(p->clk);
	if (ret) {
		dev_err(&p->pdev->dev, "cannot enable clock\n");
		goto err0;
	}

	/* make sure channel is disabled */
	sh_cmt_start_stop_ch(p, 0);

	/* configure channel, periodic mode and maximum timeout */
	if (p->width == 16) {
		/* NOTE(review): 0x43 selects clk/512 + compare-match irq
		 * enable per the CMCSR bit layout — confirm against the
		 * SoC datasheet. */
		*rate = clk_get_rate(p->clk) / 512;
		sh_cmt_write_cmcsr(p, 0x43);
	} else {
		/* NOTE(review): 0x01a4 selects clk/8 + irq enable on the
		 * 32-bit variant — confirm against the SoC datasheet. */
		*rate = clk_get_rate(p->clk) / 8;
		sh_cmt_write_cmcsr(p, 0x01a4);
	}

	sh_cmt_write_cmcor(p, 0xffffffff);
	sh_cmt_write_cmcnt(p, 0);

	/*
	 * According to the sh73a0 user's manual, as CMCNT can be operated
	 * only by the RCLK (Pseudo 32 KHz), there's one restriction on
	 * modifying CMCNT register; two RCLK cycles are necessary before
	 * this register is either read or any modification of the value
	 * it holds is reflected in the LSI's actual operation.
	 *
	 * While at it, we're supposed to clear out the CMCNT as of this
	 * moment, so make sure it's processed properly here.  This will
	 * take RCLKx2 at maximum.
	 */
	for (k = 0; k < 100; k++) {
		if (!sh_cmt_read_cmcnt(p))
			break;
		udelay(1);
	}

	if (sh_cmt_read_cmcnt(p)) {
		dev_err(&p->pdev->dev, "cannot clear CMCNT\n");
		ret = -ETIMEDOUT;
		goto err1;
	}

	/* enable channel */
	sh_cmt_start_stop_ch(p, 1);
	return 0;
 err1:
	/* stop clock */
	clk_disable(p->clk);

 err0:
	return ret;
}
251
/*
 * Stop the channel and power it down: counter stopped, CMT interrupts
 * masked, function clock disabled and the runtime-PM reference taken
 * by sh_cmt_enable() dropped.
 */
static void sh_cmt_disable(struct sh_cmt_priv *p)
{
	/* disable channel */
	sh_cmt_start_stop_ch(p, 0);

	/* disable interrupts in CMT block */
	sh_cmt_write_cmcsr(p, 0);

	/* stop clock */
	clk_disable(p->clk);

	dev_pm_syscore_device(&p->pdev->dev, false);
	pm_runtime_put(&p->pdev->dev);
}
266
/* private flags, kept in p->flags and protected by p->lock */
#define FLAG_CLOCKEVENT (1 << 0)	/* channel in use as clock event device */
#define FLAG_CLOCKSOURCE (1 << 1)	/* channel in use as clocksource */
#define FLAG_REPROGRAM (1 << 2)		/* match value needs reprogramming */
#define FLAG_SKIPEVENT (1 << 3)		/* suppress event delivery on next irq */
#define FLAG_IRQCONTEXT (1 << 4)	/* currently executing inside the isr */
273
/*
 * Program CMCOR with p->next_match_value and verify the hardware
 * accepted it, compensating for races with the free-running counter.
 * If 'absolute' the match value is taken as-is, otherwise it is
 * relative to the current counter value. When a wrap is detected the
 * interrupt handler is left to finish the job (FLAG_SKIPEVENT /
 * FLAG_REPROGRAM protocol); if the event was programmed too close to
 * the counter, the delay is doubled and the write retried.
 *
 * Caller must hold p->lock (or be the isr with interrupts off).
 */
static void sh_cmt_clock_event_program_verify(struct sh_cmt_priv *p,
					      int absolute)
{
	unsigned long new_match;
	unsigned long value = p->next_match_value;
	unsigned long delay = 0;
	unsigned long now = 0;
	int has_wrapped;

	now = sh_cmt_get_counter(p, &has_wrapped);
	p->flags |= FLAG_REPROGRAM; /* force reprogram */

	if (has_wrapped) {
		/* we're competing with the interrupt handler.
		 *  -> let the interrupt handler reprogram the timer.
		 *  -> interrupt number two handles the event.
		 */
		p->flags |= FLAG_SKIPEVENT;
		return;
	}

	if (absolute)
		now = 0;

	do {
		/* reprogram the timer hardware,
		 * but don't save the new match value yet.
		 */
		new_match = now + value + delay;
		if (new_match > p->max_match_value)
			new_match = p->max_match_value;

		sh_cmt_write_cmcor(p, new_match);

		now = sh_cmt_get_counter(p, &has_wrapped);
		if (has_wrapped && (new_match > p->match_value)) {
			/* we are changing to a greater match value,
			 * so this wrap must be caused by the counter
			 * matching the old value.
			 * -> first interrupt reprograms the timer.
			 * -> interrupt number two handles the event.
			 */
			p->flags |= FLAG_SKIPEVENT;
			break;
		}

		if (has_wrapped) {
			/* we are changing to a smaller match value,
			 * so the wrap must be caused by the counter
			 * matching the new value.
			 * -> save programmed match value.
			 * -> let isr handle the event.
			 */
			p->match_value = new_match;
			break;
		}

		/* be safe: verify hardware settings */
		if (now < new_match) {
			/* timer value is below match value, all good.
			 * this makes sure we won't miss any match events.
			 * -> save programmed match value.
			 * -> let isr handle the event.
			 */
			p->match_value = new_match;
			break;
		}

		/* the counter has reached a value greater
		 * than our new match value. and since the
		 * has_wrapped flag isn't set we must have
		 * programmed a too close event.
		 * -> increase delay and retry.
		 */
		if (delay)
			delay <<= 1;
		else
			delay = 1;

		/* delay only becomes 0 again after shifting out of range */
		if (!delay)
			dev_warn(&p->pdev->dev, "too long delay\n");

	} while (delay);
}
358
/*
 * Program a new relative timeout. Lockless variant: caller must hold
 * p->lock. Deltas beyond the counter width are clamped by
 * sh_cmt_clock_event_program_verify() after the warning.
 */
static void __sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta)
{
	if (delta > p->max_match_value)
		dev_warn(&p->pdev->dev, "delta out of range\n");

	p->next_match_value = delta;
	sh_cmt_clock_event_program_verify(p, 0);
}
367
Magnus Damm3fb1b6a2009-01-22 09:55:59 +0000368static void sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta)
369{
370 unsigned long flags;
371
Paul Mundt7d0c3992012-05-25 13:36:43 +0900372 raw_spin_lock_irqsave(&p->lock, flags);
Takashi YOSHII65ada542010-12-17 07:25:09 +0000373 __sh_cmt_set_next(p, delta);
Paul Mundt7d0c3992012-05-25 13:36:43 +0900374 raw_spin_unlock_irqrestore(&p->lock, flags);
Magnus Damm3fb1b6a2009-01-22 09:55:59 +0000375}
376
/*
 * Compare-match interrupt handler. Acknowledges the hardware, folds a
 * full match period into the clocksource cycle count, delivers the
 * clockevent (unless FLAG_SKIPEVENT asked us not to) and reprograms
 * the next match value. FLAG_IRQCONTEXT lets sh_cmt_clock_event_next()
 * detect it is being called from the event handler and defer the
 * hardware write to us.
 */
static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
{
	struct sh_cmt_priv *p = dev_id;

	/* clear flags */
	sh_cmt_write_cmcsr(p, sh_cmt_read_cmcsr(p) & p->clear_bits);

	/* update clock source counter to begin with if enabled
	 * the wrap flag should be cleared by the timer specific
	 * isr before we end up here.
	 */
	if (p->flags & FLAG_CLOCKSOURCE)
		p->total_cycles += p->match_value + 1;

	if (!(p->flags & FLAG_REPROGRAM))
		p->next_match_value = p->max_match_value;

	p->flags |= FLAG_IRQCONTEXT;

	if (p->flags & FLAG_CLOCKEVENT) {
		if (!(p->flags & FLAG_SKIPEVENT)) {
			if (p->ced.mode == CLOCK_EVT_MODE_ONESHOT) {
				p->next_match_value = p->max_match_value;
				p->flags |= FLAG_REPROGRAM;
			}

			p->ced.event_handler(&p->ced);
		}
	}

	p->flags &= ~FLAG_SKIPEVENT;

	if (p->flags & FLAG_REPROGRAM) {
		p->flags &= ~FLAG_REPROGRAM;
		sh_cmt_clock_event_program_verify(p, 1);

		/* keep FLAG_REPROGRAM off unless another reprogram is needed */
		if (p->flags & FLAG_CLOCKEVENT)
			if ((p->ced.mode == CLOCK_EVT_MODE_SHUTDOWN)
			    || (p->match_value == p->next_match_value))
				p->flags &= ~FLAG_REPROGRAM;
	}

	p->flags &= ~FLAG_IRQCONTEXT;

	return IRQ_HANDLED;
}
423
/*
 * Mark the channel in use for 'flag' (FLAG_CLOCKEVENT or
 * FLAG_CLOCKSOURCE), powering up the hardware on the first user.
 * Returns 0 or the error from sh_cmt_enable().
 */
static int sh_cmt_start(struct sh_cmt_priv *p, unsigned long flag)
{
	int ret = 0;
	unsigned long flags;

	raw_spin_lock_irqsave(&p->lock, flags);

	/* only the first user actually enables the hardware */
	if (!(p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
		ret = sh_cmt_enable(p, &p->rate);

	if (ret)
		goto out;
	p->flags |= flag;

	/* setup timeout if no clockevent */
	if ((flag == FLAG_CLOCKSOURCE) && (!(p->flags & FLAG_CLOCKEVENT)))
		__sh_cmt_set_next(p, p->max_match_value);
 out:
	raw_spin_unlock_irqrestore(&p->lock, flags);

	return ret;
}
446
/*
 * Drop the 'flag' usage of the channel, powering the hardware down
 * when the last user (clockevent or clocksource) goes away.
 */
static void sh_cmt_stop(struct sh_cmt_priv *p, unsigned long flag)
{
	unsigned long flags;
	unsigned long f;

	raw_spin_lock_irqsave(&p->lock, flags);

	f = p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE);
	p->flags &= ~flag;

	/* disable only if we were enabled and no user remains */
	if (f && !(p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
		sh_cmt_disable(p);

	/* adjust the timeout to maximum if only clocksource left */
	if ((flag == FLAG_CLOCKEVENT) && (p->flags & FLAG_CLOCKSOURCE))
		__sh_cmt_set_next(p, p->max_match_value);

	raw_spin_unlock_irqrestore(&p->lock, flags);
}
466
/* Map an embedded clocksource back to its containing sh_cmt_priv. */
static struct sh_cmt_priv *cs_to_sh_cmt(struct clocksource *cs)
{
	return container_of(cs, struct sh_cmt_priv, cs);
}
471
/*
 * Clocksource ->read() callback: total cycles accumulated at past
 * wraps plus the current raw counter. A pending, not-yet-serviced wrap
 * is compensated by adding one full match period.
 */
static cycle_t sh_cmt_clocksource_read(struct clocksource *cs)
{
	struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
	unsigned long flags, raw;
	unsigned long value;
	int has_wrapped;

	raw_spin_lock_irqsave(&p->lock, flags);
	value = p->total_cycles;
	raw = sh_cmt_get_counter(p, &has_wrapped);

	if (unlikely(has_wrapped))
		raw += p->match_value + 1;
	raw_spin_unlock_irqrestore(&p->lock, flags);

	return value + raw;
}
489
/*
 * Clocksource ->enable() callback: reset the cycle count, start the
 * channel and publish the real counter frequency (the clocksource was
 * registered with a dummy 1 Hz rate).
 */
static int sh_cmt_clocksource_enable(struct clocksource *cs)
{
	int ret;
	struct sh_cmt_priv *p = cs_to_sh_cmt(cs);

	WARN_ON(p->cs_enabled);

	p->total_cycles = 0;

	ret = sh_cmt_start(p, FLAG_CLOCKSOURCE);
	if (!ret) {
		__clocksource_updatefreq_hz(cs, p->rate);
		p->cs_enabled = true;
	}
	return ret;
}
506
/* Clocksource ->disable() callback: stop the channel and clear state. */
static void sh_cmt_clocksource_disable(struct clocksource *cs)
{
	struct sh_cmt_priv *p = cs_to_sh_cmt(cs);

	WARN_ON(!p->cs_enabled);

	sh_cmt_stop(p, FLAG_CLOCKSOURCE);
	p->cs_enabled = false;
}
516
Rafael J. Wysocki9bb5ec82012-08-06 01:43:03 +0200517static void sh_cmt_clocksource_suspend(struct clocksource *cs)
518{
519 struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
520
521 sh_cmt_stop(p, FLAG_CLOCKSOURCE);
522 pm_genpd_syscore_poweroff(&p->pdev->dev);
523}
524
Magnus Dammc8162882010-02-02 14:41:40 -0800525static void sh_cmt_clocksource_resume(struct clocksource *cs)
526{
Rafael J. Wysocki9bb5ec82012-08-06 01:43:03 +0200527 struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
528
529 pm_genpd_syscore_poweron(&p->pdev->dev);
530 sh_cmt_start(p, FLAG_CLOCKSOURCE);
Magnus Dammc8162882010-02-02 14:41:40 -0800531}
532
/*
 * Fill in and register the embedded clocksource. Registered with a
 * dummy 1 Hz frequency; the real rate is only known after the clock is
 * enabled and is published from sh_cmt_clocksource_enable().
 * Always returns 0.
 */
static int sh_cmt_register_clocksource(struct sh_cmt_priv *p,
				       char *name, unsigned long rating)
{
	struct clocksource *cs = &p->cs;

	cs->name = name;
	cs->rating = rating;
	cs->read = sh_cmt_clocksource_read;
	cs->enable = sh_cmt_clocksource_enable;
	cs->disable = sh_cmt_clocksource_disable;
	cs->suspend = sh_cmt_clocksource_suspend;
	cs->resume = sh_cmt_clocksource_resume;
	cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8);
	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;

	dev_info(&p->pdev->dev, "used as clock source\n");

	/* Register with dummy 1 Hz value, gets updated in ->enable() */
	clocksource_register_hz(cs, 1);
	return 0;
}
554
/* Map an embedded clock_event_device back to its containing sh_cmt_priv. */
static struct sh_cmt_priv *ced_to_sh_cmt(struct clock_event_device *ced)
{
	return container_of(ced, struct sh_cmt_priv, ced);
}
559
/*
 * Start the channel for clockevent use and (re)derive the clockevent
 * conversion factors from the now-known counter rate. In periodic mode
 * the timeout is one HZ tick (rounded), otherwise the maximum.
 */
static void sh_cmt_clock_event_start(struct sh_cmt_priv *p, int periodic)
{
	struct clock_event_device *ced = &p->ced;

	sh_cmt_start(p, FLAG_CLOCKEVENT);

	/* TODO: calculate good shift from rate and counter bit width */

	ced->shift = 32;
	ced->mult = div_sc(p->rate, NSEC_PER_SEC, ced->shift);
	ced->max_delta_ns = clockevent_delta2ns(p->max_match_value, ced);
	ced->min_delta_ns = clockevent_delta2ns(0x1f, ced);

	if (periodic)
		sh_cmt_set_next(p, ((p->rate + HZ/2) / HZ) - 1);
	else
		sh_cmt_set_next(p, p->max_match_value);
}
578
/*
 * Clockevent ->set_mode() callback: tear down the previous mode if it
 * was running, then start or stop the channel for the new mode.
 */
static void sh_cmt_clock_event_mode(enum clock_event_mode mode,
				    struct clock_event_device *ced)
{
	struct sh_cmt_priv *p = ced_to_sh_cmt(ced);

	/* deal with old setting first */
	switch (ced->mode) {
	case CLOCK_EVT_MODE_PERIODIC:
	case CLOCK_EVT_MODE_ONESHOT:
		sh_cmt_stop(p, FLAG_CLOCKEVENT);
		break;
	default:
		break;
	}

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		dev_info(&p->pdev->dev, "used for periodic clock events\n");
		sh_cmt_clock_event_start(p, 1);
		break;
	case CLOCK_EVT_MODE_ONESHOT:
		dev_info(&p->pdev->dev, "used for oneshot clock events\n");
		sh_cmt_clock_event_start(p, 0);
		break;
	case CLOCK_EVT_MODE_SHUTDOWN:
	case CLOCK_EVT_MODE_UNUSED:
		sh_cmt_stop(p, FLAG_CLOCKEVENT);
		break;
	default:
		break;
	}
}
611
/*
 * Clockevent ->set_next_event() callback. When called from inside our
 * own interrupt handler (FLAG_IRQCONTEXT), only record the value — the
 * isr performs the hardware reprogramming itself; otherwise program it
 * immediately. The hardware fires on match + 1, hence delta - 1.
 */
static int sh_cmt_clock_event_next(unsigned long delta,
				   struct clock_event_device *ced)
{
	struct sh_cmt_priv *p = ced_to_sh_cmt(ced);

	BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT);
	if (likely(p->flags & FLAG_IRQCONTEXT))
		p->next_match_value = delta - 1;
	else
		sh_cmt_set_next(p, delta - 1);

	return 0;
}
625
Rafael J. Wysocki9bb5ec82012-08-06 01:43:03 +0200626static void sh_cmt_clock_event_suspend(struct clock_event_device *ced)
627{
628 pm_genpd_syscore_poweroff(&ced_to_sh_cmt(ced)->pdev->dev);
629}
630
631static void sh_cmt_clock_event_resume(struct clock_event_device *ced)
632{
633 pm_genpd_syscore_poweron(&ced_to_sh_cmt(ced)->pdev->dev);
634}
635
/*
 * Fill in and register the embedded clock_event_device. Bound to CPU0;
 * rate-dependent fields (mult/shift/deltas) are filled in later by
 * sh_cmt_clock_event_start() once the counter rate is known.
 */
static void sh_cmt_register_clockevent(struct sh_cmt_priv *p,
				       char *name, unsigned long rating)
{
	struct clock_event_device *ced = &p->ced;

	memset(ced, 0, sizeof(*ced));

	ced->name = name;
	ced->features = CLOCK_EVT_FEAT_PERIODIC;
	ced->features |= CLOCK_EVT_FEAT_ONESHOT;
	ced->rating = rating;
	ced->cpumask = cpumask_of(0);
	ced->set_next_event = sh_cmt_clock_event_next;
	ced->set_mode = sh_cmt_clock_event_mode;
	ced->suspend = sh_cmt_clock_event_suspend;
	ced->resume = sh_cmt_clock_event_resume;

	dev_info(&p->pdev->dev, "used for clock events\n");
	clockevents_register_device(ced);
}
656
/*
 * Register the channel as clockevent and/or clocksource, depending on
 * which ratings are non-zero in the platform data. Always returns 0.
 */
static int sh_cmt_register(struct sh_cmt_priv *p, char *name,
			   unsigned long clockevent_rating,
			   unsigned long clocksource_rating)
{
	if (clockevent_rating != 0)
		sh_cmt_register_clockevent(p, name, clockevent_rating);

	if (clocksource_rating != 0)
		sh_cmt_register_clocksource(p, name, clocksource_rating);

	return 0;
}
669
/*
 * One-time channel setup: validate platform data, map the register
 * window, prepare the irqaction, grab the function clock, derive the
 * counter width (a 6-byte resource means the 16-bit block) and register
 * the clockevent/clocksource. Returns 0 or a negative error; resources
 * are unwound through the goto ladder on failure.
 */
static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
{
	struct sh_timer_config *cfg = pdev->dev.platform_data;
	struct resource *res;
	int irq, ret;
	ret = -ENXIO;

	memset(p, 0, sizeof(*p));
	p->pdev = pdev;

	if (!cfg) {
		dev_err(&p->pdev->dev, "missing platform data\n");
		goto err0;
	}

	res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&p->pdev->dev, "failed to get I/O memory\n");
		goto err0;
	}

	irq = platform_get_irq(p->pdev, 0);
	if (irq < 0) {
		dev_err(&p->pdev->dev, "failed to get irq\n");
		goto err0;
	}

	/* map memory, let mapbase point to our channel */
	p->mapbase = ioremap_nocache(res->start, resource_size(res));
	if (p->mapbase == NULL) {
		dev_err(&p->pdev->dev, "failed to remap I/O memory\n");
		goto err0;
	}

	/* request irq using setup_irq() (too early for request_irq()) */
	p->irqaction.name = dev_name(&p->pdev->dev);
	p->irqaction.handler = sh_cmt_interrupt;
	p->irqaction.dev_id = p;
	p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | \
			     IRQF_IRQPOLL | IRQF_NOBALANCING;

	/* get hold of clock */
	p->clk = clk_get(&p->pdev->dev, "cmt_fck");
	if (IS_ERR(p->clk)) {
		dev_err(&p->pdev->dev, "cannot get clock\n");
		ret = PTR_ERR(p->clk);
		goto err1;
	}

	/* a 6-byte register window identifies the 16-bit block variant */
	if (resource_size(res) == 6) {
		p->width = 16;
		p->overflow_bit = 0x80;
		p->clear_bits = ~0x80;
	} else {
		p->width = 32;
		p->overflow_bit = 0x8000;
		p->clear_bits = ~0xc000;
	}

	/* avoid the undefined shift by the full width of the type */
	if (p->width == (sizeof(p->max_match_value) * 8))
		p->max_match_value = ~0;
	else
		p->max_match_value = (1 << p->width) - 1;

	p->match_value = p->max_match_value;
	raw_spin_lock_init(&p->lock);

	ret = sh_cmt_register(p, (char *)dev_name(&p->pdev->dev),
			      cfg->clockevent_rating,
			      cfg->clocksource_rating);
	if (ret) {
		dev_err(&p->pdev->dev, "registration failed\n");
		goto err2;
	}
	p->cs_enabled = false;

	ret = setup_irq(irq, &p->irqaction);
	if (ret) {
		dev_err(&p->pdev->dev, "failed to request irq %d\n", irq);
		goto err2;
	}

	platform_set_drvdata(pdev, p);

	return 0;
err2:
	clk_put(p->clk);

err1:
	iounmap(p->mapbase);
err0:
	return ret;
}
763
/*
 * Platform driver probe. May run twice for the same device: once as an
 * early platform device (earlytimer) and again as a regular platform
 * device — in the second pass drvdata is already set and only the
 * runtime-PM state needs adjusting.
 */
static int sh_cmt_probe(struct platform_device *pdev)
{
	struct sh_cmt_priv *p = platform_get_drvdata(pdev);
	struct sh_timer_config *cfg = pdev->dev.platform_data;
	int ret;

	if (!is_early_platform_device(pdev)) {
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
	}

	/* non-NULL drvdata means the earlytimer pass already set us up */
	if (p) {
		dev_info(&pdev->dev, "kept as earlytimer\n");
		goto out;
	}

	p = kmalloc(sizeof(*p), GFP_KERNEL);
	if (p == NULL) {
		dev_err(&pdev->dev, "failed to allocate driver data\n");
		return -ENOMEM;
	}

	ret = sh_cmt_setup(p, pdev);
	if (ret) {
		kfree(p);
		pm_runtime_idle(&pdev->dev);
		return ret;
	}
	if (is_early_platform_device(pdev))
		return 0;

 out:
	/* a channel used as timer must stay usable with interrupts off */
	if (cfg->clockevent_rating || cfg->clocksource_rating)
		pm_runtime_irq_safe(&pdev->dev);
	else
		pm_runtime_idle(&pdev->dev);

	return 0;
}
803
/* Removal is refused: registered clockevents/clocksources can't go away. */
static int sh_cmt_remove(struct platform_device *pdev)
{
	return -EBUSY; /* cannot unregister clockevent and clocksource */
}
808
/* platform driver glue; also registered below as an early platform driver */
static struct platform_driver sh_cmt_device_driver = {
	.probe		= sh_cmt_probe,
	.remove		= sh_cmt_remove,
	.driver		= {
		.name	= "sh_cmt",
	}
};
816
/* Module init: register the platform driver. */
static int __init sh_cmt_init(void)
{
	return platform_driver_register(&sh_cmt_device_driver);
}
821
/* Module exit: unregister the platform driver. */
static void __exit sh_cmt_exit(void)
{
	platform_driver_unregister(&sh_cmt_device_driver);
}
826
Magnus Damme475eed2009-04-15 10:50:04 +0000827early_platform_init("earlytimer", &sh_cmt_device_driver);
Magnus Damm3fb1b6a2009-01-22 09:55:59 +0000828module_init(sh_cmt_init);
829module_exit(sh_cmt_exit);
830
831MODULE_AUTHOR("Magnus Damm");
832MODULE_DESCRIPTION("SuperH CMT Timer Driver");
833MODULE_LICENSE("GPL v2");