/*
 * arch/sh/kernel/timers/timer-tmu.c - TMU Timer Support
 *
 * Copyright (C) 2005 - 2007 Paul Mundt
 *
 * TMU handling code hacked out of arch/sh/kernel/time.c
 *
 * Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka
 * Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
 * Copyright (C) 2002, 2003, 2004 Paul Mundt
 * Copyright (C) 2002 M. R. Brown <mrbrown@linux-sh.org>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/seqlock.h>
#include <linux/clockchips.h>
#include <asm/timer.h>
#include <asm/rtc.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/clock.h>

#define TMU_TOCR_INIT	0x00
#define TMU_TCR_INIT	0x0020

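/*
 * Note on TMU_TCR_INIT: it is written verbatim to each channel's TCR.
 * The low three bits select the input clock prescaler, decoded by
 * tmu_clk_init() below as module_clk / (4 << (2 * prescaler)), and
 * bit 5 (1 << 5) is the underflow interrupt enable that
 * _tmu_set_irq() toggles.
 */
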
#define TMU0 (0)
#define TMU1 (1)

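/*
 * Channel register blocks sit at a 0xC byte stride from the TMU0 base,
 * so channel N's TCR/TCNT/TCOR live at TMU0_TCR/TCNT/TCOR + N * 0xC.
 * TSTR holds one start bit per channel.
 */
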
static inline void _tmu_start(int tmu_num)
{
	ctrl_outb(ctrl_inb(TMU_012_TSTR) | (0x1 << tmu_num), TMU_012_TSTR);
}

static inline void _tmu_set_irq(int tmu_num, int enabled)
{
	register unsigned long tmu_tcr = TMU0_TCR + (0xc * tmu_num);

	/* The underflow interrupt enable (UNIE) is TCR bit 5 */
	ctrl_outw(enabled ? ctrl_inw(tmu_tcr) | (1 << 5) :
			    ctrl_inw(tmu_tcr) & ~(1 << 5), tmu_tcr);
}

static inline void _tmu_stop(int tmu_num)
{
	ctrl_outb(ctrl_inb(TMU_012_TSTR) & ~(0x1 << tmu_num), TMU_012_TSTR);
}

static inline void _tmu_clear_status(int tmu_num)
{
	register unsigned long tmu_tcr = TMU0_TCR + (0xc * tmu_num);

	/* Clear the UNF (underflow) status bit */
	ctrl_outw(ctrl_inw(tmu_tcr) & ~0x100, tmu_tcr);
}

static inline unsigned long _tmu_read(int tmu_num)
{
	return ctrl_inl(TMU0_TCNT + 0xC * tmu_num);
}

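/*
 * Division of labour between the two channels: TMU0 is programmed as
 * the clock event device (periodic/oneshot), while TMU1 free-runs from
 * ~0 with auto-reload and is read back as the high-precision timer.
 */
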
static int tmu_timer_start(void)
{
	_tmu_start(TMU0);
	_tmu_start(TMU1);
	_tmu_set_irq(TMU0, 1);
	return 0;
}

static int tmu_timer_stop(void)
{
	_tmu_stop(TMU0);
	_tmu_stop(TMU1);
	_tmu_clear_status(TMU0);
	return 0;
}

/*
 * TMU1 runs from the same module clock, so when module_clk is scaled
 * TMU1's frequency scales with it; tmus_are_scaled records this so
 * that reads can be compensated.
 */
static int tmus_are_scaled;

static cycle_t tmu_timer_read(void)
{
	/* TCNT counts down, so complement it to get an up-counting value */
	return ((cycle_t)(~_tmu_read(TMU1))) << tmus_are_scaled;
}

static unsigned long tmu_latest_interval[3];

static void tmu_timer_set_interval(int tmu_num, unsigned long interval,
				   unsigned int reload)
{
	unsigned long tmu_tcnt = TMU0_TCNT + tmu_num * 0xC;
	unsigned long tmu_tcor = TMU0_TCOR + tmu_num * 0xC;

	_tmu_stop(tmu_num);

	ctrl_outl(interval, tmu_tcnt);
	tmu_latest_interval[tmu_num] = interval;

	/*
	 * TCNT reloads from TCOR on underflow, clear it if we don't
	 * intend to auto-reload
	 */
	ctrl_outl(reload ? interval : 0, tmu_tcor);

	_tmu_start(tmu_num);
}

static int tmu_set_next_event(unsigned long cycles,
			      struct clock_event_device *evt)
{
	tmu_timer_set_interval(TMU0, cycles,
			       evt->mode == CLOCK_EVT_MODE_PERIODIC);
	_tmu_set_irq(TMU0, 1);
	return 0;
}

static void tmu_set_mode(enum clock_event_mode mode,
			 struct clock_event_device *evt)
{
	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		/* Copy the current count to TCOR so it auto-reloads */
		ctrl_outl(ctrl_inl(TMU0_TCNT), TMU0_TCOR);
		break;
	case CLOCK_EVT_MODE_ONESHOT:
		/* No auto-reload; each event is programmed explicitly */
		ctrl_outl(0, TMU0_TCOR);
		break;
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
	case CLOCK_EVT_MODE_RESUME:
		break;
	}
}

static struct clock_event_device tmu0_clockevent = {
	.name		= "tmu0",
	.shift		= 32,
	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.set_mode	= tmu_set_mode,
	.set_next_event	= tmu_set_next_event,
};

static irqreturn_t tmu_timer_interrupt(int irq, void *dummy)
{
	struct clock_event_device *evt = &tmu0_clockevent;

	_tmu_clear_status(TMU0);
	/* In oneshot mode the interrupt stays off until the next event */
	_tmu_set_irq(TMU0, tmu0_clockevent.mode != CLOCK_EVT_MODE_ONESHOT);

	evt->event_handler(evt);

	return IRQ_HANDLED;
}

static struct irqaction tmu0_irq = {
	.name		= "periodic/oneshot timer",
	.handler	= tmu_timer_interrupt,
	.flags		= IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
	.mask		= CPU_MASK_NONE,
};

static void __init tmu_clk_init(struct clk *clk)
{
	u8 divisor = TMU_TCR_INIT & 0x7;
	int tmu_num = clk->name[3] - '0';

	ctrl_outw(TMU_TCR_INIT, TMU0_TCR + (tmu_num * 0xC));
	clk->rate = clk_get_rate(clk->parent) / (4 << (divisor << 1));
}

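/*
 * Worked example of the rate calculation above: with
 * TMU_TCR_INIT = 0x0020 the prescaler field (bits 2:0) is 0, so
 * 4 << (0 << 1) = 4 and each channel counts at module_clk / 4.
 * A prescaler of 1 would give module_clk / 16, and so on in
 * powers of four.
 */
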
static void tmu_clk_recalc(struct clk *clk)
{
	int tmu_num = clk->name[3] - '0';
	unsigned long prev_rate = clk_get_rate(clk);
	unsigned long flags;
	u8 divisor = ctrl_inw(TMU0_TCR + tmu_num * 0xC) & 0x7;

	clk->rate = clk_get_rate(clk->parent) / (4 << (divisor << 1));

	if (prev_rate == clk_get_rate(clk))
		return;

	if (tmu_num)
		return; /* No more work on TMU1 */

	local_irq_save(flags);
	tmus_are_scaled = (prev_rate > clk->rate);

	_tmu_stop(TMU0);

	/* Recompute the clockevent scaling factors for the new rate */
	tmu0_clockevent.mult = div_sc(clk->rate, NSEC_PER_SEC,
				      tmu0_clockevent.shift);
	tmu0_clockevent.max_delta_ns =
			clockevent_delta2ns(-1, &tmu0_clockevent);
	tmu0_clockevent.min_delta_ns =
			clockevent_delta2ns(1, &tmu0_clockevent);

	/* Rescale the pending interval to preserve its wall-clock length */
	if (tmus_are_scaled)
		tmu_latest_interval[TMU0] >>= 1;
	else
		tmu_latest_interval[TMU0] <<= 1;

	tmu_timer_set_interval(TMU0,
			       tmu_latest_interval[TMU0],
			       tmu0_clockevent.mode == CLOCK_EVT_MODE_PERIODIC);

	_tmu_start(TMU0);

	local_irq_restore(flags);
}

static struct clk_ops tmu_clk_ops = {
	.init		= tmu_clk_init,
	.recalc		= tmu_clk_recalc,
};

static struct clk tmu0_clk = {
	.name		= "tmu0_clk",
	.ops		= &tmu_clk_ops,
};

static struct clk tmu1_clk = {
	.name		= "tmu1_clk",
	.ops		= &tmu_clk_ops,
};

static int tmu_timer_init(void)
{
	unsigned long interval;
	unsigned long frequency;

	setup_irq(CONFIG_SH_TIMER_IRQ, &tmu0_irq);

	tmu0_clk.parent = clk_get(NULL, "module_clk");
	tmu1_clk.parent = clk_get(NULL, "module_clk");

	tmu_timer_stop();

#if !defined(CONFIG_CPU_SUBTYPE_SH7720) && \
    !defined(CONFIG_CPU_SUBTYPE_SH7721) && \
    !defined(CONFIG_CPU_SUBTYPE_SH7760) && \
    !defined(CONFIG_CPU_SUBTYPE_SH7785) && \
    !defined(CONFIG_CPU_SUBTYPE_SHX3)
	ctrl_outb(TMU_TOCR_INIT, TMU_TOCR);
#endif

	clk_register(&tmu0_clk);
	clk_register(&tmu1_clk);
	clk_enable(&tmu0_clk);
	clk_enable(&tmu1_clk);

	frequency = clk_get_rate(&tmu0_clk);
	interval = (frequency + HZ / 2) / HZ;

	/* TMU0 fires every jiffy; TMU1 free-runs from ~0 with auto-reload */
	tmu_timer_set_interval(TMU0, interval, 1);
	tmu_timer_set_interval(TMU1, ~0, 1);

	_tmu_start(TMU1);

	sh_hpt_frequency = clk_get_rate(&tmu1_clk);

	tmu0_clockevent.mult = div_sc(frequency, NSEC_PER_SEC,
				      tmu0_clockevent.shift);
	tmu0_clockevent.max_delta_ns =
			clockevent_delta2ns(-1, &tmu0_clockevent);
	tmu0_clockevent.min_delta_ns =
			clockevent_delta2ns(1, &tmu0_clockevent);

	tmu0_clockevent.cpumask = cpumask_of_cpu(0);

	clockevents_register_device(&tmu0_clockevent);

	return 0;
}

static struct sys_timer_ops tmu_timer_ops = {
	.init	= tmu_timer_init,
	.start	= tmu_timer_start,
	.stop	= tmu_timer_stop,
	.read	= tmu_timer_read,
};

struct sys_timer tmu_timer = {
	.name	= "tmu",
	.ops	= &tmu_timer_ops,
};