#include <linux/init.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/atmel_tc.h>


/*
 * We're configured to use a specific TC block, one that's not hooked
 * up to external hardware, to provide a time solution:
 *
 *  - Two channels combine to create a free-running 32 bit counter
 *    with a base rate of 5+ MHz, packaged as a clocksource (with
 *    resolution better than 200 nsec).
 *
 *  - The third channel may be used to provide a 16-bit clockevent
 *    source, used in either periodic or oneshot mode.  This runs
 *    at 32 KiHz, and can handle delays of up to two seconds.
 *
 * A boot clocksource and clockevent source are also currently needed,
 * unless the relevant platforms (ARM/AT91, AVR32/AT32) are changed so
 * this code can be used when init_timers() is called, well before most
 * devices are set up.  (Some low end AT91 parts, which can run uClinux,
 * have only the timers in one TC block ... they currently don't support
 * the tclib code, because of that initialization issue.)
 *
 * REVISIT behavior during system suspend states ... we should disable
 * all clocks and save the power.  That's easily done for clockevent
 * devices, but clocksources won't necessarily get the needed
 * notifications.  For deeper system sleep states, this will be mandatory.
 */

static void __iomem *tcaddr;

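/*
 * The clocksource value is assembled from two chained 16-bit channels:
 * channel 1 holds the upper half, channel 0 the lower half.  The two
 * counter registers can't be read atomically, so re-read the upper half
 * after the lower one and retry if it changed; that way a carry between
 * the two reads can't produce a torn 32-bit value.
 */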
static cycle_t tc_get_cycles(void)
{
        unsigned long flags;
        u32 lower, upper;

        raw_local_irq_save(flags);
        do {
                upper = __raw_readl(tcaddr + ATMEL_TC_REG(1, CV));
                lower = __raw_readl(tcaddr + ATMEL_TC_REG(0, CV));
        } while (upper != __raw_readl(tcaddr + ATMEL_TC_REG(1, CV)));

        raw_local_irq_restore(flags);
        return (upper << 16) | lower;
}

static struct clocksource clksrc = {
        .name           = "tcb_clksrc",
        .rating         = 200,
        .read           = tc_get_cycles,
        .mask           = CLOCKSOURCE_MASK(32),
        .shift          = 18,
        .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
};

#ifdef CONFIG_GENERIC_CLOCKEVENTS

struct tc_clkevt_device {
        struct clock_event_device       clkevt;
        struct clk                      *clk;
        void __iomem                    *regs;
};

static struct tc_clkevt_device *to_tc_clkevt(struct clock_event_device *clkevt)
{
        return container_of(clkevt, struct tc_clkevt_device, clkevt);
}

/* For now, we always use the 32K clock ... this optimizes for NO_HZ,
 * because using one of the divided clocks would usually mean the
 * tick rate can never be less than several dozen Hz (vs 0.5 Hz).
 *
 * A divided clock could be good for high resolution timers, since
 * 30.5 usec resolution can seem "low".
 */
static u32 timer_clock;

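/*
 * timer_clock holds the CMR clock-selection value for the 32 KiHz slow
 * clock; it's filled in by setup_clkevents().  On a mode change the
 * channel is first shut down (interrupts masked, channel clock disabled)
 * if it was running, and then CMR and RC are reprogrammed for the new
 * mode.
 */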
static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
{
        struct tc_clkevt_device *tcd = to_tc_clkevt(d);
        void __iomem *regs = tcd->regs;

        if (tcd->clkevt.mode == CLOCK_EVT_MODE_PERIODIC
                        || tcd->clkevt.mode == CLOCK_EVT_MODE_ONESHOT) {
                __raw_writel(0xff, regs + ATMEL_TC_REG(2, IDR));
                __raw_writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
                clk_disable(tcd->clk);
        }

        switch (m) {

        /* By not making the gentime core emulate periodic mode on top
         * of oneshot, we get lower overhead and improved accuracy.
         */
        case CLOCK_EVT_MODE_PERIODIC:
                clk_enable(tcd->clk);

                /* slow clock, count up to RC, then irq and restart */
                __raw_writel(timer_clock
                                | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
                                regs + ATMEL_TC_REG(2, CMR));
                __raw_writel((32768 + HZ/2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));

                /* Enable clock and interrupts on RC compare */
                __raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));

                /* go go gadget! */
                __raw_writel(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG,
                                regs + ATMEL_TC_REG(2, CCR));
                break;

        case CLOCK_EVT_MODE_ONESHOT:
                clk_enable(tcd->clk);

                /* slow clock, count up to RC, then irq and stop */
                __raw_writel(timer_clock | ATMEL_TC_CPCSTOP
                                | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
                                regs + ATMEL_TC_REG(2, CMR));
                __raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));

                /* set_next_event() configures and starts the timer */
                break;

        default:
                break;
        }
}

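/*
 * Oneshot programming: load RC with the requested tick count, then issue
 * a software trigger with the clock enabled so the counter restarts from
 * zero.  The ATMEL_TC_CPCSTOP bit set in the oneshot CMR stops the
 * channel clock again once the RC compare fires.
 */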
static int tc_next_event(unsigned long delta, struct clock_event_device *d)
{
        __raw_writel(delta, tcaddr + ATMEL_TC_REG(2, RC));

        /* go go gadget! */
        __raw_writel(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG,
                        tcaddr + ATMEL_TC_REG(2, CCR));
        return 0;
}

static struct tc_clkevt_device clkevt = {
        .clkevt = {
                .name           = "tc_clkevt",
                .features       = CLOCK_EVT_FEAT_PERIODIC
                                        | CLOCK_EVT_FEAT_ONESHOT,
                .shift          = 32,
                /* Should be lower than at91rm9200's system timer */
                .rating         = 125,
                .set_next_event = tc_next_event,
                .set_mode       = tc_mode,
        },
};

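/*
 * Channel 2 interrupt handler.  Reading the status register clears the
 * pending flags; the clockevent handler runs only on an RC compare, and
 * anything else is passed back to the core as IRQ_NONE.
 */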
static irqreturn_t ch2_irq(int irq, void *handle)
{
        struct tc_clkevt_device *dev = handle;
        unsigned int sr;

        sr = __raw_readl(dev->regs + ATMEL_TC_REG(2, SR));
        if (sr & ATMEL_TC_CPCS) {
                dev->clkevt.event_handler(&dev->clkevt);
                return IRQ_HANDLED;
        }

        return IRQ_NONE;
}

static struct irqaction tc_irqaction = {
        .name           = "tc_clkevt",
        .flags          = IRQF_TIMER | IRQF_DISABLED,
        .handler        = ch2_irq,
};

static void __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
{
        struct clk *t2_clk = tc->clk[2];
        int irq = tc->irq[2];

        clkevt.regs = tc->regs;
        clkevt.clk = t2_clk;
        tc_irqaction.dev_id = &clkevt;

        timer_clock = clk32k_divisor_idx;

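        /*
         * The clockevent layer converts a nanosecond delta into device
         * ticks as (delta_ns * mult) >> shift, so mult holds the tick
         * rate in ticks per nanosecond scaled up by 2^shift.  With a
         * 16-bit RC register the longest programmable delay is 0xffff
         * ticks, just under two seconds at 32768 Hz.
         */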
        clkevt.clkevt.mult = div_sc(32768, NSEC_PER_SEC, clkevt.clkevt.shift);
        clkevt.clkevt.max_delta_ns
                = clockevent_delta2ns(0xffff, &clkevt.clkevt);
        clkevt.clkevt.min_delta_ns = clockevent_delta2ns(1, &clkevt.clkevt) + 1;
        clkevt.clkevt.cpumask = cpumask_of(0);

        setup_irq(irq, &tc_irqaction);

        clockevents_register_device(&clkevt.clkevt);
}

#else /* !CONFIG_GENERIC_CLOCKEVENTS */

static void __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
{
        /* NOTHING */
}

#endif

static int __init tcb_clksrc_init(void)
{
        static char bootinfo[] __initdata
                = KERN_DEBUG "%s: tc%d at %d.%03d MHz\n";

        struct platform_device *pdev;
        struct atmel_tc *tc;
        struct clk *t0_clk;
        u32 rate, divided_rate = 0;
        int best_divisor_idx = -1;
        int clk32k_divisor_idx = -1;
        int i;

        tc = atmel_tc_alloc(CONFIG_ATMEL_TCB_CLKSRC_BLOCK, clksrc.name);
        if (!tc) {
                pr_debug("can't alloc TC for clocksource\n");
                return -ENODEV;
        }
        tcaddr = tc->regs;
        pdev = tc->pdev;

        t0_clk = tc->clk[0];
        clk_enable(t0_clk);

        /* How fast will we be counting?  Pick something over 5 MHz. */
        rate = (u32) clk_get_rate(t0_clk);
        for (i = 0; i < 5; i++) {
                unsigned divisor = atmel_tc_divisors[i];
                unsigned tmp;

                /* remember 32 KiHz clock for later */
                if (!divisor) {
                        clk32k_divisor_idx = i;
                        continue;
                }

                tmp = rate / divisor;
                pr_debug("TC: %u / %-3u [%d] --> %u\n", rate, divisor, i, tmp);
                if (best_divisor_idx > 0) {
                        if (tmp < 5 * 1000 * 1000)
                                continue;
                }
                divided_rate = tmp;
                best_divisor_idx = i;
        }

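        /*
         * The clocksource core converts counter cycles to nanoseconds as
         * (cycles * mult) >> shift; clocksource_hz2mult() derives mult so
         * that holds for the divided rate chosen above (at 5+ MHz each
         * cycle is under 200 ns).
         */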
        clksrc.mult = clocksource_hz2mult(divided_rate, clksrc.shift);

        printk(bootinfo, clksrc.name, CONFIG_ATMEL_TCB_CLKSRC_BLOCK,
                        divided_rate / 1000000,
                        ((divided_rate + 500000) % 1000000) / 1000);

        /* tclib will give us three clocks no matter what the
         * underlying platform supports.
         */
        clk_enable(tc->clk[1]);

        /* channel 0:  waveform mode, input mclk/8, clock TIOA0 on overflow */
        __raw_writel(best_divisor_idx                   /* likely divide-by-8 */
                        | ATMEL_TC_WAVE
                        | ATMEL_TC_WAVESEL_UP           /* free-run */
                        | ATMEL_TC_ACPA_SET             /* TIOA0 rises at 0 */
                        | ATMEL_TC_ACPC_CLEAR,          /* (duty cycle 50%) */
                        tcaddr + ATMEL_TC_REG(0, CMR));
        __raw_writel(0x0000, tcaddr + ATMEL_TC_REG(0, RA));
        __raw_writel(0x8000, tcaddr + ATMEL_TC_REG(0, RC));
        __raw_writel(0xff, tcaddr + ATMEL_TC_REG(0, IDR));      /* no irqs */
        __raw_writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(0, CCR));

        /* channel 1:  waveform mode, input TIOA0 */
        __raw_writel(ATMEL_TC_XC1                       /* input: TIOA0 */
                        | ATMEL_TC_WAVE
                        | ATMEL_TC_WAVESEL_UP,          /* free-run */
                        tcaddr + ATMEL_TC_REG(1, CMR));
        __raw_writel(0xff, tcaddr + ATMEL_TC_REG(1, IDR));      /* no irqs */
        __raw_writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(1, CCR));

        /* chain channel 0 to channel 1, then reset all the timers */
        __raw_writel(ATMEL_TC_TC1XC1S_TIOA0, tcaddr + ATMEL_TC_BMR);
        __raw_writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
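        /*
         * Channel 0 now free-runs, driving TIOA0 high at count 0 and low
         * at 0x8000, i.e. one TIOA0 cycle per 16-bit wrap.  Channel 1 is
         * clocked by those TIOA0 edges through XC1, so it counts wraps
         * and effectively holds the upper 16 bits read by
         * tc_get_cycles().  The SYNC trigger starts both channels
         * together so the two halves stay in step.
         */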

        /* and away we go! */
        clocksource_register(&clksrc);

        /* channel 2:  periodic and oneshot timer support */
        setup_clkevents(tc, clk32k_divisor_idx);

        return 0;
}
arch_initcall(tcb_clksrc_init);