/*
 * SuperH Timer Support - MTU2
 *
 * Copyright (C) 2009 Magnus Damm
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/err.h>
#include <linux/clockchips.h>
#include <linux/sh_timer.h>

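/* per-channel driver state; one instance exists per MTU2 channel device */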
struct sh_mtu2_priv {
	void __iomem *mapbase;
	struct clk *clk;
	struct irqaction irqaction;
	struct platform_device *pdev;
	unsigned long rate;
	unsigned long periodic;
	struct clock_event_device ced;
};

static DEFINE_SPINLOCK(sh_mtu2_lock);

#define TSTR -1 /* shared register */
#define TCR  0 /* channel register */
#define TMDR 1 /* channel register */
#define TIOR 2 /* channel register */
#define TIER 3 /* channel register */
#define TSR  4 /* channel register */
#define TCNT 5 /* channel register */
#define TGR  6 /* channel register */

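/*
 * Per-channel register offsets, in bytes from the channel base address.
 * TCNT and TGR are 16-bit registers and occupy two bytes each; the
 * remaining registers are 8-bit wide.
 */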
static unsigned long mtu2_reg_offs[] = {
	[TCR] = 0,
	[TMDR] = 1,
	[TIOR] = 2,
	[TIER] = 4,
	[TSR] = 5,
	[TCNT] = 6,
	[TGR] = 8,
};

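/*
 * Read a timer register. TSTR is shared between channels and sits at a
 * per-channel offset supplied by the platform data; everything else is
 * indexed through mtu2_reg_offs[]. TCNT and TGR need 16-bit accesses.
 */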
static inline unsigned long sh_mtu2_read(struct sh_mtu2_priv *p, int reg_nr)
{
	struct sh_timer_config *cfg = p->pdev->dev.platform_data;
	void __iomem *base = p->mapbase;
	unsigned long offs;

	if (reg_nr == TSTR)
		return ioread8(base + cfg->channel_offset);

	offs = mtu2_reg_offs[reg_nr];

	if ((reg_nr == TCNT) || (reg_nr == TGR))
		return ioread16(base + offs);
	else
		return ioread8(base + offs);
}

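/* write a timer register, mirroring the access rules of sh_mtu2_read() */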
static inline void sh_mtu2_write(struct sh_mtu2_priv *p, int reg_nr,
				 unsigned long value)
{
	struct sh_timer_config *cfg = p->pdev->dev.platform_data;
	void __iomem *base = p->mapbase;
	unsigned long offs;

	if (reg_nr == TSTR) {
		iowrite8(value, base + cfg->channel_offset);
		return;
	}

	offs = mtu2_reg_offs[reg_nr];

	if ((reg_nr == TCNT) || (reg_nr == TGR))
		iowrite16(value, base + offs);
	else
		iowrite8(value, base + offs);
}

static void sh_mtu2_start_stop_ch(struct sh_mtu2_priv *p, int start)
{
	struct sh_timer_config *cfg = p->pdev->dev.platform_data;
	unsigned long flags, value;

	/* start stop register shared by multiple timer channels */
	spin_lock_irqsave(&sh_mtu2_lock, flags);
	value = sh_mtu2_read(p, TSTR);

	if (start)
		value |= 1 << cfg->timer_bit;
	else
		value &= ~(1 << cfg->timer_bit);

	sh_mtu2_write(p, TSTR, value);
	spin_unlock_irqrestore(&sh_mtu2_lock, flags);
}

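/*
 * Program the channel for periodic operation: count the peripheral clock
 * divided by 64, clear the counter on TGRA compare match and let the
 * match interrupt expire HZ times per second.
 */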
static int sh_mtu2_enable(struct sh_mtu2_priv *p)
{
	struct sh_timer_config *cfg = p->pdev->dev.platform_data;
	int ret;

	/* enable clock */
	ret = clk_enable(p->clk);
	if (ret) {
		pr_err("sh_mtu2: cannot enable clock \"%s\"\n", cfg->clk);
		return ret;
	}

	/* make sure channel is disabled */
	sh_mtu2_start_stop_ch(p, 0);

	p->rate = clk_get_rate(p->clk) / 64;
	p->periodic = (p->rate + HZ/2) / HZ;

	/* "Periodic Counter Operation" */
	sh_mtu2_write(p, TCR, 0x23); /* TGRA clear, divide clock by 64 */
	sh_mtu2_write(p, TIOR, 0);
	sh_mtu2_write(p, TGR, p->periodic);
	sh_mtu2_write(p, TCNT, 0);
	sh_mtu2_write(p, TMDR, 0);
	sh_mtu2_write(p, TIER, 0x01);

	/* enable channel */
	sh_mtu2_start_stop_ch(p, 1);

	return 0;
}

static void sh_mtu2_disable(struct sh_mtu2_priv *p)
{
	/* disable channel */
	sh_mtu2_start_stop_ch(p, 0);

	/* stop clock */
	clk_disable(p->clk);
}

static irqreturn_t sh_mtu2_interrupt(int irq, void *dev_id)
{
	struct sh_mtu2_priv *p = dev_id;

	/* acknowledge interrupt: read TSR, then clear the TGRA match flag
	 * in bit 0 by writing it back as zero */
	sh_mtu2_read(p, TSR);
	sh_mtu2_write(p, TSR, 0xfe);

	/* notify clockevent layer */
	p->ced.event_handler(&p->ced);
	return IRQ_HANDLED;
}

static struct sh_mtu2_priv *ced_to_sh_mtu2(struct clock_event_device *ced)
{
	return container_of(ced, struct sh_mtu2_priv, ced);
}

static void sh_mtu2_clock_event_mode(enum clock_event_mode mode,
				     struct clock_event_device *ced)
{
	struct sh_mtu2_priv *p = ced_to_sh_mtu2(ced);
	int disabled = 0;

	/* deal with old setting first */
	switch (ced->mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		sh_mtu2_disable(p);
		disabled = 1;
		break;
	default:
		break;
	}

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		pr_info("sh_mtu2: %s used for periodic clock events\n",
			ced->name);
		sh_mtu2_enable(p);
		break;
	case CLOCK_EVT_MODE_UNUSED:
		if (!disabled)
			sh_mtu2_disable(p);
		break;
	case CLOCK_EVT_MODE_SHUTDOWN:
	default:
		break;
	}
}

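/*
 * Register the channel as a periodic-only clockevent device. The interrupt
 * is requested with setup_irq() because this code may run from the early
 * timer path, before request_irq() is available.
 */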
static void sh_mtu2_register_clockevent(struct sh_mtu2_priv *p,
					char *name, unsigned long rating)
{
	struct clock_event_device *ced = &p->ced;
	int ret;

	memset(ced, 0, sizeof(*ced));

	ced->name = name;
	ced->features = CLOCK_EVT_FEAT_PERIODIC;
	ced->rating = rating;
	ced->cpumask = cpumask_of(0);
	ced->set_mode = sh_mtu2_clock_event_mode;

	pr_info("sh_mtu2: %s used for clock events\n", ced->name);
	clockevents_register_device(ced);

	ret = setup_irq(p->irqaction.irq, &p->irqaction);
	if (ret) {
		pr_err("sh_mtu2: failed to request irq %d\n",
		       p->irqaction.irq);
		return;
	}
}

static int sh_mtu2_register(struct sh_mtu2_priv *p, char *name,
			    unsigned long clockevent_rating)
{
	if (clockevent_rating)
		sh_mtu2_register_clockevent(p, name, clockevent_rating);

	return 0;
}

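/*
 * One-time channel setup: map the channel's I/O memory, prepare the
 * irqaction, look up the clock named in the platform data and finally
 * register the clockevent.
 */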
static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev)
{
	struct sh_timer_config *cfg = pdev->dev.platform_data;
	struct resource *res;
	int irq, ret;
	ret = -ENXIO;

	memset(p, 0, sizeof(*p));
	p->pdev = pdev;

	if (!cfg) {
		dev_err(&p->pdev->dev, "missing platform data\n");
		goto err0;
	}

	platform_set_drvdata(pdev, p);

	res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&p->pdev->dev, "failed to get I/O memory\n");
		goto err0;
	}

	irq = platform_get_irq(p->pdev, 0);
	if (irq < 0) {
		dev_err(&p->pdev->dev, "failed to get irq\n");
		goto err0;
	}

	/* map memory, let mapbase point to our channel */
	p->mapbase = ioremap_nocache(res->start, resource_size(res));
	if (p->mapbase == NULL) {
		pr_err("sh_mtu2: failed to remap I/O memory\n");
		goto err0;
	}

	/* setup data for setup_irq() (too early for request_irq()) */
	p->irqaction.name = cfg->name;
	p->irqaction.handler = sh_mtu2_interrupt;
	p->irqaction.dev_id = p;
	p->irqaction.irq = irq;
	p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL;

	/* get hold of clock */
	p->clk = clk_get(&p->pdev->dev, cfg->clk);
	if (IS_ERR(p->clk)) {
		pr_err("sh_mtu2: cannot get clock \"%s\"\n", cfg->clk);
		ret = PTR_ERR(p->clk);
		goto err1;
	}

	return sh_mtu2_register(p, cfg->name, cfg->clockevent_rating);
 err1:
	iounmap(p->mapbase);
 err0:
	return ret;
}

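/*
 * The probe routine runs twice when the channel is used as an early timer:
 * once from the early platform code and again during regular driver
 * registration, where the already-initialized instance is simply kept.
 */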
static int __devinit sh_mtu2_probe(struct platform_device *pdev)
{
	struct sh_mtu2_priv *p = platform_get_drvdata(pdev);
	struct sh_timer_config *cfg = pdev->dev.platform_data;
	int ret;

	if (p) {
		pr_info("sh_mtu2: %s kept as earlytimer\n", cfg->name);
		return 0;
	}

	p = kmalloc(sizeof(*p), GFP_KERNEL);
	if (p == NULL) {
		dev_err(&pdev->dev, "failed to allocate driver data\n");
		return -ENOMEM;
	}

	ret = sh_mtu2_setup(p, pdev);
	if (ret) {
		kfree(p);
		platform_set_drvdata(pdev, NULL);
	}
	return ret;
}

static int __devexit sh_mtu2_remove(struct platform_device *pdev)
{
	return -EBUSY; /* cannot unregister clockevent */
}

static struct platform_driver sh_mtu2_device_driver = {
	.probe		= sh_mtu2_probe,
	.remove		= __devexit_p(sh_mtu2_remove),
	.driver		= {
		.name	= "sh_mtu2",
	}
};

static int __init sh_mtu2_init(void)
{
	return platform_driver_register(&sh_mtu2_device_driver);
}

static void __exit sh_mtu2_exit(void)
{
	platform_driver_unregister(&sh_mtu2_device_driver);
}

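/* register both as an early platform driver and as a regular module */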
early_platform_init("earlytimer", &sh_mtu2_device_driver);
module_init(sh_mtu2_init);
module_exit(sh_mtu2_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH MTU2 Timer Driver");
MODULE_LICENSE("GPL v2");