/*
 * Intel & MS High Precision Event Timer Implementation.
 *
 * Copyright (C) 2003 Intel Corporation
 *      Venki Pallipadi
 * (c) Copyright 2004 Hewlett-Packard Development Company, L.P.
 *      Bob Picco <robert.picco@hp.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/config.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/miscdevice.h>
#include <linux/major.h>
#include <linux/ioport.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/spinlock.h>
#include <linux/sysctl.h>
#include <linux/wait.h>
#include <linux/bcd.h>
#include <linux/seq_file.h>
#include <linux/bitops.h>

#include <asm/current.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/div64.h>

#include <linux/acpi.h>
#include <acpi/acpi_bus.h>
#include <linux/hpet.h>

/*
 * The High Precision Event Timer driver.
 * This driver is closely modelled after the rtc.c driver.
 * http://www.intel.com/labs/platcomp/hpet/hpetspec.htm
 */
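/*
 * HPET_USER_FREQ is the default ceiling, in Hz, on the interrupt frequency
 * an unprivileged user may request (tunable via
 * /proc/sys/dev/hpet/max-user-freq below); HPET_DRIFT is the drift, in
 * parts per million, advertised when registering the time interpolator.
 */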
#define HPET_USER_FREQ  (64)
#define HPET_DRIFT      (500)

static u32 hpet_ntimer, hpet_nhpet, hpet_max_freq = HPET_USER_FREQ;

/* Protects per-timer state shared between application (file) operations and the ISR. */
static DEFINE_SPINLOCK(hpet_lock);
/* Protects hpet_task registration and callback use between kernel clients and the ISR. */
static DEFINE_SPINLOCK(hpet_task_lock);

#define HPET_DEV_NAME   (7)

struct hpet_dev {
        struct hpets *hd_hpets;
        struct hpet __iomem *hd_hpet;
        struct hpet_timer __iomem *hd_timer;
        unsigned long hd_ireqfreq;
        unsigned long hd_irqdata;
        wait_queue_head_t hd_waitqueue;
        struct fasync_struct *hd_async_queue;
        struct hpet_task *hd_task;
        unsigned int hd_flags;
        unsigned int hd_irq;
        unsigned int hd_hdwirq;
        char hd_name[HPET_DEV_NAME];
};

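/*
 * Per-HPET-block state.  hp_dev is declared with one element but is
 * allocated with one entry per comparator (hp_ntimer entries); see
 * hpet_alloc() for the sizing.
 */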
struct hpets {
        struct hpets *hp_next;
        struct hpet __iomem *hp_hpet;
        unsigned long hp_hpet_phys;
        struct time_interpolator *hp_interpolator;
        unsigned long hp_period;
        unsigned long hp_delta;
        unsigned int hp_ntimer;
        unsigned int hp_which;
        struct hpet_dev hp_dev[1];
};

static struct hpets *hpets;

#define HPET_OPEN               0x0001
#define HPET_IE                 0x0002  /* interrupt enabled */
#define HPET_PERIODIC           0x0004

#if BITS_PER_LONG == 64
#define write_counter(V, MC)    writeq(V, MC)
#define read_counter(MC)        readq(MC)
#else
#define write_counter(V, MC)    writel(V, MC)
#define read_counter(MC)        readl(MC)
#endif

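/*
 * Fallbacks for architectures without native 64-bit MMIO accessors: the
 * 64-bit value is composed from, or split into, two 32-bit accesses.  Note
 * that such a composed read of the main counter is not atomic.
 */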
#ifndef readq
static unsigned long long __inline readq(void __iomem *addr)
{
        return readl(addr) | (((unsigned long long)readl(addr + 4)) << 32LL);
}
#endif

#ifndef writeq
static void __inline writeq(unsigned long long v, void __iomem *addr)
{
        writel(v & 0xffffffff, addr);
        writel(v >> 32, addr + 4);
}
#endif

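/*
 * Per-timer interrupt handler: counts the interrupt for read()/poll(),
 * re-arms the comparator of non-periodic timers, acknowledges the timer's
 * bit in the general interrupt status register, and runs any registered
 * kernel hpet_task callback.
 */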
static irqreturn_t hpet_interrupt(int irq, void *data, struct pt_regs *regs)
{
        struct hpet_dev *devp;
        unsigned long isr;

        devp = data;

        spin_lock(&hpet_lock);
        devp->hd_irqdata++;

        /*
         * For non-periodic timers, increment the accumulator.
         * This has the effect of treating non-periodic like periodic.
         */
        if ((devp->hd_flags & (HPET_IE | HPET_PERIODIC)) == HPET_IE) {
                unsigned long m, t;

                t = devp->hd_ireqfreq;
                m = read_counter(&devp->hd_hpet->hpet_mc);
                write_counter(t + m + devp->hd_hpets->hp_delta,
                              &devp->hd_timer->hpet_compare);
        }

        isr = (1 << (devp - devp->hd_hpets->hp_dev));
        writeq(isr, &devp->hd_hpet->hpet_isr);
        spin_unlock(&hpet_lock);

        spin_lock(&hpet_task_lock);
        if (devp->hd_task)
                devp->hd_task->ht_func(devp->hd_task->ht_data);
        spin_unlock(&hpet_task_lock);

        wake_up_interruptible(&devp->hd_waitqueue);

        kill_fasync(&devp->hd_async_queue, SIGIO, POLL_IN);

        return IRQ_HANDLED;
}

static int hpet_open(struct inode *inode, struct file *file)
{
        struct hpet_dev *devp;
        struct hpets *hpetp;
        int i;

        if (file->f_mode & FMODE_WRITE)
                return -EINVAL;

        spin_lock_irq(&hpet_lock);

        for (devp = NULL, hpetp = hpets; hpetp && !devp; hpetp = hpetp->hp_next)
                for (i = 0; i < hpetp->hp_ntimer; i++)
                        if (hpetp->hp_dev[i].hd_flags & HPET_OPEN
                            || hpetp->hp_dev[i].hd_task)
                                continue;
                        else {
                                devp = &hpetp->hp_dev[i];
                                break;
                        }

        if (!devp) {
                spin_unlock_irq(&hpet_lock);
                return -EBUSY;
        }

        file->private_data = devp;
        devp->hd_irqdata = 0;
        devp->hd_flags |= HPET_OPEN;
        spin_unlock_irq(&hpet_lock);

        return 0;
}

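/*
 * read() returns, as an unsigned long, the number of interrupts received
 * since the previous read, blocking until at least one has arrived unless
 * O_NONBLOCK is set.  A frequency must have been set with HPET_IRQFREQ
 * first, otherwise -EIO is returned.
 */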
static ssize_t
hpet_read(struct file *file, char __user *buf, size_t count, loff_t * ppos)
{
        DECLARE_WAITQUEUE(wait, current);
        unsigned long data;
        ssize_t retval;
        struct hpet_dev *devp;

        devp = file->private_data;
        if (!devp->hd_ireqfreq)
                return -EIO;

        if (count < sizeof(unsigned long))
                return -EINVAL;

        add_wait_queue(&devp->hd_waitqueue, &wait);

        for ( ; ; ) {
                set_current_state(TASK_INTERRUPTIBLE);

                spin_lock_irq(&hpet_lock);
                data = devp->hd_irqdata;
                devp->hd_irqdata = 0;
                spin_unlock_irq(&hpet_lock);

                if (data)
                        break;
                else if (file->f_flags & O_NONBLOCK) {
                        retval = -EAGAIN;
                        goto out;
                } else if (signal_pending(current)) {
                        retval = -ERESTARTSYS;
                        goto out;
                }
                schedule();
        }

        retval = put_user(data, (unsigned long __user *)buf);
        if (!retval)
                retval = sizeof(unsigned long);
out:
        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&devp->hd_waitqueue, &wait);

        return retval;
}

static unsigned int hpet_poll(struct file *file, poll_table * wait)
{
        unsigned long v;
        struct hpet_dev *devp;

        devp = file->private_data;

        if (!devp->hd_ireqfreq)
                return 0;

        poll_wait(file, &devp->hd_waitqueue, wait);

        spin_lock_irq(&hpet_lock);
        v = devp->hd_irqdata;
        spin_unlock_irq(&hpet_lock);

        if (v != 0)
                return POLLIN | POLLRDNORM;

        return 0;
}

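/*
 * When CONFIG_HPET_MMAP is enabled, the page holding the HPET register
 * block may be mapped into user space; the mapping must be exactly one
 * page at offset zero, and the registers must be page aligned.
 */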
static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
{
#ifdef CONFIG_HPET_MMAP
        struct hpet_dev *devp;
        unsigned long addr;

        if (((vma->vm_end - vma->vm_start) != PAGE_SIZE) || vma->vm_pgoff)
                return -EINVAL;

        devp = file->private_data;
        addr = devp->hd_hpets->hp_hpet_phys;

        if (addr & (PAGE_SIZE - 1))
                return -ENOSYS;

        vma->vm_flags |= VM_IO;
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        addr = __pa(addr);

        if (io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT,
                               PAGE_SIZE, vma->vm_page_prot)) {
                printk(KERN_ERR "remap_pfn_range failed in hpet.c\n");
                return -EAGAIN;
        }

        return 0;
#else
        return -ENOSYS;
#endif
}

static int hpet_fasync(int fd, struct file *file, int on)
{
        struct hpet_dev *devp;

        devp = file->private_data;

        if (fasync_helper(fd, file, on, &devp->hd_async_queue) >= 0)
                return 0;
        else
                return -EIO;
}

static int hpet_release(struct inode *inode, struct file *file)
{
        struct hpet_dev *devp;
        struct hpet_timer __iomem *timer;
        int irq = 0;

        devp = file->private_data;
        timer = devp->hd_timer;

        spin_lock_irq(&hpet_lock);

        writeq((readq(&timer->hpet_config) & ~Tn_INT_ENB_CNF_MASK),
               &timer->hpet_config);

        irq = devp->hd_irq;
        devp->hd_irq = 0;

        devp->hd_ireqfreq = 0;

        if (devp->hd_flags & HPET_PERIODIC
            && readq(&timer->hpet_config) & Tn_TYPE_CNF_MASK) {
                unsigned long v;

                v = readq(&timer->hpet_config);
                v ^= Tn_TYPE_CNF_MASK;
                writeq(v, &timer->hpet_config);
        }

        devp->hd_flags &= ~(HPET_OPEN | HPET_IE | HPET_PERIODIC);
        spin_unlock_irq(&hpet_lock);

        if (irq)
                free_irq(irq, devp);

        if (file->f_flags & FASYNC)
                hpet_fasync(-1, file, 0);

        file->private_data = NULL;
        return 0;
}

static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int);

static int
hpet_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
           unsigned long arg)
{
        struct hpet_dev *devp;

        devp = file->private_data;
        return hpet_ioctl_common(devp, cmd, arg, 0);
}

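/*
 * HPET_IE_ON: claim the timer's hardware interrupt, program the comparator
 * (periodic or one-shot, padded with hp_delta so the first compare value is
 * not already in the past), clear the timer's status bit and enable its
 * interrupt.  Fails with -EBUSY if interrupts are already enabled on this
 * timer, or -EIO if the IRQ cannot be obtained.
 */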
static int hpet_ioctl_ieon(struct hpet_dev *devp)
{
        struct hpet_timer __iomem *timer;
        struct hpet __iomem *hpet;
        struct hpets *hpetp;
        int irq;
        unsigned long g, v, t, m;
        unsigned long flags, isr;

        timer = devp->hd_timer;
        hpet = devp->hd_hpet;
        hpetp = devp->hd_hpets;

        v = readq(&timer->hpet_config);
        spin_lock_irq(&hpet_lock);

        if (devp->hd_flags & HPET_IE) {
                spin_unlock_irq(&hpet_lock);
                return -EBUSY;
        }

        devp->hd_flags |= HPET_IE;
        spin_unlock_irq(&hpet_lock);

        t = readq(&timer->hpet_config);
        irq = devp->hd_hdwirq;

        if (irq) {
                sprintf(devp->hd_name, "hpet%d", (int)(devp - hpetp->hp_dev));

                if (request_irq
                    (irq, hpet_interrupt, SA_INTERRUPT, devp->hd_name, (void *)devp)) {
                        printk(KERN_ERR "hpet: IRQ %d is not free\n", irq);
                        irq = 0;
                }
        }

        if (irq == 0) {
                spin_lock_irq(&hpet_lock);
                devp->hd_flags ^= HPET_IE;
                spin_unlock_irq(&hpet_lock);
                return -EIO;
        }

        devp->hd_irq = irq;
        t = devp->hd_ireqfreq;
        v = readq(&timer->hpet_config);
        g = v | Tn_INT_ENB_CNF_MASK;

        if (devp->hd_flags & HPET_PERIODIC) {
                write_counter(t, &timer->hpet_compare);
                g |= Tn_TYPE_CNF_MASK;
                v |= Tn_TYPE_CNF_MASK;
                writeq(v, &timer->hpet_config);
                v |= Tn_VAL_SET_CNF_MASK;
                writeq(v, &timer->hpet_config);
                local_irq_save(flags);
                m = read_counter(&hpet->hpet_mc);
                write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare);
        } else {
                local_irq_save(flags);
                m = read_counter(&hpet->hpet_mc);
                write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare);
        }

        isr = (1 << (devp - hpets->hp_dev));
        writeq(isr, &hpet->hpet_isr);
        writeq(g, &timer->hpet_config);
        local_irq_restore(flags);

        return 0;
}

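/*
 * hpet_time_div(x) returns 10^15 / x.  With x = hp_period (femtoseconds per
 * main-counter tick) times a frequency in Hz, the result is main-counter
 * ticks per interrupt period; with x = hp_period times a tick count, the
 * result is a frequency in Hz.  For example, a 100 ns period (10^8 fs)
 * gives 10^15 / 10^8 = 10 MHz.
 */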
static inline unsigned long hpet_time_div(unsigned long dis)
{
        unsigned long long m = 1000000000000000ULL;

        do_div(m, dis);

        return (unsigned long)m;
}

static int
hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
{
        struct hpet_timer __iomem *timer;
        struct hpet __iomem *hpet;
        struct hpets *hpetp;
        int err;
        unsigned long v;

        switch (cmd) {
        case HPET_IE_OFF:
        case HPET_INFO:
        case HPET_EPI:
        case HPET_DPI:
        case HPET_IRQFREQ:
                timer = devp->hd_timer;
                hpet = devp->hd_hpet;
                hpetp = devp->hd_hpets;
                break;
        case HPET_IE_ON:
                return hpet_ioctl_ieon(devp);
        default:
                return -EINVAL;
        }

        err = 0;

        switch (cmd) {
        case HPET_IE_OFF:
                if ((devp->hd_flags & HPET_IE) == 0)
                        break;
                v = readq(&timer->hpet_config);
                v &= ~Tn_INT_ENB_CNF_MASK;
                writeq(v, &timer->hpet_config);
                if (devp->hd_irq) {
                        free_irq(devp->hd_irq, devp);
                        devp->hd_irq = 0;
                }
                devp->hd_flags ^= HPET_IE;
                break;
        case HPET_INFO:
                {
                        struct hpet_info info;

                        info.hi_ireqfreq = hpet_time_div(hpetp->hp_period *
                                                         devp->hd_ireqfreq);
                        info.hi_flags =
                            readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
                        info.hi_hpet = devp->hd_hpets->hp_which;
                        info.hi_timer = devp - devp->hd_hpets->hp_dev;
                        if (copy_to_user((void __user *)arg, &info, sizeof(info)))
                                err = -EFAULT;
                        break;
                }
        case HPET_EPI:
                v = readq(&timer->hpet_config);
                if ((v & Tn_PER_INT_CAP_MASK) == 0) {
                        err = -ENXIO;
                        break;
                }
                devp->hd_flags |= HPET_PERIODIC;
                break;
        case HPET_DPI:
                v = readq(&timer->hpet_config);
                if ((v & Tn_PER_INT_CAP_MASK) == 0) {
                        err = -ENXIO;
                        break;
                }
                if (devp->hd_flags & HPET_PERIODIC &&
                    readq(&timer->hpet_config) & Tn_TYPE_CNF_MASK) {
                        v = readq(&timer->hpet_config);
                        v ^= Tn_TYPE_CNF_MASK;
                        writeq(v, &timer->hpet_config);
                }
                devp->hd_flags &= ~HPET_PERIODIC;
                break;
        case HPET_IRQFREQ:
                if (!kernel && (arg > hpet_max_freq) &&
                    !capable(CAP_SYS_RESOURCE)) {
                        err = -EACCES;
                        break;
                }

                if (arg & (arg - 1)) {
                        err = -EINVAL;
                        break;
                }

                devp->hd_ireqfreq = hpet_time_div(hpetp->hp_period * arg);
        }

        return err;
}

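/*
 * Character device entry points.  A minimal user-space sequence (a sketch;
 * it assumes the misc device node appears as /dev/hpet) looks roughly like:
 *
 *      fd = open("/dev/hpet", O_RDONLY);
 *      ioctl(fd, HPET_IRQFREQ, freq);     freq: power of two, <= max-user-freq
 *      ioctl(fd, HPET_IE_ON, 0);
 *      read(fd, &data, sizeof(unsigned long));    interrupts since last read
 */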
static struct file_operations hpet_fops = {
        .owner = THIS_MODULE,
        .llseek = no_llseek,
        .read = hpet_read,
        .poll = hpet_poll,
        .ioctl = hpet_ioctl,
        .open = hpet_open,
        .release = hpet_release,
        .fasync = hpet_fasync,
        .mmap = hpet_mmap,
};

EXPORT_SYMBOL(hpet_alloc);
EXPORT_SYMBOL(hpet_register);
EXPORT_SYMBOL(hpet_unregister);
EXPORT_SYMBOL(hpet_control);

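/*
 * In-kernel interface: hpet_register() reserves a free comparator (periodic
 * capable or not, as requested) for a hpet_task whose ht_func callback is
 * invoked from the interrupt handler; hpet_unregister() releases it, and
 * hpet_control() drives it through the same commands as the ioctl path.
 */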
int hpet_register(struct hpet_task *tp, int periodic)
{
        unsigned int i;
        u64 mask;
        struct hpet_timer __iomem *timer;
        struct hpet_dev *devp;
        struct hpets *hpetp;

        switch (periodic) {
        case 1:
                mask = Tn_PER_INT_CAP_MASK;
                break;
        case 0:
                mask = 0;
                break;
        default:
                return -EINVAL;
        }

        spin_lock_irq(&hpet_task_lock);
        spin_lock(&hpet_lock);

        for (devp = NULL, hpetp = hpets; hpetp && !devp; hpetp = hpetp->hp_next)
                for (timer = hpetp->hp_hpet->hpet_timers, i = 0;
                     i < hpetp->hp_ntimer; i++, timer++) {
                        if ((readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK)
                            != mask)
                                continue;

                        devp = &hpetp->hp_dev[i];

                        if (devp->hd_flags & HPET_OPEN || devp->hd_task) {
                                devp = NULL;
                                continue;
                        }

                        tp->ht_opaque = devp;
                        devp->hd_task = tp;
                        break;
                }

        spin_unlock(&hpet_lock);
        spin_unlock_irq(&hpet_task_lock);

        if (tp->ht_opaque)
                return 0;
        else
                return -EBUSY;
}

static inline int hpet_tpcheck(struct hpet_task *tp)
{
        struct hpet_dev *devp;
        struct hpets *hpetp;

        devp = tp->ht_opaque;

        if (!devp)
                return -ENXIO;

        for (hpetp = hpets; hpetp; hpetp = hpetp->hp_next)
                if (devp >= hpetp->hp_dev
                    && devp < (hpetp->hp_dev + hpetp->hp_ntimer)
                    && devp->hd_hpet == hpetp->hp_hpet)
                        return 0;

        return -ENXIO;
}

int hpet_unregister(struct hpet_task *tp)
{
        struct hpet_dev *devp;
        struct hpet_timer __iomem *timer;
        int err;

        if ((err = hpet_tpcheck(tp)))
                return err;

        spin_lock_irq(&hpet_task_lock);
        spin_lock(&hpet_lock);

        devp = tp->ht_opaque;
        if (devp->hd_task != tp) {
                spin_unlock(&hpet_lock);
                spin_unlock_irq(&hpet_task_lock);
                return -ENXIO;
        }

        timer = devp->hd_timer;
        writeq((readq(&timer->hpet_config) & ~Tn_INT_ENB_CNF_MASK),
               &timer->hpet_config);
        devp->hd_flags &= ~(HPET_IE | HPET_PERIODIC);
        devp->hd_task = NULL;
        spin_unlock(&hpet_lock);
        spin_unlock_irq(&hpet_task_lock);

        return 0;
}

int hpet_control(struct hpet_task *tp, unsigned int cmd, unsigned long arg)
{
        struct hpet_dev *devp;
        int err;

        if ((err = hpet_tpcheck(tp)))
                return err;

        spin_lock_irq(&hpet_lock);
        devp = tp->ht_opaque;
        if (devp->hd_task != tp) {
                spin_unlock_irq(&hpet_lock);
                return -ENXIO;
        }
        spin_unlock_irq(&hpet_lock);
        return hpet_ioctl_common(devp, cmd, arg, 1);
}

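/*
 * Sysctl plumbing: exposes the user frequency ceiling as
 * /proc/sys/dev/hpet/max-user-freq.
 */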
static ctl_table hpet_table[] = {
        {
         .ctl_name = 1,
         .procname = "max-user-freq",
         .data = &hpet_max_freq,
         .maxlen = sizeof(int),
         .mode = 0644,
         .proc_handler = &proc_dointvec,
         },
        {.ctl_name = 0}
};

static ctl_table hpet_root[] = {
        {
         .ctl_name = 1,
         .procname = "hpet",
         .maxlen = 0,
         .mode = 0555,
         .child = hpet_table,
         },
        {.ctl_name = 0}
};

static ctl_table dev_root[] = {
        {
         .ctl_name = CTL_DEV,
         .procname = "dev",
         .maxlen = 0,
         .mode = 0555,
         .child = hpet_root,
         },
        {.ctl_name = 0}
};

static struct ctl_table_header *sysctl_header;

static void hpet_register_interpolator(struct hpets *hpetp)
{
#ifdef CONFIG_TIME_INTERPOLATION
        struct time_interpolator *ti;

        ti = kmalloc(sizeof(*ti), GFP_KERNEL);
        if (!ti)
                return;

        memset(ti, 0, sizeof(*ti));
        ti->source = TIME_SOURCE_MMIO64;
        ti->shift = 10;
        ti->addr = &hpetp->hp_hpet->hpet_mc;
        ti->frequency = hpet_time_div(hpets->hp_period);
        ti->drift = ti->frequency * HPET_DRIFT / 1000000;
        ti->mask = -1;

        hpetp->hp_interpolator = ti;
        register_time_interpolator(ti);
#endif
}

/*
 * Adjustment for when arming the timer with
 * initial conditions.  That is, main counter
 * ticks expired before interrupts are enabled.
 */
#define TICK_CALIBRATE  (1000UL)

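/*
 * With interrupts disabled, repeatedly read the main counter and rewrite
 * the comparator of an unused timer for roughly 1/TICK_CALIBRATE of a
 * second of main-counter time, then return the average number of ticks
 * consumed per iteration.  hpet_alloc() stores the result as hp_delta, the
 * padding added whenever a comparator is armed.
 */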
static unsigned long hpet_calibrate(struct hpets *hpetp)
{
        struct hpet_timer __iomem *timer = NULL;
        unsigned long t, m, count, i, flags, start;
        struct hpet_dev *devp;
        int j;
        struct hpet __iomem *hpet;

        for (j = 0, devp = hpetp->hp_dev; j < hpetp->hp_ntimer; j++, devp++)
                if ((devp->hd_flags & HPET_OPEN) == 0) {
                        timer = devp->hd_timer;
                        break;
                }

        if (!timer)
                return 0;

        hpet = hpets->hp_hpet;
        t = read_counter(&timer->hpet_compare);

        i = 0;
        count = hpet_time_div(hpetp->hp_period * TICK_CALIBRATE);

        local_irq_save(flags);

        start = read_counter(&hpet->hpet_mc);

        do {
                m = read_counter(&hpet->hpet_mc);
                write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare);
        } while (i++, (m - start) < count);

        local_irq_restore(flags);

        return (m - start) / i;
}

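/*
 * Register one HPET block described by hpet_data: allocate per-comparator
 * state, verify that the advertised comparator count matches the number of
 * IRQs reported, start the main counter (zeroing it) if it is not already
 * running, calibrate hp_delta and register a time interpolator.  Called
 * from the ACPI probe below and, on some platforms, from platform setup
 * code.
 */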
int hpet_alloc(struct hpet_data *hdp)
{
        u64 cap, mcfg;
        struct hpet_dev *devp;
        u32 i, ntimer;
        struct hpets *hpetp;
        size_t siz;
        struct hpet __iomem *hpet;
        static struct hpets *last = (struct hpets *)0;
        unsigned long ns;

        /*
         * hpet_alloc can be called by platform dependent code.
         * If platform dependent code has already allocated this HPET and
         * ACPI also reports it, catch the duplicate here.
         */
        for (hpetp = hpets; hpetp; hpetp = hpetp->hp_next)
                if (hpetp->hp_hpet == hdp->hd_address)
                        return 0;

        siz = sizeof(struct hpets) + ((hdp->hd_nirqs - 1) *
                                      sizeof(struct hpet_dev));

        hpetp = kmalloc(siz, GFP_KERNEL);

        if (!hpetp)
                return -ENOMEM;

        memset(hpetp, 0, siz);

        hpetp->hp_which = hpet_nhpet++;
        hpetp->hp_hpet = hdp->hd_address;
        hpetp->hp_hpet_phys = hdp->hd_phys_address;

        hpetp->hp_ntimer = hdp->hd_nirqs;

        for (i = 0; i < hdp->hd_nirqs; i++)
                hpetp->hp_dev[i].hd_hdwirq = hdp->hd_irq[i];

        hpet = hpetp->hp_hpet;

        cap = readq(&hpet->hpet_cap);

        ntimer = ((cap & HPET_NUM_TIM_CAP_MASK) >> HPET_NUM_TIM_CAP_SHIFT) + 1;

        if (hpetp->hp_ntimer != ntimer) {
                printk(KERN_WARNING "hpet: number of irqs doesn't agree"
                       " with number of timers\n");
                kfree(hpetp);
                return -ENODEV;
        }

        if (last)
                last->hp_next = hpetp;
        else
                hpets = hpetp;

        last = hpetp;

        hpetp->hp_period = (cap & HPET_COUNTER_CLK_PERIOD_MASK) >>
            HPET_COUNTER_CLK_PERIOD_SHIFT;

        printk(KERN_INFO "hpet%d: at MMIO 0x%lx, IRQ%s",
                hpetp->hp_which, hdp->hd_phys_address,
                hpetp->hp_ntimer > 1 ? "s" : "");
        for (i = 0; i < hpetp->hp_ntimer; i++)
                printk("%s %d", i > 0 ? "," : "", hdp->hd_irq[i]);
        printk("\n");

        ns = hpetp->hp_period;  /* femtoseconds, 10^-15 */
        do_div(ns, 1000000);    /* convert to nanoseconds, 10^-9 */
        printk(KERN_INFO "hpet%d: %ldns tick, %d %d-bit timers\n",
                hpetp->hp_which, ns, hpetp->hp_ntimer,
                cap & HPET_COUNTER_SIZE_MASK ? 64 : 32);

        mcfg = readq(&hpet->hpet_config);
        if ((mcfg & HPET_ENABLE_CNF_MASK) == 0) {
                write_counter(0L, &hpet->hpet_mc);
                mcfg |= HPET_ENABLE_CNF_MASK;
                writeq(mcfg, &hpet->hpet_config);
        }

        for (i = 0, devp = hpetp->hp_dev; i < hpetp->hp_ntimer;
             i++, hpet_ntimer++, devp++) {
                unsigned long v;
                struct hpet_timer __iomem *timer;

                timer = &hpet->hpet_timers[devp - hpetp->hp_dev];
                v = readq(&timer->hpet_config);

                devp->hd_hpets = hpetp;
                devp->hd_hpet = hpet;
                devp->hd_timer = timer;

                /*
                 * If the timer was reserved by platform code,
                 * then make timer unavailable for opens.
                 */
                if (hdp->hd_state & (1 << i)) {
                        devp->hd_flags = HPET_OPEN;
                        continue;
                }

                init_waitqueue_head(&devp->hd_waitqueue);
        }

        hpetp->hp_delta = hpet_calibrate(hpetp);
        hpet_register_interpolator(hpetp);

        return 0;
}

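/*
 * _CRS walk callback for the ACPI PNP0103 device: record and ioremap the
 * memory window holding the HPET registers, and route each extended IRQ
 * through acpi_register_gsi() into hpet_data.
 */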
static acpi_status hpet_resources(struct acpi_resource *res, void *data)
{
        struct hpet_data *hdp;
        acpi_status status;
        struct acpi_resource_address64 addr;
        struct hpets *hpetp;

        hdp = data;

        status = acpi_resource_to_address64(res, &addr);

        if (ACPI_SUCCESS(status)) {
                unsigned long size;

                size = addr.max_address_range - addr.min_address_range + 1;
                hdp->hd_phys_address = addr.min_address_range;
                hdp->hd_address = ioremap(addr.min_address_range, size);

                for (hpetp = hpets; hpetp; hpetp = hpetp->hp_next)
                        if (hpetp->hp_hpet == hdp->hd_address)
                                return -EBUSY;
        } else if (res->id == ACPI_RSTYPE_EXT_IRQ) {
                struct acpi_resource_ext_irq *irqp;
                int i;

                irqp = &res->data.extended_irq;

                if (irqp->number_of_interrupts > 0) {
                        hdp->hd_nirqs = irqp->number_of_interrupts;

                        for (i = 0; i < hdp->hd_nirqs; i++)
                                hdp->hd_irq[i] =
                                    acpi_register_gsi(irqp->interrupts[i],
                                                      irqp->edge_level,
                                                      irqp->active_high_low);
                }
        }

        return AE_OK;
}

static int hpet_acpi_add(struct acpi_device *device)
{
        acpi_status result;
        struct hpet_data data;

        memset(&data, 0, sizeof(data));

        result =
            acpi_walk_resources(device->handle, METHOD_NAME__CRS,
                                hpet_resources, &data);

        if (ACPI_FAILURE(result))
                return -ENODEV;

        if (!data.hd_address || !data.hd_nirqs) {
                printk("%s: no address or irqs in _CRS\n", __FUNCTION__);
                return -ENODEV;
        }

        return hpet_alloc(&data);
}

static int hpet_acpi_remove(struct acpi_device *device, int type)
{
        /* XXX need to unregister interpolator, dealloc mem, etc */
        return -EINVAL;
}

static struct acpi_driver hpet_acpi_driver = {
        .name = "hpet",
        .ids = "PNP0103",
        .ops = {
                .add = hpet_acpi_add,
                .remove = hpet_acpi_remove,
                },
};

static struct miscdevice hpet_misc = { HPET_MINOR, "hpet", &hpet_fops };

static int __init hpet_init(void)
{
        int result;

        result = misc_register(&hpet_misc);
        if (result < 0)
                return -ENODEV;

        sysctl_header = register_sysctl_table(dev_root, 0);

        result = acpi_bus_register_driver(&hpet_acpi_driver);
        if (result < 0) {
                if (sysctl_header)
                        unregister_sysctl_table(sysctl_header);
                misc_deregister(&hpet_misc);
                return result;
        }

        return 0;
}

static void __exit hpet_exit(void)
{
        acpi_bus_unregister_driver(&hpet_acpi_driver);

        if (sysctl_header)
                unregister_sysctl_table(sysctl_header);
        misc_deregister(&hpet_misc);

        return;
}

module_init(hpet_init);
module_exit(hpet_exit);
MODULE_AUTHOR("Bob Picco <Robert.Picco@hp.com>");
MODULE_LICENSE("GPL");