/*
 * linux/arch/x86-64/kernel/time.c
 *
 * "High Precision Event Timer" based timekeeping.
 *
 * Copyright (c) 1991,1992,1995 Linus Torvalds
 * Copyright (c) 1994 Alan Modra
 * Copyright (c) 1995 Markus Kuhn
 * Copyright (c) 1996 Ingo Molnar
 * Copyright (c) 1998 Andrea Arcangeli
 * Copyright (c) 2002 Vojtech Pavlik
 * Copyright (c) 2003 Andi Kleen
 * RTC support code taken from arch/i386/kernel/timers/time_hpet.c
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mc146818rtc.h>
#include <linux/irq.h>
#include <linux/time.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/sysdev.h>
#include <linux/bcd.h>
#include <linux/kallsyms.h>
#include <asm/8253pit.h>
#include <asm/pgtable.h>
#include <asm/vsyscall.h>
#include <asm/timex.h>
#include <asm/proto.h>
#include <asm/hpet.h>
#include <asm/sections.h>
#include <linux/cpufreq.h>
#include <linux/hpet.h>
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/apic.h>
#endif

u64 jiffies_64 = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

#ifdef CONFIG_CPU_FREQ
static void cpufreq_delayed_get(void);
#endif
extern void i8254_timer_resume(void);
extern int using_apic_timer;

DEFINE_SPINLOCK(rtc_lock);
DEFINE_SPINLOCK(i8253_lock);

static int nohpet __initdata = 0;
static int notsc __initdata = 0;

#undef HPET_HACK_ENABLE_DANGEROUS

unsigned int cpu_khz;			/* CPU frequency in kHz, i.e. TSC clocks per msec */
static unsigned long hpet_period;	/* fsecs / HPET clock */
unsigned long hpet_tick;		/* HPET clocks / interrupt */
unsigned long vxtime_hz = PIT_TICK_RATE;
int report_lost_ticks;			/* command line option */
unsigned long long monotonic_base;

struct vxtime_data __vxtime __section_vxtime;	/* for vsyscalls */

volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
unsigned long __wall_jiffies __section_wall_jiffies = INITIAL_JIFFIES;
struct timespec __xtime __section_xtime;
struct timezone __sys_tz __section_sys_tz;

static inline void rdtscll_sync(unsigned long *tsc)
{
#ifdef CONFIG_SMP
	sync_core();
#endif
	rdtscll(*tsc);
}

/*
 * do_gettimeoffset() returns microseconds since the last timer interrupt was
 * triggered by hardware. A memory read of HPET is slower than a register read
 * of TSC, but much more reliable. It's also synchronized to the timer
 * interrupt. Note that do_gettimeoffset() may return more than hpet_tick, if a
 * timer interrupt has happened already, but vxtime.trigger wasn't updated yet.
 * This is not a problem, because jiffies hasn't updated either. They are bound
 * together by xtime_lock.
 */

static inline unsigned int do_gettimeoffset_tsc(void)
{
	unsigned long t;
	unsigned long x;
	rdtscll_sync(&t);
	if (t < vxtime.last_tsc)
		t = vxtime.last_tsc;	/* hack */
	x = ((t - vxtime.last_tsc) * vxtime.tsc_quot) >> 32;
	return x;
}

static inline unsigned int do_gettimeoffset_hpet(void)
{
	return ((hpet_readl(HPET_COUNTER) - vxtime.last) * vxtime.quot) >> 32;
}
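
/*
 * Both offset functions work in 32.32 fixed point: vxtime.tsc_quot is
 * (usec per TSC cycle) << 32 and vxtime.quot is (usec per HPET clock) << 32,
 * both set up in time_init(). Multiplying an elapsed tick count by the
 * quotient and shifting right by 32 therefore yields microseconds.
 * Rough worked example (assuming a hypothetical 2 GHz CPU, i.e.
 * cpu_khz == 2000000): tsc_quot = (1000 << 32) / 2000000 ~= 2147483, so
 * 2000000 cycles (1 ms) * 2147483 >> 32 ~= 1000 usec, as expected.
 */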

unsigned int (*do_gettimeoffset)(void) = do_gettimeoffset_tsc;

/*
 * This version of gettimeofday() has microsecond resolution and better than
 * microsecond precision, as we're using at least a 10 MHz (usually 14.31818
 * MHz) HPET timer.
 */

void do_gettimeofday(struct timeval *tv)
{
	unsigned long seq, t;
	unsigned int sec, usec;

	do {
		seq = read_seqbegin(&xtime_lock);

		sec = xtime.tv_sec;
		usec = xtime.tv_nsec / 1000;

		/* i386 does some correction here to keep the clock
		   monotonic even when ntpd is fixing drift.
		   But those corrections didn't work for me: the clock
		   was non-monotonic with ntp anyway.
		   I dropped all corrections for now until a real solution
		   can be found. Note that when you fix it here, you need
		   to do the same in arch/x86_64/kernel/vsyscall.c and
		   export all needed variables in vmlinux.lds. -AK */

		t = (jiffies - wall_jiffies) * (1000000L / HZ) +
			do_gettimeoffset();
		usec += t;

	} while (read_seqretry(&xtime_lock, seq));

	tv->tv_sec = sec + usec / 1000000;
	tv->tv_usec = usec % 1000000;
}

EXPORT_SYMBOL(do_gettimeofday);

/*
 * settimeofday() first undoes the correction that gettimeofday would do
 * on the time, and then saves it. This is ugly, but has been like this for
 * ages already.
 */

int do_settimeofday(struct timespec *tv)
{
	time_t wtm_sec, sec = tv->tv_sec;
	long wtm_nsec, nsec = tv->tv_nsec;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irq(&xtime_lock);

	nsec -= do_gettimeoffset() * 1000 +
		(jiffies - wall_jiffies) * (NSEC_PER_SEC/HZ);

	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

	set_normalized_timespec(&xtime, sec, nsec);
	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

	time_adjust = 0;		/* stop active adjtime() */
	time_status |= STA_UNSYNC;
	time_maxerror = NTP_PHASE_LIMIT;
	time_esterror = NTP_PHASE_LIMIT;

	write_sequnlock_irq(&xtime_lock);
	clock_was_set();
	return 0;
}

EXPORT_SYMBOL(do_settimeofday);

unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	/* Assume the lock function has either no stack frame or only a
	   single word. This checks if the address on the stack looks like
	   a kernel text address. There is a small window for false hits,
	   but in that case the tick is just accounted to the spinlock
	   function. Better would be to write these functions in assembler
	   again and check exactly. */
	if (in_lock_functions(pc)) {
		char *v = *(char **)regs->rsp;
		if ((v >= _stext && v <= _etext) ||
		    (v >= _sinittext && v <= _einittext) ||
		    (v >= (char *)MODULES_VADDR && v <= (char *)MODULES_END))
			return (unsigned long)v;
		return ((unsigned long *)regs->rsp)[1];
	}
	return pc;
}
EXPORT_SYMBOL(profile_pc);

/*
 * In order to set the CMOS clock precisely, set_rtc_mmss has to be called 500
 * ms after the second nowtime has started, because when nowtime is written
 * into the registers of the CMOS clock, it will jump to the next second
 * precisely 500 ms later. Check the Motorola MC146818A or Dallas DS12887 data
 * sheet for details.
 */

static void set_rtc_mmss(unsigned long nowtime)
{
	int real_seconds, real_minutes, cmos_minutes;
	unsigned char control, freq_select;

/*
 * IRQs are disabled when we're called from the timer interrupt,
 * no need for spin_lock_irqsave()
 */

	spin_lock(&rtc_lock);

/*
 * Tell the clock it's being set and stop it.
 */

	control = CMOS_READ(RTC_CONTROL);
	CMOS_WRITE(control | RTC_SET, RTC_CONTROL);

	freq_select = CMOS_READ(RTC_FREQ_SELECT);
	CMOS_WRITE(freq_select | RTC_DIV_RESET2, RTC_FREQ_SELECT);

	cmos_minutes = CMOS_READ(RTC_MINUTES);
	BCD_TO_BIN(cmos_minutes);

/*
 * Since we're only adjusting minutes and seconds, don't interfere with hour
 * overflow. This avoids messing with unknown time zones but requires your RTC
 * not to be off by more than 15 minutes. Since we're calling it only when
 * our clock is externally synchronized using NTP, this shouldn't be a problem.
 */

	real_seconds = nowtime % 60;
	real_minutes = nowtime / 60;
	if (((abs(real_minutes - cmos_minutes) + 15) / 30) & 1)
		real_minutes += 30;	/* correct for half hour time zone */
	real_minutes %= 60;

#if 0
	/* AMD 8111 is a really bad time keeper and hits this regularly.
	   It probably was an attempt to avoid screwing up DST, but ignore
	   that for now. */
	if (abs(real_minutes - cmos_minutes) >= 30) {
		printk(KERN_WARNING "time.c: can't update CMOS clock "
		       "from %d to %d\n", cmos_minutes, real_minutes);
	} else
#endif
	{
		BIN_TO_BCD(real_seconds);
		BIN_TO_BCD(real_minutes);
		CMOS_WRITE(real_seconds, RTC_SECONDS);
		CMOS_WRITE(real_minutes, RTC_MINUTES);
	}

/*
 * The following flags have to be released exactly in this order, otherwise the
 * DS12887 (popular MC146818A clone with integrated battery and quartz) will
 * not reset the oscillator and will not update precisely 500 ms later. You
 * won't find this mentioned in the Dallas Semiconductor data sheets, but who
 * believes data sheets anyway ... -- Markus Kuhn
 */

	CMOS_WRITE(control, RTC_CONTROL);
	CMOS_WRITE(freq_select, RTC_FREQ_SELECT);

	spin_unlock(&rtc_lock);
}

/* monotonic_clock(): returns # of nanoseconds passed since time_init()
 * Note: This function is required to return accurate
 * time even in the absence of multiple timer ticks.
 */
unsigned long long monotonic_clock(void)
{
	unsigned long seq;
	u32 last_offset, this_offset;
	unsigned long long base, offset;

	if (vxtime.mode == VXTIME_HPET) {
		do {
			seq = read_seqbegin(&xtime_lock);

			last_offset = vxtime.last;
			base = monotonic_base;
			this_offset = hpet_readl(HPET_T0_CMP) - hpet_tick;

		} while (read_seqretry(&xtime_lock, seq));
		offset = (this_offset - last_offset);
		offset *= (NSEC_PER_SEC / HZ) / hpet_tick;
		return base + offset;
	} else {
		do {
			seq = read_seqbegin(&xtime_lock);

			last_offset = vxtime.last_tsc;
			base = monotonic_base;
		} while (read_seqretry(&xtime_lock, seq));
		sync_core();
		rdtscll(this_offset);
		/* TSC cycles to nanoseconds: cpu_khz is cycles per msec,
		   so cycles * 1000000 / cpu_khz is nsec, matching the
		   units of monotonic_base. */
		offset = (this_offset - last_offset) * 1000000ULL / cpu_khz;
		return base + offset;
	}
}
EXPORT_SYMBOL(monotonic_clock);

static noinline void handle_lost_ticks(int lost, struct pt_regs *regs)
{
	static long lost_count;
	static int warned;

	if (report_lost_ticks) {
		printk(KERN_WARNING "time.c: Lost %d timer "
		       "tick(s)! ", lost);
		print_symbol("rip %s\n", regs->rip);
	}

	if (lost_count == 1000 && !warned) {
		printk(KERN_WARNING
		       "warning: many lost ticks.\n"
		       KERN_WARNING "Your time source seems to be unstable or "
		       "some driver is hogging interrupts\n");
		print_symbol("rip %s\n", regs->rip);
		if (vxtime.mode == VXTIME_TSC && vxtime.hpet_address) {
			printk(KERN_WARNING "Falling back to HPET\n");
			vxtime.last = hpet_readl(HPET_T0_CMP) - hpet_tick;
			vxtime.mode = VXTIME_HPET;
			do_gettimeoffset = do_gettimeoffset_hpet;
		}
		/* else should fall back to PIT, but code missing. */
		warned = 1;
	} else
		lost_count++;

#ifdef CONFIG_CPU_FREQ
	/* In some cases the CPU can change frequency without us noticing
	   (like going into thermal throttle).
	   Give cpufreq a chance to catch up. */
	if ((lost_count + 1) % 25 == 0)
		cpufreq_delayed_get();
#endif
}

static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	static unsigned long rtc_update = 0;
	unsigned long tsc;
	int delay, offset = 0, lost = 0;

/*
 * Here we are in the timer irq handler. We have irqs locally disabled (so we
 * don't need spin_lock_irqsave()) but we don't know if the timer_bh is running
 * on the other CPU, so we need a lock. We also need to lock the vsyscall
 * variables, because both do_timer() and we change them -arca+vojtech
 */

	write_seqlock(&xtime_lock);

	if (vxtime.hpet_address) {
		offset = hpet_readl(HPET_T0_CMP) - hpet_tick;
		delay = hpet_readl(HPET_COUNTER) - offset;
	} else {
		spin_lock(&i8253_lock);
		outb_p(0x00, 0x43);
		delay = inb_p(0x40);
		delay |= inb(0x40) << 8;
		spin_unlock(&i8253_lock);
		delay = LATCH - 1 - delay;
	}

	rdtscll_sync(&tsc);

	if (vxtime.mode == VXTIME_HPET) {
		if (offset - vxtime.last > hpet_tick)
			lost = (offset - vxtime.last) / hpet_tick - 1;

		monotonic_base +=
			(offset - vxtime.last) * (NSEC_PER_SEC/HZ) / hpet_tick;

		vxtime.last = offset;
	} else {
		offset = (((tsc - vxtime.last_tsc) *
			   vxtime.tsc_quot) >> 32) - (USEC_PER_SEC / HZ);

		if (offset < 0)
			offset = 0;

		if (offset > (USEC_PER_SEC / HZ)) {
			lost = offset / (USEC_PER_SEC / HZ);
			offset %= (USEC_PER_SEC / HZ);
		}

		monotonic_base += (tsc - vxtime.last_tsc) * 1000000 / cpu_khz;

		/* Rewind last_tsc to the moment this tick was actually due:
		   delay is in timer clocks, quot converts timer clocks to
		   usec<<32, and dividing by tsc_quot converts that back to
		   TSC cycles. */
		vxtime.last_tsc = tsc - vxtime.quot * delay / vxtime.tsc_quot;

		if ((((tsc - vxtime.last_tsc) *
		      vxtime.tsc_quot) >> 32) < offset)
			vxtime.last_tsc = tsc -
				(((long) offset << 32) / vxtime.tsc_quot) - 1;
	}

	if (lost > 0) {
		handle_lost_ticks(lost, regs);
		jiffies += lost;
	}

/*
 * Do the timer stuff.
 */

	do_timer(regs);
#ifndef CONFIG_SMP
	update_process_times(user_mode(regs));
#endif

/*
 * In the SMP case we use the local APIC timer interrupt to do the profiling,
 * except when we simulate SMP mode on a uniprocessor system, in which case we
 * have to call the local interrupt handler.
 */

#ifndef CONFIG_X86_LOCAL_APIC
	profile_tick(CPU_PROFILING, regs);
#else
	if (!using_apic_timer)
		smp_local_timer_interrupt(regs);
#endif

/*
 * If we have an externally synchronized Linux clock, then update the CMOS
 * clock accordingly every ~11 minutes. set_rtc_mmss() will be called in the
 * jiffy closest to exactly 500 ms before the next second. If the update fails,
 * we don't care, as it'll be updated on the next turn, and the problem (time
 * way off) isn't likely to go away much sooner anyway.
 */

	if ((~time_status & STA_UNSYNC) && xtime.tv_sec > rtc_update &&
	    abs(xtime.tv_nsec - 500000000) <= tick_nsec / 2) {
		set_rtc_mmss(xtime.tv_sec);
		rtc_update = xtime.tv_sec + 660;
	}

	write_sequnlock(&xtime_lock);

	return IRQ_HANDLED;
}

static unsigned int cyc2ns_scale;
#define CYC2NS_SCALE_FACTOR 10	/* 2^10, carefully chosen */

static inline void set_cyc2ns_scale(unsigned long cpu_mhz)
{
	cyc2ns_scale = (1000 << CYC2NS_SCALE_FACTOR) / cpu_mhz;
}

static inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
	return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
}
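
/*
 * Worked example of the scale above (numbers are illustrative only):
 * assuming a hypothetical 2000 MHz CPU, cyc2ns_scale = (1000 << 10) / 2000
 * = 512, so cycles_2_ns(cyc) = cyc * 512 >> 10 = cyc / 2, i.e. 0.5 ns per
 * cycle. The << 10 / >> 10 pair keeps ns-per-cycle in fixed point with 10
 * fractional bits, which avoids a division in the hot sched_clock() path.
 */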

unsigned long long sched_clock(void)
{
	unsigned long a = 0;

#if 0
	/* Don't do a HPET read here. Using TSC always is much faster
	   and HPET may not be mapped yet when the scheduler first runs.
	   Disadvantage is a small drift between CPUs in some configurations,
	   but that should be tolerable. */
	if (__vxtime.mode == VXTIME_HPET)
		return (hpet_readl(HPET_COUNTER) * vxtime.quot) >> 32;
#endif

	/* Could do CPU core sync here. Opteron can execute rdtsc
	   speculatively, which means it is not completely exact and may not
	   be monotonic between CPUs. But the errors should be too small to
	   matter for scheduling purposes. */

	rdtscll(a);
	return cycles_2_ns(a);
}

unsigned long get_cmos_time(void)
{
	unsigned int timeout, year, mon, day, hour, min, sec;
	unsigned char last, this;
	unsigned long flags;

/*
 * The Linux interpretation of the CMOS clock register contents: When the
 * Update-In-Progress (UIP) flag goes from 1 to 0, the RTC registers show the
 * second which has precisely just started. Waiting for this can take up to 1
 * second, so we time out after approximately 2.4 seconds on a machine with
 * standard 8.3 MHz ISA bus.
 */

	spin_lock_irqsave(&rtc_lock, flags);

	timeout = 1000000;
	last = this = 0;

	/* Loop until we observe the UIP flag going from 1 to 0. */
	while (timeout && !(last && !this)) {
		last = this;
		this = CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP;
		timeout--;
	}

/*
 * Here we are safe to assume the registers won't change for a whole second, so
 * we just go ahead and read them.
 */

	sec = CMOS_READ(RTC_SECONDS);
	min = CMOS_READ(RTC_MINUTES);
	hour = CMOS_READ(RTC_HOURS);
	day = CMOS_READ(RTC_DAY_OF_MONTH);
	mon = CMOS_READ(RTC_MONTH);
	year = CMOS_READ(RTC_YEAR);

	spin_unlock_irqrestore(&rtc_lock, flags);

/*
 * We know that x86-64 always uses BCD format, no need to check the config
 * register.
 */

	BCD_TO_BIN(sec);
	BCD_TO_BIN(min);
	BCD_TO_BIN(hour);
	BCD_TO_BIN(day);
	BCD_TO_BIN(mon);
	BCD_TO_BIN(year);

/*
 * x86-64 systems have only existed since 2002, so the two-digit RTC year can
 * safely be interpreted as 20xx. This works through Dec 31, 2099.
 */
	year += 2000;

	return mktime(year, mon, day, hour, min, sec);
}

#ifdef CONFIG_CPU_FREQ

/* Frequency scaling support. Adjust the TSC based timer when the cpu frequency
   changes.

   RED-PEN: On SMP we assume all CPUs run with the same frequency. It's
   not that important because current Opteron setups do not support
   scaling on SMP anyway.

   Should fix up last_tsc too. Currently gettimeofday in the
   first tick after the change will be slightly wrong. */

#include <linux/workqueue.h>

static unsigned int cpufreq_delayed_issched = 0;
static unsigned int cpufreq_init = 0;
static struct work_struct cpufreq_delayed_get_work;

static void handle_cpufreq_delayed_get(void *v)
{
	unsigned int cpu;
	for_each_online_cpu(cpu) {
		cpufreq_get(cpu);
	}
	cpufreq_delayed_issched = 0;
}

/* If we notice lost ticks, schedule a call to cpufreq_get(), which verifies
 * that the CPU frequency the timing core thinks the CPU is running at is
 * still correct.
 */
static void cpufreq_delayed_get(void)
{
	static int warned;
	if (cpufreq_init && !cpufreq_delayed_issched) {
		cpufreq_delayed_issched = 1;
		if (!warned) {
			warned = 1;
			printk(KERN_DEBUG
			       "Losing some ticks... checking if CPU frequency changed.\n");
		}
		schedule_work(&cpufreq_delayed_get_work);
	}
}

static unsigned int ref_freq = 0;
static unsigned long loops_per_jiffy_ref = 0;

static unsigned long cpu_khz_ref = 0;

static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				 void *data)
{
	struct cpufreq_freqs *freq = data;
	unsigned long *lpj, dummy;

	lpj = &dummy;
	if (!(freq->flags & CPUFREQ_CONST_LOOPS))
#ifdef CONFIG_SMP
		lpj = &cpu_data[freq->cpu].loops_per_jiffy;
#else
		lpj = &boot_cpu_data.loops_per_jiffy;
#endif

	if (!ref_freq) {
		ref_freq = freq->old;
		loops_per_jiffy_ref = *lpj;
		cpu_khz_ref = cpu_khz;
	}
	if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
	    (val == CPUFREQ_RESUMECHANGE)) {
		*lpj = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);

		cpu_khz = cpufreq_scale(cpu_khz_ref, ref_freq, freq->new);
		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
			vxtime.tsc_quot = (1000L << 32) / cpu_khz;
	}

	/* Keep sched_clock()'s scale in step with the current TSC rate. */
	set_cyc2ns_scale(cpu_khz / 1000);

	return 0;
}

static struct notifier_block time_cpufreq_notifier_block = {
	.notifier_call = time_cpufreq_notifier
};

static int __init cpufreq_tsc(void)
{
	INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL);
	if (!cpufreq_register_notifier(&time_cpufreq_notifier_block,
				       CPUFREQ_TRANSITION_NOTIFIER))
		cpufreq_init = 1;
	return 0;
}

core_initcall(cpufreq_tsc);

#endif

/*
 * hpet_calibrate_tsc() calibrates the processor TSC in a very simple way,
 * comparing it to the HPET timer of known frequency.
 */

#define TICK_COUNT 100000000

static unsigned int __init hpet_calibrate_tsc(void)
{
	int tsc_start, hpet_start;
	int tsc_now, hpet_now;
	unsigned long flags;

	local_irq_save(flags);
	local_irq_disable();

	hpet_start = hpet_readl(HPET_COUNTER);
	rdtscl(tsc_start);

	do {
		local_irq_disable();
		hpet_now = hpet_readl(HPET_COUNTER);
		sync_core();
		rdtscl(tsc_now);
		local_irq_restore(flags);
	} while ((tsc_now - tsc_start) < TICK_COUNT &&
		 (hpet_now - hpet_start) < TICK_COUNT);

	return (tsc_now - tsc_start) * 1000000000L
		/ ((hpet_now - hpet_start) * hpet_period / 1000);
}

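/*
 * Units check for the return value above: hpet_delta * hpet_period / 1000
 * is the elapsed time in picoseconds (hpet_period is in femtoseconds), and
 * tsc_delta * 10^9 / elapsed_ps is TSC cycles per millisecond, i.e. cpu_khz.
 */
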
/*
 * pit_calibrate_tsc() uses the speaker output (channel 2) of
 * the PIT. This is better than using the timer interrupt output,
 * because we can read the value of the speaker with just one inb(),
 * where we need three i/o operations for the interrupt channel.
 * We count how many ticks the TSC does in 50 ms.
 */

707{
708 unsigned long start, end;
709 unsigned long flags;
710
711 spin_lock_irqsave(&i8253_lock, flags);
712
713 outb((inb(0x61) & ~0x02) | 0x01, 0x61);
714
715 outb(0xb0, 0x43);
716 outb((PIT_TICK_RATE / (1000 / 50)) & 0xff, 0x42);
717 outb((PIT_TICK_RATE / (1000 / 50)) >> 8, 0x42);
718 rdtscll(start);
719 sync_core();
720 while ((inb(0x61) & 0x20) == 0);
721 sync_core();
722 rdtscll(end);
723
724 spin_unlock_irqrestore(&i8253_lock, flags);
725
726 return (end - start) / 50;
727}
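
/*
 * Arithmetic sketch for the calibration above: channel 2 is loaded with
 * PIT_TICK_RATE / 20 counts, i.e. a 50 ms one-shot, and we busy-wait for its
 * OUT pin (port 0x61, bit 5) to go high. The TSC delta is then the number of
 * CPU cycles in 50 ms, so dividing by 50 gives cycles per msec, which is
 * exactly cpu_khz. E.g. a hypothetical 2 GHz CPU would count ~100,000,000
 * cycles and return ~2,000,000 kHz.
 */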

#ifdef CONFIG_HPET
static __init int late_hpet_init(void)
{
	struct hpet_data hd;
	unsigned int ntimer;

	if (!vxtime.hpet_address)
		return -1;

	memset(&hd, 0, sizeof(hd));

	ntimer = hpet_readl(HPET_ID);
	ntimer = (ntimer & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT;
	ntimer++;

	/*
	 * Register with driver.
	 * Timer0 and Timer1 are reserved for the platform.
	 */
	hd.hd_phys_address = vxtime.hpet_address;
	hd.hd_address = (void *)fix_to_virt(FIX_HPET_BASE);
	hd.hd_nirqs = ntimer;
	hd.hd_flags = HPET_DATA_PLATFORM;
	hpet_reserve_timer(&hd, 0);
#ifdef CONFIG_HPET_EMULATE_RTC
	hpet_reserve_timer(&hd, 1);
#endif
	hd.hd_irq[0] = HPET_LEGACY_8254;
	hd.hd_irq[1] = HPET_LEGACY_RTC;
	if (ntimer > 2) {
		struct hpet *hpet;
		struct hpet_timer *timer;
		int i;

		hpet = (struct hpet *) fix_to_virt(FIX_HPET_BASE);

		for (i = 2, timer = &hpet->hpet_timers[2]; i < ntimer;
		     timer++, i++)
			hd.hd_irq[i] = (timer->hpet_config &
					Tn_INT_ROUTE_CNF_MASK) >>
				Tn_INT_ROUTE_CNF_SHIFT;
	}

	hpet_alloc(&hd);
	return 0;
}
fs_initcall(late_hpet_init);
#endif

static int hpet_timer_stop_set_go(unsigned long tick)
{
	unsigned int cfg;

/*
 * Stop the timers and reset the main counter.
 */

	cfg = hpet_readl(HPET_CFG);
	cfg &= ~(HPET_CFG_ENABLE | HPET_CFG_LEGACY);
	hpet_writel(cfg, HPET_CFG);
	hpet_writel(0, HPET_COUNTER);
	hpet_writel(0, HPET_COUNTER + 4);

/*
 * Set up timer 0, as periodic with first interrupt to happen at hpet_tick,
 * and period also hpet_tick.
 */

	hpet_writel(HPET_TN_ENABLE | HPET_TN_PERIODIC | HPET_TN_SETVAL |
		    HPET_TN_32BIT, HPET_T0_CFG);
	hpet_writel(hpet_tick, HPET_T0_CMP);
	/* While HPET_TN_SETVAL is set, the first write above arms the
	   comparator and this second write sets the periodic accumulator. */
	hpet_writel(hpet_tick, HPET_T0_CMP);

/*
 * Go!
 */

	cfg |= HPET_CFG_ENABLE | HPET_CFG_LEGACY;
	hpet_writel(cfg, HPET_CFG);

	return 0;
}

static int hpet_init(void)
{
	unsigned int id;

	if (!vxtime.hpet_address)
		return -1;
	set_fixmap_nocache(FIX_HPET_BASE, vxtime.hpet_address);
	__set_fixmap(VSYSCALL_HPET, vxtime.hpet_address,
		     PAGE_KERNEL_VSYSCALL_NOCACHE);

/*
 * Read the period, compute tick and quotient.
 */

	id = hpet_readl(HPET_ID);

	if (!(id & HPET_ID_VENDOR) || !(id & HPET_ID_NUMBER) ||
	    !(id & HPET_ID_LEGSUP))
		return -1;

	hpet_period = hpet_readl(HPET_PERIOD);
	if (hpet_period < 100000 || hpet_period > 100000000)
		return -1;

	hpet_tick = (1000000000L * (USEC_PER_SEC / HZ) + hpet_period / 2) /
		hpet_period;

	return hpet_timer_stop_set_go(hpet_tick);
}
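
/*
 * Units sketch for hpet_tick: hpet_period is the HPET counter period in
 * femtoseconds (10^-15 s) as reported by the hardware. One kernel tick is
 * USEC_PER_SEC/HZ microseconds, i.e. 10^9 * (USEC_PER_SEC/HZ) femtoseconds,
 * so dividing by hpet_period (with + hpet_period/2 for rounding) yields HPET
 * clocks per kernel tick. E.g., assuming a typical 14.31818 MHz HPET
 * (hpet_period ~= 69841279 fs) and HZ == 1000, hpet_tick ~= 14318.
 */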

static int hpet_reenable(void)
{
	return hpet_timer_stop_set_go(hpet_tick);
}

void __init pit_init(void)
{
	unsigned long flags;

	spin_lock_irqsave(&i8253_lock, flags);
	outb_p(0x34, 0x43);		/* binary, mode 2, LSB/MSB, ch 0 */
	outb_p(LATCH & 0xff, 0x40);	/* LSB */
	outb_p(LATCH >> 8, 0x40);	/* MSB */
	spin_unlock_irqrestore(&i8253_lock, flags);
}

int __init time_setup(char *str)
{
	report_lost_ticks = 1;
	return 1;
}

static struct irqaction irq0 = {
	timer_interrupt, SA_INTERRUPT, CPU_MASK_NONE, "timer", NULL, NULL
};

extern void __init config_acpi_tables(void);

void __init time_init(void)
{
	char *timename;

#ifdef HPET_HACK_ENABLE_DANGEROUS
	if (!vxtime.hpet_address) {
		printk(KERN_WARNING "time.c: WARNING: Enabling HPET base "
		       "manually!\n");
		outl(0x800038a0, 0xcf8);
		outl(0xff000001, 0xcfc);
		outl(0x800038a0, 0xcf8);
		vxtime.hpet_address = inl(0xcfc) & 0xfffffffe;
		printk(KERN_WARNING "time.c: WARNING: Enabled HPET "
		       "at %#lx.\n", vxtime.hpet_address);
	}
#endif
	if (nohpet)
		vxtime.hpet_address = 0;

	xtime.tv_sec = get_cmos_time();
	xtime.tv_nsec = 0;

	set_normalized_timespec(&wall_to_monotonic,
				-xtime.tv_sec, -xtime.tv_nsec);

	if (!hpet_init()) {
		vxtime_hz = (1000000000000000L + hpet_period / 2) /
			hpet_period;
		cpu_khz = hpet_calibrate_tsc();
		timename = "HPET";
	} else {
		pit_init();
		cpu_khz = pit_calibrate_tsc();
		timename = "PIT";
	}

	printk(KERN_INFO "time.c: Using %ld.%06ld MHz %s timer.\n",
	       vxtime_hz / 1000000, vxtime_hz % 1000000, timename);
	printk(KERN_INFO "time.c: Detected %d.%03d MHz processor.\n",
	       cpu_khz / 1000, cpu_khz % 1000);
	vxtime.mode = VXTIME_TSC;
	vxtime.quot = (1000000L << 32) / vxtime_hz;
	vxtime.tsc_quot = (1000L << 32) / cpu_khz;
	vxtime.hz = vxtime_hz;
	rdtscll_sync(&vxtime.last_tsc);
	setup_irq(0, &irq0);

	set_cyc2ns_scale(cpu_khz / 1000);
}

void __init time_init_smp(void)
{
	char *timetype;

	/*
	 * AMD systems with more than one CPU don't have fully synchronized
	 * TSCs. Always use HPET gettimeofday for these, although it is
	 * slower. Intel SMP systems usually have synchronized TSCs, so
	 * always use the TSC.
	 *
	 * Exceptions:
	 * IBM Summit2 checked by oem_force_hpet_timer().
	 * AMD dual core may also not need HPET. Check me.
	 *
	 * Can be turned off with "notsc".
	 */
	if (num_online_cpus() > 1 &&
	    boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		notsc = 1;
	/* Some systems will want to disable TSC and use HPET. */
	if (oem_force_hpet_timer())
		notsc = 1;
	if (vxtime.hpet_address && notsc) {
		timetype = "HPET";
		vxtime.last = hpet_readl(HPET_T0_CMP) - hpet_tick;
		vxtime.mode = VXTIME_HPET;
		do_gettimeoffset = do_gettimeoffset_hpet;
	} else {
		timetype = vxtime.hpet_address ? "HPET/TSC" : "PIT/TSC";
		vxtime.mode = VXTIME_TSC;
	}

	printk(KERN_INFO "time.c: Using %s based timekeeping.\n", timetype);
}

__setup("report_lost_ticks", time_setup);

static long clock_cmos_diff;
static unsigned long sleep_start;

static int timer_suspend(struct sys_device *dev, u32 state)
{
	/*
	 * Estimate time zone so that set_time can update the clock
	 */
	long cmos_time = get_cmos_time();

	clock_cmos_diff = -cmos_time;
	clock_cmos_diff += get_seconds();
	sleep_start = cmos_time;
	return 0;
}

static int timer_resume(struct sys_device *dev)
{
	unsigned long flags;
	unsigned long sec;
	unsigned long ctime = get_cmos_time();
	unsigned long sleep_length = (ctime - sleep_start) * HZ;

	if (vxtime.hpet_address)
		hpet_reenable();
	else
		i8254_timer_resume();

	sec = ctime + clock_cmos_diff;
	write_seqlock_irqsave(&xtime_lock, flags);
	xtime.tv_sec = sec;
	xtime.tv_nsec = 0;
	write_sequnlock_irqrestore(&xtime_lock, flags);
	jiffies += sleep_length;
	wall_jiffies += sleep_length;
	return 0;
}

static struct sysdev_class timer_sysclass = {
	.resume = timer_resume,
	.suspend = timer_suspend,
	set_kset_name("timer"),
};

/* XXX this driverfs stuff should probably go elsewhere later -john */
static struct sys_device device_timer = {
	.id = 0,
	.cls = &timer_sysclass,
};

static int time_init_device(void)
{
	int error = sysdev_class_register(&timer_sysclass);
	if (!error)
		error = sysdev_register(&device_timer);
	return error;
}

device_initcall(time_init_device);

#ifdef CONFIG_HPET_EMULATE_RTC
/* HPET in LegacyReplacement mode eats up the RTC interrupt line. When HPET
 * is enabled, we support RTC interrupt functionality in software.
 * RTC has 3 kinds of interrupts:
 * 1) Update Interrupt - generate an interrupt, every second, when the RTC
 *    clock is updated
 * 2) Alarm Interrupt - generate an interrupt at a specific time of day
 * 3) Periodic Interrupt - generate periodic interrupts, with frequencies
 *    2Hz-8192Hz (2Hz-64Hz for a non-root user) (all freqs in powers of 2)
 * (1) and (2) above are implemented using polling at a frequency of
 * 64 Hz. The exact frequency is a tradeoff between accuracy and interrupt
 * overhead. (DEFAULT_RTC_INT_FREQ)
 * For (3), we use interrupts at 64 Hz or the user specified periodic
 * frequency, whichever is higher.
 */
#include <linux/rtc.h>

extern irqreturn_t rtc_interrupt(int irq, void *dev_id, struct pt_regs *regs);

#define DEFAULT_RTC_INT_FREQ	64
#define RTC_NUM_INTS		1

static unsigned long UIE_on;
static unsigned long prev_update_sec;

static unsigned long AIE_on;
static struct rtc_time alarm_time;

static unsigned long PIE_on;
static unsigned long PIE_freq = DEFAULT_RTC_INT_FREQ;
static unsigned long PIE_count;

static unsigned long hpet_rtc_int_freq;	/* RTC interrupt frequency */

int is_hpet_enabled(void)
{
	return vxtime.hpet_address != 0;
}

/*
 * Timer 1 is used for RTC; we do not use the periodic interrupt feature,
 * even though HPET supports periodic interrupts on Timer 1.
 * The reason is that to set up a periodic interrupt in HPET, we need to
 * stop the main counter. And if we did that every time someone disables or
 * enables RTC, it would adversely affect the main kernel timer running on
 * Timer 0. So, for the time being, simulate the periodic interrupt in
 * software.
 *
 * hpet_rtc_timer_init() is called the first time, and during subsequent
 * interrupts reinitialization happens through hpet_rtc_timer_reinit().
 */
int hpet_rtc_timer_init(void)
{
	unsigned int cfg, cnt;
	unsigned long flags;

	if (!is_hpet_enabled())
		return 0;
	/*
	 * Set the counter 1 and enable the interrupts.
	 */
	if (PIE_on && (PIE_freq > DEFAULT_RTC_INT_FREQ))
		hpet_rtc_int_freq = PIE_freq;
	else
		hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ;

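	/*
	 * Program the timer 1 comparator one RTC interrupt period into the
	 * future: hpet_tick is HPET clocks per kernel tick, so hpet_tick * HZ
	 * is HPET clocks per second, and dividing by hpet_rtc_int_freq gives
	 * HPET clocks per RTC interrupt.
	 */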
	local_irq_save(flags);
	cnt = hpet_readl(HPET_COUNTER);
	cnt += ((hpet_tick * HZ) / hpet_rtc_int_freq);
	hpet_writel(cnt, HPET_T1_CMP);
	local_irq_restore(flags);

	cfg = hpet_readl(HPET_T1_CFG);
	cfg |= HPET_TN_ENABLE | HPET_TN_SETVAL | HPET_TN_32BIT;
	hpet_writel(cfg, HPET_T1_CFG);

	return 1;
}

static void hpet_rtc_timer_reinit(void)
{
	unsigned int cfg, cnt;

	if (!(PIE_on | AIE_on | UIE_on))
		return;

	if (PIE_on && (PIE_freq > DEFAULT_RTC_INT_FREQ))
		hpet_rtc_int_freq = PIE_freq;
	else
		hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ;

	/* It is more accurate to use the comparator value than the current
	   count. */
	cnt = hpet_readl(HPET_T1_CMP);
	cnt += hpet_tick * HZ / hpet_rtc_int_freq;
	hpet_writel(cnt, HPET_T1_CMP);

	cfg = hpet_readl(HPET_T1_CFG);
	cfg |= HPET_TN_ENABLE | HPET_TN_SETVAL | HPET_TN_32BIT;
	hpet_writel(cfg, HPET_T1_CFG);

	return;
}

/*
 * The functions below are called from the rtc driver.
 * Return 0 if HPET is not being used.
 * Otherwise do the necessary changes and return 1.
 */
int hpet_mask_rtc_irq_bit(unsigned long bit_mask)
{
	if (!is_hpet_enabled())
		return 0;

	if (bit_mask & RTC_UIE)
		UIE_on = 0;
	if (bit_mask & RTC_PIE)
		PIE_on = 0;
	if (bit_mask & RTC_AIE)
		AIE_on = 0;

	return 1;
}

int hpet_set_rtc_irq_bit(unsigned long bit_mask)
{
	int timer_init_reqd = 0;

	if (!is_hpet_enabled())
		return 0;

	if (!(PIE_on | AIE_on | UIE_on))
		timer_init_reqd = 1;

	if (bit_mask & RTC_UIE)
		UIE_on = 1;
	if (bit_mask & RTC_PIE) {
		PIE_on = 1;
		PIE_count = 0;
	}
	if (bit_mask & RTC_AIE)
		AIE_on = 1;

	if (timer_init_reqd)
		hpet_rtc_timer_init();

	return 1;
}

int hpet_set_alarm_time(unsigned char hrs, unsigned char min, unsigned char sec)
{
	if (!is_hpet_enabled())
		return 0;

	alarm_time.tm_hour = hrs;
	alarm_time.tm_min = min;
	alarm_time.tm_sec = sec;

	return 1;
}

int hpet_set_periodic_freq(unsigned long freq)
{
	if (!is_hpet_enabled())
		return 0;

	PIE_freq = freq;
	PIE_count = 0;

	return 1;
}

int hpet_rtc_dropped_irq(void)
{
	if (!is_hpet_enabled())
		return 0;

	return 1;
}

irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct rtc_time curr_time;
	unsigned long rtc_int_flag = 0;
	int call_rtc_interrupt = 0;

	hpet_rtc_timer_reinit();

	if (UIE_on | AIE_on)
		rtc_get_rtc_time(&curr_time);
	if (UIE_on) {
		if (curr_time.tm_sec != prev_update_sec) {
			/* Set update int info, call real rtc int routine */
			call_rtc_interrupt = 1;
			rtc_int_flag = RTC_UF;
			prev_update_sec = curr_time.tm_sec;
		}
	}
	if (PIE_on) {
		PIE_count++;
		if (PIE_count >= hpet_rtc_int_freq / PIE_freq) {
			/* Set periodic int info, call real rtc int routine */
			call_rtc_interrupt = 1;
			rtc_int_flag |= RTC_PF;
			PIE_count = 0;
		}
	}
	if (AIE_on) {
		if ((curr_time.tm_sec == alarm_time.tm_sec) &&
		    (curr_time.tm_min == alarm_time.tm_min) &&
		    (curr_time.tm_hour == alarm_time.tm_hour)) {
			/* Set alarm int info, call real rtc int routine */
			call_rtc_interrupt = 1;
			rtc_int_flag |= RTC_AF;
		}
	}
	if (call_rtc_interrupt) {
		rtc_int_flag |= (RTC_IRQF | (RTC_NUM_INTS << 8));
		rtc_interrupt(rtc_int_flag, dev_id, regs);
	}
	return IRQ_HANDLED;
}
#endif


static int __init nohpet_setup(char *s)
{
	nohpet = 1;
	return 0;
}

__setup("nohpet", nohpet_setup);

static int __init notsc_setup(char *s)
{
	notsc = 1;
	return 0;
}

__setup("notsc", notsc_setup);