/*
 *	Precise Delay Loops for i386
 *
 *	Copyright (C) 1993 Linus Torvalds
 *	Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 *	Copyright (C) 2008 Jiri Hladky <hladky _dot_ jiri _at_ gmail _dot_ com>
 *
 *	The __delay function must _NOT_ be inlined as its execution time
 *	depends wildly on alignment on many x86 processors. The additional
 *	jump magic is needed to get the timing stable on all the CPU's
 *	we have to worry about.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/timex.h>
#include <linux/preempt.h>
#include <linux/delay.h>

#include <asm/processor.h>
#include <asm/delay.h>
#include <asm/timer.h>

#ifdef CONFIG_SMP
# include <asm/smp.h>
#endif

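/*
 * The "jump magic" mentioned in the header, spelled out: the .align 16
 * directives in delay_loop() below force the hot dec/jnz loop to start
 * on a 16-byte boundary, so the cycles per iteration do not depend on
 * where the linker happened to place this function, and the leading
 * test/jz pair short-circuits a zero loop count.
 */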
/* simple loop based delay: */
static void delay_loop(unsigned long loops)
{
	asm volatile(
		"	test %0,%0	\n"
		"	jz 3f		\n"
		"	jmp 1f		\n"

		".align 16		\n"
		"1:	jmp 2f		\n"

		".align 16		\n"
		"2:	dec %0		\n"
		"	jnz 2b		\n"
		"3:	dec %0		\n"

		: /* we don't need output */
		:"a" (loops)
	);
}

/* TSC based delay: */
static void delay_tsc(unsigned long __loops)
{
	u32 bclock, now, loops = __loops;
	int cpu;

	preempt_disable();
	cpu = smp_processor_id();
	rdtsc_barrier();
	rdtscl(bclock);
	for (;;) {
		rdtsc_barrier();
		rdtscl(now);
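		/*
		 * Note: bclock, now and loops are deliberately 32-bit, so
		 * the unsigned subtraction below yields the elapsed ticks
		 * modulo 2^32 and stays correct even if the TSC's low
		 * word wraps between the two reads.
		 */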
		if ((now - bclock) >= loops)
			break;

		/* Allow RT tasks to run */
		preempt_enable();
		rep_nop();
		preempt_disable();

		/*
		 * It is possible that we moved to another CPU, and
		 * since TSCs are per-CPU we need to account for
		 * that. The delay must guarantee that we wait "at
		 * least" the amount of time. Being moved to another
		 * CPU could make the wait longer but we just need to
		 * make sure we waited long enough. Rebalance the
		 * counter for this CPU.
		 */
		if (unlikely(cpu != smp_processor_id())) {
			loops -= (now - bclock);
			cpu = smp_processor_id();
			rdtsc_barrier();
			rdtscl(bclock);
		}
	}
	preempt_enable();
}

/*
 * Since we calibrate the delay loop only once at boot, this function
 * pointer should be set once at boot and not changed afterwards:
 */
static void (*delay_fn)(unsigned long) = delay_loop;

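/*
 * Switch to the TSC based implementation; this is expected to be
 * called once, from the TSC setup code, after the TSC has been
 * calibrated and found usable as a delay reference.
 */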
void use_tsc_delay(void)
{
	delay_fn = delay_tsc;
}

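/*
 * Hand out a raw TSC timestamp when the TSC delay is in use; a return
 * of -1 signals that no suitable timer is available, so callers (e.g.
 * the boot-time delay-loop calibration) must fall back to the classic
 * calibration loop.
 */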
int read_current_timer(unsigned long *timer_val)
{
	if (delay_fn == delay_tsc) {
		rdtscll(*timer_val);
		return 0;
	}
	return -1;
}

void __delay(unsigned long loops)
{
	delay_fn(loops);
}
EXPORT_SYMBOL(__delay);

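/*
 * How the fixed-point math below works (worked through from the two
 * callers at the bottom of this file): xloops arrives scaled by 2^32,
 * i.e. it is the delay expressed as a 0.32 fixed-point fraction of a
 * second. "mull %edx" forms the 64-bit product
 *
 *	(xloops * 4) * (loops_per_jiffy * (HZ/4))
 *
 * and the high 32 bits (%edx) that we keep are equivalent to
 *
 *	(xloops * loops_per_jiffy * HZ) >> 32 == seconds * loops-per-second
 *
 * which is exactly the loop count __delay() needs. The factor of four
 * is split out so that loops_per_jiffy * (HZ/4) still fits in 32 bits
 * on fast CPUs; the final ++xloops rounds the result up by one loop.
 */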
inline void __const_udelay(unsigned long xloops)
{
	int d0;

	xloops *= 4;
	asm("mull %%edx"
		:"=d" (xloops), "=&a" (d0)
		:"1" (xloops), "0"
		(this_cpu_read(cpu_info.loops_per_jiffy) * (HZ/4)));

	__delay(++xloops);
}
EXPORT_SYMBOL(__const_udelay);

void __udelay(unsigned long usecs)
{
	__const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */
}
EXPORT_SYMBOL(__udelay);

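/*
 * 2^32 / 10^9 is ~4.295, so the rounded-up multiplier of 5 below makes
 * __ndelay() overshoot by roughly 16%; that is fine, since a delay only
 * has to be "at least" as long as requested.
 */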
void __ndelay(unsigned long nsecs)
{
	__const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */
}
EXPORT_SYMBOL(__ndelay);