#ifndef _LINUX_JIFFIES_H
#define _LINUX_JIFFIES_H

#include <linux/calc64.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <asm/param.h>			/* for HZ */

/*
 * The following defines establish the engineering parameters of the PLL
 * model. The HZ variable establishes the timer interrupt frequency, 100 Hz
 * for the SunOS kernel, 256 Hz for the Ultrix kernel and 1024 Hz for the
 * OSF/1 kernel. The SHIFT_HZ define expresses the same value as the
 * nearest power of two in order to avoid hardware multiply operations.
 */
#if HZ >= 12 && HZ < 24
# define SHIFT_HZ	4
#elif HZ >= 24 && HZ < 48
# define SHIFT_HZ	5
#elif HZ >= 48 && HZ < 96
# define SHIFT_HZ	6
#elif HZ >= 96 && HZ < 192
# define SHIFT_HZ	7
#elif HZ >= 192 && HZ < 384
# define SHIFT_HZ	8
#elif HZ >= 384 && HZ < 768
# define SHIFT_HZ	9
#elif HZ >= 768 && HZ < 1536
# define SHIFT_HZ	10
#else
# error You lose.
#endif
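/*
 * For example, HZ = 100 falls in the 96..191 range above and gets
 * SHIFT_HZ = 7 (2^7 = 128), while HZ = 1000 gets SHIFT_HZ = 10
 * (2^10 = 1024).
 */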

/* LATCH is used in the interval timer and ftape setup. */
#define LATCH  ((CLOCK_TICK_RATE + HZ/2) / HZ)	/* For divider */

#define LATCH_HPET ((HPET_TICK_RATE + HZ/2) / HZ)

/* Suppose we want to divide two numbers NOM and DEN: NOM/DEN, then we can
 * improve accuracy by shifting LSH bits, hence calculating:
 *     (NOM << LSH) / DEN
 * This however means trouble for large NOM, because (NOM << LSH) may no
 * longer fit in 32 bits. The following way of calculating this gives us
 * some slack, under the following conditions:
 *  - (NOM / DEN) fits in (32 - LSH) bits.
 *  - (NOM % DEN) fits in (32 - LSH) bits.
 */
#define SH_DIV(NOM,DEN,LSH) (   (((NOM) / (DEN)) << (LSH))              \
                             + ((((NOM) % (DEN)) << (LSH)) + (DEN) / 2) / (DEN))
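/*
 * A worked example of the rearrangement above: SH_DIV(1000, 3, 4) is
 *	((1000 / 3) << 4) + (((1000 % 3) << 4) + 3/2) / 3
 *	= (333 << 4) + (16 + 1) / 3 = 5328 + 5 = 5333,
 * the same result as the naive (1000 << 4) / 3 = 16000 / 3 = 5333, but
 * the intermediate values stay much smaller than NOM << LSH would be.
 */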

/* HZ is the requested value. ACTHZ is actual HZ ("<< 8" is for accuracy) */
#define ACTHZ (SH_DIV (CLOCK_TICK_RATE, LATCH, 8))

#define ACTHZ_HPET (SH_DIV (HPET_TICK_RATE, LATCH_HPET, 8))

/* TICK_NSEC is the time between ticks in nsec assuming real ACTHZ */
#define TICK_NSEC (SH_DIV (1000000UL * 1000, ACTHZ, 8))

#define TICK_NSEC_HPET (SH_DIV(1000000UL * 1000, ACTHZ_HPET, 8))

/* TICK_USEC is the time between ticks in usec assuming fake USER_HZ */
#define TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ)

/* TICK_USEC_TO_NSEC is the time between ticks in nsec assuming real ACTHZ and */
/* a value TUSEC for TICK_USEC (can be set by adjtimex)				*/
#define TICK_USEC_TO_NSEC(TUSEC) (SH_DIV (TUSEC * USER_HZ * 1000, ACTHZ, 8))

/* Some architectures have a small-data section that can be accessed
 * register-relative but that can only take up to, say, 4-byte variables.
 * jiffies being part of an 8-byte variable may not be correctly accessed
 * unless we force the issue.
 */
#define __jiffy_data  __attribute__((section(".data")))

/*
 * The 64-bit value is not volatile - you MUST NOT read it
 * without sampling the sequence number in xtime_lock.
 * get_jiffies_64() will do this for you as appropriate.
 */
extern u64 __jiffy_data jiffies_64;
extern unsigned long volatile __jiffy_data jiffies;

#if (BITS_PER_LONG < 64)
u64 get_jiffies_64(void);
#else
static inline u64 get_jiffies_64(void)
{
	return (u64)jiffies;
}
#endif
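/*
 * Illustrative sketch (not part of the interface above): code that needs
 * the full 64-bit counter should go through the accessor rather than read
 * jiffies_64 directly, e.g.
 *
 *	u64 now = get_jiffies_64();
 *
 * On 32-bit machines the out-of-line version samples the xtime_lock
 * sequence number, as the comment above requires, so both halves of the
 * counter are read consistently.
 */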

/*
 * These inlines deal with timer wrapping correctly. You are
 * strongly encouraged to use them:
 * 1. Because people otherwise forget
 * 2. Because if the timer wrap changes in the future you won't have to
 *    alter your driver code.
 *
 * time_after(a,b) returns true if the time a is after time b.
 *
 * Do this with "<0" and ">=0" to only test the sign of the result. A
 * good compiler would generate better code (and a really good compiler
 * wouldn't care). Gcc is currently neither.
 */
#define time_after(a,b)		\
	(typecheck(unsigned long, a) && \
	 typecheck(unsigned long, b) && \
	 ((long)(b) - (long)(a) < 0))
#define time_before(a,b)	time_after(b,a)

#define time_after_eq(a,b)	\
	(typecheck(unsigned long, a) && \
	 typecheck(unsigned long, b) && \
	 ((long)(a) - (long)(b) >= 0))
#define time_before_eq(a,b)	time_after_eq(b,a)
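/*
 * Illustrative sketch (not part of this header): a typical driver timeout
 * loop built on the wrap-safe comparisons above.  device_ready() is a
 * hypothetical helper used only for this example.
 *
 *	unsigned long timeout = jiffies + 2 * HZ;	// roughly two seconds
 *
 *	while (!device_ready()) {
 *		if (time_after(jiffies, timeout))
 *			return -ETIMEDOUT;
 *		cpu_relax();
 *	}
 */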

/* Same as above, but does so with platform-independent 64-bit types.
 * These must be used when utilizing jiffies_64 (i.e. the return value of
 * get_jiffies_64()). */
#define time_after64(a,b)	\
	(typecheck(__u64, a) &&	\
	 typecheck(__u64, b) && \
	 ((__s64)(b) - (__s64)(a) < 0))
#define time_before64(a,b)	time_after64(b,a)

#define time_after_eq64(a,b)	\
	(typecheck(__u64, a) && \
	 typecheck(__u64, b) && \
	 ((__s64)(a) - (__s64)(b) >= 0))
#define time_before_eq64(a,b)	time_after_eq64(b,a)

/*
 * Have the 32 bit jiffies value wrap 5 minutes after boot
 * so jiffies wrap bugs show up earlier.
 */
#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))

/*
 * Change timeval to jiffies, trying to avoid the
 * most obvious overflows..
 *
 * And some not so obvious.
 *
 * Note that we don't want to return MAX_LONG, because
 * for various timeout reasons we often end up having
 * to wait "jiffies+1" in order to guarantee that we wait
 * at _least_ "jiffies" - so "jiffies+1" had better still
 * be positive.
 */
#define MAX_JIFFY_OFFSET ((~0UL >> 1)-1)

/*
 * We want to do realistic conversions of time so we need to use the same
 * values the update wall clock code uses as the jiffies size.  This value
 * is: TICK_NSEC (defined above).  This is a constant and is in
 * nanoseconds.  We will use scaled math with a set of scales defined
 * here as SEC_JIFFIE_SC, USEC_JIFFIE_SC and NSEC_JIFFIE_SC.  Note that
 * these defines contain nothing but constants and so are computed at
 * compile time.  SHIFT_HZ (computed above) adjusts the scaling for
 * different HZ values.
 *
 * Scaled math???  What is that?
 *
 * Scaled math is a way to do integer math on values that would,
 * otherwise, either overflow, underflow, or cause undesired div
 * instructions to appear in the execution path.  In short, we "scale"
 * up the operands so they take more bits (more precision, less
 * underflow), do the desired operation and then "scale" the result back
 * by the same amount.  If we do the scaling by shifting we avoid the
 * costly mpy and the dastardly div instructions.
 *
 * Suppose, for example, we want to convert from seconds to jiffies
 * where jiffies is defined in nanoseconds as NSEC_PER_JIFFIE.  The
 * simple math is: jiff = (sec * NSEC_PER_SEC) / NSEC_PER_JIFFIE; We
 * observe that (NSEC_PER_SEC / NSEC_PER_JIFFIE) is a constant which we
 * might calculate at compile time, however, the result will only have
 * about 3-4 bits of precision (less for smaller values of HZ).
 *
 * So, we scale as follows:
 * jiff = (sec) * (NSEC_PER_SEC / NSEC_PER_JIFFIE);
 * jiff = ((sec) * ((NSEC_PER_SEC * SCALE)/ NSEC_PER_JIFFIE)) / SCALE;
 * Then we make SCALE a power of two so:
 * jiff = ((sec) * ((NSEC_PER_SEC << SCALE)/ NSEC_PER_JIFFIE)) >> SCALE;
 * Now we define:
 * #define SEC_CONV ((NSEC_PER_SEC << SCALE) / NSEC_PER_JIFFIE)
 * jiff = (sec * SEC_CONV) >> SCALE;
 *
 * Often the math we use will expand beyond 32-bits so we tell C how to
 * do this and pass the 64-bit result of the mpy through the ">> SCALE"
 * which should take the result back to 32-bits.  We want this expansion
 * to capture as much precision as possible.  At the same time we don't
 * want to overflow so we pick the SCALE to avoid this.  In this file,
 * that means using a different scale for each range of HZ values (as
 * selected by SHIFT_HZ above).
 *
 * For those who want to know, gcc will give a 64-bit result from a "*"
 * operator if the result is a long long AND at least one of the
 * operands is cast to long long (usually just prior to the "*" so as
 * not to confuse it into thinking it really has a 64-bit operand,
 * which, by the way, it can do, but it takes more code and at least 2
 * mpys).
 *
 * We also need to be aware that one second in nanoseconds is only a
 * couple of bits away from overflowing a 32-bit word, so we MUST use
 * 64-bits to get the full range time in nanoseconds.
 */

/*
 * Here are the scales we will use.  One for seconds, nanoseconds and
 * microseconds.
 *
 * Within the limits of cpp we do a rough cut at the SEC_JIFFIE_SC and
 * check if the sign bit is set.  If not, we bump the shift count by 1.
 * (Gets an extra bit of precision where we can use it.)
 * We know it is set for HZ = 1024 and HZ = 100, but not for HZ = 1000.
 * Haven't tested others.
 *
 * The limits of cpp (in #if expressions) only allow long (no long long),
 * but then we only need the most significant bit.
 */

#define SEC_JIFFIE_SC (31 - SHIFT_HZ)
#if !((((NSEC_PER_SEC << 2) / TICK_NSEC) << (SEC_JIFFIE_SC - 2)) & 0x80000000)
#undef SEC_JIFFIE_SC
#define SEC_JIFFIE_SC (32 - SHIFT_HZ)
#endif
#define NSEC_JIFFIE_SC (SEC_JIFFIE_SC + 29)
#define USEC_JIFFIE_SC (SEC_JIFFIE_SC + 19)
#define SEC_CONVERSION ((unsigned long)((((u64)NSEC_PER_SEC << SEC_JIFFIE_SC) +\
                                TICK_NSEC -1) / (u64)TICK_NSEC))

#define NSEC_CONVERSION ((unsigned long)((((u64)1 << NSEC_JIFFIE_SC) +\
                                        TICK_NSEC -1) / (u64)TICK_NSEC))
#define USEC_CONVERSION  \
                    ((unsigned long)((((u64)NSEC_PER_USEC << USEC_JIFFIE_SC) +\
                                        TICK_NSEC -1) / (u64)TICK_NSEC))
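/*
 * A rough worked example, assuming HZ = 100 with an exact 10 ms tick
 * (TICK_NSEC = 10000000, which is what the PIT-based math above yields
 * on i386): NSEC_PER_SEC divides evenly by TICK_NSEC, so
 *	SEC_CONVERSION = (NSEC_PER_SEC << SEC_JIFFIE_SC) / TICK_NSEC
 *		       = 100 << SEC_JIFFIE_SC,
 * and the expression used in timespec_to_jiffies() below,
 *	(sec * SEC_CONVERSION) >> SEC_JIFFIE_SC,
 * comes out to exactly sec * HZ using only a multiply and a shift.
 */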
/*
 * USEC_ROUND is used in the timeval to jiffies conversion.  See there
 * for more details.  It is the scaled resolution rounding value.  Note
 * that it is a 64-bit value.  Since, when it is applied, we are already
 * in jiffies (albeit scaled), it is nothing but the bits we will shift
 * off.
 */
#define USEC_ROUND (u64)(((u64)1 << USEC_JIFFIE_SC) - 1)
/*
 * The maximum jiffie value is MAX_JIFFY_OFFSET, i.e. (~0UL >> 1) - 1.
 * Here we translate that into seconds.  The 64-bit case will overflow
 * if we are not careful, so use the messy SH_DIV macro to do it.
 * Still all constants.
 */
#if BITS_PER_LONG < 64
# define MAX_SEC_IN_JIFFIES \
	(long)((u64)((u64)MAX_JIFFY_OFFSET * TICK_NSEC) / NSEC_PER_SEC)
#else	/* take care of overflow on 64-bit machines */
# define MAX_SEC_IN_JIFFIES \
	(SH_DIV((MAX_JIFFY_OFFSET >> SEC_JIFFIE_SC) * TICK_NSEC, NSEC_PER_SEC, 1) - 1)

#endif

/*
 * Convert jiffies to milliseconds and back.
 *
 * Avoid unnecessary multiplications/divisions in the
 * two most common HZ cases:
 */
static inline unsigned int jiffies_to_msecs(const unsigned long j)
{
#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
	return (MSEC_PER_SEC / HZ) * j;
#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
	return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
#else
	return (j * MSEC_PER_SEC) / HZ;
#endif
}

static inline unsigned int jiffies_to_usecs(const unsigned long j)
{
#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
	return (USEC_PER_SEC / HZ) * j;
#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC)
	return (j + (HZ / USEC_PER_SEC) - 1)/(HZ / USEC_PER_SEC);
#else
	return (j * USEC_PER_SEC) / HZ;
#endif
}

static inline unsigned long msecs_to_jiffies(const unsigned int m)
{
	if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
		return MAX_JIFFY_OFFSET;
#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
	return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
	return m * (HZ / MSEC_PER_SEC);
#else
	return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC;
#endif
}

static inline unsigned long usecs_to_jiffies(const unsigned int u)
{
	if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET))
		return MAX_JIFFY_OFFSET;
#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
	return (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ);
#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC)
	return u * (HZ / USEC_PER_SEC);
#else
	return (u * HZ + USEC_PER_SEC - 1) / USEC_PER_SEC;
#endif
}
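/*
 * Illustrative examples of the helpers above (results depend on HZ):
 * with HZ = 100, msecs_to_jiffies(250) = (250 + 9) / 10 = 25 and
 * jiffies_to_msecs(25) = 10 * 25 = 250; with HZ = 1000 both are the
 * identity.  msecs_to_jiffies() rounds up, so a requested delay is
 * never shortened, e.g. when building a timeout for
 * schedule_timeout_interruptible(msecs_to_jiffies(250)).
 */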

/*
 * The TICK_NSEC - 1 rounds up the value to the next resolution.  Note
 * that a remainder subtract here would not do the right thing as the
 * resolution values don't fall on second boundaries.  I.e. the line:
 * nsec -= nsec % TICK_NSEC; is NOT a correct resolution rounding.
 *
 * Rather, we just shift the bits off the right.
 *
 * The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec
 * value to a scaled second value.
 */
static __inline__ unsigned long
timespec_to_jiffies(const struct timespec *value)
{
	unsigned long sec = value->tv_sec;
	long nsec = value->tv_nsec + TICK_NSEC - 1;

	if (sec >= MAX_SEC_IN_JIFFIES) {
		sec = MAX_SEC_IN_JIFFIES;
		nsec = 0;
	}
	return (((u64)sec * SEC_CONVERSION) +
		(((u64)nsec * NSEC_CONVERSION) >>
		 (NSEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;

}
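/*
 * For instance, with HZ = 100 and an exact 10 ms tick, a timespec of
 * { 0, 10000000 } (exactly one tick) converts to 1 jiffy, while
 * { 0, 10000001 } (one tick plus a nanosecond) rounds up to 2 jiffies:
 * the TICK_NSEC - 1 added above guarantees the conversion never rounds
 * down below the requested interval.
 */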

static __inline__ void
jiffies_to_timespec(const unsigned long jiffies, struct timespec *value)
{
	/*
	 * Convert jiffies to nanoseconds and separate with
	 * one divide.
	 */
	u64 nsec = (u64)jiffies * TICK_NSEC;
	value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &value->tv_nsec);
}

/* Same for "timeval"
 *
 * Well, almost.  The problem here is that the real system resolution is
 * in nanoseconds and the value being converted is in microseconds.
 * Also for some machines (those that use HZ = 1024, in particular),
 * there is a LARGE error in the tick size in microseconds.
 *
 * The solution we use is to do the rounding AFTER we convert the
 * microsecond part.  Thus the USEC_ROUND, the bits to be shifted off.
 * Instruction-wise, this should cost only an additional add-with-carry
 * instruction over the timespec version above.
 */
static __inline__ unsigned long
timeval_to_jiffies(const struct timeval *value)
{
	unsigned long sec = value->tv_sec;
	long usec = value->tv_usec;

	if (sec >= MAX_SEC_IN_JIFFIES) {
		sec = MAX_SEC_IN_JIFFIES;
		usec = 0;
	}
	return (((u64)sec * SEC_CONVERSION) +
		(((u64)usec * USEC_CONVERSION + USEC_ROUND) >>
		 (USEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
}

static __inline__ void
jiffies_to_timeval(const unsigned long jiffies, struct timeval *value)
{
	/*
	 * Convert jiffies to nanoseconds and separate with
	 * one divide.
	 */
	u64 nsec = (u64)jiffies * TICK_NSEC;
	long tv_usec;

	value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &tv_usec);
	tv_usec /= NSEC_PER_USEC;
	value->tv_usec = tv_usec;
}

/*
 * Convert jiffies/jiffies_64 to clock_t and back.
 */
static inline clock_t jiffies_to_clock_t(long x)
{
#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
	return x / (HZ / USER_HZ);
#else
	u64 tmp = (u64)x * TICK_NSEC;
	do_div(tmp, (NSEC_PER_SEC / USER_HZ));
	return (long)tmp;
#endif
}

static inline unsigned long clock_t_to_jiffies(unsigned long x)
{
#if (HZ % USER_HZ) == 0
	if (x >= ~0UL / (HZ / USER_HZ))
		return ~0UL;
	return x * (HZ / USER_HZ);
#else
	u64 jif;

	/* Don't worry about loss of precision here .. */
	if (x >= ~0UL / HZ * USER_HZ)
		return ~0UL;

	/* .. but do try to contain it here */
	jif = x * (u64) HZ;
	do_div(jif, USER_HZ);
	return jif;
#endif
}

static inline u64 jiffies_64_to_clock_t(u64 x)
{
#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
	do_div(x, HZ / USER_HZ);
#else
	/*
	 * There are better ways that don't overflow early,
	 * but even this doesn't overflow in hundreds of years
	 * in 64 bits, so..
	 */
	x *= TICK_NSEC;
	do_div(x, (NSEC_PER_SEC / USER_HZ));
#endif
	return x;
}

static inline u64 nsec_to_clock_t(u64 x)
{
#if (NSEC_PER_SEC % USER_HZ) == 0
	do_div(x, (NSEC_PER_SEC / USER_HZ));
#elif (USER_HZ % 512) == 0
	x *= USER_HZ/512;
	do_div(x, (NSEC_PER_SEC / 512));
#else
	/*
	 * max relative error 5.7e-8 (1.8s per year) for USER_HZ <= 1024,
	 * overflow after 64.99 years.
	 * exact for HZ=60, 72, 90, 120, 144, 180, 300, 600, 900, ...
	 */
	x *= 9;
	do_div(x, (unsigned long)((9ull * NSEC_PER_SEC + (USER_HZ/2))
	                          / USER_HZ));
#endif
	return x;
}
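/*
 * With the usual USER_HZ of 100, NSEC_PER_SEC % USER_HZ == 0, so the
 * first branch applies and, for example, nsec_to_clock_t(2500000000)
 * is 2500000000 / 10000000 = 250 clock ticks (2.5 seconds' worth).
 */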

#endif