/*
 * linux/kernel/timer.c
 *
 * Kernel internal timers, basic process system calls
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * 1997-01-28 Modified by Finn Arne Gangstad to make timers scale better.
 *
 * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
 *            "A Kernel Model for Precision Timekeeping" by Dave Mills
 * 1998-12-24 Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
 *            serialize accesses to xtime/lost_ticks).
 *                              Copyright (C) 1998 Andrea Arcangeli
 * 1999-03-10 Improved NTP compatibility by Ulrich Windl
 * 2002-05-31 Move sys_sysinfo here and make its locking sane, Robert Love
 * 2000-10-05 Implemented scalable SMP per-CPU timer handling.
 *                              Copyright (C) 2000, 2001, 2002 Ingo Molnar
 *            Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 */

#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pid_namespace.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/tick.h>
#include <linux/kallsyms.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>

u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

/*
 * per-CPU timer vector definitions:
 */
#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)

struct tvec {
	struct list_head vec[TVN_SIZE];
};

struct tvec_root {
	struct list_head vec[TVR_SIZE];
};

struct tvec_base {
	spinlock_t lock;
	struct timer_list *running_timer;
	unsigned long timer_jiffies;
	struct tvec_root tv1;
	struct tvec tv2;
	struct tvec tv3;
	struct tvec tv4;
	struct tvec tv5;
} ____cacheline_aligned;

struct tvec_base boot_tvec_bases;
EXPORT_SYMBOL(boot_tvec_bases);
static DEFINE_PER_CPU(struct tvec_base *, tvec_bases) = &boot_tvec_bases;

/*
 * Note that all tvec_bases are 2 byte aligned and lower bit of
 * base in timer_list is guaranteed to be zero. Use the LSB for
 * the new flag to indicate whether the timer is deferrable
 */
#define TBASE_DEFERRABLE_FLAG		(0x1)

/* Functions below help us manage 'deferrable' flag */
static inline unsigned int tbase_get_deferrable(struct tvec_base *base)
{
	return ((unsigned int)(unsigned long)base & TBASE_DEFERRABLE_FLAG);
}

static inline struct tvec_base *tbase_get_base(struct tvec_base *base)
{
	return ((struct tvec_base *)((unsigned long)base & ~TBASE_DEFERRABLE_FLAG));
}

static inline void timer_set_deferrable(struct timer_list *timer)
{
	timer->base = ((struct tvec_base *)((unsigned long)(timer->base) |
				       TBASE_DEFERRABLE_FLAG));
}

static inline void
timer_set_base(struct timer_list *timer, struct tvec_base *new_base)
{
	timer->base = (struct tvec_base *)((unsigned long)(new_base) |
				      tbase_get_deferrable(timer->base));
}

/**
 * __round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies(unsigned long j, int cpu)
{
	int rem;
	unsigned long original = j;

	/*
	 * We don't want all cpus firing their timers at once hitting the
	 * same lock or cachelines, so we skew each extra cpu with an extra
	 * 3 jiffies. This 3 jiffies came originally from the mm/ code which
	 * already did this.
	 * The skew is done by adding 3*cpunr, then round, then subtract this
	 * extra offset again.
	 */
	j += cpu * 3;

	rem = j % HZ;

	/*
	 * If the target jiffie is just after a whole second (which can happen
	 * due to delays of the timer irq, long irq off times etc etc) then
	 * we should round down to the whole second, not up. Use 1/4th second
	 * as cutoff for this rounding as an extreme upper bound for this.
	 */
	if (rem < HZ/4) /* round down */
		j = j - rem;
	else /* round up */
		j = j - rem + HZ;

	/* now that we have rounded, subtract the extra skew again */
	j -= cpu * 3;

	if (j <= jiffies) /* rounding ate our timeout entirely; */
		return original;
	return j;
}
EXPORT_SYMBOL_GPL(__round_jiffies);

/**
 * __round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies_relative(unsigned long j, int cpu)
{
	/*
	 * In theory the following code can skip a jiffy in case jiffies
	 * increments right between the addition and the later subtraction.
	 * However since the entire point of this function is to use approximate
	 * timeouts, it's entirely ok to not handle that.
	 */
	return __round_jiffies(j + jiffies, cpu) - jiffies;
}
EXPORT_SYMBOL_GPL(__round_jiffies_relative);

/**
 * round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies(unsigned long j)
{
	return __round_jiffies(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies);

/**
 * round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies_relative(unsigned long j)
{
	return __round_jiffies_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_relative);
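
/*
 * Illustrative sketch: a periodic job that only needs to run "roughly every
 * second" can combine round_jiffies_relative() with mod_timer() so it fires
 * together with other second-aligned timers and wakes the CPU less often.
 * The names my_dev, my_poll_fn and do_housekeeping are made up for the
 * example and are not part of this file.
 *
 *	static void my_poll_fn(unsigned long data)
 *	{
 *		struct my_dev *dev = (struct my_dev *)data;
 *
 *		do_housekeeping(dev);
 *
 *		mod_timer(&dev->poll_timer,
 *			  jiffies + round_jiffies_relative(HZ));
 *	}
 */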


static inline void set_running_timer(struct tvec_base *base,
					struct timer_list *timer)
{
#ifdef CONFIG_SMP
	base->running_timer = timer;
#endif
}

static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
{
	unsigned long expires = timer->expires;
	unsigned long idx = expires - base->timer_jiffies;
	struct list_head *vec;

	if (idx < TVR_SIZE) {
		int i = expires & TVR_MASK;
		vec = base->tv1.vec + i;
	} else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
		int i = (expires >> TVR_BITS) & TVN_MASK;
		vec = base->tv2.vec + i;
	} else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
		vec = base->tv3.vec + i;
	} else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
		vec = base->tv4.vec + i;
	} else if ((signed long) idx < 0) {
		/*
		 * Can happen if you add a timer with expires == jiffies,
		 * or you set a timer to go off in the past
		 */
		vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
	} else {
		int i;
		/* If the timeout is larger than 0xffffffff on 64-bit
		 * architectures then we use the maximum timeout:
		 */
		if (idx > 0xffffffffUL) {
			idx = 0xffffffffUL;
			expires = idx + base->timer_jiffies;
		}
		i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
		vec = base->tv5.vec + i;
	}
	/*
	 * Timers are FIFO:
	 */
	list_add_tail(&timer->entry, vec);
}

#ifdef CONFIG_TIMER_STATS
void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
{
	if (timer->start_site)
		return;

	timer->start_site = addr;
	memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
	timer->start_pid = current->pid;
}

static void timer_stats_account_timer(struct timer_list *timer)
{
	unsigned int flag = 0;

	if (unlikely(tbase_get_deferrable(timer->base)))
		flag |= TIMER_STATS_FLAG_DEFERRABLE;

	timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
				 timer->function, timer->start_comm, flag);
}

#else
static void timer_stats_account_timer(struct timer_list *timer) {}
#endif

#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static struct debug_obj_descr timer_debug_descr;

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int timer_fixup_init(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_init(timer, &timer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int timer_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. The timer was
		 * statically initialized. We just make sure that it
		 * is tracked in the object tracker.
		 */
		if (timer->entry.next == NULL &&
		    timer->entry.prev == TIMER_ENTRY_STATIC) {
			debug_object_init(timer, &timer_debug_descr);
			debug_object_activate(timer, &timer_debug_descr);
			return 0;
		} else {
			WARN_ON_ONCE(1);
		}
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int timer_fixup_free(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_free(timer, &timer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr timer_debug_descr = {
	.name		= "timer_list",
	.fixup_init	= timer_fixup_init,
	.fixup_activate	= timer_fixup_activate,
	.fixup_free	= timer_fixup_free,
};

static inline void debug_timer_init(struct timer_list *timer)
{
	debug_object_init(timer, &timer_debug_descr);
}

static inline void debug_timer_activate(struct timer_list *timer)
{
	debug_object_activate(timer, &timer_debug_descr);
}

static inline void debug_timer_deactivate(struct timer_list *timer)
{
	debug_object_deactivate(timer, &timer_debug_descr);
}

static inline void debug_timer_free(struct timer_list *timer)
{
	debug_object_free(timer, &timer_debug_descr);
}

static void __init_timer(struct timer_list *timer);

void init_timer_on_stack(struct timer_list *timer)
{
	debug_object_init_on_stack(timer, &timer_debug_descr);
	__init_timer(timer);
}
EXPORT_SYMBOL_GPL(init_timer_on_stack);

void destroy_timer_on_stack(struct timer_list *timer)
{
	debug_object_free(timer, &timer_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_timer_on_stack);

#else
static inline void debug_timer_init(struct timer_list *timer) { }
static inline void debug_timer_activate(struct timer_list *timer) { }
static inline void debug_timer_deactivate(struct timer_list *timer) { }
#endif

static void __init_timer(struct timer_list *timer)
{
	timer->entry.next = NULL;
	timer->base = __raw_get_cpu_var(tvec_bases);
#ifdef CONFIG_TIMER_STATS
	timer->start_site = NULL;
	timer->start_pid = -1;
	memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
}

/**
 * init_timer - initialize a timer.
 * @timer: the timer to be initialized
 *
 * init_timer() must be done to a timer prior to calling *any* of the
 * other timer functions.
 */
void init_timer(struct timer_list *timer)
{
	debug_timer_init(timer);
	__init_timer(timer);
}
EXPORT_SYMBOL(init_timer);

void init_timer_deferrable(struct timer_list *timer)
{
	init_timer(timer);
	timer_set_deferrable(timer);
}
EXPORT_SYMBOL(init_timer_deferrable);
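
/*
 * Illustrative sketch: a deferrable timer is serviced like any other timer
 * while the CPU is busy, but it will not by itself bring an idle CPU out of
 * a NO_HZ sleep. A hypothetical user that merely flushes statistics might
 * set one up like this (my_stats_timer and my_flush_stats are made-up names):
 *
 *	init_timer_deferrable(&my_stats_timer);
 *	my_stats_timer.function = my_flush_stats;
 *	my_stats_timer.data = 0;
 *	mod_timer(&my_stats_timer, jiffies + 10 * HZ);
 */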

static inline void detach_timer(struct timer_list *timer,
				int clear_pending)
{
	struct list_head *entry = &timer->entry;

	debug_timer_deactivate(timer);

	__list_del(entry->prev, entry->next);
	if (clear_pending)
		entry->next = NULL;
	entry->prev = LIST_POISON2;
}

/*
 * We are using hashed locking: holding per_cpu(tvec_bases).lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on ->tvX lists.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */
static struct tvec_base *lock_timer_base(struct timer_list *timer,
					unsigned long *flags)
	__acquires(timer->base->lock)
{
	struct tvec_base *base;

	for (;;) {
		struct tvec_base *prelock_base = timer->base;
		base = tbase_get_base(prelock_base);
		if (likely(base != NULL)) {
			spin_lock_irqsave(&base->lock, *flags);
			if (likely(prelock_base == timer->base))
				return base;
			/* The timer has migrated to another CPU */
			spin_unlock_irqrestore(&base->lock, *flags);
		}
		cpu_relax();
	}
}

int __mod_timer(struct timer_list *timer, unsigned long expires)
{
	struct tvec_base *base, *new_base;
	unsigned long flags;
	int ret = 0;

	timer_stats_timer_set_start_info(timer);
	BUG_ON(!timer->function);

	base = lock_timer_base(timer, &flags);

	if (timer_pending(timer)) {
		detach_timer(timer, 0);
		ret = 1;
	}

	debug_timer_activate(timer);

	new_base = __get_cpu_var(tvec_bases);

	if (base != new_base) {
		/*
		 * We are trying to schedule the timer on the local CPU.
		 * However we can't change timer's base while it is running,
		 * otherwise del_timer_sync() can't detect that the timer's
		 * handler has not yet finished. This also guarantees that
		 * the timer is serialized wrt itself.
		 */
		if (likely(base->running_timer != timer)) {
			/* See the comment in lock_timer_base() */
			timer_set_base(timer, NULL);
			spin_unlock(&base->lock);
			base = new_base;
			spin_lock(&base->lock);
			timer_set_base(timer, base);
		}
	}

	timer->expires = expires;
	internal_add_timer(base, timer);
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}

EXPORT_SYMBOL(__mod_timer);

/**
 * add_timer_on - start a timer on a particular CPU
 * @timer: the timer to be added
 * @cpu: the CPU to start it on
 *
 * This is not very scalable on SMP. Double adds are not possible.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
	struct tvec_base *base = per_cpu(tvec_bases, cpu);
	unsigned long flags;

	timer_stats_timer_set_start_info(timer);
	BUG_ON(timer_pending(timer) || !timer->function);
	spin_lock_irqsave(&base->lock, flags);
	timer_set_base(timer, base);
	debug_timer_activate(timer);
	internal_add_timer(base, timer);
	/*
	 * Check whether the other CPU is idle and needs to be
	 * triggered to reevaluate the timer wheel when nohz is
	 * active. We are protected against the other CPU fiddling
	 * with the timer by holding the timer base lock. This also
	 * makes sure that a CPU on the way to idle can not evaluate
	 * the timer wheel.
	 */
	wake_up_idle_cpu(cpu);
	spin_unlock_irqrestore(&base->lock, flags);
}

/**
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer() is a more efficient way to update the expire field of an
 * active timer (if the timer is inactive it will be activated)
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * The function returns whether it has modified a pending timer or not.
 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
	BUG_ON(!timer->function);

	timer_stats_timer_set_start_info(timer);
	/*
	 * This is a common optimization triggered by the
	 * networking code - if the timer is re-modified
	 * to be the same thing then just return:
	 */
	if (timer->expires == expires && timer_pending(timer))
		return 1;

	return __mod_timer(timer, expires);
}

EXPORT_SYMBOL(mod_timer);
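
/*
 * Illustrative sketch: a watchdog-style timeout is a typical mod_timer()
 * user. Instead of the del_timer()/add_timer() sequence described above,
 * the pending timer is simply pushed further into the future each time
 * activity is seen (my_wdog_timer and MY_WDOG_TIMEOUT are made-up names):
 *
 *	static void my_note_activity(struct my_dev *dev)
 *	{
 *		mod_timer(&dev->my_wdog_timer, jiffies + MY_WDOG_TIMEOUT);
 *	}
 */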

/**
 * del_timer - deactivate a timer.
 * @timer: the timer to be deactivated
 *
 * del_timer() deactivates a timer - this works on both active and inactive
 * timers.
 *
 * The function returns whether it has deactivated a pending timer or not.
 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
 * active timer returns 1.)
 */
int del_timer(struct timer_list *timer)
{
	struct tvec_base *base;
	unsigned long flags;
	int ret = 0;

	timer_stats_timer_clear_start_info(timer);
	if (timer_pending(timer)) {
		base = lock_timer_base(timer, &flags);
		if (timer_pending(timer)) {
			detach_timer(timer, 1);
			ret = 1;
		}
		spin_unlock_irqrestore(&base->lock, flags);
	}

	return ret;
}

EXPORT_SYMBOL(del_timer);

#ifdef CONFIG_SMP
/**
 * try_to_del_timer_sync - Try to deactivate a timer
 * @timer: timer to delete
 *
 * This function tries to deactivate a timer. Upon successful (ret >= 0)
 * exit the timer is not queued and the handler is not running on any CPU.
 *
 * It must not be called from interrupt contexts.
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
	struct tvec_base *base;
	unsigned long flags;
	int ret = -1;

	base = lock_timer_base(timer, &flags);

	if (base->running_timer == timer)
		goto out;

	ret = 0;
	if (timer_pending(timer)) {
		detach_timer(timer, 1);
		ret = 1;
	}
out:
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}

EXPORT_SYMBOL(try_to_del_timer_sync);

/**
 * del_timer_sync - deactivate a timer and wait for the handler to finish.
 * @timer: the timer to be deactivated
 *
 * This function only differs from del_timer() on SMP: besides deactivating
 * the timer it also makes sure the handler has finished executing on other
 * CPUs.
 *
 * Synchronization rules: Callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts. The caller must not hold locks which would prevent
 * completion of the timer's handler. The timer's handler must not call
 * add_timer_on(). Upon exit the timer is not queued and the handler is
 * not running on any CPU.
 *
 * The function returns whether it has deactivated a pending timer or not.
 */
int del_timer_sync(struct timer_list *timer)
{
	for (;;) {
		int ret = try_to_del_timer_sync(timer);
		if (ret >= 0)
			return ret;
		cpu_relax();
	}
}

EXPORT_SYMBOL(del_timer_sync);
#endif
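
/*
 * Illustrative sketch: the usual teardown order is to stop the timer with
 * del_timer_sync() *before* freeing the object that embeds it, so that the
 * handler cannot run against freed memory. The dev/my_timer names are
 * made up, and the handler must not re-arm itself during teardown:
 *
 *	del_timer_sync(&dev->my_timer);
 *	kfree(dev);
 */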

static int cascade(struct tvec_base *base, struct tvec *tv, int index)
{
	/* cascade all the timers from tv up one level */
	struct timer_list *timer, *tmp;
	struct list_head tv_list;

	list_replace_init(tv->vec + index, &tv_list);

	/*
	 * We are removing _all_ timers from the list, so we
	 * don't have to detach them individually.
	 */
	list_for_each_entry_safe(timer, tmp, &tv_list, entry) {
		BUG_ON(tbase_get_base(timer->base) != base);
		internal_add_timer(base, timer);
	}

	return index;
}

#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)

/**
 * __run_timers - run all expired timers (if any) on this CPU.
 * @base: the timer vector to be processed.
 *
 * This function cascades all vectors and executes all expired timer
 * vectors.
 */
static inline void __run_timers(struct tvec_base *base)
{
	struct timer_list *timer;

	spin_lock_irq(&base->lock);
	while (time_after_eq(jiffies, base->timer_jiffies)) {
		struct list_head work_list;
		struct list_head *head = &work_list;
		int index = base->timer_jiffies & TVR_MASK;

		/*
		 * Cascade timers:
		 */
		if (!index &&
			(!cascade(base, &base->tv2, INDEX(0))) &&
				(!cascade(base, &base->tv3, INDEX(1))) &&
					!cascade(base, &base->tv4, INDEX(2)))
			cascade(base, &base->tv5, INDEX(3));
		++base->timer_jiffies;
		list_replace_init(base->tv1.vec + index, &work_list);
		while (!list_empty(head)) {
			void (*fn)(unsigned long);
			unsigned long data;

			timer = list_first_entry(head, struct timer_list, entry);
			fn = timer->function;
			data = timer->data;

			timer_stats_account_timer(timer);

			set_running_timer(base, timer);
			detach_timer(timer, 1);
			spin_unlock_irq(&base->lock);
			{
				int preempt_count = preempt_count();
				fn(data);
				if (preempt_count != preempt_count()) {
					printk(KERN_ERR "huh, entered %p "
					       "with preempt_count %08x, exited"
					       " with %08x?\n",
					       fn, preempt_count,
					       preempt_count());
					BUG();
				}
			}
			spin_lock_irq(&base->lock);
		}
	}
	set_running_timer(base, NULL);
	spin_unlock_irq(&base->lock);
}

#if defined(CONFIG_NO_IDLE_HZ) || defined(CONFIG_NO_HZ)
/*
 * Find out when the next timer event is due to happen. This
 * is used on S/390 to stop all activity when a CPU is idle.
 * This function needs to be called with interrupts disabled.
 */
static unsigned long __next_timer_interrupt(struct tvec_base *base)
{
	unsigned long timer_jiffies = base->timer_jiffies;
	unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA;
	int index, slot, array, found = 0;
	struct timer_list *nte;
	struct tvec *varray[4];

	/* Look for timer events in tv1. */
	index = slot = timer_jiffies & TVR_MASK;
	do {
		list_for_each_entry(nte, base->tv1.vec + slot, entry) {
			if (tbase_get_deferrable(nte->base))
				continue;

			found = 1;
			expires = nte->expires;
			/* Look at the cascade bucket(s)? */
			if (!index || slot < index)
				goto cascade;
			return expires;
		}
		slot = (slot + 1) & TVR_MASK;
	} while (slot != index);

cascade:
	/* Calculate the next cascade event */
	if (index)
		timer_jiffies += TVR_SIZE - index;
	timer_jiffies >>= TVR_BITS;

	/* Check tv2-tv5. */
	varray[0] = &base->tv2;
	varray[1] = &base->tv3;
	varray[2] = &base->tv4;
	varray[3] = &base->tv5;

	for (array = 0; array < 4; array++) {
		struct tvec *varp = varray[array];

		index = slot = timer_jiffies & TVN_MASK;
		do {
			list_for_each_entry(nte, varp->vec + slot, entry) {
				found = 1;
				if (time_before(nte->expires, expires))
					expires = nte->expires;
			}
			/*
			 * Do we still search for the first timer or are
			 * we looking up the cascade buckets ?
			 */
			if (found) {
				/* Look at the cascade bucket(s)? */
				if (!index || slot < index)
					break;
				return expires;
			}
			slot = (slot + 1) & TVN_MASK;
		} while (slot != index);

		if (index)
			timer_jiffies += TVN_SIZE - index;
		timer_jiffies >>= TVN_BITS;
	}
	return expires;
}

/*
 * Check, if the next hrtimer event is before the next timer wheel
 * event:
 */
static unsigned long cmp_next_hrtimer_event(unsigned long now,
					    unsigned long expires)
{
	ktime_t hr_delta = hrtimer_get_next_event();
	struct timespec tsdelta;
	unsigned long delta;

	if (hr_delta.tv64 == KTIME_MAX)
		return expires;

	/*
	 * Expired timer available, let it expire in the next tick
	 */
	if (hr_delta.tv64 <= 0)
		return now + 1;

	tsdelta = ktime_to_timespec(hr_delta);
	delta = timespec_to_jiffies(&tsdelta);

	/*
	 * Limit the delta to the max value, which is checked in
	 * tick_nohz_stop_sched_tick():
	 */
	if (delta > NEXT_TIMER_MAX_DELTA)
		delta = NEXT_TIMER_MAX_DELTA;

	/*
	 * Take rounding errors into account and make sure that it
	 * expires in the next tick. Otherwise we go into an endless
	 * ping pong due to tick_nohz_stop_sched_tick() retriggering
	 * the timer softirq
	 */
	if (delta < 1)
		delta = 1;
	now += delta;
	if (time_before(now, expires))
		return now;
	return expires;
}

/**
 * get_next_timer_interrupt - return the jiffy of the next pending timer
 * @now: current time (in jiffies)
 */
unsigned long get_next_timer_interrupt(unsigned long now)
{
	struct tvec_base *base = __get_cpu_var(tvec_bases);
	unsigned long expires;

	spin_lock(&base->lock);
	expires = __next_timer_interrupt(base);
	spin_unlock(&base->lock);

	if (time_before_eq(expires, now))
		return now;

	return cmp_next_hrtimer_event(now, expires);
}

#ifdef CONFIG_NO_IDLE_HZ
unsigned long next_timer_interrupt(void)
{
	return get_next_timer_interrupt(jiffies);
}
#endif

#endif

#ifndef CONFIG_VIRT_CPU_ACCOUNTING
void account_process_tick(struct task_struct *p, int user_tick)
{
	cputime_t one_jiffy = jiffies_to_cputime(1);

	if (user_tick) {
		account_user_time(p, one_jiffy);
		account_user_time_scaled(p, cputime_to_scaled(one_jiffy));
	} else {
		account_system_time(p, HARDIRQ_OFFSET, one_jiffy);
		account_system_time_scaled(p, cputime_to_scaled(one_jiffy));
	}
}
#endif

/*
 * Called from the timer interrupt handler to charge one tick to the current
 * process. user_tick is 1 if the tick is user time, 0 for system.
 */
void update_process_times(int user_tick)
{
	struct task_struct *p = current;
	int cpu = smp_processor_id();

	/* Note: this timer irq context must be accounted for as well. */
	account_process_tick(p, user_tick);
	run_local_timers();
	if (rcu_pending(cpu))
		rcu_check_callbacks(cpu, user_tick);
	scheduler_tick();
	run_posix_cpu_timers(p);
}

/*
 * Nr of active tasks - counted in fixed-point numbers
 */
static unsigned long count_active_tasks(void)
{
	return nr_active() * FIXED_1;
}

/*
 * Hmm.. Changed this, as the GNU make sources (load.c) seem to
 * imply that avenrun[] is the standard name for this kind of thing.
 * Nothing else seems to be standardized: the fractional size etc
 * all seem to differ on different machines.
 *
 * Requires xtime_lock to access.
 */
unsigned long avenrun[3];

EXPORT_SYMBOL(avenrun);

/*
 * calc_load - given tick count, update the avenrun load estimates.
 * This is called while holding a write_lock on xtime_lock.
 */
static inline void calc_load(unsigned long ticks)
{
	unsigned long active_tasks; /* fixed-point */
	static int count = LOAD_FREQ;

	count -= ticks;
	if (unlikely(count < 0)) {
		active_tasks = count_active_tasks();
		do {
			CALC_LOAD(avenrun[0], EXP_1, active_tasks);
			CALC_LOAD(avenrun[1], EXP_5, active_tasks);
			CALC_LOAD(avenrun[2], EXP_15, active_tasks);
			count += LOAD_FREQ;
		} while (count < 0);
	}
}

/*
 * This function runs timers and the timer-tq in bottom half context.
 */
static void run_timer_softirq(struct softirq_action *h)
{
	struct tvec_base *base = __get_cpu_var(tvec_bases);

	hrtimer_run_pending();

	if (time_after_eq(jiffies, base->timer_jiffies))
		__run_timers(base);
}

/*
 * Called by the local, per-CPU timer interrupt on SMP.
 */
void run_local_timers(void)
{
	hrtimer_run_queues();
	raise_softirq(TIMER_SOFTIRQ);
	softlockup_tick();
}

/*
 * Called by the timer interrupt. xtime_lock must already be taken
 * by the timer IRQ!
 */
static inline void update_times(unsigned long ticks)
{
	update_wall_time();
	calc_load(ticks);
}

/*
 * The 64-bit jiffies value is not atomic - you MUST NOT read it
 * without sampling the sequence number in xtime_lock.
 * jiffies is defined in the linker script...
 */

void do_timer(unsigned long ticks)
{
	jiffies_64 += ticks;
	update_times(ticks);
}

#ifdef __ARCH_WANT_SYS_ALARM

/*
 * For backwards compatibility? This can be done in libc so Alpha
 * and all newer ports shouldn't need it.
 */
asmlinkage unsigned long sys_alarm(unsigned int seconds)
{
	return alarm_setitimer(seconds);
}

#endif

#ifndef __alpha__

/*
 * The Alpha uses getxpid, getxuid, and getxgid instead. Maybe this
 * should be moved into arch/i386 instead?
 */

/**
 * sys_getpid - return the thread group id of the current process
 *
 * Note, despite the name, this returns the tgid not the pid. The tgid and
 * the pid are identical unless CLONE_THREAD was specified on clone() in
 * which case the tgid is the same in all threads of the same group.
 *
 * This is SMP safe as current->tgid does not change.
 */
asmlinkage long sys_getpid(void)
{
	return task_tgid_vnr(current);
}

/*
 * Accessing ->real_parent is not SMP-safe, it could
 * change from under us. However, we can use a stale
 * value of ->real_parent under rcu_read_lock(), see
 * release_task()->call_rcu(delayed_put_task_struct).
 */
asmlinkage long sys_getppid(void)
{
	int pid;

	rcu_read_lock();
	pid = task_tgid_vnr(current->real_parent);
	rcu_read_unlock();

	return pid;
}

asmlinkage long sys_getuid(void)
{
	/* Only we change this so SMP safe */
	return current->uid;
}

asmlinkage long sys_geteuid(void)
{
	/* Only we change this so SMP safe */
	return current->euid;
}

asmlinkage long sys_getgid(void)
{
	/* Only we change this so SMP safe */
	return current->gid;
}

asmlinkage long sys_getegid(void)
{
	/* Only we change this so SMP safe */
	return current->egid;
}

#endif

static void process_timeout(unsigned long __data)
{
	wake_up_process((struct task_struct *)__data);
}

/**
 * schedule_timeout - sleep until timeout
 * @timeout: timeout value in jiffies
 *
 * Make the current task sleep until @timeout jiffies have
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
 * pass before the routine returns. The routine will return 0
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task. In this case the remaining time
 * in jiffies will be returned, or 0 if the timer expired in time
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
 * the CPU away without a bound on the timeout. In this case the return
 * value will be %MAX_SCHEDULE_TIMEOUT.
 *
 * In all cases the return value is guaranteed to be non-negative.
 */
signed long __sched schedule_timeout(signed long timeout)
{
	struct timer_list timer;
	unsigned long expire;

	switch (timeout)
	{
	case MAX_SCHEDULE_TIMEOUT:
		/*
		 * These two special cases are useful to be comfortable
		 * in the caller. Nothing more. We could take
		 * MAX_SCHEDULE_TIMEOUT from one of the negative values
		 * but I'd like to return a valid offset (>=0) to allow
		 * the caller to do everything it wants with the retval.
		 */
		schedule();
		goto out;
	default:
		/*
		 * Another bit of PARANOID. Note that the retval will be
		 * 0 since no piece of kernel is supposed to do a check
		 * for a negative retval of schedule_timeout() (since it
		 * should never happen anyway). You just have the printk()
		 * that will tell you if something went wrong and where.
		 */
		if (timeout < 0) {
			printk(KERN_ERR "schedule_timeout: wrong timeout "
				"value %lx\n", timeout);
			dump_stack();
			current->state = TASK_RUNNING;
			goto out;
		}
	}

	expire = timeout + jiffies;

	setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
	__mod_timer(&timer, expire);
	schedule();
	del_singleshot_timer_sync(&timer);

	/* Remove the timer from the object tracker */
	destroy_timer_on_stack(&timer);

	timeout = expire - jiffies;

 out:
	return timeout < 0 ? 0 : timeout;
}
EXPORT_SYMBOL(schedule_timeout);
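
/*
 * Illustrative sketch: to sleep for roughly 100ms while still reacting to
 * signals, a caller sets the task state first and then calls
 * schedule_timeout(); calling schedule_timeout() without setting the state
 * returns immediately:
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(msecs_to_jiffies(100));
 */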
1237
Andrew Morton8a1c1752005-09-13 01:25:15 -07001238/*
1239 * We can use __set_current_state() here because schedule_timeout() calls
1240 * schedule() unconditionally.
1241 */
Nishanth Aravamudan64ed93a2005-09-10 00:27:21 -07001242signed long __sched schedule_timeout_interruptible(signed long timeout)
1243{
Andrew Mortona5a0d522005-10-30 15:01:42 -08001244 __set_current_state(TASK_INTERRUPTIBLE);
1245 return schedule_timeout(timeout);
Nishanth Aravamudan64ed93a2005-09-10 00:27:21 -07001246}
1247EXPORT_SYMBOL(schedule_timeout_interruptible);
1248
Matthew Wilcox294d5cc2007-12-06 11:59:46 -05001249signed long __sched schedule_timeout_killable(signed long timeout)
1250{
1251 __set_current_state(TASK_KILLABLE);
1252 return schedule_timeout(timeout);
1253}
1254EXPORT_SYMBOL(schedule_timeout_killable);
1255
Nishanth Aravamudan64ed93a2005-09-10 00:27:21 -07001256signed long __sched schedule_timeout_uninterruptible(signed long timeout)
1257{
Andrew Mortona5a0d522005-10-30 15:01:42 -08001258 __set_current_state(TASK_UNINTERRUPTIBLE);
1259 return schedule_timeout(timeout);
Nishanth Aravamudan64ed93a2005-09-10 00:27:21 -07001260}
1261EXPORT_SYMBOL(schedule_timeout_uninterruptible);
1262
Linus Torvalds1da177e2005-04-16 15:20:36 -07001263/* Thread ID - the internal kernel "pid" */
1264asmlinkage long sys_gettid(void)
1265{
Pavel Emelyanovb4888932007-10-18 23:40:14 -07001266 return task_pid_vnr(current);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001267}
1268
Rolf Eike Beer2aae4a12006-09-29 01:59:46 -07001269/**
Kyle McMartind4d23ad2007-02-10 01:46:00 -08001270 * do_sysinfo - fill in sysinfo struct
Rolf Eike Beer2aae4a12006-09-29 01:59:46 -07001271 * @info: pointer to buffer to fill
Thomas Gleixner68194572007-07-19 01:49:16 -07001272 */
Kyle McMartind4d23ad2007-02-10 01:46:00 -08001273int do_sysinfo(struct sysinfo *info)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001274{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001275 unsigned long mem_total, sav_total;
1276 unsigned int mem_unit, bitcount;
1277 unsigned long seq;
1278
Kyle McMartind4d23ad2007-02-10 01:46:00 -08001279 memset(info, 0, sizeof(struct sysinfo));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001280
1281 do {
1282 struct timespec tp;
1283 seq = read_seqbegin(&xtime_lock);
1284
1285 /*
1286 * This is annoying. The below is the same thing
1287 * posix_get_clock_monotonic() does, but it wants to
1288 * take the lock which we want to cover the loads stuff
1289 * too.
1290 */
1291
1292 getnstimeofday(&tp);
1293 tp.tv_sec += wall_to_monotonic.tv_sec;
1294 tp.tv_nsec += wall_to_monotonic.tv_nsec;
Tomas Janousekd6214142007-07-15 23:39:42 -07001295 monotonic_to_bootbased(&tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001296 if (tp.tv_nsec - NSEC_PER_SEC >= 0) {
1297 tp.tv_nsec = tp.tv_nsec - NSEC_PER_SEC;
1298 tp.tv_sec++;
1299 }
Kyle McMartind4d23ad2007-02-10 01:46:00 -08001300 info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001301
Kyle McMartind4d23ad2007-02-10 01:46:00 -08001302 info->loads[0] = avenrun[0] << (SI_LOAD_SHIFT - FSHIFT);
1303 info->loads[1] = avenrun[1] << (SI_LOAD_SHIFT - FSHIFT);
1304 info->loads[2] = avenrun[2] << (SI_LOAD_SHIFT - FSHIFT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001305
Kyle McMartind4d23ad2007-02-10 01:46:00 -08001306 info->procs = nr_threads;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001307 } while (read_seqretry(&xtime_lock, seq));
1308
Kyle McMartind4d23ad2007-02-10 01:46:00 -08001309 si_meminfo(info);
1310 si_swapinfo(info);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001311
1312 /*
1313 * If the sum of all the available memory (i.e. ram + swap)
1314 * is less than can be stored in a 32 bit unsigned long then
1315 * we can be binary compatible with 2.2.x kernels. If not,
1316 * well, in that case 2.2.x was broken anyways...
1317 *
1318 * -Erik Andersen <andersee@debian.org>
1319 */
1320
Kyle McMartind4d23ad2007-02-10 01:46:00 -08001321 mem_total = info->totalram + info->totalswap;
1322 if (mem_total < info->totalram || mem_total < info->totalswap)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001323 goto out;
        bitcount = 0;
        mem_unit = info->mem_unit;
        while (mem_unit > 1) {
                bitcount++;
                mem_unit >>= 1;
                sav_total = mem_total;
                mem_total <<= 1;
                if (mem_total < sav_total)
                        goto out;
        }

        /*
         * If mem_total did not overflow, multiply all memory values by
         * info->mem_unit and set it to 1.  This leaves things compatible
         * with 2.2.x, and also retains compatibility with earlier 2.4.x
         * kernels...
         */

        info->mem_unit = 1;
        info->totalram <<= bitcount;
        info->freeram <<= bitcount;
        info->sharedram <<= bitcount;
        info->bufferram <<= bitcount;
        info->totalswap <<= bitcount;
        info->freeswap <<= bitcount;
        info->totalhigh <<= bitcount;
        info->freehigh <<= bitcount;

out:
        return 0;
}

asmlinkage long sys_sysinfo(struct sysinfo __user *info)
{
        struct sysinfo val;

        do_sysinfo(&val);

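        /* Copy the snapshot to userspace; a bad user pointer yields -EFAULT. */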
        if (copy_to_user(info, &val, sizeof(struct sysinfo)))
                return -EFAULT;

        return 0;
}

static int __cpuinit init_timers_cpu(int cpu)
{
        int j;
        struct tvec_base *base;
        static char __cpuinitdata tvec_base_done[NR_CPUS];

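        /*
         * tvec_base_done[] remembers which CPUs already have a timer base,
         * so a CPU that is taken offline and brought back online reuses the
         * base it was given the first time instead of allocating a new one.
         */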
        if (!tvec_base_done[cpu]) {
                static char boot_done;

                if (boot_done) {
                        /*
                         * The APs (secondary CPUs) use this path later in boot
                         */
                        base = kmalloc_node(sizeof(*base),
                                                GFP_KERNEL | __GFP_ZERO,
                                                cpu_to_node(cpu));
                        if (!base)
                                return -ENOMEM;

                        /* Make sure that tvec_base is 2 byte aligned */
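                        /*
                         * The lowest bit of a timer's base pointer is reused
                         * as the "deferrable" flag, so the allocation must
                         * not have bit 0 set; if it does, bail out rather
                         * than silently corrupt that encoding.
                         */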
                        if (tbase_get_deferrable(base)) {
                                WARN_ON(1);
                                kfree(base);
                                return -ENOMEM;
                        }
                        per_cpu(tvec_bases, cpu) = base;
                } else {
                        /*
                         * This is for the boot CPU - we use compile-time
                         * static initialisation because per-cpu memory isn't
                         * ready yet and because the memory allocators are not
                         * initialised either.
                         */
                        boot_done = 1;
                        base = &boot_tvec_bases;
                }
                tvec_base_done[cpu] = 1;
        } else {
                base = per_cpu(tvec_bases, cpu);
        }

        spin_lock_init(&base->lock);

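        /*
         * tv1 is the root vector (TVR_SIZE slots, one per jiffy); tv2..tv5
         * are the coarser cascade levels with TVN_SIZE slots each.
         */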
        for (j = 0; j < TVN_SIZE; j++) {
                INIT_LIST_HEAD(base->tv5.vec + j);
                INIT_LIST_HEAD(base->tv4.vec + j);
                INIT_LIST_HEAD(base->tv3.vec + j);
                INIT_LIST_HEAD(base->tv2.vec + j);
        }
        for (j = 0; j < TVR_SIZE; j++)
                INIT_LIST_HEAD(base->tv1.vec + j);

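        /*
         * timer_jiffies is the next jiffy whose timers this base still has
         * to run; start it at the current time so the new base owes no
         * past work.
         */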
        base->timer_jiffies = jiffies;
        return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static void migrate_timer_list(struct tvec_base *new_base, struct list_head *head)
{
        struct timer_list *timer;

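        /*
         * Detach each timer and re-add it through internal_add_timer() so it
         * gets hashed against the new base's timer_jiffies rather than being
         * spliced into an arbitrary slot.
         */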
        while (!list_empty(head)) {
                timer = list_first_entry(head, struct timer_list, entry);
                detach_timer(timer, 0);
                timer_set_base(timer, new_base);
                internal_add_timer(new_base, timer);
        }
}

static void __cpuinit migrate_timers(int cpu)
{
        struct tvec_base *old_base;
        struct tvec_base *new_base;
        int i;

        BUG_ON(cpu_online(cpu));
        old_base = per_cpu(tvec_bases, cpu);
        new_base = get_cpu_var(tvec_bases);

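        /*
         * Take both base locks with interrupts off.  Both locks belong to
         * the same lock class, so the inner one is taken with
         * SINGLE_DEPTH_NESTING to keep lockdep from reporting a false
         * recursion.
         */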
        local_irq_disable();
        spin_lock(&new_base->lock);
        spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

        BUG_ON(old_base->running_timer);

        for (i = 0; i < TVR_SIZE; i++)
                migrate_timer_list(new_base, old_base->tv1.vec + i);
        for (i = 0; i < TVN_SIZE; i++) {
                migrate_timer_list(new_base, old_base->tv2.vec + i);
                migrate_timer_list(new_base, old_base->tv3.vec + i);
                migrate_timer_list(new_base, old_base->tv4.vec + i);
                migrate_timer_list(new_base, old_base->tv5.vec + i);
        }

        spin_unlock(&old_base->lock);
        spin_unlock(&new_base->lock);
        local_irq_enable();
        put_cpu_var(tvec_bases);
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __cpuinit timer_cpu_notify(struct notifier_block *self,
                                unsigned long action, void *hcpu)
{
        long cpu = (long)hcpu;
        switch(action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                if (init_timers_cpu(cpu) < 0)
                        return NOTIFY_BAD;
                break;
#ifdef CONFIG_HOTPLUG_CPU
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                migrate_timers(cpu);
                break;
#endif
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata timers_nb = {
        .notifier_call = timer_cpu_notify,
};

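/*
 * Boot-time setup: the boot CPU's timer base is initialised by calling the
 * notifier directly, then the CPU-hotplug notifier is registered for later
 * CPUs and the timer softirq handler is wired up.
 */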
void __init init_timers(void)
{
        int err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE,
                                (void *)(long)smp_processor_id());

        init_timer_stats();

        BUG_ON(err == NOTIFY_BAD);
        register_cpu_notifier(&timers_nb);
        open_softirq(TIMER_SOFTIRQ, run_timer_softirq, NULL);
}

/**
 * msleep - sleep safely even with waitqueue interruptions
 * @msecs: Time in milliseconds to sleep for
 */
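/*
 * The timeout is converted to jiffies and bumped by one so that, with the
 * coarse jiffy granularity, the sleep lasts at least msecs.  Unlike
 * msleep_interruptible(), pending signals are ignored; e.g. msleep(20)
 * sleeps for at least 20ms.
 */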
void msleep(unsigned int msecs)
{
        unsigned long timeout = msecs_to_jiffies(msecs) + 1;

        while (timeout)
                timeout = schedule_timeout_uninterruptible(timeout);
}

EXPORT_SYMBOL(msleep);

/**
 * msleep_interruptible - sleep waiting for signals
 * @msecs: Time in milliseconds to sleep for
 */
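/*
 * Like msleep(), but the sleep is cut short if a signal becomes pending.
 * The return value is the remaining time in milliseconds (0 if the full
 * period elapsed before a signal arrived).
 */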
unsigned long msleep_interruptible(unsigned int msecs)
{
        unsigned long timeout = msecs_to_jiffies(msecs) + 1;

        while (timeout && !signal_pending(current))
                timeout = schedule_timeout_interruptible(timeout);
        return jiffies_to_msecs(timeout);
}

EXPORT_SYMBOL(msleep_interruptible);