/*
 * linux/kernel/timer.c
 *
 * Kernel internal timers, basic process system calls
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * 1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
 *
 * 1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *             "A Kernel Model for Precision Timekeeping" by Dave Mills
 * 1998-12-24  Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
 *             serialize accesses to xtime/lost_ticks).
 *                             Copyright (C) 1998 Andrea Arcangeli
 * 1999-03-10  Improved NTP compatibility by Ulrich Windl
 * 2002-05-31  Move sys_sysinfo here and make its locking sane, Robert Love
 * 2000-10-05  Implemented scalable SMP per-CPU timer handling.
 *                             Copyright (C) 2000, 2001, 2002 Ingo Molnar
 *             Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 */

#include <linux/kernel_stat.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pid_namespace.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/tick.h>
#include <linux/kallsyms.h>
#include <linux/irq_work.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>

#define CREATE_TRACE_POINTS
#include <trace/events/timer.h>

u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

/*
 * per-CPU timer vector definitions:
 */
#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)
#define MAX_TVAL ((unsigned long)((1ULL << (TVR_BITS + 4*TVN_BITS)) - 1))

struct tvec {
	struct list_head vec[TVN_SIZE];
};

struct tvec_root {
	struct list_head vec[TVR_SIZE];
};

struct tvec_base {
	spinlock_t lock;
	struct timer_list *running_timer;
	unsigned long timer_jiffies;
	unsigned long next_timer;
	unsigned long active_timers;
	struct tvec_root tv1;
	struct tvec tv2;
	struct tvec tv3;
	struct tvec tv4;
	struct tvec tv5;
} ____cacheline_aligned;

struct tvec_base boot_tvec_bases;
EXPORT_SYMBOL(boot_tvec_bases);
static DEFINE_PER_CPU(struct tvec_base *, tvec_bases) = &boot_tvec_bases;

/* Functions below help us manage 'deferrable' flag */
static inline unsigned int tbase_get_deferrable(struct tvec_base *base)
{
	return ((unsigned int)(unsigned long)base & TIMER_DEFERRABLE);
}

static inline unsigned int tbase_get_irqsafe(struct tvec_base *base)
{
	return ((unsigned int)(unsigned long)base & TIMER_IRQSAFE);
}

static inline struct tvec_base *tbase_get_base(struct tvec_base *base)
{
	return ((struct tvec_base *)((unsigned long)base & ~TIMER_FLAG_MASK));
}

static inline void
timer_set_base(struct timer_list *timer, struct tvec_base *new_base)
{
	unsigned long flags = (unsigned long)timer->base & TIMER_FLAG_MASK;

	timer->base = (struct tvec_base *)((unsigned long)(new_base) | flags);
}

static unsigned long round_jiffies_common(unsigned long j, int cpu,
		bool force_up)
{
	int rem;
	unsigned long original = j;

	/*
	 * We don't want all cpus firing their timers at once hitting the
	 * same lock or cachelines, so we skew each extra cpu with an extra
	 * 3 jiffies. This 3 jiffies came originally from the mm/ code which
	 * already did this.
	 * The skew is done by adding 3*cpunr, then round, then subtract this
	 * extra offset again.
	 */
	j += cpu * 3;

	rem = j % HZ;

	/*
	 * If the target jiffy is just after a whole second (which can happen
	 * due to delays of the timer irq, long irq off times etc etc) then
	 * we should round down to the whole second, not up. Use 1/4th second
	 * as cutoff for this rounding as an extreme upper bound for this.
	 * But never round down if @force_up is set.
	 */
	if (rem < HZ/4 && !force_up) /* round down */
		j = j - rem;
	else /* round up */
		j = j - rem + HZ;

	/* now that we have rounded, subtract the extra skew again */
	j -= cpu * 3;

	if (j <= jiffies) /* rounding ate our timeout entirely; */
		return original;
	return j;
}

/**
 * __round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, false);
}
EXPORT_SYMBOL_GPL(__round_jiffies);

/**
 * __round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, false) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_relative);

/**
 * round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), false);
}
EXPORT_SYMBOL_GPL(round_jiffies);

/**
 * round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies_relative(unsigned long j)
{
	return __round_jiffies_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_relative);
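
/*
 * Illustrative usage sketch (not part of the original file; the timer and
 * callback names are hypothetical): a periodic housekeeping timer that only
 * needs roughly one-second resolution can round its timeout so that it
 * expires together with other rounded timers and the CPU wakes up less:
 *
 *	static struct timer_list hk_timer;
 *
 *	static void hk_timer_fn(unsigned long data)
 *	{
 *		do_housekeeping();
 *		mod_timer(&hk_timer,
 *			  jiffies + round_jiffies_relative(5 * HZ));
 *	}
 */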
251
/**
 * __round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, true);
}
EXPORT_SYMBOL_GPL(__round_jiffies_up);

/**
 * __round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies_relative() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, true) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);

/**
 * round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * This is the same as round_jiffies() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), true);
}
EXPORT_SYMBOL_GPL(round_jiffies_up);

/**
 * round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * This is the same as round_jiffies_relative() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up_relative(unsigned long j)
{
	return __round_jiffies_up_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_up_relative);
316
/**
 * set_timer_slack - set the allowed slack for a timer
 * @timer: the timer to be modified
 * @slack_hz: the amount of time (in jiffies) allowed for rounding
 *
 * Set the amount of time, in jiffies, that a certain timer has
 * in terms of slack. By setting this value, the timer subsystem
 * will schedule the actual timer somewhere between
 * the time mod_timer() asks for, and that time plus the slack.
 *
 * By setting the slack to -1, a percentage of the delay is used
 * instead.
 */
void set_timer_slack(struct timer_list *timer, int slack_hz)
{
	timer->slack = slack_hz;
}
EXPORT_SYMBOL_GPL(set_timer_slack);
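
/*
 * Illustrative usage sketch (not part of the original file; names are
 * hypothetical): a timer that fires every second but can tolerate up to a
 * quarter second of extra delay can advertise that slack, giving the timer
 * subsystem more freedom to coalesce expirations:
 *
 *	setup_timer(&poll_timer, poll_fn, 0);
 *	set_timer_slack(&poll_timer, HZ / 4);
 *	mod_timer(&poll_timer, jiffies + HZ);
 */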
335
static void
__internal_add_timer(struct tvec_base *base, struct timer_list *timer)
{
	unsigned long expires = timer->expires;
	unsigned long idx = expires - base->timer_jiffies;
	struct list_head *vec;

	if (idx < TVR_SIZE) {
		int i = expires & TVR_MASK;
		vec = base->tv1.vec + i;
	} else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
		int i = (expires >> TVR_BITS) & TVN_MASK;
		vec = base->tv2.vec + i;
	} else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
		vec = base->tv3.vec + i;
	} else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
		vec = base->tv4.vec + i;
	} else if ((signed long) idx < 0) {
		/*
		 * Can happen if you add a timer with expires == jiffies,
		 * or you set a timer to go off in the past
		 */
		vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
	} else {
		int i;
		/* If the timeout is larger than MAX_TVAL (on 64-bit
		 * architectures or with CONFIG_BASE_SMALL=1) then we
		 * use the maximum timeout.
		 */
		if (idx > MAX_TVAL) {
			idx = MAX_TVAL;
			expires = idx + base->timer_jiffies;
		}
		i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
		vec = base->tv5.vec + i;
	}
	/*
	 * Timers are FIFO:
	 */
	list_add_tail(&timer->entry, vec);
}

static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
{
	__internal_add_timer(base, timer);
	/*
	 * Update base->active_timers and base->next_timer
	 */
	if (!tbase_get_deferrable(timer->base)) {
		if (time_before(timer->expires, base->next_timer))
			base->next_timer = timer->expires;
		base->active_timers++;
	}
}
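
/*
 * Worked example of the bucket selection above (assuming the usual
 * CONFIG_BASE_SMALL=0 values, TVR_BITS=8 and TVN_BITS=6): a timer whose
 * expiry lies idx = expires - base->timer_jiffies = 300 ticks in the
 * future does not fit in tv1 (idx >= 256) but is below
 * 1 << (8 + 6) = 16384, so it is queued in tv2 at slot
 * (expires >> 8) & 63.  It is cascaded back towards tv1 as
 * base->timer_jiffies advances.
 */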
392
#ifdef CONFIG_TIMER_STATS
void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
{
	if (timer->start_site)
		return;

	timer->start_site = addr;
	memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
	timer->start_pid = current->pid;
}

static void timer_stats_account_timer(struct timer_list *timer)
{
	unsigned int flag = 0;

	if (likely(!timer->start_site))
		return;
	if (unlikely(tbase_get_deferrable(timer->base)))
		flag |= TIMER_STATS_FLAG_DEFERRABLE;

	timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
				 timer->function, timer->start_comm, flag);
}

#else
static void timer_stats_account_timer(struct timer_list *timer) {}
#endif
420
Thomas Gleixnerc6f3a972008-04-30 00:55:03 -0700421#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
422
423static struct debug_obj_descr timer_debug_descr;
424
Stanislaw Gruszka99777282011-03-07 09:58:33 +0100425static void *timer_debug_hint(void *addr)
426{
427 return ((struct timer_list *) addr)->function;
428}
429
Thomas Gleixnerc6f3a972008-04-30 00:55:03 -0700430/*
431 * fixup_init is called when:
432 * - an active object is initialized
433 */
434static int timer_fixup_init(void *addr, enum debug_obj_state state)
435{
436 struct timer_list *timer = addr;
437
438 switch (state) {
439 case ODEBUG_STATE_ACTIVE:
440 del_timer_sync(timer);
441 debug_object_init(timer, &timer_debug_descr);
442 return 1;
443 default:
444 return 0;
445 }
446}
447
Stephen Boydfb16b8c2011-11-07 19:48:26 -0800448/* Stub timer callback for improperly used timers. */
449static void stub_timer(unsigned long data)
450{
451 WARN_ON(1);
452}
453
Thomas Gleixnerc6f3a972008-04-30 00:55:03 -0700454/*
455 * fixup_activate is called when:
456 * - an active object is activated
457 * - an unknown object is activated (might be a statically initialized object)
458 */
459static int timer_fixup_activate(void *addr, enum debug_obj_state state)
460{
461 struct timer_list *timer = addr;
462
463 switch (state) {
464
465 case ODEBUG_STATE_NOTAVAILABLE:
466 /*
467 * This is not really a fixup. The timer was
468 * statically initialized. We just make sure that it
469 * is tracked in the object tracker.
470 */
471 if (timer->entry.next == NULL &&
472 timer->entry.prev == TIMER_ENTRY_STATIC) {
473 debug_object_init(timer, &timer_debug_descr);
474 debug_object_activate(timer, &timer_debug_descr);
475 return 0;
476 } else {
Stephen Boydfb16b8c2011-11-07 19:48:26 -0800477 setup_timer(timer, stub_timer, 0);
478 return 1;
Thomas Gleixnerc6f3a972008-04-30 00:55:03 -0700479 }
480 return 0;
481
482 case ODEBUG_STATE_ACTIVE:
483 WARN_ON(1);
484
485 default:
486 return 0;
487 }
488}
489
490/*
491 * fixup_free is called when:
492 * - an active object is freed
493 */
494static int timer_fixup_free(void *addr, enum debug_obj_state state)
495{
496 struct timer_list *timer = addr;
497
498 switch (state) {
499 case ODEBUG_STATE_ACTIVE:
500 del_timer_sync(timer);
501 debug_object_free(timer, &timer_debug_descr);
502 return 1;
503 default:
504 return 0;
505 }
506}
507
Christine Chandc4218b2011-11-07 19:48:28 -0800508/*
509 * fixup_assert_init is called when:
510 * - an untracked/uninit-ed object is found
511 */
512static int timer_fixup_assert_init(void *addr, enum debug_obj_state state)
513{
514 struct timer_list *timer = addr;
515
516 switch (state) {
517 case ODEBUG_STATE_NOTAVAILABLE:
518 if (timer->entry.prev == TIMER_ENTRY_STATIC) {
519 /*
520 * This is not really a fixup. The timer was
521 * statically initialized. We just make sure that it
522 * is tracked in the object tracker.
523 */
524 debug_object_init(timer, &timer_debug_descr);
525 return 0;
526 } else {
527 setup_timer(timer, stub_timer, 0);
528 return 1;
529 }
530 default:
531 return 0;
532 }
533}
534
Thomas Gleixnerc6f3a972008-04-30 00:55:03 -0700535static struct debug_obj_descr timer_debug_descr = {
Christine Chandc4218b2011-11-07 19:48:28 -0800536 .name = "timer_list",
537 .debug_hint = timer_debug_hint,
538 .fixup_init = timer_fixup_init,
539 .fixup_activate = timer_fixup_activate,
540 .fixup_free = timer_fixup_free,
541 .fixup_assert_init = timer_fixup_assert_init,
Thomas Gleixnerc6f3a972008-04-30 00:55:03 -0700542};
543
544static inline void debug_timer_init(struct timer_list *timer)
545{
546 debug_object_init(timer, &timer_debug_descr);
547}
548
549static inline void debug_timer_activate(struct timer_list *timer)
550{
551 debug_object_activate(timer, &timer_debug_descr);
552}
553
554static inline void debug_timer_deactivate(struct timer_list *timer)
555{
556 debug_object_deactivate(timer, &timer_debug_descr);
557}
558
559static inline void debug_timer_free(struct timer_list *timer)
560{
561 debug_object_free(timer, &timer_debug_descr);
562}
563
Christine Chandc4218b2011-11-07 19:48:28 -0800564static inline void debug_timer_assert_init(struct timer_list *timer)
565{
566 debug_object_assert_init(timer, &timer_debug_descr);
567}
568
Tejun Heofc683992012-08-08 11:10:27 -0700569static void do_init_timer(struct timer_list *timer, unsigned int flags,
570 const char *name, struct lock_class_key *key);
Thomas Gleixnerc6f3a972008-04-30 00:55:03 -0700571
Tejun Heofc683992012-08-08 11:10:27 -0700572void init_timer_on_stack_key(struct timer_list *timer, unsigned int flags,
573 const char *name, struct lock_class_key *key)
Thomas Gleixnerc6f3a972008-04-30 00:55:03 -0700574{
575 debug_object_init_on_stack(timer, &timer_debug_descr);
Tejun Heofc683992012-08-08 11:10:27 -0700576 do_init_timer(timer, flags, name, key);
Thomas Gleixnerc6f3a972008-04-30 00:55:03 -0700577}
Johannes Berg6f2b9b92009-01-29 16:03:20 +0100578EXPORT_SYMBOL_GPL(init_timer_on_stack_key);
Thomas Gleixnerc6f3a972008-04-30 00:55:03 -0700579
580void destroy_timer_on_stack(struct timer_list *timer)
581{
582 debug_object_free(timer, &timer_debug_descr);
583}
584EXPORT_SYMBOL_GPL(destroy_timer_on_stack);
585
586#else
587static inline void debug_timer_init(struct timer_list *timer) { }
588static inline void debug_timer_activate(struct timer_list *timer) { }
589static inline void debug_timer_deactivate(struct timer_list *timer) { }
Christine Chandc4218b2011-11-07 19:48:28 -0800590static inline void debug_timer_assert_init(struct timer_list *timer) { }
Thomas Gleixnerc6f3a972008-04-30 00:55:03 -0700591#endif
592
Xiao Guangrong2b022e32009-08-10 10:48:59 +0800593static inline void debug_init(struct timer_list *timer)
594{
595 debug_timer_init(timer);
596 trace_timer_init(timer);
597}
598
599static inline void
600debug_activate(struct timer_list *timer, unsigned long expires)
601{
602 debug_timer_activate(timer);
603 trace_timer_start(timer, expires);
604}
605
606static inline void debug_deactivate(struct timer_list *timer)
607{
608 debug_timer_deactivate(timer);
609 trace_timer_cancel(timer);
610}
611
Christine Chandc4218b2011-11-07 19:48:28 -0800612static inline void debug_assert_init(struct timer_list *timer)
613{
614 debug_timer_assert_init(timer);
615}
616
Tejun Heofc683992012-08-08 11:10:27 -0700617static void do_init_timer(struct timer_list *timer, unsigned int flags,
618 const char *name, struct lock_class_key *key)
Thomas Gleixnerc6f3a972008-04-30 00:55:03 -0700619{
Tejun Heofc683992012-08-08 11:10:27 -0700620 struct tvec_base *base = __raw_get_cpu_var(tvec_bases);
621
Thomas Gleixnerc6f3a972008-04-30 00:55:03 -0700622 timer->entry.next = NULL;
Tejun Heofc683992012-08-08 11:10:27 -0700623 timer->base = (void *)((unsigned long)base | flags);
Arjan van de Ven3bbb9ec2010-03-11 14:04:36 -0800624 timer->slack = -1;
Thomas Gleixnerc6f3a972008-04-30 00:55:03 -0700625#ifdef CONFIG_TIMER_STATS
626 timer->start_site = NULL;
627 timer->start_pid = -1;
628 memset(timer->start_comm, 0, TASK_COMM_LEN);
629#endif
Johannes Berg6f2b9b92009-01-29 16:03:20 +0100630 lockdep_init_map(&timer->lockdep_map, name, key, 0);
Thomas Gleixnerc6f3a972008-04-30 00:55:03 -0700631}
632
/**
 * init_timer_key - initialize a timer
 * @timer: the timer to be initialized
 * @flags: timer flags
 * @name: name of the timer
 * @key: lockdep class key of the fake lock used for tracking timer
 *       sync lock dependencies
 *
 * init_timer_key() must be done to a timer prior to calling *any* of the
 * other timer functions.
 */
void init_timer_key(struct timer_list *timer, unsigned int flags,
		    const char *name, struct lock_class_key *key)
{
	debug_init(timer);
	do_init_timer(timer, flags, name, key);
}
EXPORT_SYMBOL(init_timer_key);
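
/*
 * Illustrative usage sketch (not part of the original file; names are
 * hypothetical): most users reach init_timer_key() through the
 * setup_timer()/init_timer() wrappers rather than calling it directly.
 * A typical sequence that initializes and arms a timer is:
 *
 *	struct my_dev *dev = ...;
 *
 *	setup_timer(&dev->watchdog, my_watchdog_fn, (unsigned long)dev);
 *	mod_timer(&dev->watchdog, jiffies + msecs_to_jiffies(500));
 */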
Oleg Nesterov55c888d2005-06-23 00:08:56 -0700651
Thomas Gleixnerec44bc72012-05-25 22:08:57 +0000652static inline void detach_timer(struct timer_list *timer, bool clear_pending)
Oleg Nesterov55c888d2005-06-23 00:08:56 -0700653{
654 struct list_head *entry = &timer->entry;
655
Xiao Guangrong2b022e32009-08-10 10:48:59 +0800656 debug_deactivate(timer);
Thomas Gleixnerc6f3a972008-04-30 00:55:03 -0700657
Oleg Nesterov55c888d2005-06-23 00:08:56 -0700658 __list_del(entry->prev, entry->next);
659 if (clear_pending)
660 entry->next = NULL;
661 entry->prev = LIST_POISON2;
662}
663
Thomas Gleixner99d5f3a2012-05-25 22:08:58 +0000664static inline void
665detach_expired_timer(struct timer_list *timer, struct tvec_base *base)
666{
667 detach_timer(timer, true);
668 if (!tbase_get_deferrable(timer->base))
Tejun Heoe52b1db2012-08-08 11:10:25 -0700669 base->active_timers--;
Thomas Gleixner99d5f3a2012-05-25 22:08:58 +0000670}
671
Thomas Gleixnerec44bc72012-05-25 22:08:57 +0000672static int detach_if_pending(struct timer_list *timer, struct tvec_base *base,
673 bool clear_pending)
674{
675 if (!timer_pending(timer))
676 return 0;
677
678 detach_timer(timer, clear_pending);
Thomas Gleixner99d5f3a2012-05-25 22:08:58 +0000679 if (!tbase_get_deferrable(timer->base)) {
Tejun Heoe52b1db2012-08-08 11:10:25 -0700680 base->active_timers--;
Thomas Gleixner99d5f3a2012-05-25 22:08:58 +0000681 if (timer->expires == base->next_timer)
682 base->next_timer = base->timer_jiffies;
683 }
Thomas Gleixnerec44bc72012-05-25 22:08:57 +0000684 return 1;
685}
686
Oleg Nesterov55c888d2005-06-23 00:08:56 -0700687/*
Oleg Nesterov3691c512006-03-31 02:30:30 -0800688 * We are using hashed locking: holding per_cpu(tvec_bases).lock
Oleg Nesterov55c888d2005-06-23 00:08:56 -0700689 * means that all timers which are tied to this base via timer->base are
690 * locked, and the base itself is locked too.
691 *
692 * So __run_timers/migrate_timers can safely modify all timers which could
693 * be found on ->tvX lists.
694 *
695 * When the timer's base is locked, and the timer removed from list, it is
696 * possible to set timer->base = NULL and drop the lock: the timer remains
697 * locked.
698 */
Pavel Macheka6fa8e52008-01-30 13:30:00 +0100699static struct tvec_base *lock_timer_base(struct timer_list *timer,
Oleg Nesterov55c888d2005-06-23 00:08:56 -0700700 unsigned long *flags)
Josh Triplett89e7e3742006-09-29 01:59:36 -0700701 __acquires(timer->base->lock)
Oleg Nesterov55c888d2005-06-23 00:08:56 -0700702{
Pavel Macheka6fa8e52008-01-30 13:30:00 +0100703 struct tvec_base *base;
Oleg Nesterov55c888d2005-06-23 00:08:56 -0700704
705 for (;;) {
Pavel Macheka6fa8e52008-01-30 13:30:00 +0100706 struct tvec_base *prelock_base = timer->base;
Venki Pallipadi6e453a62007-05-08 00:27:44 -0700707 base = tbase_get_base(prelock_base);
Oleg Nesterov55c888d2005-06-23 00:08:56 -0700708 if (likely(base != NULL)) {
709 spin_lock_irqsave(&base->lock, *flags);
Venki Pallipadi6e453a62007-05-08 00:27:44 -0700710 if (likely(prelock_base == timer->base))
Oleg Nesterov55c888d2005-06-23 00:08:56 -0700711 return base;
712 /* The timer has migrated to another CPU */
713 spin_unlock_irqrestore(&base->lock, *flags);
714 }
715 cpu_relax();
716 }
717}
718
Ingo Molnar74019222009-02-18 12:23:29 +0100719static inline int
Arun R Bharadwaj597d0272009-04-16 12:13:26 +0530720__mod_timer(struct timer_list *timer, unsigned long expires,
721 bool pending_only, int pinned)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700722{
Pavel Macheka6fa8e52008-01-30 13:30:00 +0100723 struct tvec_base *base, *new_base;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700724 unsigned long flags;
Arun R Bharadwajeea08f32009-04-16 12:16:41 +0530725 int ret = 0 , cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700726
Ingo Molnar82f67cd2007-02-16 01:28:13 -0800727 timer_stats_timer_set_start_info(timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700728 BUG_ON(!timer->function);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700729
Oleg Nesterov55c888d2005-06-23 00:08:56 -0700730 base = lock_timer_base(timer, &flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700731
Thomas Gleixnerec44bc72012-05-25 22:08:57 +0000732 ret = detach_if_pending(timer, base, false);
733 if (!ret && pending_only)
734 goto out_unlock;
Oleg Nesterov55c888d2005-06-23 00:08:56 -0700735
Xiao Guangrong2b022e32009-08-10 10:48:59 +0800736 debug_activate(timer, expires);
Thomas Gleixnerc6f3a972008-04-30 00:55:03 -0700737
Arun R Bharadwajeea08f32009-04-16 12:16:41 +0530738 cpu = smp_processor_id();
739
740#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -0700741 if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu))
742 cpu = get_nohz_timer_target();
Arun R Bharadwajeea08f32009-04-16 12:16:41 +0530743#endif
744 new_base = per_cpu(tvec_bases, cpu);
745
Oleg Nesterov3691c512006-03-31 02:30:30 -0800746 if (base != new_base) {
Oleg Nesterov55c888d2005-06-23 00:08:56 -0700747 /*
748 * We are trying to schedule the timer on the local CPU.
749 * However we can't change timer's base while it is running,
750 * otherwise del_timer_sync() can't detect that the timer's
751 * handler yet has not finished. This also guarantees that
752 * the timer is serialized wrt itself.
753 */
Oleg Nesterova2c348f2006-03-31 02:30:31 -0800754 if (likely(base->running_timer != timer)) {
Oleg Nesterov55c888d2005-06-23 00:08:56 -0700755 /* See the comment in lock_timer_base() */
Venki Pallipadi6e453a62007-05-08 00:27:44 -0700756 timer_set_base(timer, NULL);
Oleg Nesterov55c888d2005-06-23 00:08:56 -0700757 spin_unlock(&base->lock);
Oleg Nesterova2c348f2006-03-31 02:30:31 -0800758 base = new_base;
759 spin_lock(&base->lock);
Venki Pallipadi6e453a62007-05-08 00:27:44 -0700760 timer_set_base(timer, base);
Oleg Nesterov55c888d2005-06-23 00:08:56 -0700761 }
762 }
763
Linus Torvalds1da177e2005-04-16 15:20:36 -0700764 timer->expires = expires;
Oleg Nesterova2c348f2006-03-31 02:30:31 -0800765 internal_add_timer(base, timer);
Ingo Molnar74019222009-02-18 12:23:29 +0100766
767out_unlock:
Oleg Nesterova2c348f2006-03-31 02:30:31 -0800768 spin_unlock_irqrestore(&base->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700769
770 return ret;
771}
772
Ingo Molnar74019222009-02-18 12:23:29 +0100773/**
774 * mod_timer_pending - modify a pending timer's timeout
775 * @timer: the pending timer to be modified
776 * @expires: new timeout in jiffies
777 *
778 * mod_timer_pending() is the same for pending timers as mod_timer(),
779 * but will not re-activate and modify already deleted timers.
780 *
781 * It is useful for unserialized use of timers.
782 */
783int mod_timer_pending(struct timer_list *timer, unsigned long expires)
784{
Arun R Bharadwaj597d0272009-04-16 12:13:26 +0530785 return __mod_timer(timer, expires, true, TIMER_NOT_PINNED);
Ingo Molnar74019222009-02-18 12:23:29 +0100786}
787EXPORT_SYMBOL(mod_timer_pending);
788
Arjan van de Ven3bbb9ec2010-03-11 14:04:36 -0800789/*
790 * Decide where to put the timer while taking the slack into account
791 *
792 * Algorithm:
793 * 1) calculate the maximum (absolute) time
794 * 2) calculate the highest bit where the expires and new max are different
795 * 3) use this bit to make a mask
796 * 4) use the bitmask to round down the maximum time, so that all last
797 * bits are zeros
798 */
799static inline
800unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
801{
802 unsigned long expires_limit, mask;
803 int bit;
804
Thomas Gleixner8e63d772010-05-25 20:43:30 +0200805 if (timer->slack >= 0) {
Jeff Chuaf00e0472010-05-24 07:16:24 +0800806 expires_limit = expires + timer->slack;
Thomas Gleixner8e63d772010-05-25 20:43:30 +0200807 } else {
Sebastian Andrzej Siewior1c3cc112011-05-21 12:58:28 +0200808 long delta = expires - jiffies;
Arjan van de Ven3bbb9ec2010-03-11 14:04:36 -0800809
Sebastian Andrzej Siewior1c3cc112011-05-21 12:58:28 +0200810 if (delta < 256)
811 return expires;
812
813 expires_limit = expires + delta / 256;
Thomas Gleixner8e63d772010-05-25 20:43:30 +0200814 }
Arjan van de Ven3bbb9ec2010-03-11 14:04:36 -0800815 mask = expires ^ expires_limit;
Arjan van de Ven3bbb9ec2010-03-11 14:04:36 -0800816 if (mask == 0)
817 return expires;
818
819 bit = find_last_bit(&mask, BITS_PER_LONG);
820
821 mask = (1 << bit) - 1;
822
823 expires_limit = expires_limit & ~(mask);
824
825 return expires_limit;
826}
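
/*
 * Worked example of apply_slack() (values chosen purely for illustration):
 * with timer->slack == -1 and an expiry 1024 ticks ahead, delta / 256 == 4,
 * so for expires == 0x10ff the limit becomes 0x1103.  The highest bit in
 * which 0x10ff and 0x1103 differ is bit 8, so the limit is rounded down to
 * 0x1100 and that value is returned as the new expiry; nearby timers whose
 * windows overlap end up on the same rounded value and share one wakeup.
 */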
827
/**
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer() is a more efficient way to update the expires field of an
 * active timer (if the timer is inactive it will be activated)
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * The function returns whether it has modified a pending timer or not.
 * (i.e. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
	expires = apply_slack(timer, expires);

	/*
	 * This is a common optimization triggered by the
	 * networking code - if the timer is re-modified
	 * to be the same thing then just return:
	 */
	if (timer_pending(timer) && timer->expires == expires)
		return 1;

	return __mod_timer(timer, expires, false, TIMER_NOT_PINNED);
}
EXPORT_SYMBOL(mod_timer);
863
864/**
Arun R Bharadwaj597d0272009-04-16 12:13:26 +0530865 * mod_timer_pinned - modify a timer's timeout
866 * @timer: the timer to be modified
867 * @expires: new timeout in jiffies
868 *
869 * mod_timer_pinned() is a way to update the expire field of an
870 * active timer (if the timer is inactive it will be activated)
Paul E. McKenney048a0e82012-04-26 10:52:27 -0700871 * and to ensure that the timer is scheduled on the current CPU.
872 *
873 * Note that this does not prevent the timer from being migrated
874 * when the current CPU goes offline. If this is a problem for
875 * you, use CPU-hotplug notifiers to handle it correctly, for
876 * example, cancelling the timer when the corresponding CPU goes
877 * offline.
Arun R Bharadwaj597d0272009-04-16 12:13:26 +0530878 *
879 * mod_timer_pinned(timer, expires) is equivalent to:
880 *
881 * del_timer(timer); timer->expires = expires; add_timer(timer);
882 */
883int mod_timer_pinned(struct timer_list *timer, unsigned long expires)
884{
885 if (timer->expires == expires && timer_pending(timer))
886 return 1;
887
888 return __mod_timer(timer, expires, false, TIMER_PINNED);
889}
890EXPORT_SYMBOL(mod_timer_pinned);
891
/**
 * add_timer - start a timer
 * @timer: the timer to be added
 *
 * The kernel will do a ->function(->data) callback from the
 * timer interrupt at the ->expires point in the future. The
 * current time is 'jiffies'.
 *
 * The timer's ->expires, ->function (and if the handler uses it, ->data)
 * fields must be set prior to calling this function.
 *
 * Timers with an ->expires field in the past will be executed in the next
 * timer tick.
 */
void add_timer(struct timer_list *timer)
{
	BUG_ON(timer_pending(timer));
	mod_timer(timer, timer->expires);
}
EXPORT_SYMBOL(add_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700912
/**
 * add_timer_on - start a timer on a particular CPU
 * @timer: the timer to be added
 * @cpu: the CPU to start it on
 *
 * This is not very scalable on SMP. Double adds are not possible.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
	struct tvec_base *base = per_cpu(tvec_bases, cpu);
	unsigned long flags;

	timer_stats_timer_set_start_info(timer);
	BUG_ON(timer_pending(timer) || !timer->function);
	spin_lock_irqsave(&base->lock, flags);
	timer_set_base(timer, base);
	debug_activate(timer, timer->expires);
	internal_add_timer(base, timer);
	/*
	 * Check whether the other CPU is idle and needs to be
	 * triggered to reevaluate the timer wheel when nohz is
	 * active. We are protected against the other CPU fiddling
	 * with the timer by holding the timer base lock. This also
	 * makes sure that a CPU on the way to idle can not evaluate
	 * the timer wheel.
	 */
	wake_up_idle_cpu(cpu);
	spin_unlock_irqrestore(&base->lock, flags);
}
EXPORT_SYMBOL_GPL(add_timer_on);
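
/*
 * Illustrative usage sketch (not part of the original file; the per-CPU
 * variable, timer and callback names are hypothetical): per-CPU statistics
 * flushing can pin one timer to each online CPU so the callback runs with
 * CPU-local data:
 *
 *	for_each_online_cpu(cpu) {
 *		struct timer_list *t = &per_cpu(flush_timer, cpu);
 *
 *		setup_timer(t, flush_stats_fn, (unsigned long)cpu);
 *		t->expires = jiffies + HZ;
 *		add_timer_on(t, cpu);
 *	}
 */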
Linus Torvalds1da177e2005-04-16 15:20:36 -0700943
/**
 * del_timer - deactivate a timer.
 * @timer: the timer to be deactivated
 *
 * del_timer() deactivates a timer - this works on both active and inactive
 * timers.
 *
 * The function returns whether it has deactivated a pending timer or not.
 * (i.e. del_timer() of an inactive timer returns 0, del_timer() of an
 * active timer returns 1.)
 */
int del_timer(struct timer_list *timer)
{
	struct tvec_base *base;
	unsigned long flags;
	int ret = 0;

	debug_assert_init(timer);

	timer_stats_timer_clear_start_info(timer);
	if (timer_pending(timer)) {
		base = lock_timer_base(timer, &flags);
		ret = detach_if_pending(timer, base, true);
		spin_unlock_irqrestore(&base->lock, flags);
	}

	return ret;
}
EXPORT_SYMBOL(del_timer);
973
/**
 * try_to_del_timer_sync - Try to deactivate a timer
 * @timer: the timer to be deactivated
 *
 * This function tries to deactivate a timer. Upon successful (ret >= 0)
 * exit the timer is not queued and the handler is not running on any CPU.
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
	struct tvec_base *base;
	unsigned long flags;
	int ret = -1;

	debug_assert_init(timer);

	base = lock_timer_base(timer, &flags);

	if (base->running_timer != timer) {
		timer_stats_timer_clear_start_info(timer);
		ret = detach_if_pending(timer, base, true);
	}
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}
EXPORT_SYMBOL(try_to_del_timer_sync);
1000
#ifdef CONFIG_SMP
/**
 * del_timer_sync - deactivate a timer and wait for the handler to finish.
 * @timer: the timer to be deactivated
 *
 * This function only differs from del_timer() on SMP: besides deactivating
 * the timer it also makes sure the handler has finished executing on other
 * CPUs.
 *
 * Synchronization rules: Callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts unless the timer is an irqsafe one. The caller must
 * not hold locks which would prevent completion of the timer's
 * handler. The timer's handler must not call add_timer_on(). Upon exit the
 * timer is not queued and the handler is not running on any CPU.
 *
 * Note: For !irqsafe timers, you must not hold locks that are held in
 * interrupt context while calling this function. Even if the lock has
 * nothing to do with the timer in question. Here's why:
 *
 *    CPU0                             CPU1
 *    ----                             ----
 *                                     <SOFTIRQ>
 *                                     call_timer_fn();
 *                                       base->running_timer = mytimer;
 *    spin_lock_irq(somelock);
 *                                     <IRQ>
 *                                       spin_lock(somelock);
 *    del_timer_sync(mytimer);
 *    while (base->running_timer == mytimer);
 *
 * Now del_timer_sync() will never return and never release somelock.
 * The interrupt on the other CPU is waiting to grab somelock but
 * it has interrupted the softirq that CPU0 is waiting to finish.
 *
 * The function returns whether it has deactivated a pending timer or not.
 */
int del_timer_sync(struct timer_list *timer)
{
#ifdef CONFIG_LOCKDEP
	unsigned long flags;

	/*
	 * If lockdep gives a backtrace here, please reference
	 * the synchronization rules above.
	 */
	local_irq_save(flags);
	lock_map_acquire(&timer->lockdep_map);
	lock_map_release(&timer->lockdep_map);
	local_irq_restore(flags);
#endif
	/*
	 * don't use it in hardirq context, because it
	 * could lead to deadlock.
	 */
	WARN_ON(in_irq() && !tbase_get_irqsafe(timer->base));
	for (;;) {
		int ret = try_to_del_timer_sync(timer);
		if (ret >= 0)
			return ret;
		cpu_relax();
	}
}
EXPORT_SYMBOL(del_timer_sync);
#endif
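
/*
 * Illustrative teardown sketch (not part of the original file; names are
 * hypothetical): before freeing an object that embeds a timer, stop the
 * handler from rearming it and then wait for any running callback:
 *
 *	dev->shutting_down = true;
 *	del_timer_sync(&dev->watchdog);
 *	kfree(dev);
 *
 * del_timer_sync() must not be called from interrupt context unless the
 * timer was set up with TIMER_IRQSAFE (see the rules above).
 */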
1066
Pavel Macheka6fa8e52008-01-30 13:30:00 +01001067static int cascade(struct tvec_base *base, struct tvec *tv, int index)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001068{
1069 /* cascade all the timers from tv up one level */
Porpoise3439dd82006-06-23 02:05:56 -07001070 struct timer_list *timer, *tmp;
1071 struct list_head tv_list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001072
Porpoise3439dd82006-06-23 02:05:56 -07001073 list_replace_init(tv->vec + index, &tv_list);
1074
Linus Torvalds1da177e2005-04-16 15:20:36 -07001075 /*
Porpoise3439dd82006-06-23 02:05:56 -07001076 * We are removing _all_ timers from the list, so we
1077 * don't have to detach them individually.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001078 */
Porpoise3439dd82006-06-23 02:05:56 -07001079 list_for_each_entry_safe(timer, tmp, &tv_list, entry) {
Venki Pallipadi6e453a62007-05-08 00:27:44 -07001080 BUG_ON(tbase_get_base(timer->base) != base);
Thomas Gleixnerfacbb4a2012-05-25 22:08:57 +00001081 /* No accounting, while moving them */
1082 __internal_add_timer(base, timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001083 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001084
1085 return index;
1086}
1087
Thomas Gleixner576da122010-03-12 21:10:29 +01001088static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
1089 unsigned long data)
1090{
1091 int preempt_count = preempt_count();
1092
1093#ifdef CONFIG_LOCKDEP
1094 /*
1095 * It is permissible to free the timer from inside the
1096 * function that is called from it, this we need to take into
1097 * account for lockdep too. To avoid bogus "held lock freed"
1098 * warnings as well as problems when looking into
1099 * timer->lockdep_map, make a copy and use that here.
1100 */
Peter Zijlstra4d82a1d2012-05-15 08:06:19 -07001101 struct lockdep_map lockdep_map;
1102
1103 lockdep_copy_map(&lockdep_map, &timer->lockdep_map);
Thomas Gleixner576da122010-03-12 21:10:29 +01001104#endif
1105 /*
1106 * Couple the lock chain with the lock chain at
1107 * del_timer_sync() by acquiring the lock_map around the fn()
1108 * call here and in del_timer_sync().
1109 */
1110 lock_map_acquire(&lockdep_map);
1111
1112 trace_timer_expire_entry(timer);
1113 fn(data);
1114 trace_timer_expire_exit(timer);
1115
1116 lock_map_release(&lockdep_map);
1117
1118 if (preempt_count != preempt_count()) {
Thomas Gleixner802702e2010-03-12 20:13:23 +01001119 WARN_ONCE(1, "timer: %pF preempt leak: %08x -> %08x\n",
1120 fn, preempt_count, preempt_count());
1121 /*
1122 * Restore the preempt count. That gives us a decent
1123 * chance to survive and extract information. If the
1124 * callback kept a lock held, bad luck, but not worse
1125 * than the BUG() we had.
1126 */
1127 preempt_count() = preempt_count;
Thomas Gleixner576da122010-03-12 21:10:29 +01001128 }
1129}
1130
Rolf Eike Beer2aae4a12006-09-29 01:59:46 -07001131#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)
1132
1133/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001134 * __run_timers - run all expired timers (if any) on this CPU.
1135 * @base: the timer vector to be processed.
1136 *
1137 * This function cascades all vectors and executes all expired timer
1138 * vectors.
1139 */
Pavel Macheka6fa8e52008-01-30 13:30:00 +01001140static inline void __run_timers(struct tvec_base *base)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001141{
1142 struct timer_list *timer;
1143
Oleg Nesterov3691c512006-03-31 02:30:30 -08001144 spin_lock_irq(&base->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001145 while (time_after_eq(jiffies, base->timer_jiffies)) {
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07001146 struct list_head work_list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001147 struct list_head *head = &work_list;
Thomas Gleixner68194572007-07-19 01:49:16 -07001148 int index = base->timer_jiffies & TVR_MASK;
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07001149
Linus Torvalds1da177e2005-04-16 15:20:36 -07001150 /*
1151 * Cascade timers:
1152 */
1153 if (!index &&
1154 (!cascade(base, &base->tv2, INDEX(0))) &&
1155 (!cascade(base, &base->tv3, INDEX(1))) &&
1156 !cascade(base, &base->tv4, INDEX(2)))
1157 cascade(base, &base->tv5, INDEX(3));
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07001158 ++base->timer_jiffies;
1159 list_replace_init(base->tv1.vec + index, &work_list);
Oleg Nesterov55c888d2005-06-23 00:08:56 -07001160 while (!list_empty(head)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001161 void (*fn)(unsigned long);
1162 unsigned long data;
Tejun Heoc5f66e92012-08-08 11:10:28 -07001163 bool irqsafe;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001164
Pavel Emelianovb5e61812007-05-08 00:30:19 -07001165 timer = list_first_entry(head, struct timer_list,entry);
Thomas Gleixner68194572007-07-19 01:49:16 -07001166 fn = timer->function;
1167 data = timer->data;
Tejun Heoc5f66e92012-08-08 11:10:28 -07001168 irqsafe = tbase_get_irqsafe(timer->base);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001169
Ingo Molnar82f67cd2007-02-16 01:28:13 -08001170 timer_stats_account_timer(timer);
1171
Yong Zhang6f1bc452010-10-20 15:57:31 -07001172 base->running_timer = timer;
Thomas Gleixner99d5f3a2012-05-25 22:08:58 +00001173 detach_expired_timer(timer, base);
Johannes Berg6f2b9b92009-01-29 16:03:20 +01001174
Tejun Heoc5f66e92012-08-08 11:10:28 -07001175 if (irqsafe) {
1176 spin_unlock(&base->lock);
1177 call_timer_fn(timer, fn, data);
1178 spin_lock(&base->lock);
1179 } else {
1180 spin_unlock_irq(&base->lock);
1181 call_timer_fn(timer, fn, data);
1182 spin_lock_irq(&base->lock);
1183 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001184 }
1185 }
Yong Zhang6f1bc452010-10-20 15:57:31 -07001186 base->running_timer = NULL;
Oleg Nesterov3691c512006-03-31 02:30:30 -08001187 spin_unlock_irq(&base->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001188}
1189
Russell Kingee9c5782008-04-20 13:59:33 +01001190#ifdef CONFIG_NO_HZ
Linus Torvalds1da177e2005-04-16 15:20:36 -07001191/*
1192 * Find out when the next timer event is due to happen. This
Randy Dunlap90cba642009-08-25 14:35:41 -07001193 * is used on S/390 to stop all activity when a CPU is idle.
1194 * This function needs to be called with interrupts disabled.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001195 */
Pavel Macheka6fa8e52008-01-30 13:30:00 +01001196static unsigned long __next_timer_interrupt(struct tvec_base *base)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001197{
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001198 unsigned long timer_jiffies = base->timer_jiffies;
Thomas Gleixnereaad0842007-05-29 23:47:39 +02001199 unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA;
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001200 int index, slot, array, found = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001201 struct timer_list *nte;
Pavel Macheka6fa8e52008-01-30 13:30:00 +01001202 struct tvec *varray[4];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001203
1204 /* Look for timer events in tv1. */
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001205 index = slot = timer_jiffies & TVR_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001206 do {
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001207 list_for_each_entry(nte, base->tv1.vec + slot, entry) {
Thomas Gleixner68194572007-07-19 01:49:16 -07001208 if (tbase_get_deferrable(nte->base))
1209 continue;
Venki Pallipadi6e453a62007-05-08 00:27:44 -07001210
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001211 found = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001212 expires = nte->expires;
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001213 /* Look at the cascade bucket(s)? */
1214 if (!index || slot < index)
1215 goto cascade;
1216 return expires;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001217 }
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001218 slot = (slot + 1) & TVR_MASK;
1219 } while (slot != index);
1220
1221cascade:
1222 /* Calculate the next cascade event */
1223 if (index)
1224 timer_jiffies += TVR_SIZE - index;
1225 timer_jiffies >>= TVR_BITS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001226
1227 /* Check tv2-tv5. */
1228 varray[0] = &base->tv2;
1229 varray[1] = &base->tv3;
1230 varray[2] = &base->tv4;
1231 varray[3] = &base->tv5;
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001232
1233 for (array = 0; array < 4; array++) {
Pavel Macheka6fa8e52008-01-30 13:30:00 +01001234 struct tvec *varp = varray[array];
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001235
1236 index = slot = timer_jiffies & TVN_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001237 do {
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001238 list_for_each_entry(nte, varp->vec + slot, entry) {
Jon Huntera0419882009-05-01 13:10:23 -07001239 if (tbase_get_deferrable(nte->base))
1240 continue;
1241
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001242 found = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001243 if (time_before(nte->expires, expires))
1244 expires = nte->expires;
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001245 }
1246 /*
1247 * Do we still search for the first timer or are
1248 * we looking up the cascade buckets ?
1249 */
1250 if (found) {
1251 /* Look at the cascade bucket(s)? */
1252 if (!index || slot < index)
1253 break;
1254 return expires;
1255 }
1256 slot = (slot + 1) & TVN_MASK;
1257 } while (slot != index);
1258
1259 if (index)
1260 timer_jiffies += TVN_SIZE - index;
1261 timer_jiffies >>= TVN_BITS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001262 }
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001263 return expires;
1264}
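
/*
 * Editorial example (not part of the original source): how the scan above
 * indexes the wheel.  With the default TVR_BITS = 8 and TVN_BITS = 6, a
 * timer_jiffies value decomposes into
 *
 *	tv1 slot:  timer_jiffies        & TVR_MASK	(bits 0..7)
 *	tv2 slot: (timer_jiffies >>  8) & TVN_MASK	(bits 8..13)
 *	tv3 slot: (timer_jiffies >> 14) & TVN_MASK	(bits 14..19)
 *
 * For example, timer_jiffies == 0x12345 starts the tv1 scan at slot 0x45;
 * if nothing non-deferrable is found before wrapping, the tv1 portion is
 * rounded up (timer_jiffies becomes 0x12400) and the tv2 scan starts at
 * slot 0x24.
 */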
1265
1266/*
1267 * Check if the next hrtimer event is before the next timer wheel
1268 * event:
1269 */
1270static unsigned long cmp_next_hrtimer_event(unsigned long now,
1271 unsigned long expires)
1272{
1273 ktime_t hr_delta = hrtimer_get_next_event();
1274 struct timespec tsdelta;
Thomas Gleixner9501b6c2007-03-25 14:31:17 +02001275 unsigned long delta;
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001276
1277 if (hr_delta.tv64 == KTIME_MAX)
1278 return expires;
1279
Thomas Gleixner9501b6c2007-03-25 14:31:17 +02001280 /*
1281 * Expired timer available, let it expire in the next tick
1282 */
1283 if (hr_delta.tv64 <= 0)
1284 return now + 1;
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001285
1286 tsdelta = ktime_to_timespec(hr_delta);
Thomas Gleixner9501b6c2007-03-25 14:31:17 +02001287 delta = timespec_to_jiffies(&tsdelta);
Thomas Gleixnereaad0842007-05-29 23:47:39 +02001288
1289 /*
1290 * Limit the delta to the max value, which is checked in
1291 * tick_nohz_stop_sched_tick():
1292 */
1293 if (delta > NEXT_TIMER_MAX_DELTA)
1294 delta = NEXT_TIMER_MAX_DELTA;
1295
Thomas Gleixner9501b6c2007-03-25 14:31:17 +02001296 /*
1297 * Take rounding errors in to account and make sure, that it
1298 * expires in the next tick. Otherwise we go into an endless
1299 * ping pong due to tick_nohz_stop_sched_tick() retriggering
1300 * the timer softirq
1301 */
1302 if (delta < 1)
1303 delta = 1;
1304 now += delta;
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001305 if (time_before(now, expires))
1306 return now;
1307 return expires;
1308}
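
/*
 * Editorial note (not in the original source): a quick worked example of
 * the comparison above.  Assume HZ = 250 (4 ms per jiffy) and that the
 * earliest hrtimer fires in 500 us.  ktime_to_timespec() plus
 * timespec_to_jiffies() turn that into one jiffy (the "delta < 1" clamp
 * would catch a zero result), so the function returns now + 1 -- the
 * hrtimer wins over any timer-wheel event that is further away.
 */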
1309
1310/**
Li Zefan8dce39c2007-11-05 14:51:10 -08001311 * get_next_timer_interrupt - return the jiffy of the next pending timer
Randy Dunlap05fb6bf2007-02-28 20:12:13 -08001312 * @now: current time (in jiffies)
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001313 */
Thomas Gleixnerfd064b92007-02-16 01:27:47 -08001314unsigned long get_next_timer_interrupt(unsigned long now)
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001315{
Christoph Lameter74963512010-11-30 14:05:53 -06001316 struct tvec_base *base = __this_cpu_read(tvec_bases);
Thomas Gleixnere40468a2012-05-25 22:08:59 +00001317 unsigned long expires = now + NEXT_TIMER_MAX_DELTA;
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001318
Heiko Carstensdbd87b52010-12-01 10:11:09 +01001319 /*
1320 * Pretend that there is no timer pending if the cpu is offline.
1321 * Possible pending timers will be migrated later to an active cpu.
1322 */
1323 if (cpu_is_offline(smp_processor_id()))
Thomas Gleixnere40468a2012-05-25 22:08:59 +00001324 return expires;
1325
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001326 spin_lock(&base->lock);
Thomas Gleixnere40468a2012-05-25 22:08:59 +00001327 if (base->active_timers) {
1328 if (time_before_eq(base->next_timer, base->timer_jiffies))
1329 base->next_timer = __next_timer_interrupt(base);
1330 expires = base->next_timer;
1331 }
Oleg Nesterov3691c512006-03-31 02:30:30 -08001332 spin_unlock(&base->lock);
Tony Lindgren69239742006-03-06 15:42:45 -08001333
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001334 if (time_before_eq(expires, now))
1335 return now;
Zachary Amsden0662b712006-05-20 15:00:24 -07001336
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001337 return cmp_next_hrtimer_event(now, expires);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001338}
1339#endif
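
/*
 * Editorial sketch (not part of the original source): roughly how the
 * NO_HZ idle path consumes the value computed above.  The body below is
 * simplified pseudo-code; the real logic lives in
 * tick_nohz_stop_sched_tick().
 *
 *	unsigned long basejiff = jiffies;
 *	unsigned long next_jiffies = get_next_timer_interrupt(basejiff);
 *	unsigned long delta = next_jiffies - basejiff;
 *
 *	if (delta > 1) {
 *		stop the periodic tick;
 *		program the clock event device to fire at next_jiffies;
 *	}
 */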
1340
Linus Torvalds1da177e2005-04-16 15:20:36 -07001341/*
Daniel Walker5b4db0c2007-10-18 03:06:11 -07001342 * Called from the timer interrupt handler to charge one tick to the current
Linus Torvalds1da177e2005-04-16 15:20:36 -07001343 * process. user_tick is 1 if the tick is user time, 0 for system.
1344 */
1345void update_process_times(int user_tick)
1346{
1347 struct task_struct *p = current;
1348 int cpu = smp_processor_id();
1349
1350 /* Note: this timer irq context must be accounted for as well. */
Paul Mackerrasfa13a5a2007-11-09 22:39:38 +01001351 account_process_tick(p, user_tick);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001352 run_local_timers();
Paul E. McKenneya1572292009-08-22 13:56:51 -07001353 rcu_check_callbacks(cpu, user_tick);
Peter Zijlstrab845b512008-08-08 21:47:09 +02001354 printk_tick();
Peter Zijlstrae360adb2010-10-14 14:01:34 +08001355#ifdef CONFIG_IRQ_WORK
1356 if (in_irq())
1357 irq_work_run();
1358#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001359 scheduler_tick();
Thomas Gleixner68194572007-07-19 01:49:16 -07001360 run_posix_cpu_timers(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001361}
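
/*
 * Editorial sketch (not in the original source): update_process_times() is
 * normally reached from an architecture's periodic tick interrupt, roughly
 * like the hypothetical handler below ("my_arch_timer_interrupt" is not a
 * real function; it only illustrates the calling convention):
 *
 *	static irqreturn_t my_arch_timer_interrupt(int irq, void *dev_id)
 *	{
 *		struct pt_regs *regs = get_irq_regs();
 *
 *		xtime_update(1);			(global jiffies/time keeping)
 *		update_process_times(user_mode(regs));	(this function)
 *		return IRQ_HANDLED;
 *	}
 */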
1362
1363/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001364 * This function runs expired timers in bottom half (softirq) context.
1365 */
1366static void run_timer_softirq(struct softirq_action *h)
1367{
Christoph Lameter74963512010-11-30 14:05:53 -06001368 struct tvec_base *base = __this_cpu_read(tvec_bases);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001369
Peter Zijlstrad3d74452008-01-25 21:08:31 +01001370 hrtimer_run_pending();
Ingo Molnar82f67cd2007-02-16 01:28:13 -08001371
Linus Torvalds1da177e2005-04-16 15:20:36 -07001372 if (time_after_eq(jiffies, base->timer_jiffies))
1373 __run_timers(base);
1374}
1375
1376/*
1377 * Called by the local, per-CPU timer interrupt on SMP.
1378 */
1379void run_local_timers(void)
1380{
Peter Zijlstrad3d74452008-01-25 21:08:31 +01001381 hrtimer_run_queues();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001382 raise_softirq(TIMER_SOFTIRQ);
1383}
1384
Linus Torvalds1da177e2005-04-16 15:20:36 -07001385#ifdef __ARCH_WANT_SYS_ALARM
1386
1387/*
1388 * For backwards compatibility? This can be done in libc so Alpha
1389 * and all newer ports shouldn't need it.
1390 */
Heiko Carstens58fd3aa2009-01-14 14:14:03 +01001391SYSCALL_DEFINE1(alarm, unsigned int, seconds)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001392{
Thomas Gleixnerc08b8a42006-03-25 03:06:33 -08001393 return alarm_setitimer(seconds);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001394}
1395
1396#endif
1397
Linus Torvalds1da177e2005-04-16 15:20:36 -07001398/**
1399 * sys_getpid - return the thread group id of the current process
1400 *
1401 * Note: despite the name, this returns the tgid, not the pid. The tgid and
1402 * the pid are identical unless CLONE_THREAD was specified on clone(), in
1403 * which case the tgid is the same in all threads of the same group.
1404 *
1405 * This is SMP safe as current->tgid does not change.
1406 */
Heiko Carstens58fd3aa2009-01-14 14:14:03 +01001407SYSCALL_DEFINE0(getpid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001408{
Pavel Emelyanovb4888932007-10-18 23:40:14 -07001409 return task_tgid_vnr(current);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001410}
1411
1412/*
Kirill Korotaev6997a6f2006-08-13 23:24:23 -07001413 * Accessing ->real_parent is not SMP-safe, it could
1414 * change from under us. However, we can use a stale
1415 * value of ->real_parent under rcu_read_lock(), see
1416 * release_task()->call_rcu(delayed_put_task_struct).
Linus Torvalds1da177e2005-04-16 15:20:36 -07001417 */
Heiko Carstensdbf040d2009-01-14 14:14:04 +01001418SYSCALL_DEFINE0(getppid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001419{
1420 int pid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001421
Kirill Korotaev6997a6f2006-08-13 23:24:23 -07001422 rcu_read_lock();
Mandeep Singh Baines031af1652011-12-08 14:34:44 -08001423 pid = task_tgid_vnr(rcu_dereference(current->real_parent));
Kirill Korotaev6997a6f2006-08-13 23:24:23 -07001424 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001425
Linus Torvalds1da177e2005-04-16 15:20:36 -07001426 return pid;
1427}
1428
Heiko Carstensdbf040d2009-01-14 14:14:04 +01001429SYSCALL_DEFINE0(getuid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001430{
1431 /* Only we change this so SMP safe */
Eric W. Biedermana29c33f2012-02-07 18:51:01 -08001432 return from_kuid_munged(current_user_ns(), current_uid());
Linus Torvalds1da177e2005-04-16 15:20:36 -07001433}
1434
Heiko Carstensdbf040d2009-01-14 14:14:04 +01001435SYSCALL_DEFINE0(geteuid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001436{
1437 /* Only we change this so SMP safe */
Eric W. Biedermana29c33f2012-02-07 18:51:01 -08001438 return from_kuid_munged(current_user_ns(), current_euid());
Linus Torvalds1da177e2005-04-16 15:20:36 -07001439}
1440
Heiko Carstensdbf040d2009-01-14 14:14:04 +01001441SYSCALL_DEFINE0(getgid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001442{
1443 /* Only we change this so SMP safe */
Eric W. Biedermana29c33f2012-02-07 18:51:01 -08001444 return from_kgid_munged(current_user_ns(), current_gid());
Linus Torvalds1da177e2005-04-16 15:20:36 -07001445}
1446
Heiko Carstensdbf040d2009-01-14 14:14:04 +01001447SYSCALL_DEFINE0(getegid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001448{
1449 /* Only we change this so SMP safe */
Eric W. Biedermana29c33f2012-02-07 18:51:01 -08001450 return from_kgid_munged(current_user_ns(), current_egid());
Linus Torvalds1da177e2005-04-16 15:20:36 -07001451}
1452
Linus Torvalds1da177e2005-04-16 15:20:36 -07001453static void process_timeout(unsigned long __data)
1454{
Ingo Molnar36c8b582006-07-03 00:25:41 -07001455 wake_up_process((struct task_struct *)__data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001456}
1457
1458/**
1459 * schedule_timeout - sleep until timeout
1460 * @timeout: timeout value in jiffies
1461 *
1462 * Make the current task sleep until @timeout jiffies have
1463 * elapsed. The routine will return immediately unless
1464 * the current task state has been set (see set_current_state()).
1465 *
1466 * You can set the task state as follows:
1467 *
1468 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
1469 * pass before the routine returns. The routine will return 0
1470 *
1471 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
1472 * delivered to the current task. In this case the remaining time
1473 * in jiffies will be returned, or 0 if the timer expired in time
1474 *
1475 * The current task state is guaranteed to be TASK_RUNNING when this
1476 * routine returns.
1477 *
1478 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
1479 * the CPU away without a bound on the timeout. In this case the return
1480 * value will be %MAX_SCHEDULE_TIMEOUT.
1481 *
1482 * In all cases the return value is guaranteed to be non-negative.
1483 */
Harvey Harrison7ad5b3a2008-02-08 04:19:53 -08001484signed long __sched schedule_timeout(signed long timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001485{
1486 struct timer_list timer;
1487 unsigned long expire;
1488
1489 switch (timeout)
1490 {
1491 case MAX_SCHEDULE_TIMEOUT:
1492 /*
1493 * This special case exists purely for the caller's
1494 * convenience. Nothing more. We could take
1495 * MAX_SCHEDULE_TIMEOUT from one of the negative values,
1496 * but I'd like to return a valid offset (>=0) to allow
1497 * the caller to do everything it wants with the retval.
1498 */
1499 schedule();
1500 goto out;
1501 default:
1502 /*
1503 * Another bit of paranoia. Note that the retval will be
1504 * 0, since no piece of the kernel is supposed to check
1505 * for a negative retval of schedule_timeout() (it
1506 * should never happen anyway). You just have the printk()
1507 * that will tell you if something has gone wrong, and where.
1508 */
Andrew Morton5b149bc2006-12-22 01:10:14 -08001509 if (timeout < 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001510 printk(KERN_ERR "schedule_timeout: wrong timeout "
Andrew Morton5b149bc2006-12-22 01:10:14 -08001511 "value %lx\n", timeout);
1512 dump_stack();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001513 current->state = TASK_RUNNING;
1514 goto out;
1515 }
1516 }
1517
1518 expire = timeout + jiffies;
1519
Thomas Gleixnerc6f3a972008-04-30 00:55:03 -07001520 setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
Arun R Bharadwaj597d0272009-04-16 12:13:26 +05301521 __mod_timer(&timer, expire, false, TIMER_NOT_PINNED);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001522 schedule();
1523 del_singleshot_timer_sync(&timer);
1524
Thomas Gleixnerc6f3a972008-04-30 00:55:03 -07001525 /* Remove the timer from the object tracker */
1526 destroy_timer_on_stack(&timer);
1527
Linus Torvalds1da177e2005-04-16 15:20:36 -07001528 timeout = expire - jiffies;
1529
1530 out:
1531 return timeout < 0 ? 0 : timeout;
1532}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001533EXPORT_SYMBOL(schedule_timeout);
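
/*
 * Editorial example (not part of the original source): the canonical
 * calling sequence for schedule_timeout().  The task state must be set
 * first, exactly as the kerneldoc above says:
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(msecs_to_jiffies(100));
 *	if (remaining)
 *		a signal arrived with 'remaining' jiffies still to go;
 *	else
 *		the full timeout elapsed;
 */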
1534
Andrew Morton8a1c1752005-09-13 01:25:15 -07001535/*
1536 * We can use __set_current_state() here because schedule_timeout() calls
1537 * schedule() unconditionally.
1538 */
Nishanth Aravamudan64ed93a2005-09-10 00:27:21 -07001539signed long __sched schedule_timeout_interruptible(signed long timeout)
1540{
Andrew Mortona5a0d522005-10-30 15:01:42 -08001541 __set_current_state(TASK_INTERRUPTIBLE);
1542 return schedule_timeout(timeout);
Nishanth Aravamudan64ed93a2005-09-10 00:27:21 -07001543}
1544EXPORT_SYMBOL(schedule_timeout_interruptible);
1545
Matthew Wilcox294d5cc2007-12-06 11:59:46 -05001546signed long __sched schedule_timeout_killable(signed long timeout)
1547{
1548 __set_current_state(TASK_KILLABLE);
1549 return schedule_timeout(timeout);
1550}
1551EXPORT_SYMBOL(schedule_timeout_killable);
1552
Nishanth Aravamudan64ed93a2005-09-10 00:27:21 -07001553signed long __sched schedule_timeout_uninterruptible(signed long timeout)
1554{
Andrew Mortona5a0d522005-10-30 15:01:42 -08001555 __set_current_state(TASK_UNINTERRUPTIBLE);
1556 return schedule_timeout(timeout);
Nishanth Aravamudan64ed93a2005-09-10 00:27:21 -07001557}
1558EXPORT_SYMBOL(schedule_timeout_uninterruptible);
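
/*
 * Editorial note (not in the original source): the wrappers above are
 * intentionally trivial -- set the task state, then delegate to
 * schedule_timeout().  A caller needing a different flavour of wait would
 * follow the same pattern; e.g. a hypothetical freezable variant (not a
 * helper defined in this file) could look like:
 *
 *	__set_current_state(TASK_INTERRUPTIBLE);
 *	timeout = schedule_timeout(timeout);
 *	try_to_freeze();
 */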
1559
Linus Torvalds1da177e2005-04-16 15:20:36 -07001560/* Thread ID - the internal kernel "pid" */
Heiko Carstens58fd3aa2009-01-14 14:14:03 +01001561SYSCALL_DEFINE0(gettid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001562{
Pavel Emelyanovb4888932007-10-18 23:40:14 -07001563 return task_pid_vnr(current);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001564}
1565
Rolf Eike Beer2aae4a12006-09-29 01:59:46 -07001566/**
Kyle McMartind4d23ad2007-02-10 01:46:00 -08001567 * do_sysinfo - fill in sysinfo struct
Rolf Eike Beer2aae4a12006-09-29 01:59:46 -07001568 * @info: pointer to buffer to fill
Thomas Gleixner68194572007-07-19 01:49:16 -07001569 */
Kyle McMartind4d23ad2007-02-10 01:46:00 -08001570int do_sysinfo(struct sysinfo *info)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001571{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001572 unsigned long mem_total, sav_total;
1573 unsigned int mem_unit, bitcount;
Thomas Gleixner2d024942009-05-02 20:08:52 +02001574 struct timespec tp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001575
Kyle McMartind4d23ad2007-02-10 01:46:00 -08001576 memset(info, 0, sizeof(struct sysinfo));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001577
Thomas Gleixner2d024942009-05-02 20:08:52 +02001578 ktime_get_ts(&tp);
1579 monotonic_to_bootbased(&tp);
1580 info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001581
Thomas Gleixner2d024942009-05-02 20:08:52 +02001582 get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001583
Thomas Gleixner2d024942009-05-02 20:08:52 +02001584 info->procs = nr_threads;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001585
Kyle McMartind4d23ad2007-02-10 01:46:00 -08001586 si_meminfo(info);
1587 si_swapinfo(info);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001588
1589 /*
1590 * If the sum of all the available memory (i.e. ram + swap)
1591 * is less than can be stored in a 32-bit unsigned long, then
1592 * we can be binary compatible with 2.2.x kernels. If not,
1593 * well, in that case 2.2.x was broken anyway...
1594 *
1595 * -Erik Andersen <andersee@debian.org>
1596 */
1597
Kyle McMartind4d23ad2007-02-10 01:46:00 -08001598 mem_total = info->totalram + info->totalswap;
1599 if (mem_total < info->totalram || mem_total < info->totalswap)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001600 goto out;
1601 bitcount = 0;
Kyle McMartind4d23ad2007-02-10 01:46:00 -08001602 mem_unit = info->mem_unit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001603 while (mem_unit > 1) {
1604 bitcount++;
1605 mem_unit >>= 1;
1606 sav_total = mem_total;
1607 mem_total <<= 1;
1608 if (mem_total < sav_total)
1609 goto out;
1610 }
1611
1612 /*
1613 * If mem_total did not overflow, multiply all memory values by
Kyle McMartind4d23ad2007-02-10 01:46:00 -08001614 * info->mem_unit and set it to 1. This leaves things compatible
Linus Torvalds1da177e2005-04-16 15:20:36 -07001615 * with 2.2.x, and also retains compatibility with earlier 2.4.x
1616 * kernels...
1617 */
1618
Kyle McMartind4d23ad2007-02-10 01:46:00 -08001619 info->mem_unit = 1;
1620 info->totalram <<= bitcount;
1621 info->freeram <<= bitcount;
1622 info->sharedram <<= bitcount;
1623 info->bufferram <<= bitcount;
1624 info->totalswap <<= bitcount;
1625 info->freeswap <<= bitcount;
1626 info->totalhigh <<= bitcount;
1627 info->freehigh <<= bitcount;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001628
Kyle McMartind4d23ad2007-02-10 01:46:00 -08001629out:
1630 return 0;
1631}
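
/*
 * Editorial example (not in the original source): the mem_unit
 * normalisation above in numbers, assuming PAGE_SIZE = 4096 on a 32-bit
 * machine.  With 2 GiB of ram + swap (0x80000 pages) the twelve doublings
 * of mem_total all fit in an unsigned long, so bitcount ends up 12, every
 * memory field is shifted left by 12 and mem_unit is reported as 1 --
 * plain bytes, as 2.2-era userspace expects.  With 4 GiB (0x100000 pages)
 * the last doubling would wrap, the loop bails out to "out" early and the
 * caller keeps mem_unit = 4096 with the counts still in pages.
 */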
1632
Heiko Carstens1e7bfb22009-01-14 14:14:29 +01001633SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
Kyle McMartind4d23ad2007-02-10 01:46:00 -08001634{
1635 struct sysinfo val;
1636
1637 do_sysinfo(&val);
1638
Linus Torvalds1da177e2005-04-16 15:20:36 -07001639 if (copy_to_user(info, &val, sizeof(struct sysinfo)))
1640 return -EFAULT;
1641
1642 return 0;
1643}
1644
Adrian Bunkb4be6252007-12-18 18:05:58 +01001645static int __cpuinit init_timers_cpu(int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001646{
1647 int j;
Pavel Macheka6fa8e52008-01-30 13:30:00 +01001648 struct tvec_base *base;
Adrian Bunkb4be6252007-12-18 18:05:58 +01001649 static char __cpuinitdata tvec_base_done[NR_CPUS];
Oleg Nesterov55c888d2005-06-23 00:08:56 -07001650
Andrew Mortonba6edfc2006-04-10 22:53:58 -07001651 if (!tvec_base_done[cpu]) {
Jan Beulicha4a61982006-03-24 03:15:54 -08001652 static char boot_done;
1653
Jan Beulicha4a61982006-03-24 03:15:54 -08001654 if (boot_done) {
Andrew Mortonba6edfc2006-04-10 22:53:58 -07001655 /*
1656 * The APs (secondary CPUs) use this path later in boot.
1657 */
Christoph Lameter94f60302007-07-17 04:03:29 -07001658 base = kmalloc_node(sizeof(*base),
1659 GFP_KERNEL | __GFP_ZERO,
Jan Beulicha4a61982006-03-24 03:15:54 -08001660 cpu_to_node(cpu));
1661 if (!base)
1662 return -ENOMEM;
Venki Pallipadi6e453a62007-05-08 00:27:44 -07001663
1664 /* Make sure that tvec_base is 2 byte aligned */
1665 if (tbase_get_deferrable(base)) {
1666 WARN_ON(1);
1667 kfree(base);
1668 return -ENOMEM;
1669 }
Andrew Mortonba6edfc2006-04-10 22:53:58 -07001670 per_cpu(tvec_bases, cpu) = base;
Jan Beulicha4a61982006-03-24 03:15:54 -08001671 } else {
Andrew Mortonba6edfc2006-04-10 22:53:58 -07001672 /*
1673 * This is for the boot CPU - we use compile-time
1674 * static initialisation because per-cpu memory isn't
1675 * ready yet and because the memory allocators are not
1676 * initialised either.
1677 */
Jan Beulicha4a61982006-03-24 03:15:54 -08001678 boot_done = 1;
Andrew Mortonba6edfc2006-04-10 22:53:58 -07001679 base = &boot_tvec_bases;
Jan Beulicha4a61982006-03-24 03:15:54 -08001680 }
Andrew Mortonba6edfc2006-04-10 22:53:58 -07001681 tvec_base_done[cpu] = 1;
1682 } else {
1683 base = per_cpu(tvec_bases, cpu);
Jan Beulicha4a61982006-03-24 03:15:54 -08001684 }
Andrew Mortonba6edfc2006-04-10 22:53:58 -07001685
Oleg Nesterov3691c512006-03-31 02:30:30 -08001686 spin_lock_init(&base->lock);
Ingo Molnard730e882006-07-03 00:25:10 -07001687
Linus Torvalds1da177e2005-04-16 15:20:36 -07001688 for (j = 0; j < TVN_SIZE; j++) {
1689 INIT_LIST_HEAD(base->tv5.vec + j);
1690 INIT_LIST_HEAD(base->tv4.vec + j);
1691 INIT_LIST_HEAD(base->tv3.vec + j);
1692 INIT_LIST_HEAD(base->tv2.vec + j);
1693 }
1694 for (j = 0; j < TVR_SIZE; j++)
1695 INIT_LIST_HEAD(base->tv1.vec + j);
1696
1697 base->timer_jiffies = jiffies;
Martin Schwidefsky97fd9ed2009-07-21 20:25:05 +02001698 base->next_timer = base->timer_jiffies;
Thomas Gleixner99d5f3a2012-05-25 22:08:58 +00001699 base->active_timers = 0;
Jan Beulicha4a61982006-03-24 03:15:54 -08001700 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001701}
1702
1703#ifdef CONFIG_HOTPLUG_CPU
Pavel Macheka6fa8e52008-01-30 13:30:00 +01001704static void migrate_timer_list(struct tvec_base *new_base, struct list_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001705{
1706 struct timer_list *timer;
1707
1708 while (!list_empty(head)) {
Pavel Emelianovb5e61812007-05-08 00:30:19 -07001709 timer = list_first_entry(head, struct timer_list, entry);
Thomas Gleixner99d5f3a2012-05-25 22:08:58 +00001710 /* We ignore the accounting on the dying cpu */
Thomas Gleixnerec44bc72012-05-25 22:08:57 +00001711 detach_timer(timer, false);
Venki Pallipadi6e453a62007-05-08 00:27:44 -07001712 timer_set_base(timer, new_base);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001713 internal_add_timer(new_base, timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001714 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001715}
1716
Randy Dunlap48ccf3d2008-01-21 17:18:25 -08001717static void __cpuinit migrate_timers(int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001718{
Pavel Macheka6fa8e52008-01-30 13:30:00 +01001719 struct tvec_base *old_base;
1720 struct tvec_base *new_base;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001721 int i;
1722
1723 BUG_ON(cpu_online(cpu));
Jan Beulicha4a61982006-03-24 03:15:54 -08001724 old_base = per_cpu(tvec_bases, cpu);
1725 new_base = get_cpu_var(tvec_bases);
Oleg Nesterovd82f0b02008-08-20 16:46:04 -07001726 /*
1727 * The caller is globally serialized and nobody else
1728 * takes two locks at once, so deadlock is not possible.
1729 */
1730 spin_lock_irq(&new_base->lock);
Oleg Nesterov0d180402008-04-04 20:54:10 +02001731 spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001732
Oleg Nesterov3691c512006-03-31 02:30:30 -08001733 BUG_ON(old_base->running_timer);
1734
Linus Torvalds1da177e2005-04-16 15:20:36 -07001735 for (i = 0; i < TVR_SIZE; i++)
Oleg Nesterov55c888d2005-06-23 00:08:56 -07001736 migrate_timer_list(new_base, old_base->tv1.vec + i);
1737 for (i = 0; i < TVN_SIZE; i++) {
1738 migrate_timer_list(new_base, old_base->tv2.vec + i);
1739 migrate_timer_list(new_base, old_base->tv3.vec + i);
1740 migrate_timer_list(new_base, old_base->tv4.vec + i);
1741 migrate_timer_list(new_base, old_base->tv5.vec + i);
1742 }
1743
Oleg Nesterov0d180402008-04-04 20:54:10 +02001744 spin_unlock(&old_base->lock);
Oleg Nesterovd82f0b02008-08-20 16:46:04 -07001745 spin_unlock_irq(&new_base->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001746 put_cpu_var(tvec_bases);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001747}
1748#endif /* CONFIG_HOTPLUG_CPU */
1749
Chandra Seetharaman8c78f302006-07-30 03:03:35 -07001750static int __cpuinit timer_cpu_notify(struct notifier_block *self,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001751 unsigned long action, void *hcpu)
1752{
1753 long cpu = (long)hcpu;
Akinobu Mita80b51842010-05-26 14:43:32 -07001754 int err;
1755
Linus Torvalds1da177e2005-04-16 15:20:36 -07001756 switch(action) {
1757 case CPU_UP_PREPARE:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07001758 case CPU_UP_PREPARE_FROZEN:
Akinobu Mita80b51842010-05-26 14:43:32 -07001759 err = init_timers_cpu(cpu);
1760 if (err < 0)
1761 return notifier_from_errno(err);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001762 break;
1763#ifdef CONFIG_HOTPLUG_CPU
1764 case CPU_DEAD:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07001765 case CPU_DEAD_FROZEN:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001766 migrate_timers(cpu);
1767 break;
1768#endif
1769 default:
1770 break;
1771 }
1772 return NOTIFY_OK;
1773}
1774
Chandra Seetharaman8c78f302006-07-30 03:03:35 -07001775static struct notifier_block __cpuinitdata timers_nb = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001776 .notifier_call = timer_cpu_notify,
1777};
1778
1779
1780void __init init_timers(void)
1781{
Tejun Heoe52b1db2012-08-08 11:10:25 -07001782 int err;
Akinobu Mita07dccf32006-09-29 02:00:22 -07001783
Tejun Heoe52b1db2012-08-08 11:10:25 -07001784 /* ensure there are enough low bits for flags in timer->base pointer */
1785 BUILD_BUG_ON(__alignof__(struct tvec_base) & TIMER_FLAG_MASK);
1786
1787 err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE,
1788 (void *)(long)smp_processor_id());
Ingo Molnar82f67cd2007-02-16 01:28:13 -08001789 init_timer_stats();
1790
Akinobu Mita9e506f72010-06-04 14:15:04 -07001791 BUG_ON(err != NOTIFY_OK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001792 register_cpu_notifier(&timers_nb);
Carlos R. Mafra962cf362008-05-15 11:15:37 -03001793 open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001794}
1795
Linus Torvalds1da177e2005-04-16 15:20:36 -07001796/**
1797 * msleep - sleep safely even with waitqueue interruptions
1798 * @msecs: Time in milliseconds to sleep for
1799 */
1800void msleep(unsigned int msecs)
1801{
1802 unsigned long timeout = msecs_to_jiffies(msecs) + 1;
1803
Nishanth Aravamudan75bcc8c2005-09-10 00:27:24 -07001804 while (timeout)
1805 timeout = schedule_timeout_uninterruptible(timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001806}
1807
1808EXPORT_SYMBOL(msleep);
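
/*
 * Editorial note (not part of the original source): because the request is
 * rounded up to jiffies and one extra jiffy is added, msleep() can sleep
 * much longer than asked for small values.  With HZ = 100, for instance,
 * msleep(1) becomes a two-jiffy timeout, i.e. somewhere between roughly
 * 10 and 20 ms.  Typical use:
 *
 *	msleep(50);	(give the hardware ~50 ms to settle)
 */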
1809
1810/**
Domen Puncer96ec3ef2005-06-25 14:58:43 -07001811 * msleep_interruptible - sleep waiting for signals
Linus Torvalds1da177e2005-04-16 15:20:36 -07001812 * @msecs: Time in milliseconds to sleep for
1813 */
1814unsigned long msleep_interruptible(unsigned int msecs)
1815{
1816 unsigned long timeout = msecs_to_jiffies(msecs) + 1;
1817
Nishanth Aravamudan75bcc8c2005-09-10 00:27:24 -07001818 while (timeout && !signal_pending(current))
1819 timeout = schedule_timeout_interruptible(timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001820 return jiffies_to_msecs(timeout);
1821}
1822
1823EXPORT_SYMBOL(msleep_interruptible);
Patrick Pannuto5e7f5a12010-08-02 15:01:04 -07001824
1825static int __sched do_usleep_range(unsigned long min, unsigned long max)
1826{
1827 ktime_t kmin;
1828 unsigned long delta;
1829
1830 kmin = ktime_set(0, min * NSEC_PER_USEC);
1831 delta = (max - min) * NSEC_PER_USEC;
1832 return schedule_hrtimeout_range(&kmin, delta, HRTIMER_MODE_REL);
1833}
1834
1835/**
1836 * usleep_range - Drop-in replacement for udelay where wakeup is flexible
1837 * @min: Minimum time in usecs to sleep
1838 * @max: Maximum time in usecs to sleep
1839 */
1840void usleep_range(unsigned long min, unsigned long max)
1841{
1842 __set_current_state(TASK_UNINTERRUPTIBLE);
1843 do_usleep_range(min, max);
1844}
1845EXPORT_SYMBOL(usleep_range);
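
/*
 * Editorial example (not part of the original source): usleep_range() is
 * intended for the roughly 10 us .. 20 ms window where udelay() would burn
 * too much CPU and msleep() is too coarse.  A driver waiting for a
 * (hypothetical) device register to settle might do:
 *
 *	writel(val, base + MY_REG_CTRL);	(MY_REG_CTRL is illustrative)
 *	usleep_range(100, 200);			(any wakeup in this window is fine)
 *
 * The slack between min and max lets the hrtimer code coalesce the wakeup
 * with other pending timers and save power.
 */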