/*
 *  linux/kernel/timer.c
 *
 *  Kernel internal timers
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
 *
 *  1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *              "A Kernel Model for Precision Timekeeping" by Dave Mills
 *  1998-12-24  Fixed an xtime SMP race (we need the xtime_lock rw spinlock to
 *              serialize accesses to xtime/lost_ticks).
 *                              Copyright (C) 1998  Andrea Arcangeli
 *  1999-03-10  Improved NTP compatibility by Ulrich Windl
 *  2002-05-31  Move sys_sysinfo here and make its locking sane, Robert Love
 *  2000-10-05  Implemented scalable SMP per-CPU timer handling.
 *                              Copyright (C) 2000, 2001, 2002  Ingo Molnar
 *              Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 */

#include <linux/kernel_stat.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pid_namespace.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/tick.h>
#include <linux/kallsyms.h>
#include <linux/irq_work.h>
#include <linux/sched.h>
#include <linux/sched/sysctl.h>
#include <linux/slab.h>
#include <linux/compat.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>

#include "tick-internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/timer.h>

__visible u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

/*
 * The timer wheel has LVL_DEPTH array levels. Each level provides an array of
 * LVL_SIZE buckets. Each level is driven by its own clock and therefore each
 * level has a different granularity.
 *
 * The level granularity is:		LVL_CLK_DIV ^ lvl
 * The level clock frequency is:	HZ / (LVL_CLK_DIV ^ level)
 *
 * The array level of a newly armed timer depends on the relative expiry
 * time. The farther the expiry time is away the higher the array level and
 * therefore the granularity becomes.
 *
 * Contrary to the original timer wheel implementation, which aims for 'exact'
 * expiry of the timers, this implementation removes the need for recascading
 * the timers into the lower array levels. The previous 'classic' timer wheel
 * implementation of the kernel already violated the 'exact' expiry by adding
 * slack to the expiry time to provide batched expiration. The granularity
 * levels provide implicit batching.
 *
 * This is an optimization of the original timer wheel implementation for the
 * majority of the timer wheel use cases: timeouts. The vast majority of
 * timeout timers (networking, disk I/O ...) are canceled before expiry. If
 * the timeout expires it indicates that normal operation is disturbed, so it
 * does not matter much whether the timeout comes with a slight delay.
 *
 * The only exceptions to this are networking timers with a small expiry
 * time. They rely on the granularity. Those fit into the first wheel level,
 * which has HZ granularity.
 *
 * We don't have cascading anymore. Timers with an expiry time above the
 * capacity of the last wheel level are force expired at the maximum timeout
 * value of the last wheel level. From data sampling we know that the maximum
 * value observed is 5 days (network connection tracking), so this should not
 * be an issue.
 *
 * The currently chosen array constant values are a good compromise between
 * array size and granularity.
 *
 * This results in the following granularity and range levels:
 *
 * HZ 1000 steps
 * Level Offset  Granularity            Range
 *  0      0         1 ms                0 ms -         63 ms
 *  1     64         8 ms               64 ms -        511 ms
 *  2    128        64 ms              512 ms -       4095 ms (512ms - ~4s)
 *  3    192       512 ms             4096 ms -      32767 ms (~4s - ~32s)
 *  4    256      4096 ms (~4s)      32768 ms -     262143 ms (~32s - ~4m)
 *  5    320     32768 ms (~32s)    262144 ms -    2097151 ms (~4m - ~34m)
 *  6    384    262144 ms (~4m)    2097152 ms -   16777215 ms (~34m - ~4h)
 *  7    448   2097152 ms (~34m)  16777216 ms -  134217727 ms (~4h - ~1d)
 *  8    512  16777216 ms (~4h)  134217728 ms - 1073741822 ms (~1d - ~12d)
 *
 * HZ  300
 * Level Offset  Granularity            Range
 *  0	   0         3 ms                0 ms -        210 ms
 *  1     64        26 ms              213 ms -       1703 ms (213ms - ~1s)
 *  2    128       213 ms             1706 ms -      13650 ms (~1s - ~13s)
 *  3    192      1706 ms (~1s)      13653 ms -     109223 ms (~13s - ~1m)
 *  4    256     13653 ms (~13s)    109226 ms -     873810 ms (~1m - ~14m)
 *  5    320    109226 ms (~1m)     873813 ms -    6990503 ms (~14m - ~1h)
 *  6    384    873813 ms (~14m)   6990506 ms -   55924050 ms (~1h - ~15h)
 *  7    448   6990506 ms (~1h)   55924053 ms -  447392423 ms (~15h - ~5d)
 *  8    512  55924053 ms (~15h) 447392426 ms - 3579139406 ms (~5d - ~41d)
 *
 * HZ  250
 * Level Offset  Granularity            Range
 *  0	   0         4 ms                0 ms -        255 ms
 *  1     64        32 ms              256 ms -       2047 ms (256ms - ~2s)
 *  2    128       256 ms             2048 ms -      16383 ms (~2s - ~16s)
 *  3    192      2048 ms (~2s)      16384 ms -     131071 ms (~16s - ~2m)
 *  4    256     16384 ms (~16s)    131072 ms -    1048575 ms (~2m - ~17m)
 *  5    320    131072 ms (~2m)    1048576 ms -    8388607 ms (~17m - ~2h)
 *  6    384   1048576 ms (~17m)   8388608 ms -   67108863 ms (~2h - ~18h)
 *  7    448   8388608 ms (~2h)   67108864 ms -  536870911 ms (~18h - ~6d)
 *  8    512  67108864 ms (~18h) 536870912 ms - 4294967288 ms (~6d - ~49d)
 *
 * HZ  100
 * Level Offset  Granularity            Range
 *  0	   0        10 ms                0 ms -        630 ms
 *  1     64        80 ms              640 ms -       5110 ms (640ms - ~5s)
 *  2    128       640 ms             5120 ms -      40950 ms (~5s - ~40s)
 *  3    192      5120 ms (~5s)      40960 ms -     327670 ms (~40s - ~5m)
 *  4    256     40960 ms (~40s)    327680 ms -    2621430 ms (~5m - ~43m)
 *  5    320    327680 ms (~5m)    2621440 ms -   20971510 ms (~43m - ~5h)
 *  6    384   2621440 ms (~43m)  20971520 ms -  167772150 ms (~5h - ~1d)
 *  7    448  20971520 ms (~5h)  167772160 ms - 1342177270 ms (~1d - ~15d)
 */

/* Clock divisor for the next level */
#define LVL_CLK_SHIFT	3
#define LVL_CLK_DIV	(1UL << LVL_CLK_SHIFT)
#define LVL_CLK_MASK	(LVL_CLK_DIV - 1)
#define LVL_SHIFT(n)	((n) * LVL_CLK_SHIFT)
#define LVL_GRAN(n)	(1UL << LVL_SHIFT(n))
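
/*
 * Worked example with the values above (LVL_CLK_SHIFT == 3, so
 * LVL_CLK_DIV == 8): LVL_SHIFT(0) == 0, LVL_SHIFT(1) == 3 and
 * LVL_SHIFT(2) == 6, hence LVL_GRAN(0) == 1 tick, LVL_GRAN(1) == 8
 * ticks and LVL_GRAN(2) == 64 ticks. At HZ=1000 that is the 1ms, 8ms
 * and 64ms granularity shown in the level tables above.
 */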

/*
 * The time start value for each level to select the bucket at enqueue
 * time.
 */
#define LVL_START(n)	((LVL_SIZE - 1) << (((n) - 1) * LVL_CLK_SHIFT))

/* Size of each clock level */
#define LVL_BITS	6
#define LVL_SIZE	(1UL << LVL_BITS)
#define LVL_MASK	(LVL_SIZE - 1)
#define LVL_OFFS(n)	((n) * LVL_SIZE)

/* Level depth */
#if HZ > 100
# define LVL_DEPTH	9
# else
# define LVL_DEPTH	8
#endif

/* The cutoff (max. capacity of the wheel) */
#define WHEEL_TIMEOUT_CUTOFF	(LVL_START(LVL_DEPTH))
#define WHEEL_TIMEOUT_MAX	(WHEEL_TIMEOUT_CUTOFF - LVL_GRAN(LVL_DEPTH - 1))
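
/*
 * E.g. for HZ=1000 (LVL_DEPTH == 9): WHEEL_TIMEOUT_CUTOFF ==
 * LVL_START(9) == 63 << 24 == 1056964608 ticks, which corresponds to
 * the roughly 12 days shown as the range end of the last level above.
 */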

/*
 * The resulting wheel size. If NOHZ is configured we allocate two
 * wheels so we have a separate storage for the deferrable timers.
 */
#define WHEEL_SIZE	(LVL_SIZE * LVL_DEPTH)

#ifdef CONFIG_NO_HZ_COMMON
# define NR_BASES	2
# define BASE_STD	0
# define BASE_DEF	1
#else
# define NR_BASES	1
# define BASE_STD	0
# define BASE_DEF	0
#endif

struct timer_base {
	spinlock_t		lock;
	struct timer_list	*running_timer;
	unsigned long		clk;
	unsigned long		next_expiry;
	unsigned int		cpu;
	bool			migration_enabled;
	bool			nohz_active;
	bool			is_idle;
	DECLARE_BITMAP(pending_map, WHEEL_SIZE);
	struct hlist_head	vectors[WHEEL_SIZE];
} ____cacheline_aligned;

static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]);

#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
unsigned int sysctl_timer_migration = 1;

void timers_update_migration(bool update_nohz)
{
	bool on = sysctl_timer_migration && tick_nohz_active;
	unsigned int cpu;

	/* Avoid the loop, if nothing to update */
	if (this_cpu_read(timer_bases[BASE_STD].migration_enabled) == on)
		return;

	for_each_possible_cpu(cpu) {
		per_cpu(timer_bases[BASE_STD].migration_enabled, cpu) = on;
		per_cpu(timer_bases[BASE_DEF].migration_enabled, cpu) = on;
		per_cpu(hrtimer_bases.migration_enabled, cpu) = on;
		if (!update_nohz)
			continue;
		per_cpu(timer_bases[BASE_STD].nohz_active, cpu) = true;
		per_cpu(timer_bases[BASE_DEF].nohz_active, cpu) = true;
		per_cpu(hrtimer_bases.nohz_active, cpu) = true;
	}
}

int timer_migration_handler(struct ctl_table *table, int write,
			    void __user *buffer, size_t *lenp,
			    loff_t *ppos)
{
	static DEFINE_MUTEX(mutex);
	int ret;

	mutex_lock(&mutex);
	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (!ret && write)
		timers_update_migration(false);
	mutex_unlock(&mutex);
	return ret;
}
#endif
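
/*
 * The handler above is wired up to the "timer_migration" sysctl, so
 * timer migration can be toggled at runtime, e.g.:
 *
 *	echo 0 > /proc/sys/kernel/timer_migration
 */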

static unsigned long round_jiffies_common(unsigned long j, int cpu,
		bool force_up)
{
	int rem;
	unsigned long original = j;

	/*
	 * We don't want all cpus firing their timers at once hitting the
	 * same lock or cachelines, so we skew each extra cpu with an extra
	 * 3 jiffies. This 3 jiffies came originally from the mm/ code which
	 * already did this.
	 * The skew is done by adding 3*cpunr, then rounding, then subtracting
	 * this extra offset again.
	 */
	j += cpu * 3;

	rem = j % HZ;

	/*
	 * If the target jiffy is just after a whole second (which can happen
	 * due to delays of the timer irq, long irq off times etc.) then
	 * we should round down to the whole second, not up. Use 1/4th second
	 * as cutoff for this rounding as an extreme upper bound for this.
	 * But never round down if @force_up is set.
	 */
	if (rem < HZ/4 && !force_up) /* round down */
		j = j - rem;
	else /* round up */
		j = j - rem + HZ;

	/* now that we have rounded, subtract the extra skew again */
	j -= cpu * 3;

	/*
	 * Make sure j is still in the future. Otherwise return the
	 * unmodified value.
	 */
	return time_is_after_jiffies(j) ? j : original;
}
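
/*
 * Numerical example for HZ=1000, cpu == 0, force_up == false (and
 * assuming the rounded value is still in the future): j == 5030 gives
 * rem == 30 < HZ/4, so the result is rounded down to 5000; j == 5400
 * gives rem == 400 >= HZ/4, so the result is rounded up to 6000. For
 * cpu == 2 the same computation is done on j + 6 and the 6 is
 * subtracted again afterwards.
 */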

/**
 * __round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, false);
}
EXPORT_SYMBOL_GPL(__round_jiffies);

/**
 * __round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, false) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_relative);

/**
 * round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), false);
}
EXPORT_SYMBOL_GPL(round_jiffies);

/**
 * round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies_relative(unsigned long j)
{
	return __round_jiffies_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_relative);

/**
 * __round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, true);
}
EXPORT_SYMBOL_GPL(__round_jiffies_up);

/**
 * __round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies_relative() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, true) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);

/**
 * round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * This is the same as round_jiffies() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), true);
}
EXPORT_SYMBOL_GPL(round_jiffies_up);

/**
 * round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * This is the same as round_jiffies_relative() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up_relative(unsigned long j)
{
	return __round_jiffies_up_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_up_relative);

static inline unsigned int timer_get_idx(struct timer_list *timer)
{
	return (timer->flags & TIMER_ARRAYMASK) >> TIMER_ARRAYSHIFT;
}

static inline void timer_set_idx(struct timer_list *timer, unsigned int idx)
{
	timer->flags = (timer->flags & ~TIMER_ARRAYMASK) |
			idx << TIMER_ARRAYSHIFT;
}

/*
 * Helper function to calculate the array index for a given expiry
 * time.
 */
static inline unsigned calc_index(unsigned expires, unsigned lvl)
{
	expires = (expires + LVL_GRAN(lvl)) >> LVL_SHIFT(lvl);
	return LVL_OFFS(lvl) + (expires & LVL_MASK);
}
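
/*
 * E.g. calc_index(1000, 1): (1000 + LVL_GRAN(1)) >> LVL_SHIFT(1) ==
 * 1008 >> 3 == 126, and 126 & LVL_MASK == 62, so the timer lands in
 * bucket LVL_OFFS(1) + 62 == 126. Adding LVL_GRAN(lvl) first rounds
 * the expiry up to the next granularity boundary of that level.
 */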

static int calc_wheel_index(unsigned long expires, unsigned long clk)
{
	unsigned long delta = expires - clk;
	unsigned int idx;

	if (delta < LVL_START(1)) {
		idx = calc_index(expires, 0);
	} else if (delta < LVL_START(2)) {
		idx = calc_index(expires, 1);
	} else if (delta < LVL_START(3)) {
		idx = calc_index(expires, 2);
	} else if (delta < LVL_START(4)) {
		idx = calc_index(expires, 3);
	} else if (delta < LVL_START(5)) {
		idx = calc_index(expires, 4);
	} else if (delta < LVL_START(6)) {
		idx = calc_index(expires, 5);
	} else if (delta < LVL_START(7)) {
		idx = calc_index(expires, 6);
	} else if (LVL_DEPTH > 8 && delta < LVL_START(8)) {
		idx = calc_index(expires, 7);
	} else if ((long) delta < 0) {
		idx = clk & LVL_MASK;
	} else {
		/*
		 * Force obscenely large timeouts to expire at the
		 * capacity limit of the wheel.
		 */
		if (expires >= WHEEL_TIMEOUT_CUTOFF)
			expires = WHEEL_TIMEOUT_MAX;

		idx = calc_index(expires, LVL_DEPTH - 1);
	}
	return idx;
}
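
/*
 * Example: with base->clk == 1000000 and expires == 1000500 the delta
 * is 500 ticks. That is >= LVL_START(1) == 63 but < LVL_START(2) ==
 * 504, so the timer goes into level 1 and may fire up to LVL_GRAN(1)
 * == 8 ticks late due to the coarser granularity of that level.
 */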

/*
 * Enqueue the timer into the hash bucket, mark it pending in
 * the bitmap and store the index in the timer flags.
 */
static void enqueue_timer(struct timer_base *base, struct timer_list *timer,
			  unsigned int idx)
{
	hlist_add_head(&timer->entry, base->vectors + idx);
	__set_bit(idx, base->pending_map);
	timer_set_idx(timer, idx);
}

static void
__internal_add_timer(struct timer_base *base, struct timer_list *timer)
{
	unsigned int idx;

	idx = calc_wheel_index(timer->expires, base->clk);
	enqueue_timer(base, timer, idx);
}

static void
trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer)
{
	if (!IS_ENABLED(CONFIG_NO_HZ_COMMON) || !base->nohz_active)
		return;

	/*
	 * TODO: This wants some optimizing similar to the code below, but we
	 * will do that when we switch from push to pull for deferrable timers.
	 */
	if (timer->flags & TIMER_DEFERRABLE) {
		if (tick_nohz_full_cpu(base->cpu))
			wake_up_nohz_cpu(base->cpu);
		return;
	}

	/*
	 * We might have to IPI the remote CPU if the base is idle and the
	 * timer is not deferrable. If the other CPU is on the way to idle
	 * then it can't set base->is_idle as we hold the base lock:
	 */
	if (!base->is_idle)
		return;

	/* Check whether this is the new first expiring timer: */
	if (time_after_eq(timer->expires, base->next_expiry))
		return;

	/*
	 * Set the next expiry time and kick the CPU so it can reevaluate the
	 * wheel:
	 */
	base->next_expiry = timer->expires;
	wake_up_nohz_cpu(base->cpu);
}

static void
internal_add_timer(struct timer_base *base, struct timer_list *timer)
{
	__internal_add_timer(base, timer);
	trigger_dyntick_cpu(base, timer);
}

#ifdef CONFIG_TIMER_STATS
void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
{
	if (timer->start_site)
		return;

	timer->start_site = addr;
	memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
	timer->start_pid = current->pid;
}

static void timer_stats_account_timer(struct timer_list *timer)
{
	void *site;

	/*
	 * start_site can be concurrently reset by
	 * timer_stats_timer_clear_start_info()
	 */
	site = READ_ONCE(timer->start_site);
	if (likely(!site))
		return;

	timer_stats_update_stats(timer, timer->start_pid, site,
				 timer->function, timer->start_comm,
				 timer->flags);
}

#else
static void timer_stats_account_timer(struct timer_list *timer) {}
#endif

#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static struct debug_obj_descr timer_debug_descr;

static void *timer_debug_hint(void *addr)
{
	return ((struct timer_list *) addr)->function;
}

static bool timer_is_static_object(void *addr)
{
	struct timer_list *timer = addr;

	return (timer->entry.pprev == NULL &&
		timer->entry.next == TIMER_ENTRY_STATIC);
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool timer_fixup_init(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_init(timer, &timer_debug_descr);
		return true;
	default:
		return false;
	}
}

/* Stub timer callback for improperly used timers. */
static void stub_timer(unsigned long data)
{
	WARN_ON(1);
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool timer_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		setup_timer(timer, stub_timer, 0);
		return true;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return false;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool timer_fixup_free(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_free(timer, &timer_debug_descr);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_assert_init is called when:
 * - an untracked/uninit-ed object is found
 */
static bool timer_fixup_assert_init(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		setup_timer(timer, stub_timer, 0);
		return true;
	default:
		return false;
	}
}

static struct debug_obj_descr timer_debug_descr = {
	.name			= "timer_list",
	.debug_hint		= timer_debug_hint,
	.is_static_object	= timer_is_static_object,
	.fixup_init		= timer_fixup_init,
	.fixup_activate		= timer_fixup_activate,
	.fixup_free		= timer_fixup_free,
	.fixup_assert_init	= timer_fixup_assert_init,
};

static inline void debug_timer_init(struct timer_list *timer)
{
	debug_object_init(timer, &timer_debug_descr);
}

static inline void debug_timer_activate(struct timer_list *timer)
{
	debug_object_activate(timer, &timer_debug_descr);
}

static inline void debug_timer_deactivate(struct timer_list *timer)
{
	debug_object_deactivate(timer, &timer_debug_descr);
}

static inline void debug_timer_free(struct timer_list *timer)
{
	debug_object_free(timer, &timer_debug_descr);
}

static inline void debug_timer_assert_init(struct timer_list *timer)
{
	debug_object_assert_init(timer, &timer_debug_descr);
}

static void do_init_timer(struct timer_list *timer, unsigned int flags,
			  const char *name, struct lock_class_key *key);

void init_timer_on_stack_key(struct timer_list *timer, unsigned int flags,
			     const char *name, struct lock_class_key *key)
{
	debug_object_init_on_stack(timer, &timer_debug_descr);
	do_init_timer(timer, flags, name, key);
}
EXPORT_SYMBOL_GPL(init_timer_on_stack_key);

void destroy_timer_on_stack(struct timer_list *timer)
{
	debug_object_free(timer, &timer_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_timer_on_stack);

#else
static inline void debug_timer_init(struct timer_list *timer) { }
static inline void debug_timer_activate(struct timer_list *timer) { }
static inline void debug_timer_deactivate(struct timer_list *timer) { }
static inline void debug_timer_assert_init(struct timer_list *timer) { }
#endif

static inline void debug_init(struct timer_list *timer)
{
	debug_timer_init(timer);
	trace_timer_init(timer);
}

static inline void
debug_activate(struct timer_list *timer, unsigned long expires)
{
	debug_timer_activate(timer);
	trace_timer_start(timer, expires, timer->flags);
}

static inline void debug_deactivate(struct timer_list *timer)
{
	debug_timer_deactivate(timer);
	trace_timer_cancel(timer);
}

static inline void debug_assert_init(struct timer_list *timer)
{
	debug_timer_assert_init(timer);
}

static void do_init_timer(struct timer_list *timer, unsigned int flags,
			  const char *name, struct lock_class_key *key)
{
	timer->entry.pprev = NULL;
	timer->flags = flags | raw_smp_processor_id();
#ifdef CONFIG_TIMER_STATS
	timer->start_site = NULL;
	timer->start_pid = -1;
	memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
	lockdep_init_map(&timer->lockdep_map, name, key, 0);
}

/**
 * init_timer_key - initialize a timer
 * @timer: the timer to be initialized
 * @flags: timer flags
 * @name: name of the timer
 * @key: lockdep class key of the fake lock used for tracking timer
 *       sync lock dependencies
 *
 * init_timer_key() must be done to a timer prior to calling *any* of the
 * other timer functions.
 */
void init_timer_key(struct timer_list *timer, unsigned int flags,
		    const char *name, struct lock_class_key *key)
{
	debug_init(timer);
	do_init_timer(timer, flags, name, key);
}
EXPORT_SYMBOL(init_timer_key);

static inline void detach_timer(struct timer_list *timer, bool clear_pending)
{
	struct hlist_node *entry = &timer->entry;

	debug_deactivate(timer);

	__hlist_del(entry);
	if (clear_pending)
		entry->pprev = NULL;
	entry->next = LIST_POISON2;
}

static int detach_if_pending(struct timer_list *timer, struct timer_base *base,
			     bool clear_pending)
{
	unsigned idx = timer_get_idx(timer);

	if (!timer_pending(timer))
		return 0;

	if (hlist_is_singular_node(&timer->entry, base->vectors + idx))
		__clear_bit(idx, base->pending_map);

	detach_timer(timer, clear_pending);
	return 1;
}

static inline struct timer_base *get_timer_cpu_base(u32 tflags, u32 cpu)
{
	struct timer_base *base = per_cpu_ptr(&timer_bases[BASE_STD], cpu);

	/*
	 * If the timer is deferrable and nohz is active then we need to use
	 * the deferrable base.
	 */
	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active &&
	    (tflags & TIMER_DEFERRABLE))
		base = per_cpu_ptr(&timer_bases[BASE_DEF], cpu);
	return base;
}

static inline struct timer_base *get_timer_this_cpu_base(u32 tflags)
{
	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);

	/*
	 * If the timer is deferrable and nohz is active then we need to use
	 * the deferrable base.
	 */
	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active &&
	    (tflags & TIMER_DEFERRABLE))
		base = this_cpu_ptr(&timer_bases[BASE_DEF]);
	return base;
}

static inline struct timer_base *get_timer_base(u32 tflags)
{
	return get_timer_cpu_base(tflags, tflags & TIMER_CPUMASK);
}

#ifdef CONFIG_NO_HZ_COMMON
static inline struct timer_base *
get_target_base(struct timer_base *base, unsigned tflags)
{
#ifdef CONFIG_SMP
	if ((tflags & TIMER_PINNED) || !base->migration_enabled)
		return get_timer_this_cpu_base(tflags);
	return get_timer_cpu_base(tflags, get_nohz_timer_target());
#else
	return get_timer_this_cpu_base(tflags);
#endif
}

static inline void forward_timer_base(struct timer_base *base)
{
	unsigned long jnow = READ_ONCE(jiffies);

	/*
	 * We only forward the base when it's idle and we have a delta between
	 * base clock and jiffies.
	 */
	if (!base->is_idle || (long) (jnow - base->clk) < 2)
		return;

	/*
	 * If the next expiry value is > jiffies, then we fast forward to
	 * jiffies otherwise we forward to the next expiry value.
	 */
	if (time_after(base->next_expiry, jnow))
		base->clk = jnow;
	else
		base->clk = base->next_expiry;
}
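
/*
 * E.g. with an idle base, base->clk == 1000, jiffies == 1005 and
 * base->next_expiry == 1010 the base clock is forwarded to 1005; with
 * next_expiry == 1003 it is forwarded only to 1003, so no enqueued
 * timer can end up in the past relative to base->clk.
 */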
#else
static inline struct timer_base *
get_target_base(struct timer_base *base, unsigned tflags)
{
	return get_timer_this_cpu_base(tflags);
}

static inline void forward_timer_base(struct timer_base *base) { }
#endif

/*
 * We are using hashed locking: Holding per_cpu(timer_bases[x]).lock means
 * that all timers which are tied to this base are locked, and the base itself
 * is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found in the base->vectors array.
 *
 * When a timer is migrating then the TIMER_MIGRATING flag is set and we need
 * to wait until the migration is done.
 */
static struct timer_base *lock_timer_base(struct timer_list *timer,
					  unsigned long *flags)
	__acquires(timer->base->lock)
{
	for (;;) {
		struct timer_base *base;
		u32 tf;

		/*
		 * We need to use READ_ONCE() here, otherwise the compiler
		 * might re-read @tf between the check for TIMER_MIGRATING
		 * and spin_lock().
		 */
		tf = READ_ONCE(timer->flags);

		if (!(tf & TIMER_MIGRATING)) {
			base = get_timer_base(tf);
			spin_lock_irqsave(&base->lock, *flags);
			if (timer->flags == tf)
				return base;
			spin_unlock_irqrestore(&base->lock, *flags);
		}
		cpu_relax();
	}
}

static inline int
__mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
{
	struct timer_base *base, *new_base;
	unsigned int idx = UINT_MAX;
	unsigned long clk = 0, flags;
	int ret = 0;

	BUG_ON(!timer->function);

	/*
	 * This is a common optimization triggered by the networking code - if
	 * the timer is re-modified to have the same timeout or ends up in the
	 * same array bucket then just return:
	 */
	if (timer_pending(timer)) {
		if (timer->expires == expires)
			return 1;

		/*
		 * We lock the timer base and calculate the bucket index right
		 * here. If the timer ends up in the same bucket, then we
		 * just update the expiry time and avoid the whole
		 * dequeue/enqueue dance.
		 */
		base = lock_timer_base(timer, &flags);

		clk = base->clk;
		idx = calc_wheel_index(expires, clk);

		/*
		 * Retrieve and compare the array index of the pending
		 * timer. If it matches set the expiry to the new value so a
		 * subsequent call will exit in the expires check above.
		 */
		if (idx == timer_get_idx(timer)) {
			timer->expires = expires;
			ret = 1;
			goto out_unlock;
		}
	} else {
		base = lock_timer_base(timer, &flags);
	}

	timer_stats_timer_set_start_info(timer);

	ret = detach_if_pending(timer, base, false);
	if (!ret && pending_only)
		goto out_unlock;

	debug_activate(timer, expires);

	new_base = get_target_base(base, timer->flags);

	if (base != new_base) {
		/*
		 * We are trying to schedule the timer on the new base.
		 * However we can't change the timer's base while it is
		 * running, otherwise del_timer_sync() can't detect that the
		 * timer's handler has not finished yet. This also guarantees
		 * that the timer is serialized wrt itself.
		 */
		if (likely(base->running_timer != timer)) {
			/* See the comment in lock_timer_base() */
			timer->flags |= TIMER_MIGRATING;

			spin_unlock(&base->lock);
			base = new_base;
			spin_lock(&base->lock);
			WRITE_ONCE(timer->flags,
				   (timer->flags & ~TIMER_BASEMASK) | base->cpu);
		}
	}

	/* Try to forward a stale timer base clock */
	forward_timer_base(base);

	timer->expires = expires;
	/*
	 * If 'idx' was calculated above and the base time did not advance
	 * between calculating 'idx' and possibly switching the base, only
	 * enqueue_timer() and trigger_dyntick_cpu() are required. Otherwise
	 * we need to (re)calculate the wheel index via
	 * internal_add_timer().
	 */
	if (idx != UINT_MAX && clk == base->clk) {
		enqueue_timer(base, timer, idx);
		trigger_dyntick_cpu(base, timer);
	} else {
		internal_add_timer(base, timer);
	}

out_unlock:
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}

/**
 * mod_timer_pending - modify a pending timer's timeout
 * @timer: the pending timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer_pending() is the same for pending timers as mod_timer(),
 * but will not re-activate and modify already deleted timers.
 *
 * It is useful for unserialized use of timers.
 */
int mod_timer_pending(struct timer_list *timer, unsigned long expires)
{
	return __mod_timer(timer, expires, true);
}
EXPORT_SYMBOL(mod_timer_pending);

/**
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer() is a more efficient way to update the expire field of an
 * active timer (if the timer is inactive it will be activated)
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * The function returns whether it has modified a pending timer or not.
 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
	return __mod_timer(timer, expires, false);
}
EXPORT_SYMBOL(mod_timer);
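
/*
 * Minimal usage sketch (my_timer/my_timeout are illustrative names,
 * not part of this file):
 *
 *	static void my_timeout(unsigned long data)
 *	{
 *		pr_info("timer fired\n");
 *	}
 *
 *	static DEFINE_TIMER(my_timer, my_timeout, 0, 0);
 *
 *	mod_timer(&my_timer, jiffies + msecs_to_jiffies(100));
 *
 * arms (or re-arms) my_timer to fire roughly 100ms from now.
 */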

/**
 * add_timer - start a timer
 * @timer: the timer to be added
 *
 * The kernel will do a ->function(->data) callback from the
 * timer interrupt at the ->expires point in the future. The
 * current time is 'jiffies'.
 *
 * The timer's ->expires, ->function (and if the handler uses it, ->data)
 * fields must be set prior to calling this function.
 *
 * Timers with an ->expires field in the past will be executed in the next
 * timer tick.
 */
void add_timer(struct timer_list *timer)
{
	BUG_ON(timer_pending(timer));
	mod_timer(timer, timer->expires);
}
EXPORT_SYMBOL(add_timer);

/**
 * add_timer_on - start a timer on a particular CPU
 * @timer: the timer to be added
 * @cpu: the CPU to start it on
 *
 * This is not very scalable on SMP. Double adds are not possible.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
	struct timer_base *new_base, *base;
	unsigned long flags;

	timer_stats_timer_set_start_info(timer);
	BUG_ON(timer_pending(timer) || !timer->function);

	new_base = get_timer_cpu_base(timer->flags, cpu);

	/*
	 * If @timer was on a different CPU, it should be migrated with the
	 * old base locked to prevent other operations proceeding with the
	 * wrong base locked. See lock_timer_base().
	 */
	base = lock_timer_base(timer, &flags);
	if (base != new_base) {
		timer->flags |= TIMER_MIGRATING;

		spin_unlock(&base->lock);
		base = new_base;
		spin_lock(&base->lock);
		WRITE_ONCE(timer->flags,
			   (timer->flags & ~TIMER_BASEMASK) | cpu);
	}

	debug_activate(timer, timer->expires);
	internal_add_timer(base, timer);
	spin_unlock_irqrestore(&base->lock, flags);
}
EXPORT_SYMBOL_GPL(add_timer_on);

/**
 * del_timer - deactivate a timer.
 * @timer: the timer to be deactivated
 *
 * del_timer() deactivates a timer - this works on both active and inactive
 * timers.
 *
 * The function returns whether it has deactivated a pending timer or not.
 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
 * active timer returns 1.)
 */
int del_timer(struct timer_list *timer)
{
	struct timer_base *base;
	unsigned long flags;
	int ret = 0;

	debug_assert_init(timer);

	timer_stats_timer_clear_start_info(timer);
	if (timer_pending(timer)) {
		base = lock_timer_base(timer, &flags);
		ret = detach_if_pending(timer, base, true);
		spin_unlock_irqrestore(&base->lock, flags);
	}

	return ret;
}
EXPORT_SYMBOL(del_timer);

/**
 * try_to_del_timer_sync - Try to deactivate a timer
 * @timer: the timer to delete
 *
 * This function tries to deactivate a timer. Upon successful (ret >= 0)
 * exit the timer is not queued and the handler is not running on any CPU.
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
	struct timer_base *base;
	unsigned long flags;
	int ret = -1;

	debug_assert_init(timer);

	base = lock_timer_base(timer, &flags);

	if (base->running_timer != timer) {
		timer_stats_timer_clear_start_info(timer);
		ret = detach_if_pending(timer, base, true);
	}
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}
EXPORT_SYMBOL(try_to_del_timer_sync);

Yong Zhang6f1bc452010-10-20 15:57:31 -07001217#ifdef CONFIG_SMP
Rolf Eike Beer2aae4a12006-09-29 01:59:46 -07001218/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001219 * del_timer_sync - deactivate a timer and wait for the handler to finish.
1220 * @timer: the timer to be deactivated
1221 *
1222 * This function only differs from del_timer() on SMP: besides deactivating
1223 * the timer it also makes sure the handler has finished executing on other
1224 * CPUs.
1225 *
Robert P. J. Day72fd4a32007-02-10 01:45:59 -08001226 * Synchronization rules: Callers must prevent restarting of the timer,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001227 * otherwise this function is meaningless. It must not be called from
Tejun Heoc5f66e92012-08-08 11:10:28 -07001228 * interrupt contexts unless the timer is an irqsafe one. The caller must
1229 * not hold locks which would prevent completion of the timer's
1230 * handler. The timer's handler must not call add_timer_on(). Upon exit the
1231 * timer is not queued and the handler is not running on any CPU.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001232 *
Tejun Heoc5f66e92012-08-08 11:10:28 -07001233 * Note: For !irqsafe timers, you must not hold locks that are held in
1234 * interrupt context while calling this function. Even if the lock has
1235 * nothing to do with the timer in question. Here's why:
Steven Rostedt48228f72011-02-08 12:39:54 -05001236 *
1237 * CPU0 CPU1
1238 * ---- ----
1239 * <SOFTIRQ>
1240 * call_timer_fn();
1241 * base->running_timer = mytimer;
1242 * spin_lock_irq(somelock);
1243 * <IRQ>
1244 * spin_lock(somelock);
1245 * del_timer_sync(mytimer);
1246 * while (base->running_timer == mytimer);
1247 *
1248 * Now del_timer_sync() will never return and never release somelock.
1249 * The interrupt on the other CPU is waiting to grab somelock but
1250 * it has interrupted the softirq that CPU0 is waiting to finish.
1251 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07001252 * The function returns whether it has deactivated a pending timer or not.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001253 */
1254int del_timer_sync(struct timer_list *timer)
1255{
Johannes Berg6f2b9b92009-01-29 16:03:20 +01001256#ifdef CONFIG_LOCKDEP
Peter Zijlstraf266a512011-02-03 15:09:41 +01001257 unsigned long flags;
1258
Steven Rostedt48228f72011-02-08 12:39:54 -05001259 /*
1260 * If lockdep gives a backtrace here, please reference
1261 * the synchronization rules above.
1262 */
Peter Zijlstra7ff20792011-02-08 15:18:00 +01001263 local_irq_save(flags);
Johannes Berg6f2b9b92009-01-29 16:03:20 +01001264 lock_map_acquire(&timer->lockdep_map);
1265 lock_map_release(&timer->lockdep_map);
Peter Zijlstra7ff20792011-02-08 15:18:00 +01001266 local_irq_restore(flags);
Johannes Berg6f2b9b92009-01-29 16:03:20 +01001267#endif
Yong Zhang466bd302010-10-20 15:57:33 -07001268 /*
1269	 * Don't use del_timer_sync() from hardirq context unless the timer
1270	 * is irqsafe: spinning for a running handler could deadlock.
1271 */
Thomas Gleixner0eeda712015-05-26 22:50:29 +00001272 WARN_ON(in_irq() && !(timer->flags & TIMER_IRQSAFE));
Oleg Nesterovfd450b72005-06-23 00:08:59 -07001273 for (;;) {
1274 int ret = try_to_del_timer_sync(timer);
1275 if (ret >= 0)
1276 return ret;
Andrew Mortona0009652006-07-14 00:24:06 -07001277 cpu_relax();
Oleg Nesterovfd450b72005-06-23 00:08:59 -07001278 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001279}
1280EXPORT_SYMBOL(del_timer_sync);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001281#endif
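
/*
 * Teardown sketch (illustrative; struct my_dev and its watchdog timer
 * are hypothetical). Before freeing an object that embeds a timer,
 * deactivate the timer and wait for a running handler, otherwise the
 * handler may touch freed memory. Per the rules above this must not be
 * done from the handler itself nor, for !irqsafe timers, from hardirq
 * context.
 *
 *	static void my_dev_destroy(struct my_dev *dev)
 *	{
 *		del_timer_sync(&dev->watchdog);
 *		kfree(dev);
 *	}
 */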
1282
Thomas Gleixner576da122010-03-12 21:10:29 +01001283static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
1284 unsigned long data)
1285{
Peter Zijlstra4a2b4b22013-08-14 14:55:24 +02001286 int count = preempt_count();
Thomas Gleixner576da122010-03-12 21:10:29 +01001287
1288#ifdef CONFIG_LOCKDEP
1289 /*
1290	 * It is permissible to free the timer from inside the
1291	 * callback function itself; lockdep needs to take that into
1292	 * account too. To avoid bogus "held lock freed" warnings as
1293	 * well as problems when looking into timer->lockdep_map, make
1294	 * a copy and use that here.
1295 */
Peter Zijlstra4d82a1d2012-05-15 08:06:19 -07001296 struct lockdep_map lockdep_map;
1297
1298 lockdep_copy_map(&lockdep_map, &timer->lockdep_map);
Thomas Gleixner576da122010-03-12 21:10:29 +01001299#endif
1300 /*
1301 * Couple the lock chain with the lock chain at
1302 * del_timer_sync() by acquiring the lock_map around the fn()
1303 * call here and in del_timer_sync().
1304 */
1305 lock_map_acquire(&lockdep_map);
1306
1307 trace_timer_expire_entry(timer);
1308 fn(data);
1309 trace_timer_expire_exit(timer);
1310
1311 lock_map_release(&lockdep_map);
1312
Peter Zijlstra4a2b4b22013-08-14 14:55:24 +02001313 if (count != preempt_count()) {
Thomas Gleixner802702e2010-03-12 20:13:23 +01001314 WARN_ONCE(1, "timer: %pF preempt leak: %08x -> %08x\n",
Peter Zijlstra4a2b4b22013-08-14 14:55:24 +02001315 fn, count, preempt_count());
Thomas Gleixner802702e2010-03-12 20:13:23 +01001316 /*
1317 * Restore the preempt count. That gives us a decent
1318 * chance to survive and extract information. If the
1319 * callback kept a lock held, bad luck, but not worse
1320 * than the BUG() we had.
1321 */
Peter Zijlstra4a2b4b22013-08-14 14:55:24 +02001322 preempt_count_set(count);
Thomas Gleixner576da122010-03-12 21:10:29 +01001323 }
1324}
1325
Thomas Gleixner500462a2016-07-04 09:50:30 +00001326static void expire_timers(struct timer_base *base, struct hlist_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001327{
Thomas Gleixner500462a2016-07-04 09:50:30 +00001328 while (!hlist_empty(head)) {
1329 struct timer_list *timer;
1330 void (*fn)(unsigned long);
1331 unsigned long data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001332
Thomas Gleixner500462a2016-07-04 09:50:30 +00001333 timer = hlist_entry(head->first, struct timer_list, entry);
1334 timer_stats_account_timer(timer);
Thomas Gleixner3bb475a2015-05-26 22:50:24 +00001335
Thomas Gleixner500462a2016-07-04 09:50:30 +00001336 base->running_timer = timer;
1337 detach_timer(timer, true);
Thomas Gleixner3bb475a2015-05-26 22:50:24 +00001338
Thomas Gleixner500462a2016-07-04 09:50:30 +00001339 fn = timer->function;
1340 data = timer->data;
Thomas Gleixner3bb475a2015-05-26 22:50:24 +00001341
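		/*
		 * Irqsafe timers are expired with interrupts disabled and
		 * only drop base->lock around the callback; normal timers
		 * reenable interrupts while the callback runs.
		 */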
Thomas Gleixner500462a2016-07-04 09:50:30 +00001342 if (timer->flags & TIMER_IRQSAFE) {
1343 spin_unlock(&base->lock);
1344 call_timer_fn(timer, fn, data);
1345 spin_lock(&base->lock);
1346 } else {
1347 spin_unlock_irq(&base->lock);
1348 call_timer_fn(timer, fn, data);
1349 spin_lock_irq(&base->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001350 }
1351 }
Thomas Gleixner500462a2016-07-04 09:50:30 +00001352}
1353
Anna-Maria Gleixner23696832016-07-04 09:50:34 +00001354static int __collect_expired_timers(struct timer_base *base,
1355 struct hlist_head *heads)
Thomas Gleixner500462a2016-07-04 09:50:30 +00001356{
1357 unsigned long clk = base->clk;
1358 struct hlist_head *vec;
1359 int i, levels = 0;
1360 unsigned int idx;
1361
1362 for (i = 0; i < LVL_DEPTH; i++) {
1363 idx = (clk & LVL_MASK) + i * LVL_SIZE;
1364
1365 if (__test_and_clear_bit(idx, base->pending_map)) {
1366 vec = base->vectors + idx;
1367 hlist_move_list(vec, heads++);
1368 levels++;
1369 }
1370 /* Is it time to look at the next level? */
1371 if (clk & LVL_CLK_MASK)
1372 break;
1373 /* Shift clock for the next level granularity */
1374 clk >>= LVL_CLK_SHIFT;
1375 }
1376 return levels;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001377}
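
/*
 * Worked example for the loop above (assuming the wheel geometry of this
 * file: LVL_SIZE == 64, LVL_CLK_SHIFT == 3). With base->clk == 64 the low
 * LVL_CLK_MASK bits are zero at levels 0 and 1, so buckets 0 (level 0),
 * 72 (slot 8 of level 1) and 129 (slot 1 of level 2) are all tested
 * before the shifted clock (1) has low bits set and the loop breaks.
 * With base->clk == 65 the loop stops after bucket 1 of level 0.
 */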
1378
Frederic Weisbecker3451d022011-08-10 23:21:01 +02001379#ifdef CONFIG_NO_HZ_COMMON
Linus Torvalds1da177e2005-04-16 15:20:36 -07001380/*
Anna-Maria Gleixner23696832016-07-04 09:50:34 +00001381 * Find the next pending bucket of a level. Search from level start (@offset)
1382 * + @clk upwards and, if nothing is there, wrap around and search
1383 * from the start of the level (@offset) up to @offset + @clk.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001384 */
Thomas Gleixner500462a2016-07-04 09:50:30 +00001385static int next_pending_bucket(struct timer_base *base, unsigned offset,
1386 unsigned clk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001387{
Thomas Gleixner500462a2016-07-04 09:50:30 +00001388 unsigned pos, start = offset + clk;
1389 unsigned end = offset + LVL_SIZE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001390
Thomas Gleixner500462a2016-07-04 09:50:30 +00001391 pos = find_next_bit(base->pending_map, end, start);
1392 if (pos < end)
1393 return pos - start;
Venki Pallipadi6e453a62007-05-08 00:27:44 -07001394
Thomas Gleixner500462a2016-07-04 09:50:30 +00001395 pos = find_next_bit(base->pending_map, start, offset);
1396 return pos < start ? pos + LVL_SIZE - start : -1;
1397}
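
/*
 * Worked example for the wrapping search above (hypothetical pending
 * state). With LVL_SIZE == 64, @offset == 64 (level 1) and @clk == 60,
 * the first find_next_bit() scans buckets 124..127. If only bucket 66
 * is pending, the second scan finds pos == 66 and the function returns
 * 66 + 64 - 124 == 6: the pending bucket is six level-1 ticks after
 * @clk, modulo the level size.
 */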
1398
1399/*
Anna-Maria Gleixner23696832016-07-04 09:50:34 +00001400 * Search for the first expiring timer in the various clock levels. Caller must
1401 * hold base->lock.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001402 */
Thomas Gleixner494af3e2016-07-04 09:50:28 +00001403static unsigned long __next_timer_interrupt(struct timer_base *base)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001404{
Thomas Gleixner500462a2016-07-04 09:50:30 +00001405 unsigned long clk, next, adj;
1406 unsigned lvl, offset = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001407
Thomas Gleixner500462a2016-07-04 09:50:30 +00001408 next = base->clk + NEXT_TIMER_MAX_DELTA;
1409 clk = base->clk;
1410 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) {
1411 int pos = next_pending_bucket(base, offset, clk & LVL_MASK);
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001412
Thomas Gleixner500462a2016-07-04 09:50:30 +00001413 if (pos >= 0) {
1414 unsigned long tmp = clk + (unsigned long) pos;
1415
1416 tmp <<= LVL_SHIFT(lvl);
1417 if (time_before(tmp, next))
1418 next = tmp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001419 }
Thomas Gleixner500462a2016-07-04 09:50:30 +00001420 /*
1421 * Clock for the next level. If the current level clock lower
1422 * bits are zero, we look at the next level as is. If not we
1423 * need to advance it by one because that's going to be the
1424 * next expiring bucket in that level. base->clk is the next
1425	 * expiring jiffy. So in the case of:
1426 *
1427 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
1428 * 0 0 0 0 0 0
1429 *
1430 * we have to look at all levels @index 0. With
1431 *
1432 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
1433 * 0 0 0 0 0 2
1434 *
1435 * LVL0 has the next expiring bucket @index 2. The upper
1436 * levels have the next expiring bucket @index 1.
1437 *
1438 * In case that the propagation wraps the next level the same
1439 * rules apply:
1440 *
1441 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
1442 * 0 0 0 0 F 2
1443 *
1444 * So after looking at LVL0 we get:
1445 *
1446 * LVL5 LVL4 LVL3 LVL2 LVL1
1447 * 0 0 0 1 0
1448 *
1449 * So no propagation from LVL1 to LVL2 because that happened
1450 * with the add already, but then we need to propagate further
1451 * from LVL2 to LVL3.
1452 *
1453 * So the simple check whether the lower bits of the current
1454 * level are 0 or not is sufficient for all cases.
1455 */
1456 adj = clk & LVL_CLK_MASK ? 1 : 0;
1457 clk >>= LVL_CLK_SHIFT;
1458 clk += adj;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001459 }
Thomas Gleixner500462a2016-07-04 09:50:30 +00001460 return next;
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001461}
1462
1463/*
1464 * Check, if the next hrtimer event is before the next timer wheel
1465 * event:
1466 */
Thomas Gleixnerc1ad3482015-04-14 21:08:58 +00001467static u64 cmp_next_hrtimer_event(u64 basem, u64 expires)
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001468{
Thomas Gleixnerc1ad3482015-04-14 21:08:58 +00001469 u64 nextevt = hrtimer_get_next_event();
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001470
Thomas Gleixnerc1ad3482015-04-14 21:08:58 +00001471 /*
1472 * If high resolution timers are enabled
1473 * hrtimer_get_next_event() returns KTIME_MAX.
1474 */
1475 if (expires <= nextevt)
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001476 return expires;
1477
Thomas Gleixner9501b6c2007-03-25 14:31:17 +02001478 /*
Thomas Gleixnerc1ad3482015-04-14 21:08:58 +00001479 * If the next timer is already expired, return the tick base
1480 * time so the tick is fired immediately.
Thomas Gleixner9501b6c2007-03-25 14:31:17 +02001481 */
Thomas Gleixnerc1ad3482015-04-14 21:08:58 +00001482 if (nextevt <= basem)
1483 return basem;
Thomas Gleixnereaad0842007-05-29 23:47:39 +02001484
1485 /*
Thomas Gleixnerc1ad3482015-04-14 21:08:58 +00001486	 * Round up to the next jiffy. High resolution timers are
1487 * off, so the hrtimers are expired in the tick and we need to
1488 * make sure that this tick really expires the timer to avoid
1489 * a ping pong of the nohz stop code.
1490 *
1491 * Use DIV_ROUND_UP_ULL to prevent gcc calling __divdi3
Thomas Gleixnereaad0842007-05-29 23:47:39 +02001492 */
Thomas Gleixnerc1ad3482015-04-14 21:08:58 +00001493 return DIV_ROUND_UP_ULL(nextevt, TICK_NSEC) * TICK_NSEC;
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001494}
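
/*
 * Rounding sketch for the return above (assuming HZ == 250, so
 * TICK_NSEC == 4000000): an hrtimer event at nextevt == 10500000 ns
 * yields DIV_ROUND_UP_ULL(10500000, 4000000) * 4000000 == 12000000 ns,
 * the next tick boundary, so the tick that expires the hrtimers is
 * never programmed short of the event.
 */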
1495
1496/**
Thomas Gleixnerc1ad3482015-04-14 21:08:58 +00001497 * get_next_timer_interrupt - return the time (clock mono) of the next timer
1498 * @basej: base time jiffies
1499 * @basem: base time clock monotonic
1500 *
1501 * Returns the tick aligned clock monotonic time of the next pending
1502 * timer or KTIME_MAX if no timer is pending.
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001503 */
Thomas Gleixnerc1ad3482015-04-14 21:08:58 +00001504u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001505{
Thomas Gleixner500462a2016-07-04 09:50:30 +00001506 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
Thomas Gleixnerc1ad3482015-04-14 21:08:58 +00001507 u64 expires = KTIME_MAX;
1508 unsigned long nextevt;
Chris Metcalf46c8f0b2016-08-08 16:29:07 -04001509 bool is_max_delta;
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001510
Heiko Carstensdbd87b52010-12-01 10:11:09 +01001511 /*
1512 * Pretend that there is no timer pending if the cpu is offline.
1513 * Possible pending timers will be migrated later to an active cpu.
1514 */
1515 if (cpu_is_offline(smp_processor_id()))
Thomas Gleixnere40468a2012-05-25 22:08:59 +00001516 return expires;
1517
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001518 spin_lock(&base->lock);
Thomas Gleixner500462a2016-07-04 09:50:30 +00001519 nextevt = __next_timer_interrupt(base);
Chris Metcalf46c8f0b2016-08-08 16:29:07 -04001520 is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA);
Thomas Gleixnera683f392016-07-04 09:50:36 +00001521 base->next_expiry = nextevt;
1522 /*
Thomas Gleixner041ad7b2016-10-22 11:07:35 +00001523 * We have a fresh next event. Check whether we can forward the
1524	 * base. We can only do that when @basej is past base->clk,
1525	 * otherwise we might rewind base->clk.
Thomas Gleixnera683f392016-07-04 09:50:36 +00001526 */
Thomas Gleixner041ad7b2016-10-22 11:07:35 +00001527 if (time_after(basej, base->clk)) {
1528 if (time_after(nextevt, basej))
1529 base->clk = basej;
1530 else if (time_after(nextevt, base->clk))
1531 base->clk = nextevt;
1532 }
Thomas Gleixnera683f392016-07-04 09:50:36 +00001533
1534 if (time_before_eq(nextevt, basej)) {
1535 expires = basem;
1536 base->is_idle = false;
1537 } else {
Chris Metcalf46c8f0b2016-08-08 16:29:07 -04001538 if (!is_max_delta)
Matija Glavinic Pecotic9ef8b232017-08-01 09:11:52 +02001539 expires = basem + (u64)(nextevt - basej) * TICK_NSEC;
Thomas Gleixnera683f392016-07-04 09:50:36 +00001540 /*
1541 * If we expect to sleep more than a tick, mark the base idle:
1542 */
1543 if ((expires - basem) > TICK_NSEC)
1544 base->is_idle = true;
Thomas Gleixnere40468a2012-05-25 22:08:59 +00001545 }
Oleg Nesterov3691c512006-03-31 02:30:30 -08001546 spin_unlock(&base->lock);
Tony Lindgren69239742006-03-06 15:42:45 -08001547
Thomas Gleixnerc1ad3482015-04-14 21:08:58 +00001548 return cmp_next_hrtimer_event(basem, expires);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001549}
Anna-Maria Gleixner23696832016-07-04 09:50:34 +00001550
Thomas Gleixnera683f392016-07-04 09:50:36 +00001551/**
1552 * timer_clear_idle - Clear the idle state of the timer base
1553 *
1554 * Called with interrupts disabled
1555 */
1556void timer_clear_idle(void)
1557{
1558 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
1559
1560 /*
1561 * We do this unlocked. The worst outcome is a remote enqueue sending
1562 * a pointless IPI, but taking the lock would just make the window for
1563 * sending the IPI a few instructions smaller for the cost of taking
1564 * the lock in the exit from idle path.
1565 */
1566 base->is_idle = false;
1567}
1568
Anna-Maria Gleixner23696832016-07-04 09:50:34 +00001569static int collect_expired_timers(struct timer_base *base,
1570 struct hlist_head *heads)
1571{
1572 /*
1573 * NOHZ optimization. After a long idle sleep we need to forward the
1574 * base to current jiffies. Avoid a loop by searching the bitfield for
1575 * the next expiring timer.
1576 */
1577 if ((long)(jiffies - base->clk) > 2) {
1578 unsigned long next = __next_timer_interrupt(base);
1579
1580 /*
1581		 * If the next timer is ahead of the current time, forward the
Thomas Gleixnera683f392016-07-04 09:50:36 +00001582		 * base to current jiffies; otherwise forward it to the next expiry time:
Anna-Maria Gleixner23696832016-07-04 09:50:34 +00001583 */
1584 if (time_after(next, jiffies)) {
1585 /* The call site will increment clock! */
1586 base->clk = jiffies - 1;
1587 return 0;
1588 }
1589 base->clk = next;
1590 }
1591 return __collect_expired_timers(base, heads);
1592}
1593#else
1594static inline int collect_expired_timers(struct timer_base *base,
1595 struct hlist_head *heads)
1596{
1597 return __collect_expired_timers(base, heads);
1598}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001599#endif
1600
Linus Torvalds1da177e2005-04-16 15:20:36 -07001601/*
Daniel Walker5b4db0c2007-10-18 03:06:11 -07001602 * Called from the timer interrupt handler to charge one tick to the current
Linus Torvalds1da177e2005-04-16 15:20:36 -07001603 * process. user_tick is 1 if the tick is user time, 0 for system.
1604 */
1605void update_process_times(int user_tick)
1606{
1607 struct task_struct *p = current;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001608
1609 /* Note: this timer irq context must be accounted for as well. */
Paul Mackerrasfa13a5a2007-11-09 22:39:38 +01001610 account_process_tick(p, user_tick);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001611 run_local_timers();
Paul E. McKenneyc3377c2d2014-10-21 07:53:02 -07001612 rcu_check_callbacks(user_tick);
Peter Zijlstrae360adb2010-10-14 14:01:34 +08001613#ifdef CONFIG_IRQ_WORK
1614 if (in_irq())
Frederic Weisbecker76a33062014-08-16 18:37:19 +02001615 irq_work_tick();
Peter Zijlstrae360adb2010-10-14 14:01:34 +08001616#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001617 scheduler_tick();
Thomas Gleixner68194572007-07-19 01:49:16 -07001618 run_posix_cpu_timers(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001619}
1620
Anna-Maria Gleixner73420fe2016-07-04 09:50:33 +00001621/**
1622 * __run_timers - run all expired timers (if any) on this CPU.
1623 * @base: the timer vector to be processed.
1624 */
1625static inline void __run_timers(struct timer_base *base)
1626{
1627 struct hlist_head heads[LVL_DEPTH];
1628 int levels;
1629
1630 if (!time_after_eq(jiffies, base->clk))
1631 return;
1632
1633 spin_lock_irq(&base->lock);
1634
1635 while (time_after_eq(jiffies, base->clk)) {
1636
1637 levels = collect_expired_timers(base, heads);
1638 base->clk++;
1639
1640 while (levels--)
1641 expire_timers(base, heads + levels);
1642 }
1643 base->running_timer = NULL;
1644 spin_unlock_irq(&base->lock);
1645}
1646
Linus Torvalds1da177e2005-04-16 15:20:36 -07001647/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001648 * This function runs expired timers in bottom half (softirq) context.
1649 */
Emese Revfy0766f782016-06-20 20:42:34 +02001650static __latent_entropy void run_timer_softirq(struct softirq_action *h)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001651{
Thomas Gleixner500462a2016-07-04 09:50:30 +00001652 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001653
Thomas Gleixner500462a2016-07-04 09:50:30 +00001654 __run_timers(base);
1655 if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active)
1656 __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001657}
1658
1659/*
1660 * Called by the local, per-CPU timer interrupt on SMP.
1661 */
1662void run_local_timers(void)
1663{
Thomas Gleixner4e858762016-07-04 09:50:37 +00001664 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
1665
Peter Zijlstrad3d74452008-01-25 21:08:31 +01001666 hrtimer_run_queues();
Thomas Gleixner4e858762016-07-04 09:50:37 +00001667 /* Raise the softirq only if required. */
1668 if (time_before(jiffies, base->clk)) {
1669 if (!IS_ENABLED(CONFIG_NO_HZ_COMMON) || !base->nohz_active)
1670 return;
1671 /* CPU is awake, so check the deferrable base. */
1672 base++;
1673 if (time_before(jiffies, base->clk))
1674 return;
1675 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001676 raise_softirq(TIMER_SOFTIRQ);
1677}
1678
Linus Torvalds1da177e2005-04-16 15:20:36 -07001679#ifdef __ARCH_WANT_SYS_ALARM
1680
1681/*
1682 * For backwards compatibility? This can be done in libc, so Alpha
1683 * and all newer ports shouldn't need it.
1684 */
Heiko Carstens58fd3aa2009-01-14 14:14:03 +01001685SYSCALL_DEFINE1(alarm, unsigned int, seconds)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001686{
Thomas Gleixnerc08b8a42006-03-25 03:06:33 -08001687 return alarm_setitimer(seconds);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001688}
1689
1690#endif
1691
Linus Torvalds1da177e2005-04-16 15:20:36 -07001692static void process_timeout(unsigned long __data)
1693{
Ingo Molnar36c8b582006-07-03 00:25:41 -07001694 wake_up_process((struct task_struct *)__data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001695}
1696
1697/**
1698 * schedule_timeout - sleep until timeout
1699 * @timeout: timeout value in jiffies
1700 *
1701 * Make the current task sleep until @timeout jiffies have
1702 * elapsed. The routine will return immediately unless
1703 * the current task state has been set (see set_current_state()).
1704 *
1705 * You can set the task state as follows -
1706 *
1707 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
1708 * pass before the routine returns. The routine will return 0.
1709 *
1710 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
1711 * delivered to the current task. In this case the remaining time
1712 * in jiffies will be returned, or 0 if the timer expired in time.
1713 *
1714 * The current task state is guaranteed to be TASK_RUNNING when this
1715 * routine returns.
1716 *
1717 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
1718 * the CPU away without a bound on the timeout. In this case the return
1719 * value will be %MAX_SCHEDULE_TIMEOUT.
1720 *
1721 * In all cases the return value is guaranteed to be non-negative.
1722 */
Harvey Harrison7ad5b3a2008-02-08 04:19:53 -08001723signed long __sched schedule_timeout(signed long timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001724{
1725 struct timer_list timer;
1726 unsigned long expire;
1727
1728 switch (timeout)
1729 {
1730 case MAX_SCHEDULE_TIMEOUT:
1731 /*
1732		 * These two special cases are useful for the caller's
1733		 * convenience. Nothing more. We could take
1734		 * MAX_SCHEDULE_TIMEOUT from one of the negative values,
1735		 * but I'd like to return a valid offset (>=0) to allow
1736		 * the caller to do everything it wants with the retval.
1737 */
1738 schedule();
1739 goto out;
1740 default:
1741 /*
1742 * Another bit of PARANOID. Note that the retval will be
1743 * 0 since no piece of kernel is supposed to do a check
1744 * for a negative retval of schedule_timeout() (since it
1745 * should never happens anyway). You just have the printk()
1746 * that will tell you if something is gone wrong and where.
1747 */
Andrew Morton5b149bc2006-12-22 01:10:14 -08001748 if (timeout < 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001749 printk(KERN_ERR "schedule_timeout: wrong timeout "
Andrew Morton5b149bc2006-12-22 01:10:14 -08001750 "value %lx\n", timeout);
1751 dump_stack();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001752 current->state = TASK_RUNNING;
1753 goto out;
1754 }
1755 }
1756
1757 expire = timeout + jiffies;
1758
Thomas Gleixnerc6f3a972008-04-30 00:55:03 -07001759 setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
Thomas Gleixner177ec0a2016-07-04 09:50:24 +00001760 __mod_timer(&timer, expire, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001761 schedule();
1762 del_singleshot_timer_sync(&timer);
1763
Thomas Gleixnerc6f3a972008-04-30 00:55:03 -07001764 /* Remove the timer from the object tracker */
1765 destroy_timer_on_stack(&timer);
1766
Linus Torvalds1da177e2005-04-16 15:20:36 -07001767 timeout = expire - jiffies;
1768
1769 out:
1770 return timeout < 0 ? 0 : timeout;
1771}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001772EXPORT_SYMBOL(schedule_timeout);
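
/*
 * Usage sketch (illustrative; my_condition is a hypothetical predicate).
 * The task state is set before each schedule_timeout() call, as the
 * rules above require, and the remaining time is carried across
 * iterations so early wakeups do not extend the total wait.
 *
 *	signed long remaining = msecs_to_jiffies(2000);
 *
 *	while (!my_condition && remaining) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		remaining = schedule_timeout(remaining);
 *	}
 *	__set_current_state(TASK_RUNNING);
 */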
1773
Andrew Morton8a1c1752005-09-13 01:25:15 -07001774/*
1775 * We can use __set_current_state() here because schedule_timeout() calls
1776 * schedule() unconditionally.
1777 */
Nishanth Aravamudan64ed93a2005-09-10 00:27:21 -07001778signed long __sched schedule_timeout_interruptible(signed long timeout)
1779{
Andrew Mortona5a0d522005-10-30 15:01:42 -08001780 __set_current_state(TASK_INTERRUPTIBLE);
1781 return schedule_timeout(timeout);
Nishanth Aravamudan64ed93a2005-09-10 00:27:21 -07001782}
1783EXPORT_SYMBOL(schedule_timeout_interruptible);
1784
Matthew Wilcox294d5cc2007-12-06 11:59:46 -05001785signed long __sched schedule_timeout_killable(signed long timeout)
1786{
1787 __set_current_state(TASK_KILLABLE);
1788 return schedule_timeout(timeout);
1789}
1790EXPORT_SYMBOL(schedule_timeout_killable);
1791
Nishanth Aravamudan64ed93a2005-09-10 00:27:21 -07001792signed long __sched schedule_timeout_uninterruptible(signed long timeout)
1793{
Andrew Mortona5a0d522005-10-30 15:01:42 -08001794 __set_current_state(TASK_UNINTERRUPTIBLE);
1795 return schedule_timeout(timeout);
Nishanth Aravamudan64ed93a2005-09-10 00:27:21 -07001796}
1797EXPORT_SYMBOL(schedule_timeout_uninterruptible);
1798
Andrew Morton69b27ba2016-03-25 14:20:21 -07001799/*
1800 * Like schedule_timeout_uninterruptible(), except this task will not contribute
1801 * to load average.
1802 */
1803signed long __sched schedule_timeout_idle(signed long timeout)
1804{
1805 __set_current_state(TASK_IDLE);
1806 return schedule_timeout(timeout);
1807}
1808EXPORT_SYMBOL(schedule_timeout_idle);
1809
Linus Torvalds1da177e2005-04-16 15:20:36 -07001810#ifdef CONFIG_HOTPLUG_CPU
Thomas Gleixner494af3e2016-07-04 09:50:28 +00001811static void migrate_timer_list(struct timer_base *new_base, struct hlist_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001812{
1813 struct timer_list *timer;
Thomas Gleixner0eeda712015-05-26 22:50:29 +00001814 int cpu = new_base->cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001815
Thomas Gleixner1dabbce2015-05-26 22:50:28 +00001816 while (!hlist_empty(head)) {
1817 timer = hlist_entry(head->first, struct timer_list, entry);
Thomas Gleixnerec44bc72012-05-25 22:08:57 +00001818 detach_timer(timer, false);
Thomas Gleixner0eeda712015-05-26 22:50:29 +00001819 timer->flags = (timer->flags & ~TIMER_BASEMASK) | cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001820 internal_add_timer(new_base, timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001821 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001822}
1823
Richard Cochran24f73b92016-07-13 17:16:59 +00001824int timers_dead_cpu(unsigned int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001825{
Thomas Gleixner494af3e2016-07-04 09:50:28 +00001826 struct timer_base *old_base;
1827 struct timer_base *new_base;
Thomas Gleixner500462a2016-07-04 09:50:30 +00001828 int b, i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001829
1830 BUG_ON(cpu_online(cpu));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001831
Thomas Gleixner500462a2016-07-04 09:50:30 +00001832 for (b = 0; b < NR_BASES; b++) {
1833 old_base = per_cpu_ptr(&timer_bases[b], cpu);
1834 new_base = get_cpu_ptr(&timer_bases[b]);
1835 /*
1836 * The caller is globally serialized and nobody else
1837		 * takes two locks at once, so deadlock is not possible.
1838 */
1839 spin_lock_irq(&new_base->lock);
1840 spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
Oleg Nesterov3691c512006-03-31 02:30:30 -08001841
Thomas Gleixner500462a2016-07-04 09:50:30 +00001842 BUG_ON(old_base->running_timer);
1843
1844 for (i = 0; i < WHEEL_SIZE; i++)
1845 migrate_timer_list(new_base, old_base->vectors + i);
1846
1847 spin_unlock(&old_base->lock);
1848 spin_unlock_irq(&new_base->lock);
1849 put_cpu_ptr(&timer_bases);
Oleg Nesterov55c888d2005-06-23 00:08:56 -07001850 }
Richard Cochran24f73b92016-07-13 17:16:59 +00001851 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001852}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001853
Peter Zijlstra3650b572015-03-31 20:49:02 +05301854#endif /* CONFIG_HOTPLUG_CPU */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001855
Thomas Gleixner0eeda712015-05-26 22:50:29 +00001856static void __init init_timer_cpu(int cpu)
Viresh Kumar8def9062015-03-31 20:49:01 +05301857{
Thomas Gleixner500462a2016-07-04 09:50:30 +00001858 struct timer_base *base;
1859 int i;
Peter Zijlstra3650b572015-03-31 20:49:02 +05301860
Thomas Gleixner500462a2016-07-04 09:50:30 +00001861 for (i = 0; i < NR_BASES; i++) {
1862 base = per_cpu_ptr(&timer_bases[i], cpu);
1863 base->cpu = cpu;
1864 spin_lock_init(&base->lock);
1865 base->clk = jiffies;
1866 }
Viresh Kumar8def9062015-03-31 20:49:01 +05301867}
1868
1869static void __init init_timer_cpus(void)
1870{
Viresh Kumar8def9062015-03-31 20:49:01 +05301871 int cpu;
1872
Thomas Gleixner0eeda712015-05-26 22:50:29 +00001873 for_each_possible_cpu(cpu)
1874 init_timer_cpu(cpu);
Viresh Kumar8def9062015-03-31 20:49:01 +05301875}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001876
1877void __init init_timers(void)
1878{
Viresh Kumar8def9062015-03-31 20:49:01 +05301879 init_timer_cpus();
Viresh Kumarc24a4a32014-02-28 14:15:21 +05301880 init_timer_stats();
Carlos R. Mafra962cf362008-05-15 11:15:37 -03001881 open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001882}
1883
Linus Torvalds1da177e2005-04-16 15:20:36 -07001884/**
1885 * msleep - sleep safely even with waitqueue interruptions
1886 * @msecs: Time in milliseconds to sleep for
1887 */
1888void msleep(unsigned int msecs)
1889{
1890 unsigned long timeout = msecs_to_jiffies(msecs) + 1;
1891
Nishanth Aravamudan75bcc8c2005-09-10 00:27:24 -07001892 while (timeout)
1893 timeout = schedule_timeout_uninterruptible(timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001894}
1895
1896EXPORT_SYMBOL(msleep);
1897
1898/**
Domen Puncer96ec3ef2005-06-25 14:58:43 -07001899 * msleep_interruptible - sleep waiting for signals
Linus Torvalds1da177e2005-04-16 15:20:36 -07001900 * @msecs: Time in milliseconds to sleep for
1901 */
1902unsigned long msleep_interruptible(unsigned int msecs)
1903{
1904 unsigned long timeout = msecs_to_jiffies(msecs) + 1;
1905
Nishanth Aravamudan75bcc8c2005-09-10 00:27:24 -07001906 while (timeout && !signal_pending(current))
1907 timeout = schedule_timeout_interruptible(timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001908 return jiffies_to_msecs(timeout);
1909}
1910
1911EXPORT_SYMBOL(msleep_interruptible);
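
/*
 * Usage sketch (illustrative): msleep() for delays that must run to
 * completion, msleep_interruptible() when a signal may cut the wait
 * short. The return value is the time left in milliseconds, so zero
 * means the full delay elapsed.
 *
 *	unsigned long left = msleep_interruptible(500);
 *
 *	if (left)
 *		return -EINTR;		// woken early by a signal
 */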
Patrick Pannuto5e7f5a12010-08-02 15:01:04 -07001912
Thomas Gleixner6deba082015-04-14 21:09:28 +00001913static void __sched do_usleep_range(unsigned long min, unsigned long max)
Patrick Pannuto5e7f5a12010-08-02 15:01:04 -07001914{
1915 ktime_t kmin;
John Stultzda8b44d2016-03-17 14:20:51 -07001916 u64 delta;
Patrick Pannuto5e7f5a12010-08-02 15:01:04 -07001917
1918 kmin = ktime_set(0, min * NSEC_PER_USEC);
John Stultzda8b44d2016-03-17 14:20:51 -07001919 delta = (u64)(max - min) * NSEC_PER_USEC;
Thomas Gleixner6deba082015-04-14 21:09:28 +00001920 schedule_hrtimeout_range(&kmin, delta, HRTIMER_MODE_REL);
Patrick Pannuto5e7f5a12010-08-02 15:01:04 -07001921}
1922
1923/**
Bjorn Helgaasb5227d02016-05-31 16:23:02 -05001924 * usleep_range - Sleep for an approximate time
Patrick Pannuto5e7f5a12010-08-02 15:01:04 -07001925 * @min: Minimum time in usecs to sleep
1926 * @max: Maximum time in usecs to sleep
Bjorn Helgaasb5227d02016-05-31 16:23:02 -05001927 *
1928 * In non-atomic context where the exact wakeup time is flexible, use
1929 * usleep_range() instead of udelay(). The sleep improves responsiveness
1930 * by avoiding the CPU-hogging busy-wait of udelay(), and the range reduces
1931 * power usage by allowing hrtimers to take advantage of an already-
1932 * scheduled interrupt instead of scheduling a new one just for this sleep.
Patrick Pannuto5e7f5a12010-08-02 15:01:04 -07001933 */
Thomas Gleixner2ad5d322015-04-14 21:09:30 +00001934void __sched usleep_range(unsigned long min, unsigned long max)
Patrick Pannuto5e7f5a12010-08-02 15:01:04 -07001935{
1936 __set_current_state(TASK_UNINTERRUPTIBLE);
1937 do_usleep_range(min, max);
1938}
1939EXPORT_SYMBOL(usleep_range);
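
/*
 * Usage sketch (illustrative; dev->regs, REG_CMD, REG_STATUS and
 * CMD_RESET are hypothetical). The slack between @min and @max lets the
 * wakeup coalesce with an already-scheduled hrtimer interrupt instead
 * of forcing a dedicated one.
 *
 *	writel(CMD_RESET, dev->regs + REG_CMD);
 *	usleep_range(100, 200);		// hardware needs ~100us to reset
 *	status = readl(dev->regs + REG_STATUS);
 */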