/*
 *  linux/kernel/timer.c
 *
 *  Kernel internal timers
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
 *
 *  1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *              "A Kernel Model for Precision Timekeeping" by Dave Mills
 *  1998-12-24  Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
 *              serialize accesses to xtime/lost_ticks).
 *                              Copyright (C) 1998  Andrea Arcangeli
 *  1999-03-10  Improved NTP compatibility by Ulrich Windl
 *  2002-05-31  Move sys_sysinfo here and make its locking sane, Robert Love
 *  2000-10-05  Implemented scalable SMP per-CPU timer handling.
 *                              Copyright (C) 2000, 2001, 2002  Ingo Molnar
 *              Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 */

#include <linux/kernel_stat.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pid_namespace.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/tick.h>
#include <linux/kallsyms.h>
#include <linux/irq_work.h>
#include <linux/sched.h>
#include <linux/sched/sysctl.h>
#include <linux/slab.h>
#include <linux/compat.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>

#include "tick-internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/timer.h>

__visible u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

/*
 * The timer wheel has LVL_DEPTH array levels. Each level provides an array of
 * LVL_SIZE buckets. Each level is driven by its own clock and therefore each
 * level has a different granularity.
 *
 * The level granularity is:            LVL_CLK_DIV ^ lvl
 * The level clock frequency is:        HZ / (LVL_CLK_DIV ^ level)
 *
 * The array level of a newly armed timer depends on the relative expiry
 * time. The farther away the expiry time is, the higher the array level and
 * therefore the coarser the granularity becomes.
 *
 * Contrary to the original timer wheel implementation, which aims for 'exact'
 * expiry of the timers, this implementation removes the need for recascading
 * the timers into the lower array levels. The previous 'classic' timer wheel
 * implementation of the kernel already violated the 'exact' expiry by adding
 * slack to the expiry time to provide batched expiration. The granularity
 * levels provide implicit batching.
 *
 * This is an optimization of the original timer wheel implementation for the
 * majority of the timer wheel use cases: timeouts. The vast majority of
 * timeout timers (networking, disk I/O ...) are canceled before expiry. If
 * the timeout expires it indicates that normal operation is disturbed, so it
 * does not matter much whether the timeout comes with a slight delay.
 *
 * The only exception to this are networking timers with a small expiry
 * time. They rely on the granularity. Those fit into the first wheel level,
 * which has HZ granularity.
 *
 * We don't have cascading anymore. Timers with an expiry time above the
 * capacity of the last wheel level are force expired at the maximum timeout
 * value of the last wheel level. From data sampling we know that the maximum
 * value observed is 5 days (network connection tracking), so this should not
 * be an issue.
 *
 * The currently chosen array constants are a good compromise between
 * array size and granularity.
 *
 * This results in the following granularity and range levels:
 *
 * HZ 1000 steps
 * Level Offset  Granularity            Range
 *  0      0         1 ms                0 ms -         63 ms
 *  1     64         8 ms               64 ms -        511 ms
 *  2    128        64 ms              512 ms -       4095 ms (512ms - ~4s)
 *  3    192       512 ms             4096 ms -      32767 ms (~4s - ~32s)
 *  4    256      4096 ms (~4s)      32768 ms -     262143 ms (~32s - ~4m)
 *  5    320     32768 ms (~32s)    262144 ms -    2097151 ms (~4m - ~34m)
 *  6    384    262144 ms (~4m)    2097152 ms -   16777215 ms (~34m - ~4h)
 *  7    448   2097152 ms (~34m)  16777216 ms -  134217727 ms (~4h - ~1d)
 *  8    512  16777216 ms (~4h)  134217728 ms - 1073741822 ms (~1d - ~12d)
 *
 * HZ  300
 * Level Offset  Granularity            Range
 *  0      0         3 ms                0 ms -        210 ms
 *  1     64        26 ms              213 ms -       1703 ms (213ms - ~1s)
 *  2    128       213 ms             1706 ms -      13650 ms (~1s - ~13s)
 *  3    192      1706 ms (~1s)      13653 ms -     109223 ms (~13s - ~1m)
 *  4    256     13653 ms (~13s)    109226 ms -     873810 ms (~1m - ~14m)
 *  5    320    109226 ms (~1m)     873813 ms -    6990503 ms (~14m - ~1h)
 *  6    384    873813 ms (~14m)   6990506 ms -   55924050 ms (~1h - ~15h)
 *  7    448   6990506 ms (~1h)   55924053 ms -  447392423 ms (~15h - ~5d)
 *  8    512  55924053 ms (~15h) 447392426 ms - 3579139406 ms (~5d - ~41d)
 *
 * HZ  250
 * Level Offset  Granularity            Range
 *  0      0         4 ms                0 ms -        255 ms
 *  1     64        32 ms              256 ms -       2047 ms (256ms - ~2s)
 *  2    128       256 ms             2048 ms -      16383 ms (~2s - ~16s)
 *  3    192      2048 ms (~2s)      16384 ms -     131071 ms (~16s - ~2m)
 *  4    256     16384 ms (~16s)    131072 ms -    1048575 ms (~2m - ~17m)
 *  5    320    131072 ms (~2m)    1048576 ms -    8388607 ms (~17m - ~2h)
 *  6    384   1048576 ms (~17m)   8388608 ms -   67108863 ms (~2h - ~18h)
 *  7    448   8388608 ms (~2h)   67108864 ms -  536870911 ms (~18h - ~6d)
 *  8    512  67108864 ms (~18h) 536870912 ms - 4294967288 ms (~6d - ~49d)
 *
 * HZ  100
 * Level Offset  Granularity            Range
 *  0      0        10 ms                0 ms -        630 ms
 *  1     64        80 ms              640 ms -       5110 ms (640ms - ~5s)
 *  2    128       640 ms             5120 ms -      40950 ms (~5s - ~40s)
 *  3    192      5120 ms (~5s)      40960 ms -     327670 ms (~40s - ~5m)
 *  4    256     40960 ms (~40s)    327680 ms -    2621430 ms (~5m - ~43m)
 *  5    320    327680 ms (~5m)    2621440 ms -   20971510 ms (~43m - ~5h)
 *  6    384   2621440 ms (~43m)  20971520 ms -  167772150 ms (~5h - ~1d)
 *  7    448  20971520 ms (~5h)  167772160 ms - 1342177270 ms (~1d - ~15d)
 */
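
/*
 * Worked example (illustrative, not part of the original source): with
 * HZ=1000, a timer armed for jiffies + 200 has a delta of 200 jiffies.
 * That is >= LVL_START(1) == 63 and < LVL_START(2) == 504, so it lands in
 * level 1, whose granularity is LVL_GRAN(1) == 8 jiffies (8 ms). The timer
 * may therefore fire up to 8 ms late, matching the level 1 row of the
 * HZ 1000 table above.
 */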

/* Clock divisor for the next level */
#define LVL_CLK_SHIFT	3
#define LVL_CLK_DIV	(1UL << LVL_CLK_SHIFT)
#define LVL_CLK_MASK	(LVL_CLK_DIV - 1)
#define LVL_SHIFT(n)	((n) * LVL_CLK_SHIFT)
#define LVL_GRAN(n)	(1UL << LVL_SHIFT(n))

/*
 * The time start value for each level to select the bucket at enqueue
 * time.
 */
#define LVL_START(n)	((LVL_SIZE - 1) << (((n) - 1) * LVL_CLK_SHIFT))

/* Size of each clock level */
#define LVL_BITS	6
#define LVL_SIZE	(1UL << LVL_BITS)
#define LVL_MASK	(LVL_SIZE - 1)
#define LVL_OFFS(n)	((n) * LVL_SIZE)

/* Level depth */
#if HZ > 100
# define LVL_DEPTH	9
# else
# define LVL_DEPTH	8
#endif

/* The cutoff (max. capacity of the wheel) */
#define WHEEL_TIMEOUT_CUTOFF	(LVL_START(LVL_DEPTH))
#define WHEEL_TIMEOUT_MAX	(WHEEL_TIMEOUT_CUTOFF - LVL_GRAN(LVL_DEPTH - 1))
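
/*
 * Worked example (illustrative, not part of the original source): for
 * HZ > 100, LVL_DEPTH is 9, so WHEEL_TIMEOUT_CUTOFF = LVL_START(9) =
 * 63 << 24 = 1056964608 jiffies, i.e. roughly 12 days at HZ=1000,
 * consistent with the upper bound of level 8 in the HZ 1000 table above.
 */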

/*
 * The resulting wheel size. If NOHZ is configured we allocate two
 * wheels so we have a separate storage for the deferrable timers.
 */
#define WHEEL_SIZE	(LVL_SIZE * LVL_DEPTH)

#ifdef CONFIG_NO_HZ_COMMON
# define NR_BASES	2
# define BASE_STD	0
# define BASE_DEF	1
#else
# define NR_BASES	1
# define BASE_STD	0
# define BASE_DEF	0
#endif

struct timer_base {
	spinlock_t		lock;
	struct timer_list	*running_timer;
	unsigned long		clk;
	unsigned long		next_expiry;
	unsigned int		cpu;
	bool			migration_enabled;
	bool			nohz_active;
	bool			is_idle;
	bool			must_forward_clk;
	DECLARE_BITMAP(pending_map, WHEEL_SIZE);
	struct hlist_head	vectors[WHEEL_SIZE];
} ____cacheline_aligned;

static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]);

#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
unsigned int sysctl_timer_migration = 1;

void timers_update_migration(bool update_nohz)
{
	bool on = sysctl_timer_migration && tick_nohz_active;
	unsigned int cpu;

	/* Avoid the loop, if nothing to update */
	if (this_cpu_read(timer_bases[BASE_STD].migration_enabled) == on)
		return;

	for_each_possible_cpu(cpu) {
		per_cpu(timer_bases[BASE_STD].migration_enabled, cpu) = on;
		per_cpu(timer_bases[BASE_DEF].migration_enabled, cpu) = on;
		per_cpu(hrtimer_bases.migration_enabled, cpu) = on;
		if (!update_nohz)
			continue;
		per_cpu(timer_bases[BASE_STD].nohz_active, cpu) = true;
		per_cpu(timer_bases[BASE_DEF].nohz_active, cpu) = true;
		per_cpu(hrtimer_bases.nohz_active, cpu) = true;
	}
}

int timer_migration_handler(struct ctl_table *table, int write,
			    void __user *buffer, size_t *lenp,
			    loff_t *ppos)
{
	static DEFINE_MUTEX(mutex);
	int ret;

	mutex_lock(&mutex);
	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (!ret && write)
		timers_update_migration(false);
	mutex_unlock(&mutex);
	return ret;
}
#endif

static unsigned long round_jiffies_common(unsigned long j, int cpu,
		bool force_up)
{
	int rem;
	unsigned long original = j;

	/*
	 * We don't want all cpus firing their timers at once hitting the
	 * same lock or cachelines, so we skew each extra cpu with an extra
	 * 3 jiffies. This 3 jiffies came originally from the mm/ code which
	 * already did this.
	 * The skew is done by adding 3*cpunr, then rounding, then subtracting
	 * this extra offset again.
	 */
	j += cpu * 3;

	rem = j % HZ;

	/*
	 * If the target jiffy is just after a whole second (which can happen
	 * due to delays of the timer irq, long irq off times etc etc) then
	 * we should round down to the whole second, not up. Use 1/4th second
	 * as cutoff for this rounding as an extreme upper bound for this.
	 * But never round down if @force_up is set.
	 */
	if (rem < HZ/4 && !force_up) /* round down */
		j = j - rem;
	else /* round up */
		j = j - rem + HZ;

	/* now that we have rounded, subtract the extra skew again */
	j -= cpu * 3;

	/*
	 * Make sure j is still in the future. Otherwise return the
	 * unmodified value.
	 */
	return time_is_after_jiffies(j) ? j : original;
}

/**
 * __round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, false);
}
EXPORT_SYMBOL_GPL(__round_jiffies);

/**
 * __round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, false) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_relative);

/**
 * round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), false);
}
EXPORT_SYMBOL_GPL(round_jiffies);

/**
 * round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies_relative(unsigned long j)
{
	return __round_jiffies_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_relative);

/**
 * __round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, true);
}
EXPORT_SYMBOL_GPL(__round_jiffies_up);

/**
 * __round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies_relative() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, true) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);

/**
 * round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * This is the same as round_jiffies() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), true);
}
EXPORT_SYMBOL_GPL(round_jiffies_up);

/**
 * round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * This is the same as round_jiffies_relative() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up_relative(unsigned long j)
{
	return __round_jiffies_up_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_up_relative);
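
/*
 * Usage sketch (illustrative, not part of the original source): a driver
 * polling roughly once per second, where the exact firing time is
 * unimportant, can batch its wakeups with other timers:
 *
 *	mod_timer(&poll_timer, round_jiffies(jiffies + HZ));
 *
 * 'poll_timer' is a hypothetical struct timer_list owned by the driver.
 */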


static inline unsigned int timer_get_idx(struct timer_list *timer)
{
	return (timer->flags & TIMER_ARRAYMASK) >> TIMER_ARRAYSHIFT;
}

static inline void timer_set_idx(struct timer_list *timer, unsigned int idx)
{
	timer->flags = (timer->flags & ~TIMER_ARRAYMASK) |
			idx << TIMER_ARRAYSHIFT;
}

/*
 * Helper function to calculate the array index for a given expiry
 * time.
 */
static inline unsigned calc_index(unsigned expires, unsigned lvl)
{
	expires = (expires + LVL_GRAN(lvl)) >> LVL_SHIFT(lvl);
	return LVL_OFFS(lvl) + (expires & LVL_MASK);
}
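
/*
 * Worked example (illustrative, not part of the original source): for a
 * level 1 timer with expires == 100, LVL_GRAN(1) == 8 and LVL_SHIFT(1) == 3,
 * so (100 + 8) >> 3 == 13 and the resulting index is
 * LVL_OFFS(1) + (13 & LVL_MASK) == 64 + 13 == 77. Adding LVL_GRAN() before
 * shifting rounds the expiry up, so timers never fire early.
 */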

static int calc_wheel_index(unsigned long expires, unsigned long clk)
{
	unsigned long delta = expires - clk;
	unsigned int idx;

	if (delta < LVL_START(1)) {
		idx = calc_index(expires, 0);
	} else if (delta < LVL_START(2)) {
		idx = calc_index(expires, 1);
	} else if (delta < LVL_START(3)) {
		idx = calc_index(expires, 2);
	} else if (delta < LVL_START(4)) {
		idx = calc_index(expires, 3);
	} else if (delta < LVL_START(5)) {
		idx = calc_index(expires, 4);
	} else if (delta < LVL_START(6)) {
		idx = calc_index(expires, 5);
	} else if (delta < LVL_START(7)) {
		idx = calc_index(expires, 6);
	} else if (LVL_DEPTH > 8 && delta < LVL_START(8)) {
		idx = calc_index(expires, 7);
	} else if ((long) delta < 0) {
		idx = clk & LVL_MASK;
	} else {
		/*
		 * Force expire obscenely large timeouts at the
		 * capacity limit of the wheel.
		 */
		if (expires >= WHEEL_TIMEOUT_CUTOFF)
			expires = WHEEL_TIMEOUT_MAX;

		idx = calc_index(expires, LVL_DEPTH - 1);
	}
	return idx;
}

/*
 * Enqueue the timer into the hash bucket, mark it pending in
 * the bitmap and store the index in the timer flags.
 */
static void enqueue_timer(struct timer_base *base, struct timer_list *timer,
			  unsigned int idx)
{
	hlist_add_head(&timer->entry, base->vectors + idx);
	__set_bit(idx, base->pending_map);
	timer_set_idx(timer, idx);
}

static void
__internal_add_timer(struct timer_base *base, struct timer_list *timer)
{
	unsigned int idx;

	idx = calc_wheel_index(timer->expires, base->clk);
	enqueue_timer(base, timer, idx);
}

static void
trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer)
{
	if (!IS_ENABLED(CONFIG_NO_HZ_COMMON) || !base->nohz_active)
		return;

	/*
	 * TODO: This wants some optimizing similar to the code below, but we
	 * will do that when we switch from push to pull for deferrable timers.
	 */
	if (timer->flags & TIMER_DEFERRABLE) {
		if (tick_nohz_full_cpu(base->cpu))
			wake_up_nohz_cpu(base->cpu);
		return;
	}

	/*
	 * We might have to IPI the remote CPU if the base is idle and the
	 * timer is not deferrable. If the other CPU is on the way to idle
	 * then it can't set base->is_idle as we hold the base lock:
	 */
	if (!base->is_idle)
		return;

	/* Check whether this is the new first expiring timer: */
	if (time_after_eq(timer->expires, base->next_expiry))
		return;

	/*
	 * Set the next expiry time and kick the CPU so it can reevaluate the
	 * wheel:
	 */
	base->next_expiry = timer->expires;
	wake_up_nohz_cpu(base->cpu);
}

static void
internal_add_timer(struct timer_base *base, struct timer_list *timer)
{
	__internal_add_timer(base, timer);
	trigger_dyntick_cpu(base, timer);
}

#ifdef CONFIG_TIMER_STATS
void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
{
	if (timer->start_site)
		return;

	timer->start_site = addr;
	memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
	timer->start_pid = current->pid;
}

static void timer_stats_account_timer(struct timer_list *timer)
{
	void *site;

	/*
	 * start_site can be concurrently reset by
	 * timer_stats_timer_clear_start_info()
	 */
	site = READ_ONCE(timer->start_site);
	if (likely(!site))
		return;

	timer_stats_update_stats(timer, timer->start_pid, site,
				 timer->function, timer->start_comm,
				 timer->flags);
}

#else
static void timer_stats_account_timer(struct timer_list *timer) {}
#endif

#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static struct debug_obj_descr timer_debug_descr;

static void *timer_debug_hint(void *addr)
{
	return ((struct timer_list *) addr)->function;
}

static bool timer_is_static_object(void *addr)
{
	struct timer_list *timer = addr;

	return (timer->entry.pprev == NULL &&
		timer->entry.next == TIMER_ENTRY_STATIC);
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool timer_fixup_init(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_init(timer, &timer_debug_descr);
		return true;
	default:
		return false;
	}
}

/* Stub timer callback for improperly used timers. */
static void stub_timer(unsigned long data)
{
	WARN_ON(1);
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool timer_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		setup_timer(timer, stub_timer, 0);
		return true;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return false;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool timer_fixup_free(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_free(timer, &timer_debug_descr);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_assert_init is called when:
 * - an untracked/uninit-ed object is found
 */
static bool timer_fixup_assert_init(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		setup_timer(timer, stub_timer, 0);
		return true;
	default:
		return false;
	}
}

static struct debug_obj_descr timer_debug_descr = {
	.name			= "timer_list",
	.debug_hint		= timer_debug_hint,
	.is_static_object	= timer_is_static_object,
	.fixup_init		= timer_fixup_init,
	.fixup_activate		= timer_fixup_activate,
	.fixup_free		= timer_fixup_free,
	.fixup_assert_init	= timer_fixup_assert_init,
};

static inline void debug_timer_init(struct timer_list *timer)
{
	debug_object_init(timer, &timer_debug_descr);
}

static inline void debug_timer_activate(struct timer_list *timer)
{
	debug_object_activate(timer, &timer_debug_descr);
}

static inline void debug_timer_deactivate(struct timer_list *timer)
{
	debug_object_deactivate(timer, &timer_debug_descr);
}

static inline void debug_timer_free(struct timer_list *timer)
{
	debug_object_free(timer, &timer_debug_descr);
}

static inline void debug_timer_assert_init(struct timer_list *timer)
{
	debug_object_assert_init(timer, &timer_debug_descr);
}

static void do_init_timer(struct timer_list *timer, unsigned int flags,
			  const char *name, struct lock_class_key *key);

void init_timer_on_stack_key(struct timer_list *timer, unsigned int flags,
			     const char *name, struct lock_class_key *key)
{
	debug_object_init_on_stack(timer, &timer_debug_descr);
	do_init_timer(timer, flags, name, key);
}
EXPORT_SYMBOL_GPL(init_timer_on_stack_key);

void destroy_timer_on_stack(struct timer_list *timer)
{
	debug_object_free(timer, &timer_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_timer_on_stack);

#else
static inline void debug_timer_init(struct timer_list *timer) { }
static inline void debug_timer_activate(struct timer_list *timer) { }
static inline void debug_timer_deactivate(struct timer_list *timer) { }
static inline void debug_timer_assert_init(struct timer_list *timer) { }
#endif

static inline void debug_init(struct timer_list *timer)
{
	debug_timer_init(timer);
	trace_timer_init(timer);
}

static inline void
debug_activate(struct timer_list *timer, unsigned long expires)
{
	debug_timer_activate(timer);
	trace_timer_start(timer, expires, timer->flags);
}

static inline void debug_deactivate(struct timer_list *timer)
{
	debug_timer_deactivate(timer);
	trace_timer_cancel(timer);
}

static inline void debug_assert_init(struct timer_list *timer)
{
	debug_timer_assert_init(timer);
}

static void do_init_timer(struct timer_list *timer, unsigned int flags,
			  const char *name, struct lock_class_key *key)
{
	timer->entry.pprev = NULL;
	timer->flags = flags | raw_smp_processor_id();
#ifdef CONFIG_TIMER_STATS
	timer->start_site = NULL;
	timer->start_pid = -1;
	memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
	lockdep_init_map(&timer->lockdep_map, name, key, 0);
}

/**
 * init_timer_key - initialize a timer
 * @timer: the timer to be initialized
 * @flags: timer flags
 * @name: name of the timer
 * @key: lockdep class key of the fake lock used for tracking timer
 *       sync lock dependencies
 *
 * init_timer_key() must be done to a timer prior to calling *any* of the
 * other timer functions.
 */
void init_timer_key(struct timer_list *timer, unsigned int flags,
		    const char *name, struct lock_class_key *key)
{
	debug_init(timer);
	do_init_timer(timer, flags, name, key);
}
EXPORT_SYMBOL(init_timer_key);

static inline void detach_timer(struct timer_list *timer, bool clear_pending)
{
	struct hlist_node *entry = &timer->entry;

	debug_deactivate(timer);

	__hlist_del(entry);
	if (clear_pending)
		entry->pprev = NULL;
	entry->next = LIST_POISON2;
}

static int detach_if_pending(struct timer_list *timer, struct timer_base *base,
			     bool clear_pending)
{
	unsigned idx = timer_get_idx(timer);

	if (!timer_pending(timer))
		return 0;

	if (hlist_is_singular_node(&timer->entry, base->vectors + idx))
		__clear_bit(idx, base->pending_map);

	detach_timer(timer, clear_pending);
	return 1;
}

static inline struct timer_base *get_timer_cpu_base(u32 tflags, u32 cpu)
{
	struct timer_base *base = per_cpu_ptr(&timer_bases[BASE_STD], cpu);

	/*
	 * If the timer is deferrable and NO_HZ_COMMON is set then we need
	 * to use the deferrable base.
	 */
	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE))
		base = per_cpu_ptr(&timer_bases[BASE_DEF], cpu);
	return base;
}

static inline struct timer_base *get_timer_this_cpu_base(u32 tflags)
{
	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);

	/*
	 * If the timer is deferrable and NO_HZ_COMMON is set then we need
	 * to use the deferrable base.
	 */
	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE))
		base = this_cpu_ptr(&timer_bases[BASE_DEF]);
	return base;
}

static inline struct timer_base *get_timer_base(u32 tflags)
{
	return get_timer_cpu_base(tflags, tflags & TIMER_CPUMASK);
}

#ifdef CONFIG_NO_HZ_COMMON
static inline struct timer_base *
get_target_base(struct timer_base *base, unsigned tflags)
{
#ifdef CONFIG_SMP
	if ((tflags & TIMER_PINNED) || !base->migration_enabled)
		return get_timer_this_cpu_base(tflags);
	return get_timer_cpu_base(tflags, get_nohz_timer_target());
#else
	return get_timer_this_cpu_base(tflags);
#endif
}

static inline void forward_timer_base(struct timer_base *base)
{
	unsigned long jnow;

	/*
	 * We only forward the base when we are idle or have just come out of
	 * idle (must_forward_clk logic), and have a delta between base clock
	 * and jiffies. In the common case, run_timers will take care of it.
	 */
	if (likely(!base->must_forward_clk))
		return;

	jnow = READ_ONCE(jiffies);
	base->must_forward_clk = base->is_idle;
	if ((long)(jnow - base->clk) < 2)
		return;

	/*
	 * If the next expiry value is > jiffies, then we fast forward to
	 * jiffies otherwise we forward to the next expiry value.
	 */
	if (time_after(base->next_expiry, jnow))
		base->clk = jnow;
	else
		base->clk = base->next_expiry;
}
#else
static inline struct timer_base *
get_target_base(struct timer_base *base, unsigned tflags)
{
	return get_timer_this_cpu_base(tflags);
}

static inline void forward_timer_base(struct timer_base *base) { }
#endif

/*
 * We are using hashed locking: Holding per_cpu(timer_bases[x]).lock means
 * that all timers which are tied to this base are locked, and the base itself
 * is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found in the base->vectors array.
 *
 * When a timer is migrating then the TIMER_MIGRATING flag is set and we need
 * to wait until the migration is done.
 */
static struct timer_base *lock_timer_base(struct timer_list *timer,
					  unsigned long *flags)
	__acquires(timer->base->lock)
{
	for (;;) {
		struct timer_base *base;
		u32 tf;

		/*
		 * We need to use READ_ONCE() here, otherwise the compiler
		 * might re-read @tf between the check for TIMER_MIGRATING
		 * and spin_lock().
		 */
		tf = READ_ONCE(timer->flags);

		if (!(tf & TIMER_MIGRATING)) {
			base = get_timer_base(tf);
			spin_lock_irqsave(&base->lock, *flags);
			if (timer->flags == tf)
				return base;
			spin_unlock_irqrestore(&base->lock, *flags);
		}
		cpu_relax();
	}
}

static inline int
__mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
{
	struct timer_base *base, *new_base;
	unsigned int idx = UINT_MAX;
	unsigned long clk = 0, flags;
	int ret = 0;

	BUG_ON(!timer->function);

	/*
	 * This is a common optimization triggered by the networking code - if
	 * the timer is re-modified to have the same timeout or ends up in the
	 * same array bucket then just return:
	 */
	if (timer_pending(timer)) {
		/*
		 * The downside of this optimization is that it can result in
		 * larger granularity than you would get from adding a new
		 * timer with this expiry.
		 */
		if (timer->expires == expires)
			return 1;

		/*
		 * We lock the timer base and calculate the bucket index right
		 * here. If the timer ends up in the same bucket, then we
		 * just update the expiry time and avoid the whole
		 * dequeue/enqueue dance.
		 */
		base = lock_timer_base(timer, &flags);
		forward_timer_base(base);

		clk = base->clk;
		idx = calc_wheel_index(expires, clk);

		/*
		 * Retrieve and compare the array index of the pending
		 * timer. If it matches set the expiry to the new value so a
		 * subsequent call will exit in the expires check above.
		 */
		if (idx == timer_get_idx(timer)) {
			timer->expires = expires;
			ret = 1;
			goto out_unlock;
		}
	} else {
		base = lock_timer_base(timer, &flags);
		forward_timer_base(base);
	}

	timer_stats_timer_set_start_info(timer);

	ret = detach_if_pending(timer, base, false);
	if (!ret && pending_only)
		goto out_unlock;

	new_base = get_target_base(base, timer->flags);

	if (base != new_base) {
		/*
		 * We are trying to schedule the timer on the new base.
		 * However we can't change timer's base while it is running,
		 * otherwise del_timer_sync() can't detect that the timer's
		 * handler has not yet finished. This also guarantees that the
		 * timer is serialized wrt itself.
		 */
		if (likely(base->running_timer != timer)) {
			/* See the comment in lock_timer_base() */
			timer->flags |= TIMER_MIGRATING;

			spin_unlock(&base->lock);
			base = new_base;
			spin_lock(&base->lock);
			WRITE_ONCE(timer->flags,
				   (timer->flags & ~TIMER_BASEMASK) | base->cpu);
			forward_timer_base(base);
		}
	}

	debug_activate(timer, expires);

	timer->expires = expires;
	/*
	 * If 'idx' was calculated above and the base time did not advance
	 * between calculating 'idx' and possibly switching the base, only
	 * enqueue_timer() and trigger_dyntick_cpu() is required. Otherwise
	 * we need to (re)calculate the wheel index via
	 * internal_add_timer().
	 */
	if (idx != UINT_MAX && clk == base->clk) {
		enqueue_timer(base, timer, idx);
		trigger_dyntick_cpu(base, timer);
	} else {
		internal_add_timer(base, timer);
	}

out_unlock:
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}

/**
 * mod_timer_pending - modify a pending timer's timeout
 * @timer: the pending timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer_pending() is the same for pending timers as mod_timer(),
 * but will not re-activate and modify already deleted timers.
 *
 * It is useful for unserialized use of timers.
 */
int mod_timer_pending(struct timer_list *timer, unsigned long expires)
{
	return __mod_timer(timer, expires, true);
}
EXPORT_SYMBOL(mod_timer_pending);

/**
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer() is a more efficient way to update the expire field of an
 * active timer (if the timer is inactive it will be activated)
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * The function returns whether it has modified a pending timer or not.
 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
	return __mod_timer(timer, expires, false);
}
EXPORT_SYMBOL(mod_timer);

/**
 * add_timer - start a timer
 * @timer: the timer to be added
 *
 * The kernel will do a ->function(->data) callback from the
 * timer interrupt at the ->expires point in the future. The
 * current time is 'jiffies'.
 *
 * The timer's ->expires, ->function (and if the handler uses it, ->data)
 * fields must be set prior to calling this function.
 *
 * Timers with an ->expires field in the past will be executed in the next
 * timer tick.
 */
void add_timer(struct timer_list *timer)
{
	BUG_ON(timer_pending(timer));
	mod_timer(timer, timer->expires);
}
EXPORT_SYMBOL(add_timer);

/**
 * add_timer_on - start a timer on a particular CPU
 * @timer: the timer to be added
 * @cpu: the CPU to start it on
 *
 * This is not very scalable on SMP. Double adds are not possible.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
	struct timer_base *new_base, *base;
	unsigned long flags;

	timer_stats_timer_set_start_info(timer);
	BUG_ON(timer_pending(timer) || !timer->function);

	new_base = get_timer_cpu_base(timer->flags, cpu);

	/*
	 * If @timer was on a different CPU, it should be migrated with the
	 * old base locked to prevent other operations proceeding with the
	 * wrong base locked. See lock_timer_base().
	 */
	base = lock_timer_base(timer, &flags);
	if (base != new_base) {
		timer->flags |= TIMER_MIGRATING;

		spin_unlock(&base->lock);
		base = new_base;
		spin_lock(&base->lock);
		WRITE_ONCE(timer->flags,
			   (timer->flags & ~TIMER_BASEMASK) | cpu);
	}
	forward_timer_base(base);

	debug_activate(timer, timer->expires);
	internal_add_timer(base, timer);
	spin_unlock_irqrestore(&base->lock, flags);
}
EXPORT_SYMBOL_GPL(add_timer_on);

/**
 * del_timer - deactivate a timer.
 * @timer: the timer to be deactivated
 *
 * del_timer() deactivates a timer - this works on both active and inactive
 * timers.
 *
 * The function returns whether it has deactivated a pending timer or not.
 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
 * active timer returns 1.)
 */
int del_timer(struct timer_list *timer)
{
	struct timer_base *base;
	unsigned long flags;
	int ret = 0;

	debug_assert_init(timer);

	timer_stats_timer_clear_start_info(timer);
	if (timer_pending(timer)) {
		base = lock_timer_base(timer, &flags);
		ret = detach_if_pending(timer, base, true);
		spin_unlock_irqrestore(&base->lock, flags);
	}

	return ret;
}
EXPORT_SYMBOL(del_timer);

/**
 * try_to_del_timer_sync - Try to deactivate a timer
 * @timer: the timer to deactivate
 *
 * This function tries to deactivate a timer. Upon successful (ret >= 0)
 * exit the timer is not queued and the handler is not running on any CPU.
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
	struct timer_base *base;
	unsigned long flags;
	int ret = -1;

	debug_assert_init(timer);

	base = lock_timer_base(timer, &flags);

	if (base->running_timer != timer) {
		timer_stats_timer_clear_start_info(timer);
		ret = detach_if_pending(timer, base, true);
	}
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}
EXPORT_SYMBOL(try_to_del_timer_sync);
1227
Yong Zhang6f1bc452010-10-20 15:57:31 -07001228#ifdef CONFIG_SMP
Rolf Eike Beer2aae4a12006-09-29 01:59:46 -07001229/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001230 * del_timer_sync - deactivate a timer and wait for the handler to finish.
1231 * @timer: the timer to be deactivated
1232 *
1233 * This function only differs from del_timer() on SMP: besides deactivating
1234 * the timer it also makes sure the handler has finished executing on other
1235 * CPUs.
1236 *
Robert P. J. Day72fd4a32007-02-10 01:45:59 -08001237 * Synchronization rules: Callers must prevent restarting of the timer,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001238 * otherwise this function is meaningless. It must not be called from
Tejun Heoc5f66e92012-08-08 11:10:28 -07001239 * interrupt contexts unless the timer is an irqsafe one. The caller must
1240 * not hold locks which would prevent completion of the timer's
1241 * handler. The timer's handler must not call add_timer_on(). Upon exit the
1242 * timer is not queued and the handler is not running on any CPU.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001243 *
Tejun Heoc5f66e92012-08-08 11:10:28 -07001244 * Note: For !irqsafe timers, you must not hold locks that are held in
1245 * interrupt context while calling this function, even if the lock has
1246 * nothing to do with the timer in question. Here's why:
Steven Rostedt48228f72011-02-08 12:39:54 -05001247 *
1248 * CPU0 CPU1
1249 * ---- ----
1250 * <SOFTIRQ>
1251 * call_timer_fn();
1252 * base->running_timer = mytimer;
1253 * spin_lock_irq(somelock);
1254 * <IRQ>
1255 * spin_lock(somelock);
1256 * del_timer_sync(mytimer);
1257 * while (base->running_timer == mytimer);
1258 *
1259 * Now del_timer_sync() will never return and never release somelock.
1260 * The interrupt on the other CPU is waiting to grab somelock but
1261 * it has interrupted the softirq that CPU0 is waiting to finish.
1262 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07001263 * The function returns whether it has deactivated a pending timer or not.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001264 */
1265int del_timer_sync(struct timer_list *timer)
1266{
Johannes Berg6f2b9b92009-01-29 16:03:20 +01001267#ifdef CONFIG_LOCKDEP
Peter Zijlstraf266a512011-02-03 15:09:41 +01001268 unsigned long flags;
1269
Steven Rostedt48228f72011-02-08 12:39:54 -05001270 /*
1271 * If lockdep gives a backtrace here, please reference
1272 * the synchronization rules above.
1273 */
Peter Zijlstra7ff20792011-02-08 15:18:00 +01001274 local_irq_save(flags);
Johannes Berg6f2b9b92009-01-29 16:03:20 +01001275 lock_map_acquire(&timer->lockdep_map);
1276 lock_map_release(&timer->lockdep_map);
Peter Zijlstra7ff20792011-02-08 15:18:00 +01001277 local_irq_restore(flags);
Johannes Berg6f2b9b92009-01-29 16:03:20 +01001278#endif
Yong Zhang466bd302010-10-20 15:57:33 -07001279 /*
1280	 * Don't use del_timer_sync() in hardirq context, because it
1281 * could lead to deadlock.
1282 */
Thomas Gleixner0eeda712015-05-26 22:50:29 +00001283 WARN_ON(in_irq() && !(timer->flags & TIMER_IRQSAFE));
Oleg Nesterovfd450b72005-06-23 00:08:59 -07001284 for (;;) {
1285 int ret = try_to_del_timer_sync(timer);
1286 if (ret >= 0)
1287 return ret;
Andrew Mortona0009652006-07-14 00:24:06 -07001288 cpu_relax();
Oleg Nesterovfd450b72005-06-23 00:08:59 -07001289 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001290}
1291EXPORT_SYMBOL(del_timer_sync);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001292#endif
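/*
 * Example (editor's sketch): the canonical teardown sequence implied by the
 * rules above. struct my_ctx and my_teardown are hypothetical. Once
 * del_timer_sync() returns, the handler is not running on any CPU, so the
 * containing object may be freed, provided the handler checks ->dying (with
 * suitable ordering in real code) and never re-arms the timer afterwards.
 */
struct my_ctx {
	struct timer_list	timer;
	bool			dying;
};

static void my_teardown(struct my_ctx *ctx)
{
	ctx->dying = true;	/* tell the handler not to re-arm */
	del_timer_sync(&ctx->timer);
	kfree(ctx);
}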
1293
Thomas Gleixner576da122010-03-12 21:10:29 +01001294static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
1295 unsigned long data)
1296{
Peter Zijlstra4a2b4b22013-08-14 14:55:24 +02001297 int count = preempt_count();
Thomas Gleixner576da122010-03-12 21:10:29 +01001298
1299#ifdef CONFIG_LOCKDEP
1300 /*
1301 * It is permissible to free the timer from inside the
1302	 * function that is called from it; we need to take this into
1303 * account for lockdep too. To avoid bogus "held lock freed"
1304 * warnings as well as problems when looking into
1305 * timer->lockdep_map, make a copy and use that here.
1306 */
Peter Zijlstra4d82a1d2012-05-15 08:06:19 -07001307 struct lockdep_map lockdep_map;
1308
1309 lockdep_copy_map(&lockdep_map, &timer->lockdep_map);
Thomas Gleixner576da122010-03-12 21:10:29 +01001310#endif
1311 /*
1312 * Couple the lock chain with the lock chain at
1313 * del_timer_sync() by acquiring the lock_map around the fn()
1314 * call here and in del_timer_sync().
1315 */
1316 lock_map_acquire(&lockdep_map);
1317
1318 trace_timer_expire_entry(timer);
1319 fn(data);
1320 trace_timer_expire_exit(timer);
1321
1322 lock_map_release(&lockdep_map);
1323
Peter Zijlstra4a2b4b22013-08-14 14:55:24 +02001324 if (count != preempt_count()) {
Thomas Gleixner802702e2010-03-12 20:13:23 +01001325 WARN_ONCE(1, "timer: %pF preempt leak: %08x -> %08x\n",
Peter Zijlstra4a2b4b22013-08-14 14:55:24 +02001326 fn, count, preempt_count());
Thomas Gleixner802702e2010-03-12 20:13:23 +01001327 /*
1328 * Restore the preempt count. That gives us a decent
1329 * chance to survive and extract information. If the
1330 * callback kept a lock held, bad luck, but not worse
1331 * than the BUG() we had.
1332 */
Peter Zijlstra4a2b4b22013-08-14 14:55:24 +02001333 preempt_count_set(count);
Thomas Gleixner576da122010-03-12 21:10:29 +01001334 }
1335}
1336
Thomas Gleixner500462a2016-07-04 09:50:30 +00001337static void expire_timers(struct timer_base *base, struct hlist_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001338{
Thomas Gleixner500462a2016-07-04 09:50:30 +00001339 while (!hlist_empty(head)) {
1340 struct timer_list *timer;
1341 void (*fn)(unsigned long);
1342 unsigned long data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001343
Thomas Gleixner500462a2016-07-04 09:50:30 +00001344 timer = hlist_entry(head->first, struct timer_list, entry);
1345 timer_stats_account_timer(timer);
Thomas Gleixner3bb475a2015-05-26 22:50:24 +00001346
Thomas Gleixner500462a2016-07-04 09:50:30 +00001347 base->running_timer = timer;
1348 detach_timer(timer, true);
Thomas Gleixner3bb475a2015-05-26 22:50:24 +00001349
Thomas Gleixner500462a2016-07-04 09:50:30 +00001350 fn = timer->function;
1351 data = timer->data;
Thomas Gleixner3bb475a2015-05-26 22:50:24 +00001352
Thomas Gleixner500462a2016-07-04 09:50:30 +00001353 if (timer->flags & TIMER_IRQSAFE) {
1354 spin_unlock(&base->lock);
1355 call_timer_fn(timer, fn, data);
1356 spin_lock(&base->lock);
1357 } else {
1358 spin_unlock_irq(&base->lock);
1359 call_timer_fn(timer, fn, data);
1360 spin_lock_irq(&base->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001361 }
1362 }
Thomas Gleixner500462a2016-07-04 09:50:30 +00001363}
1364
Anna-Maria Gleixner23696832016-07-04 09:50:34 +00001365static int __collect_expired_timers(struct timer_base *base,
1366 struct hlist_head *heads)
Thomas Gleixner500462a2016-07-04 09:50:30 +00001367{
1368 unsigned long clk = base->clk;
1369 struct hlist_head *vec;
1370 int i, levels = 0;
1371 unsigned int idx;
1372
1373 for (i = 0; i < LVL_DEPTH; i++) {
1374 idx = (clk & LVL_MASK) + i * LVL_SIZE;
1375
1376 if (__test_and_clear_bit(idx, base->pending_map)) {
1377 vec = base->vectors + idx;
1378 hlist_move_list(vec, heads++);
1379 levels++;
1380 }
1381 /* Is it time to look at the next level? */
1382 if (clk & LVL_CLK_MASK)
1383 break;
1384 /* Shift clock for the next level granularity */
1385 clk >>= LVL_CLK_SHIFT;
1386 }
1387 return levels;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001388}
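/*
 * Worked example (editor's note, assuming LVL_CLK_SHIFT == 3 and
 * LVL_MASK == 63): for base->clk == 64 the loop above inspects
 *
 *	level 0: idx = (64 & 63) +   0 =   0, then 64 & 7 == 0, descend
 *	level 1: idx = ( 8 & 63) +  64 =  72, then  8 & 7 == 0, descend
 *	level 2: idx = ( 1 & 63) + 128 = 129, then  1 & 7 != 0, stop
 *
 * so at most one bucket per level is harvested for each clk increment.
 */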
1389
Frederic Weisbecker3451d022011-08-10 23:21:01 +02001390#ifdef CONFIG_NO_HZ_COMMON
Linus Torvalds1da177e2005-04-16 15:20:36 -07001391/*
Anna-Maria Gleixner23696832016-07-04 09:50:34 +00001392 * Find the next pending bucket of a level. Search from level start (@offset)
1393 * + @clk upwards, and if nothing is there, search from the start of the
1394 * level (@offset) up to @offset + clk.
1394 * (@offset) up to @offset + clk.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001395 */
Thomas Gleixner500462a2016-07-04 09:50:30 +00001396static int next_pending_bucket(struct timer_base *base, unsigned offset,
1397 unsigned clk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001398{
Thomas Gleixner500462a2016-07-04 09:50:30 +00001399 unsigned pos, start = offset + clk;
1400 unsigned end = offset + LVL_SIZE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001401
Thomas Gleixner500462a2016-07-04 09:50:30 +00001402 pos = find_next_bit(base->pending_map, end, start);
1403 if (pos < end)
1404 return pos - start;
Venki Pallipadi6e453a62007-05-08 00:27:44 -07001405
Thomas Gleixner500462a2016-07-04 09:50:30 +00001406 pos = find_next_bit(base->pending_map, start, offset);
1407 return pos < start ? pos + LVL_SIZE - start : -1;
1408}
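/*
 * Worked example (editor's note, assuming LVL_SIZE == 64): with @offset == 0
 * and @clk == 60, the first search scans bits [60, 64). If the only pending
 * bucket is bit 2, that search fails and the wrapped search over [0, 60)
 * finds pos == 2, so the function returns 2 + 64 - 60 == 6: the bucket
 * expires six level-granularity ticks from now.
 */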
1409
1410/*
Anna-Maria Gleixner23696832016-07-04 09:50:34 +00001411 * Search for the first expiring timer in the various clock levels. Caller must
1412 * hold base->lock.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001413 */
Thomas Gleixner494af3e2016-07-04 09:50:28 +00001414static unsigned long __next_timer_interrupt(struct timer_base *base)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001415{
Thomas Gleixner500462a2016-07-04 09:50:30 +00001416 unsigned long clk, next, adj;
1417 unsigned lvl, offset = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001418
Thomas Gleixner500462a2016-07-04 09:50:30 +00001419 next = base->clk + NEXT_TIMER_MAX_DELTA;
1420 clk = base->clk;
1421 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) {
1422 int pos = next_pending_bucket(base, offset, clk & LVL_MASK);
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001423
Thomas Gleixner500462a2016-07-04 09:50:30 +00001424 if (pos >= 0) {
1425 unsigned long tmp = clk + (unsigned long) pos;
1426
1427 tmp <<= LVL_SHIFT(lvl);
1428 if (time_before(tmp, next))
1429 next = tmp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001430 }
Thomas Gleixner500462a2016-07-04 09:50:30 +00001431 /*
1432 * Clock for the next level. If the current level clock lower
1433	 * bits are zero, we look at the next level as is. If not, we
1434 * need to advance it by one because that's going to be the
1435 * next expiring bucket in that level. base->clk is the next
1436 * expiring jiffie. So in case of:
1437 *
1438 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
1439 * 0 0 0 0 0 0
1440 *
1441 * we have to look at all levels @index 0. With
1442 *
1443 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
1444 * 0 0 0 0 0 2
1445 *
1446 * LVL0 has the next expiring bucket @index 2. The upper
1447 * levels have the next expiring bucket @index 1.
1448 *
1449 * In case that the propagation wraps the next level the same
1450 * rules apply:
1451 *
1452 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
1453 * 0 0 0 0 F 2
1454 *
1455 * So after looking at LVL0 we get:
1456 *
1457 * LVL5 LVL4 LVL3 LVL2 LVL1
1458 * 0 0 0 1 0
1459 *
1460 * So no propagation from LVL1 to LVL2 because that happened
1461 * with the add already, but then we need to propagate further
1462 * from LVL2 to LVL3.
1463 *
1464 * So the simple check whether the lower bits of the current
1465 * level are 0 or not is sufficient for all cases.
1466 */
1467 adj = clk & LVL_CLK_MASK ? 1 : 0;
1468 clk >>= LVL_CLK_SHIFT;
1469 clk += adj;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001470 }
Thomas Gleixner500462a2016-07-04 09:50:30 +00001471 return next;
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001472}
1473
1474/*
1475 * Check, if the next hrtimer event is before the next timer wheel
1476 * event:
1477 */
Thomas Gleixnerc1ad3482015-04-14 21:08:58 +00001478static u64 cmp_next_hrtimer_event(u64 basem, u64 expires)
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001479{
Thomas Gleixnerc1ad3482015-04-14 21:08:58 +00001480 u64 nextevt = hrtimer_get_next_event();
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001481
Thomas Gleixnerc1ad3482015-04-14 21:08:58 +00001482 /*
1483	 * If high resolution timers are enabled,
1484 * hrtimer_get_next_event() returns KTIME_MAX.
1485 */
1486 if (expires <= nextevt)
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001487 return expires;
1488
Thomas Gleixner9501b6c2007-03-25 14:31:17 +02001489 /*
Thomas Gleixnerc1ad3482015-04-14 21:08:58 +00001490 * If the next timer is already expired, return the tick base
1491 * time so the tick is fired immediately.
Thomas Gleixner9501b6c2007-03-25 14:31:17 +02001492 */
Thomas Gleixnerc1ad3482015-04-14 21:08:58 +00001493 if (nextevt <= basem)
1494 return basem;
Thomas Gleixnereaad0842007-05-29 23:47:39 +02001495
1496 /*
Thomas Gleixnerc1ad3482015-04-14 21:08:58 +00001497 * Round up to the next jiffie. High resolution timers are
1498 * off, so the hrtimers are expired in the tick and we need to
1499 * make sure that this tick really expires the timer to avoid
1500 * a ping pong of the nohz stop code.
1501 *
1502 * Use DIV_ROUND_UP_ULL to prevent gcc calling __divdi3
Thomas Gleixnereaad0842007-05-29 23:47:39 +02001503 */
Thomas Gleixnerc1ad3482015-04-14 21:08:58 +00001504 return DIV_ROUND_UP_ULL(nextevt, TICK_NSEC) * TICK_NSEC;
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001505}
1506
1507/**
Thomas Gleixnerc1ad3482015-04-14 21:08:58 +00001508 * get_next_timer_interrupt - return the time (clock mono) of the next timer
1509 * @basej: base time jiffies
1510 * @basem: base time clock monotonic
1511 *
1512 * Returns the tick aligned clock monotonic time of the next pending
1513 * timer or KTIME_MAX if no timer is pending.
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001514 */
Thomas Gleixnerc1ad3482015-04-14 21:08:58 +00001515u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001516{
Thomas Gleixner500462a2016-07-04 09:50:30 +00001517 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
Thomas Gleixnerc1ad3482015-04-14 21:08:58 +00001518 u64 expires = KTIME_MAX;
1519 unsigned long nextevt;
Chris Metcalf46c8f0b2016-08-08 16:29:07 -04001520 bool is_max_delta;
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001521
Heiko Carstensdbd87b52010-12-01 10:11:09 +01001522 /*
1523 * Pretend that there is no timer pending if the cpu is offline.
1524 * Possible pending timers will be migrated later to an active cpu.
1525 */
1526 if (cpu_is_offline(smp_processor_id()))
Thomas Gleixnere40468a2012-05-25 22:08:59 +00001527 return expires;
1528
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001529 spin_lock(&base->lock);
Thomas Gleixner500462a2016-07-04 09:50:30 +00001530 nextevt = __next_timer_interrupt(base);
Chris Metcalf46c8f0b2016-08-08 16:29:07 -04001531 is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA);
Thomas Gleixnera683f392016-07-04 09:50:36 +00001532 base->next_expiry = nextevt;
1533 /*
Thomas Gleixner041ad7b2016-10-22 11:07:35 +00001534 * We have a fresh next event. Check whether we can forward the
1535	 * base. We can only do that when @basej is past base->clk,
1536 * otherwise we might rewind base->clk.
Thomas Gleixnera683f392016-07-04 09:50:36 +00001537 */
Thomas Gleixner041ad7b2016-10-22 11:07:35 +00001538 if (time_after(basej, base->clk)) {
1539 if (time_after(nextevt, basej))
1540 base->clk = basej;
1541 else if (time_after(nextevt, base->clk))
1542 base->clk = nextevt;
1543 }
Thomas Gleixnera683f392016-07-04 09:50:36 +00001544
1545 if (time_before_eq(nextevt, basej)) {
1546 expires = basem;
1547 base->is_idle = false;
1548 } else {
Chris Metcalf46c8f0b2016-08-08 16:29:07 -04001549 if (!is_max_delta)
Matija Glavinic Pecotic9ef8b232017-08-01 09:11:52 +02001550 expires = basem + (u64)(nextevt - basej) * TICK_NSEC;
Thomas Gleixnera683f392016-07-04 09:50:36 +00001551 /*
Nicholas Piggin70b3fd52017-08-22 18:43:48 +10001552 * If we expect to sleep more than a tick, mark the base idle.
1553 * Also the tick is stopped so any added timer must forward
1554 * the base clk itself to keep granularity small. This idle
1555	 * logic is only maintained for the BASE_STD base; deferrable
1556 * timers may still see large granularity skew (by design).
Thomas Gleixnera683f392016-07-04 09:50:36 +00001557 */
Nicholas Piggin70b3fd52017-08-22 18:43:48 +10001558 if ((expires - basem) > TICK_NSEC) {
1559 base->must_forward_clk = true;
Thomas Gleixnera683f392016-07-04 09:50:36 +00001560 base->is_idle = true;
Nicholas Piggin70b3fd52017-08-22 18:43:48 +10001561 }
Thomas Gleixnere40468a2012-05-25 22:08:59 +00001562 }
Oleg Nesterov3691c512006-03-31 02:30:30 -08001563 spin_unlock(&base->lock);
Tony Lindgren69239742006-03-06 15:42:45 -08001564
Thomas Gleixnerc1ad3482015-04-14 21:08:58 +00001565 return cmp_next_hrtimer_event(basem, expires);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001566}
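/*
 * Example (editor's sketch): how a NOHZ-style caller consumes the value
 * above. The real caller lives in kernel/time/tick-sched.c and samples
 * jiffies and clock monotonic consistently under the jiffies seqlock;
 * my_next_wheel_event is a hypothetical simplification.
 */
static u64 my_next_wheel_event(void)
{
	/* The real caller runs with interrupts disabled on this CPU. */
	unsigned long basejiff = jiffies;
	u64 basemono = ktime_get_ns();	/* CLOCK_MONOTONIC "now" */

	/* Tick-aligned expiry, or KTIME_MAX if nothing is pending. */
	return get_next_timer_interrupt(basejiff, basemono);
}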
Anna-Maria Gleixner23696832016-07-04 09:50:34 +00001567
Thomas Gleixnera683f392016-07-04 09:50:36 +00001568/**
1569 * timer_clear_idle - Clear the idle state of the timer base
1570 *
1571 * Called with interrupts disabled
1572 */
1573void timer_clear_idle(void)
1574{
1575 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
1576
1577 /*
1578 * We do this unlocked. The worst outcome is a remote enqueue sending
1579 * a pointless IPI, but taking the lock would just make the window for
1580 * sending the IPI a few instructions smaller for the cost of taking
1581 * the lock in the exit from idle path.
1582 */
1583 base->is_idle = false;
1584}
1585
Anna-Maria Gleixner23696832016-07-04 09:50:34 +00001586static int collect_expired_timers(struct timer_base *base,
1587 struct hlist_head *heads)
1588{
1589 /*
1590 * NOHZ optimization. After a long idle sleep we need to forward the
1591 * base to current jiffies. Avoid a loop by searching the bitfield for
1592 * the next expiring timer.
1593 */
1594 if ((long)(jiffies - base->clk) > 2) {
1595 unsigned long next = __next_timer_interrupt(base);
1596
1597 /*
1598		 * If the next timer is ahead of time, forward the base to
Thomas Gleixnera683f392016-07-04 09:50:36 +00001599		 * current jiffies; otherwise forward to the next expiry time:
Anna-Maria Gleixner23696832016-07-04 09:50:34 +00001600 */
1601 if (time_after(next, jiffies)) {
1602 /* The call site will increment clock! */
1603 base->clk = jiffies - 1;
1604 return 0;
1605 }
1606 base->clk = next;
1607 }
1608 return __collect_expired_timers(base, heads);
1609}
1610#else
1611static inline int collect_expired_timers(struct timer_base *base,
1612 struct hlist_head *heads)
1613{
1614 return __collect_expired_timers(base, heads);
1615}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001616#endif
1617
Linus Torvalds1da177e2005-04-16 15:20:36 -07001618/*
Daniel Walker5b4db0c2007-10-18 03:06:11 -07001619 * Called from the timer interrupt handler to charge one tick to the current
Linus Torvalds1da177e2005-04-16 15:20:36 -07001620 * process. user_tick is 1 if the tick is user time, 0 for system.
1621 */
1622void update_process_times(int user_tick)
1623{
1624 struct task_struct *p = current;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001625
1626 /* Note: this timer irq context must be accounted for as well. */
Paul Mackerrasfa13a5a2007-11-09 22:39:38 +01001627 account_process_tick(p, user_tick);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001628 run_local_timers();
Paul E. McKenneyc3377c2d2014-10-21 07:53:02 -07001629 rcu_check_callbacks(user_tick);
Peter Zijlstrae360adb2010-10-14 14:01:34 +08001630#ifdef CONFIG_IRQ_WORK
1631 if (in_irq())
Frederic Weisbecker76a33062014-08-16 18:37:19 +02001632 irq_work_tick();
Peter Zijlstrae360adb2010-10-14 14:01:34 +08001633#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001634 scheduler_tick();
Thomas Gleixner68194572007-07-19 01:49:16 -07001635 run_posix_cpu_timers(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001636}
1637
Anna-Maria Gleixner73420fe2016-07-04 09:50:33 +00001638/**
1639 * __run_timers - run all expired timers (if any) on this CPU.
1640 * @base: the timer vector to be processed.
1641 */
1642static inline void __run_timers(struct timer_base *base)
1643{
1644 struct hlist_head heads[LVL_DEPTH];
1645 int levels;
1646
1647 if (!time_after_eq(jiffies, base->clk))
1648 return;
1649
1650 spin_lock_irq(&base->lock);
1651
1652 while (time_after_eq(jiffies, base->clk)) {
1653
1654 levels = collect_expired_timers(base, heads);
1655 base->clk++;
1656
1657 while (levels--)
1658 expire_timers(base, heads + levels);
1659 }
1660 base->running_timer = NULL;
1661 spin_unlock_irq(&base->lock);
1662}
1663
Linus Torvalds1da177e2005-04-16 15:20:36 -07001664/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001665 * This function runs expired timers in bottom half (softirq) context.
1666 */
Emese Revfy0766f782016-06-20 20:42:34 +02001667static __latent_entropy void run_timer_softirq(struct softirq_action *h)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001668{
Thomas Gleixner500462a2016-07-04 09:50:30 +00001669 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001670
Nicholas Piggin70b3fd52017-08-22 18:43:48 +10001671 /*
1672 * must_forward_clk must be cleared before running timers so that any
1673 * timer functions that call mod_timer will not try to forward the
1674	 * base. Idle tracking / clock forwarding logic is only used with
1675 * BASE_STD timers.
1676 *
1677 * The deferrable base does not do idle tracking at all, so we do
1678 * not forward it. This can result in very large variations in
1679 * granularity for deferrable timers, but they can be deferred for
1680 * long periods due to idle.
1681 */
1682 base->must_forward_clk = false;
1683
Thomas Gleixner500462a2016-07-04 09:50:30 +00001684 __run_timers(base);
Anna-Maria Gleixnerd8406872017-12-22 15:51:12 +01001685 if (IS_ENABLED(CONFIG_NO_HZ_COMMON))
Thomas Gleixner500462a2016-07-04 09:50:30 +00001686 __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001687}
1688
1689/*
1690 * Called by the local, per-CPU timer interrupt on SMP.
1691 */
1692void run_local_timers(void)
1693{
Thomas Gleixner4e858762016-07-04 09:50:37 +00001694 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
1695
Peter Zijlstrad3d74452008-01-25 21:08:31 +01001696 hrtimer_run_queues();
Thomas Gleixner4e858762016-07-04 09:50:37 +00001697 /* Raise the softirq only if required. */
1698 if (time_before(jiffies, base->clk)) {
1699 if (!IS_ENABLED(CONFIG_NO_HZ_COMMON) || !base->nohz_active)
1700 return;
1701 /* CPU is awake, so check the deferrable base. */
1702 base++;
1703 if (time_before(jiffies, base->clk))
1704 return;
1705 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001706 raise_softirq(TIMER_SOFTIRQ);
1707}
1708
Linus Torvalds1da177e2005-04-16 15:20:36 -07001709#ifdef __ARCH_WANT_SYS_ALARM
1710
1711/*
1712 * For backwards compatibility? This can be done in libc so Alpha
1713 * and all newer ports shouldn't need it.
1714 */
Heiko Carstens58fd3aa2009-01-14 14:14:03 +01001715SYSCALL_DEFINE1(alarm, unsigned int, seconds)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001716{
Thomas Gleixnerc08b8a42006-03-25 03:06:33 -08001717 return alarm_setitimer(seconds);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001718}
1719
1720#endif
1721
Linus Torvalds1da177e2005-04-16 15:20:36 -07001722static void process_timeout(unsigned long __data)
1723{
Ingo Molnar36c8b582006-07-03 00:25:41 -07001724 wake_up_process((struct task_struct *)__data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001725}
1726
1727/**
1728 * schedule_timeout - sleep until timeout
1729 * @timeout: timeout value in jiffies
1730 *
1731 * Make the current task sleep until @timeout jiffies have
1732 * elapsed. The routine will return immediately unless
1733 * the current task state has been set (see set_current_state()).
1734 *
1735 * You can set the task state as follows -
1736 *
1737 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
1738 * pass before the routine returns. The routine will return 0.
1739 *
1740 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
1741 * delivered to the current task. In this case the remaining time
1742 * in jiffies will be returned, or 0 if the timer expired in time.
1743 *
1744 * The current task state is guaranteed to be TASK_RUNNING when this
1745 * routine returns.
1746 *
1747 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
1748 * the CPU away without a bound on the timeout. In this case the return
1749 * value will be %MAX_SCHEDULE_TIMEOUT.
1750 *
1751 * In all cases the return value is guaranteed to be non-negative.
1752 */
Harvey Harrison7ad5b3a2008-02-08 04:19:53 -08001753signed long __sched schedule_timeout(signed long timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001754{
1755 struct timer_list timer;
1756 unsigned long expire;
1757
1758 switch (timeout)
1759 {
1760 case MAX_SCHEDULE_TIMEOUT:
1761 /*
1762		 * These two special cases are useful for the caller's
1763		 * comfort. Nothing more. We could take
1764		 * MAX_SCHEDULE_TIMEOUT from one of the negative values,
1765		 * but I'd like to return a valid offset (>=0) to allow
1766		 * the caller to do everything it wants with the retval.
1767 */
1768 schedule();
1769 goto out;
1770 default:
1771 /*
1772		 * Another bit of paranoia. Note that the retval will be
1773		 * 0 since no piece of the kernel is supposed to check
1774		 * for a negative retval of schedule_timeout() (since it
1775		 * should never happen anyway). You just have the printk()
1776		 * that will tell you if something has gone wrong and where.
1777 */
Andrew Morton5b149bc2006-12-22 01:10:14 -08001778 if (timeout < 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001779 printk(KERN_ERR "schedule_timeout: wrong timeout "
Andrew Morton5b149bc2006-12-22 01:10:14 -08001780 "value %lx\n", timeout);
1781 dump_stack();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001782 current->state = TASK_RUNNING;
1783 goto out;
1784 }
1785 }
1786
1787 expire = timeout + jiffies;
1788
Thomas Gleixnerc6f3a972008-04-30 00:55:03 -07001789 setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
Thomas Gleixner177ec0a2016-07-04 09:50:24 +00001790 __mod_timer(&timer, expire, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001791 schedule();
1792 del_singleshot_timer_sync(&timer);
1793
Thomas Gleixnerc6f3a972008-04-30 00:55:03 -07001794 /* Remove the timer from the object tracker */
1795 destroy_timer_on_stack(&timer);
1796
Linus Torvalds1da177e2005-04-16 15:20:36 -07001797 timeout = expire - jiffies;
1798
1799 out:
1800 return timeout < 0 ? 0 : timeout;
1801}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001802EXPORT_SYMBOL(schedule_timeout);
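/*
 * Example (editor's sketch): the set-state-then-sleep pattern described in
 * the comment above. my_wait_about_two_seconds is a hypothetical name.
 */
static signed long my_wait_about_two_seconds(void)
{
	signed long remaining;

	set_current_state(TASK_INTERRUPTIBLE);
	remaining = schedule_timeout(2 * HZ);

	/* remaining > 0 here means a signal ended the sleep early. */
	return remaining;
}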
1803
Andrew Morton8a1c1752005-09-13 01:25:15 -07001804/*
1805 * We can use __set_current_state() here because schedule_timeout() calls
1806 * schedule() unconditionally.
1807 */
Nishanth Aravamudan64ed93a2005-09-10 00:27:21 -07001808signed long __sched schedule_timeout_interruptible(signed long timeout)
1809{
Andrew Mortona5a0d522005-10-30 15:01:42 -08001810 __set_current_state(TASK_INTERRUPTIBLE);
1811 return schedule_timeout(timeout);
Nishanth Aravamudan64ed93a2005-09-10 00:27:21 -07001812}
1813EXPORT_SYMBOL(schedule_timeout_interruptible);
1814
Matthew Wilcox294d5cc2007-12-06 11:59:46 -05001815signed long __sched schedule_timeout_killable(signed long timeout)
1816{
1817 __set_current_state(TASK_KILLABLE);
1818 return schedule_timeout(timeout);
1819}
1820EXPORT_SYMBOL(schedule_timeout_killable);
1821
Nishanth Aravamudan64ed93a2005-09-10 00:27:21 -07001822signed long __sched schedule_timeout_uninterruptible(signed long timeout)
1823{
Andrew Mortona5a0d522005-10-30 15:01:42 -08001824 __set_current_state(TASK_UNINTERRUPTIBLE);
1825 return schedule_timeout(timeout);
Nishanth Aravamudan64ed93a2005-09-10 00:27:21 -07001826}
1827EXPORT_SYMBOL(schedule_timeout_uninterruptible);
1828
Andrew Morton69b27ba2016-03-25 14:20:21 -07001829/*
1830 * Like schedule_timeout_uninterruptible(), except this task will not contribute
1831 * to load average.
1832 */
1833signed long __sched schedule_timeout_idle(signed long timeout)
1834{
1835 __set_current_state(TASK_IDLE);
1836 return schedule_timeout(timeout);
1837}
1838EXPORT_SYMBOL(schedule_timeout_idle);
1839
Linus Torvalds1da177e2005-04-16 15:20:36 -07001840#ifdef CONFIG_HOTPLUG_CPU
Thomas Gleixner494af3e2016-07-04 09:50:28 +00001841static void migrate_timer_list(struct timer_base *new_base, struct hlist_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001842{
1843 struct timer_list *timer;
Thomas Gleixner0eeda712015-05-26 22:50:29 +00001844 int cpu = new_base->cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001845
Thomas Gleixner1dabbce2015-05-26 22:50:28 +00001846 while (!hlist_empty(head)) {
1847 timer = hlist_entry(head->first, struct timer_list, entry);
Thomas Gleixnerec44bc72012-05-25 22:08:57 +00001848 detach_timer(timer, false);
Thomas Gleixner0eeda712015-05-26 22:50:29 +00001849 timer->flags = (timer->flags & ~TIMER_BASEMASK) | cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001850 internal_add_timer(new_base, timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001851 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001852}
1853
Richard Cochran24f73b92016-07-13 17:16:59 +00001854int timers_dead_cpu(unsigned int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001855{
Thomas Gleixner494af3e2016-07-04 09:50:28 +00001856 struct timer_base *old_base;
1857 struct timer_base *new_base;
Thomas Gleixner500462a2016-07-04 09:50:30 +00001858 int b, i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001859
1860 BUG_ON(cpu_online(cpu));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001861
Thomas Gleixner500462a2016-07-04 09:50:30 +00001862 for (b = 0; b < NR_BASES; b++) {
1863 old_base = per_cpu_ptr(&timer_bases[b], cpu);
1864 new_base = get_cpu_ptr(&timer_bases[b]);
1865 /*
1866 * The caller is globally serialized and nobody else
1867		 * takes two locks at once; deadlock is not possible.
1868 */
1869 spin_lock_irq(&new_base->lock);
1870 spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
Oleg Nesterov3691c512006-03-31 02:30:30 -08001871
Thomas Gleixner500462a2016-07-04 09:50:30 +00001872 BUG_ON(old_base->running_timer);
1873
1874 for (i = 0; i < WHEEL_SIZE; i++)
1875 migrate_timer_list(new_base, old_base->vectors + i);
1876
1877 spin_unlock(&old_base->lock);
1878 spin_unlock_irq(&new_base->lock);
1879 put_cpu_ptr(&timer_bases);
Oleg Nesterov55c888d2005-06-23 00:08:56 -07001880 }
Richard Cochran24f73b92016-07-13 17:16:59 +00001881 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001882}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001883
Peter Zijlstra3650b572015-03-31 20:49:02 +05301884#endif /* CONFIG_HOTPLUG_CPU */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001885
Thomas Gleixner0eeda712015-05-26 22:50:29 +00001886static void __init init_timer_cpu(int cpu)
Viresh Kumar8def9062015-03-31 20:49:01 +05301887{
Thomas Gleixner500462a2016-07-04 09:50:30 +00001888 struct timer_base *base;
1889 int i;
Peter Zijlstra3650b572015-03-31 20:49:02 +05301890
Thomas Gleixner500462a2016-07-04 09:50:30 +00001891 for (i = 0; i < NR_BASES; i++) {
1892 base = per_cpu_ptr(&timer_bases[i], cpu);
1893 base->cpu = cpu;
1894 spin_lock_init(&base->lock);
1895 base->clk = jiffies;
1896 }
Viresh Kumar8def9062015-03-31 20:49:01 +05301897}
1898
1899static void __init init_timer_cpus(void)
1900{
Viresh Kumar8def9062015-03-31 20:49:01 +05301901 int cpu;
1902
Thomas Gleixner0eeda712015-05-26 22:50:29 +00001903 for_each_possible_cpu(cpu)
1904 init_timer_cpu(cpu);
Viresh Kumar8def9062015-03-31 20:49:01 +05301905}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001906
1907void __init init_timers(void)
1908{
Viresh Kumar8def9062015-03-31 20:49:01 +05301909 init_timer_cpus();
Viresh Kumarc24a4a32014-02-28 14:15:21 +05301910 init_timer_stats();
Carlos R. Mafra962cf362008-05-15 11:15:37 -03001911 open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001912}
1913
Linus Torvalds1da177e2005-04-16 15:20:36 -07001914/**
1915 * msleep - sleep safely even with waitqueue interruptions
1916 * @msecs: Time in milliseconds to sleep for
1917 */
1918void msleep(unsigned int msecs)
1919{
1920 unsigned long timeout = msecs_to_jiffies(msecs) + 1;
1921
Nishanth Aravamudan75bcc8c2005-09-10 00:27:24 -07001922 while (timeout)
1923 timeout = schedule_timeout_uninterruptible(timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001924}
1925
1926EXPORT_SYMBOL(msleep);
1927
1928/**
Domen Puncer96ec3ef2005-06-25 14:58:43 -07001929 * msleep_interruptible - sleep waiting for signals
Linus Torvalds1da177e2005-04-16 15:20:36 -07001930 * @msecs: Time in milliseconds to sleep for
1931 */
1932unsigned long msleep_interruptible(unsigned int msecs)
1933{
1934 unsigned long timeout = msecs_to_jiffies(msecs) + 1;
1935
Nishanth Aravamudan75bcc8c2005-09-10 00:27:24 -07001936 while (timeout && !signal_pending(current))
1937 timeout = schedule_timeout_interruptible(timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001938 return jiffies_to_msecs(timeout);
1939}
1940
1941EXPORT_SYMBOL(msleep_interruptible);
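/*
 * Example (editor's sketch): msleep() for an unconditional delay,
 * msleep_interruptible() when a pending signal should cut the wait
 * short. my_settle_then_wait is a hypothetical name.
 */
static void my_settle_then_wait(void)
{
	msleep(20);		/* let the hardware settle */

	/* Nonzero return: the remaining time, because a signal arrived. */
	if (msleep_interruptible(500))
		pr_info("wait cut short by a signal\n");
}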
Patrick Pannuto5e7f5a12010-08-02 15:01:04 -07001942
Thomas Gleixner6deba082015-04-14 21:09:28 +00001943static void __sched do_usleep_range(unsigned long min, unsigned long max)
Patrick Pannuto5e7f5a12010-08-02 15:01:04 -07001944{
1945 ktime_t kmin;
John Stultzda8b44d2016-03-17 14:20:51 -07001946 u64 delta;
Patrick Pannuto5e7f5a12010-08-02 15:01:04 -07001947
1948 kmin = ktime_set(0, min * NSEC_PER_USEC);
John Stultzda8b44d2016-03-17 14:20:51 -07001949 delta = (u64)(max - min) * NSEC_PER_USEC;
Thomas Gleixner6deba082015-04-14 21:09:28 +00001950 schedule_hrtimeout_range(&kmin, delta, HRTIMER_MODE_REL);
Patrick Pannuto5e7f5a12010-08-02 15:01:04 -07001951}
1952
1953/**
Bjorn Helgaasb5227d02016-05-31 16:23:02 -05001954 * usleep_range - Sleep for an approximate time
Patrick Pannuto5e7f5a12010-08-02 15:01:04 -07001955 * @min: Minimum time in usecs to sleep
1956 * @max: Maximum time in usecs to sleep
Bjorn Helgaasb5227d02016-05-31 16:23:02 -05001957 *
1958 * In non-atomic context where the exact wakeup time is flexible, use
1959 * usleep_range() instead of udelay(). The sleep improves responsiveness
1960 * by avoiding the CPU-hogging busy-wait of udelay(), and the range reduces
1961 * power usage by allowing hrtimers to take advantage of an already-
1962 * scheduled interrupt instead of scheduling a new one just for this sleep.
Patrick Pannuto5e7f5a12010-08-02 15:01:04 -07001963 */
Thomas Gleixner2ad5d322015-04-14 21:09:30 +00001964void __sched usleep_range(unsigned long min, unsigned long max)
Patrick Pannuto5e7f5a12010-08-02 15:01:04 -07001965{
1966 __set_current_state(TASK_UNINTERRUPTIBLE);
1967 do_usleep_range(min, max);
1968}
1969EXPORT_SYMBOL(usleep_range);
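/*
 * Example (editor's sketch): a polling loop using usleep_range() instead
 * of a udelay() busy-wait, as the comment above recommends. The register
 * layout and the READY bit are hypothetical.
 */
static int my_wait_for_ready(void __iomem *reg)
{
	int tries;

	for (tries = 0; tries < 100; tries++) {
		if (readl(reg) & 0x1)	/* hypothetical READY bit */
			return 0;
		usleep_range(100, 200);	/* sleep 100-200us per attempt */
	}
	return -ETIMEDOUT;
}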