/*
 *  linux/kernel/timer.c
 *
 *  Kernel internal timers
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
 *
 *  1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *              "A Kernel Model for Precision Timekeeping" by Dave Mills
 *  1998-12-24  Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
 *              serialize accesses to xtime/lost_ticks).
 *                              Copyright (C) 1998  Andrea Arcangeli
 *  1999-03-10  Improved NTP compatibility by Ulrich Windl
 *  2002-05-31  Move sys_sysinfo here and make its locking sane, Robert Love
 *  2000-10-05  Implemented scalable SMP per-CPU timer handling.
 *                              Copyright (C) 2000, 2001, 2002  Ingo Molnar
 *              Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 */

#include <linux/kernel_stat.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pid_namespace.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/tick.h>
#include <linux/kallsyms.h>
#include <linux/irq_work.h>
#include <linux/sched.h>
#include <linux/sched/sysctl.h>
#include <linux/slab.h>
#include <linux/compat.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>

#include "tick-internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/timer.h>

__visible u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

/*
 * The timer wheel has LVL_DEPTH array levels. Each level provides an array of
 * LVL_SIZE buckets. Each level is driven by its own clock and therefore each
 * level has a different granularity.
 *
 * The level granularity is:		LVL_CLK_DIV ^ lvl
 * The level clock frequency is:	HZ / (LVL_CLK_DIV ^ level)
 *
 * The array level of a newly armed timer depends on the relative expiry
 * time. The farther the expiry time is away the higher the array level and
 * therefore the granularity becomes.
 *
 * Contrary to the original timer wheel implementation, which aims for 'exact'
 * expiry of the timers, this implementation removes the need for recascading
 * the timers into the lower array levels. The previous 'classic' timer wheel
 * implementation of the kernel already violated the 'exact' expiry by adding
 * slack to the expiry time to provide batched expiration. The granularity
 * levels provide implicit batching.
 *
 * This is an optimization of the original timer wheel implementation for the
 * majority of the timer wheel use cases: timeouts. The vast majority of
 * timeout timers (networking, disk I/O ...) are canceled before expiry. If
 * the timeout expires it indicates that normal operation is disturbed, so it
 * does not matter much whether the timeout comes with a slight delay.
 *
 * The only exception to this are networking timers with a small expiry
 * time. They rely on the granularity. Those fit into the first wheel level,
 * which has HZ granularity.
 *
 * We don't have cascading anymore. Timers with an expiry time above the
 * capacity of the last wheel level are force expired at the maximum timeout
 * value of the last wheel level. From data sampling we know that the maximum
 * value observed is 5 days (network connection tracking), so this should not
 * be an issue.
 *
 * The currently chosen array constants are a good compromise between
 * array size and granularity.
 *
 * This results in the following granularity and range levels:
 *
 * HZ 1000 steps
 * Level Offset  Granularity            Range
 *  0      0         1 ms                0 ms -         63 ms
 *  1     64         8 ms               64 ms -        511 ms
 *  2    128        64 ms              512 ms -       4095 ms (512ms - ~4s)
 *  3    192       512 ms             4096 ms -      32767 ms (~4s - ~32s)
 *  4    256      4096 ms (~4s)      32768 ms -     262143 ms (~32s - ~4m)
 *  5    320     32768 ms (~32s)    262144 ms -    2097151 ms (~4m - ~34m)
 *  6    384    262144 ms (~4m)    2097152 ms -   16777215 ms (~34m - ~4h)
 *  7    448   2097152 ms (~34m)  16777216 ms -  134217727 ms (~4h - ~1d)
 *  8    512  16777216 ms (~4h)  134217728 ms - 1073741822 ms (~1d - ~12d)
 *
 * HZ  300
 * Level Offset  Granularity            Range
 *  0      0         3 ms                0 ms -        210 ms
 *  1     64        26 ms              213 ms -       1703 ms (213ms - ~1s)
 *  2    128       213 ms             1706 ms -      13650 ms (~1s - ~13s)
 *  3    192      1706 ms (~1s)      13653 ms -     109223 ms (~13s - ~1m)
 *  4    256     13653 ms (~13s)    109226 ms -     873810 ms (~1m - ~14m)
 *  5    320    109226 ms (~1m)     873813 ms -    6990503 ms (~14m - ~1h)
 *  6    384    873813 ms (~14m)   6990506 ms -   55924050 ms (~1h - ~15h)
 *  7    448   6990506 ms (~1h)   55924053 ms -  447392423 ms (~15h - ~5d)
 *  8    512  55924053 ms (~15h) 447392426 ms - 3579139406 ms (~5d - ~41d)
 *
 * HZ  250
 * Level Offset  Granularity            Range
 *  0      0         4 ms                0 ms -        255 ms
 *  1     64        32 ms              256 ms -       2047 ms (256ms - ~2s)
 *  2    128       256 ms             2048 ms -      16383 ms (~2s - ~16s)
 *  3    192      2048 ms (~2s)      16384 ms -     131071 ms (~16s - ~2m)
 *  4    256     16384 ms (~16s)    131072 ms -    1048575 ms (~2m - ~17m)
 *  5    320    131072 ms (~2m)    1048576 ms -    8388607 ms (~17m - ~2h)
 *  6    384   1048576 ms (~17m)   8388608 ms -   67108863 ms (~2h - ~18h)
 *  7    448   8388608 ms (~2h)   67108864 ms -  536870911 ms (~18h - ~6d)
 *  8    512  67108864 ms (~18h) 536870912 ms - 4294967288 ms (~6d - ~49d)
 *
 * HZ  100
 * Level Offset  Granularity            Range
 *  0      0        10 ms                0 ms -        630 ms
 *  1     64        80 ms              640 ms -       5110 ms (640ms - ~5s)
 *  2    128       640 ms             5120 ms -      40950 ms (~5s - ~40s)
 *  3    192      5120 ms (~5s)      40960 ms -     327670 ms (~40s - ~5m)
 *  4    256     40960 ms (~40s)    327680 ms -    2621430 ms (~5m - ~43m)
 *  5    320    327680 ms (~5m)    2621440 ms -   20971510 ms (~43m - ~5h)
 *  6    384   2621440 ms (~43m)  20971520 ms -  167772150 ms (~5h - ~1d)
 *  7    448  20971520 ms (~5h)  167772160 ms - 1342177270 ms (~1d - ~15d)
 */
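
/*
 * Worked example (illustrative, not from the original source, assuming
 * HZ == 1000): a timer armed 9000 ms ahead falls into the
 * 4096 ms - 32767 ms range of level 3 above, so it is queued with 512 ms
 * granularity and may fire up to 512 ms late, which is fine for a timeout
 * of that magnitude. A timer armed only 20 ms ahead stays in level 0 and
 * keeps its 1 ms granularity.
 */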

/* Clock divisor for the next level */
#define LVL_CLK_SHIFT	3
#define LVL_CLK_DIV	(1UL << LVL_CLK_SHIFT)
#define LVL_CLK_MASK	(LVL_CLK_DIV - 1)
#define LVL_SHIFT(n)	((n) * LVL_CLK_SHIFT)
#define LVL_GRAN(n)	(1UL << LVL_SHIFT(n))

/*
 * The time start value for each level to select the bucket at enqueue
 * time.
 */
#define LVL_START(n)	((LVL_SIZE - 1) << (((n) - 1) * LVL_CLK_SHIFT))

/* Size of each clock level */
#define LVL_BITS	6
#define LVL_SIZE	(1UL << LVL_BITS)
#define LVL_MASK	(LVL_SIZE - 1)
#define LVL_OFFS(n)	((n) * LVL_SIZE)

/* Level depth */
#if HZ > 100
# define LVL_DEPTH	9
#else
# define LVL_DEPTH	8
#endif

/* The cutoff (max. capacity of the wheel) */
#define WHEEL_TIMEOUT_CUTOFF	(LVL_START(LVL_DEPTH))
#define WHEEL_TIMEOUT_MAX	(WHEEL_TIMEOUT_CUTOFF - LVL_GRAN(LVL_DEPTH - 1))

/*
 * The resulting wheel size. If NOHZ is configured we allocate two
 * wheels so we have a separate storage for the deferrable timers.
 */
#define WHEEL_SIZE	(LVL_SIZE * LVL_DEPTH)

#ifdef CONFIG_NO_HZ_COMMON
# define NR_BASES	2
# define BASE_STD	0
# define BASE_DEF	1
#else
# define NR_BASES	1
# define BASE_STD	0
# define BASE_DEF	0
#endif

struct timer_base {
	spinlock_t		lock;
	struct timer_list	*running_timer;
	unsigned long		clk;
	unsigned long		next_expiry;
	unsigned int		cpu;
	bool			migration_enabled;
	bool			nohz_active;
	bool			is_idle;
	bool			must_forward_clk;
	DECLARE_BITMAP(pending_map, WHEEL_SIZE);
	struct hlist_head	vectors[WHEEL_SIZE];
} ____cacheline_aligned;

static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]);
struct timer_base timer_base_deferrable;
static atomic_t deferrable_pending;

#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
unsigned int sysctl_timer_migration = 1;

void timers_update_migration(bool update_nohz)
{
	bool on = sysctl_timer_migration && tick_nohz_active;
	unsigned int cpu;

	/* Avoid the loop, if nothing to update */
	if (this_cpu_read(timer_bases[BASE_STD].migration_enabled) == on)
		return;

	for_each_possible_cpu(cpu) {
		per_cpu(timer_bases[BASE_STD].migration_enabled, cpu) = on;
		per_cpu(timer_bases[BASE_DEF].migration_enabled, cpu) = on;
		per_cpu(hrtimer_bases.migration_enabled, cpu) = on;
		if (!update_nohz)
			continue;
		per_cpu(timer_bases[BASE_STD].nohz_active, cpu) = true;
		per_cpu(timer_bases[BASE_DEF].nohz_active, cpu) = true;
		per_cpu(hrtimer_bases.nohz_active, cpu) = true;
	}

	timer_base_deferrable.migration_enabled = on;
	timer_base_deferrable.nohz_active = true;
}

int timer_migration_handler(struct ctl_table *table, int write,
			    void __user *buffer, size_t *lenp,
			    loff_t *ppos)
{
	static DEFINE_MUTEX(mutex);
	int ret;

	mutex_lock(&mutex);
	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (!ret && write)
		timers_update_migration(false);
	mutex_unlock(&mutex);
	return ret;
}
#endif

static unsigned long round_jiffies_common(unsigned long j, int cpu,
		bool force_up)
{
	int rem;
	unsigned long original = j;

	/*
	 * We don't want all cpus firing their timers at once hitting the
	 * same lock or cachelines, so we skew each extra cpu with an extra
	 * 3 jiffies. These 3 jiffies came originally from the mm/ code which
	 * already did this.
	 * The skew is done by adding 3*cpunr, then rounding, then subtracting
	 * this extra offset again.
	 */
	j += cpu * 3;

	rem = j % HZ;

	/*
	 * If the target jiffie is just after a whole second (which can happen
	 * due to delays of the timer irq, long irq-off times, etc.) then
	 * we should round down to the whole second, not up. Use 1/4th second
	 * as the cutoff for this rounding, as an extreme upper bound.
	 * But never round down if @force_up is set.
	 */
	if (rem < HZ/4 && !force_up)	/* round down */
		j = j - rem;
	else				/* round up */
		j = j - rem + HZ;

	/* now that we have rounded, subtract the extra skew again */
	j -= cpu * 3;

	/*
	 * Make sure j is still in the future. Otherwise return the
	 * unmodified value.
	 */
	return time_is_after_jiffies(j) ? j : original;
}

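/*
 * Worked example (illustrative, assuming HZ == 1000): for cpu == 2 the
 * skew first adds 6 to j. If the skewed value is 10256, rem == 256 is
 * >= HZ/4 == 250, so it rounds up to 11000; subtracting the skew again
 * yields 10994. A neighbouring cpu gets a slightly different result, so
 * the rounded timers do not all collide on the same jiffy.
 */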
/**
 * __round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, false);
}
EXPORT_SYMBOL_GPL(__round_jiffies);

/**
 * __round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, false) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_relative);

/**
 * round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), false);
}
EXPORT_SYMBOL_GPL(round_jiffies);

/**
 * round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies_relative(unsigned long j)
{
	return __round_jiffies_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_relative);

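/*
 * Illustrative usage (a sketch, not from this file): a periodic
 * housekeeping timer that only needs roughly one-minute resolution can
 * batch its wakeups with other rounded timers:
 *
 *	mod_timer(&cleanup_timer, round_jiffies(jiffies + 60 * HZ));
 *
 * The hypothetical cleanup_timer rearms itself the same way from its
 * callback, so it keeps firing near whole seconds instead of at an
 * arbitrary offset.
 */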
/**
 * __round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, true);
}
EXPORT_SYMBOL_GPL(__round_jiffies_up);

/**
 * __round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies_relative() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, true) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);

/**
 * round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * This is the same as round_jiffies() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), true);
}
EXPORT_SYMBOL_GPL(round_jiffies_up);

/**
 * round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * This is the same as round_jiffies_relative() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up_relative(unsigned long j)
{
	return __round_jiffies_up_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_up_relative);

static inline unsigned int timer_get_idx(struct timer_list *timer)
{
	return (timer->flags & TIMER_ARRAYMASK) >> TIMER_ARRAYSHIFT;
}

static inline void timer_set_idx(struct timer_list *timer, unsigned int idx)
{
	timer->flags = (timer->flags & ~TIMER_ARRAYMASK) |
			idx << TIMER_ARRAYSHIFT;
}

/*
 * Helper function to calculate the array index for a given expiry
 * time.
 */
static inline unsigned calc_index(unsigned expires, unsigned lvl)
{
	if (expires & ~(UINT_MAX << LVL_SHIFT(lvl)))
		expires = (expires + LVL_GRAN(lvl)) >> LVL_SHIFT(lvl);
	else
		expires = expires >> LVL_SHIFT(lvl);

	return LVL_OFFS(lvl) + (expires & LVL_MASK);
}

static inline unsigned int calc_index_min_granularity(unsigned int expires)
{
	return LVL_OFFS(0) + ((expires >> LVL_SHIFT(0)) & LVL_MASK);
}

static int calc_wheel_index(unsigned long expires, unsigned long clk)
{
	unsigned long delta = expires - clk;
	unsigned int idx;

	if (delta < LVL_START(1)) {
		idx = calc_index_min_granularity(expires);
	} else if (delta < LVL_START(2)) {
		idx = calc_index(expires, 1);
	} else if (delta < LVL_START(3)) {
		idx = calc_index(expires, 2);
	} else if (delta < LVL_START(4)) {
		idx = calc_index(expires, 3);
	} else if (delta < LVL_START(5)) {
		idx = calc_index(expires, 4);
	} else if (delta < LVL_START(6)) {
		idx = calc_index(expires, 5);
	} else if (delta < LVL_START(7)) {
		idx = calc_index(expires, 6);
	} else if (LVL_DEPTH > 8 && delta < LVL_START(8)) {
		idx = calc_index(expires, 7);
	} else if ((long) delta < 0) {
		idx = clk & LVL_MASK;
	} else {
		/*
		 * Force obscenely large timeouts to expire at the
		 * capacity limit of the wheel.
		 */
		if (expires >= WHEEL_TIMEOUT_CUTOFF)
			expires = WHEEL_TIMEOUT_MAX;

		idx = calc_index(expires, LVL_DEPTH - 1);
	}
	return idx;
}

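/*
 * Illustrative trace of the rounding (assuming HZ == 1000): for
 * expires == 1005000 and clk == 1000000, delta == 5000 selects level 3
 * (LVL_START(3) == 4032 <= delta < LVL_START(4) == 32256). calc_index()
 * then rounds a non-aligned expiry up to the next 512-jiffy step before
 * masking, so the timer can never fire early, only up to one granularity
 * unit late.
 */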
/*
 * Enqueue the timer into the hash bucket, mark it pending in
 * the bitmap and store the index in the timer flags.
 */
static void enqueue_timer(struct timer_base *base, struct timer_list *timer,
			  unsigned int idx)
{
	hlist_add_head(&timer->entry, base->vectors + idx);
	__set_bit(idx, base->pending_map);
	timer_set_idx(timer, idx);
}

static void
__internal_add_timer(struct timer_base *base, struct timer_list *timer)
{
	unsigned int idx;

	idx = calc_wheel_index(timer->expires, base->clk);
	enqueue_timer(base, timer, idx);
}

static void
trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer)
{
	if (!IS_ENABLED(CONFIG_NO_HZ_COMMON) || !base->nohz_active)
		return;

	/*
	 * TODO: This wants some optimizing similar to the code below, but we
	 * will do that when we switch from push to pull for deferrable timers.
	 */
	if (timer->flags & TIMER_DEFERRABLE) {
		if (tick_nohz_full_cpu(base->cpu))
			wake_up_nohz_cpu(base->cpu);
		return;
	}

	/*
	 * We might have to IPI the remote CPU if the base is idle and the
	 * timer is not deferrable. If the other CPU is on the way to idle
	 * then it can't set base->is_idle as we hold the base lock:
	 */
	if (!base->is_idle)
		return;

	/* Check whether this is the new first expiring timer: */
	if (time_after_eq(timer->expires, base->next_expiry))
		return;

	/*
	 * Set the next expiry time and kick the CPU so it can reevaluate the
	 * wheel:
	 */
	base->next_expiry = timer->expires;
	wake_up_nohz_cpu(base->cpu);
}

static void
internal_add_timer(struct timer_base *base, struct timer_list *timer)
{
	__internal_add_timer(base, timer);
	trigger_dyntick_cpu(base, timer);
}

#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static struct debug_obj_descr timer_debug_descr;

static void *timer_debug_hint(void *addr)
{
	return ((struct timer_list *) addr)->function;
}

static bool timer_is_static_object(void *addr)
{
	struct timer_list *timer = addr;

	return (timer->entry.pprev == NULL &&
		timer->entry.next == TIMER_ENTRY_STATIC);
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool timer_fixup_init(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_init(timer, &timer_debug_descr);
		return true;
	default:
		return false;
	}
}

/* Stub timer callback for improperly used timers. */
static void stub_timer(unsigned long data)
{
	WARN_ON(1);
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool timer_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		setup_timer(timer, stub_timer, 0);
		return true;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return false;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool timer_fixup_free(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_free(timer, &timer_debug_descr);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_assert_init is called when:
 * - an untracked/uninit-ed object is found
 */
static bool timer_fixup_assert_init(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		setup_timer(timer, stub_timer, 0);
		return true;
	default:
		return false;
	}
}

static struct debug_obj_descr timer_debug_descr = {
	.name			= "timer_list",
	.debug_hint		= timer_debug_hint,
	.is_static_object	= timer_is_static_object,
	.fixup_init		= timer_fixup_init,
	.fixup_activate		= timer_fixup_activate,
	.fixup_free		= timer_fixup_free,
	.fixup_assert_init	= timer_fixup_assert_init,
};

static inline void debug_timer_init(struct timer_list *timer)
{
	debug_object_init(timer, &timer_debug_descr);
}

static inline void debug_timer_activate(struct timer_list *timer)
{
	debug_object_activate(timer, &timer_debug_descr);
}

static inline void debug_timer_deactivate(struct timer_list *timer)
{
	debug_object_deactivate(timer, &timer_debug_descr);
}

static inline void debug_timer_free(struct timer_list *timer)
{
	debug_object_free(timer, &timer_debug_descr);
}

static inline void debug_timer_assert_init(struct timer_list *timer)
{
	debug_object_assert_init(timer, &timer_debug_descr);
}

static void do_init_timer(struct timer_list *timer, unsigned int flags,
			  const char *name, struct lock_class_key *key);

void init_timer_on_stack_key(struct timer_list *timer, unsigned int flags,
			     const char *name, struct lock_class_key *key)
{
	debug_object_init_on_stack(timer, &timer_debug_descr);
	do_init_timer(timer, flags, name, key);
}
EXPORT_SYMBOL_GPL(init_timer_on_stack_key);

void destroy_timer_on_stack(struct timer_list *timer)
{
	debug_object_free(timer, &timer_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_timer_on_stack);

#else
static inline void debug_timer_init(struct timer_list *timer) { }
static inline void debug_timer_activate(struct timer_list *timer) { }
static inline void debug_timer_deactivate(struct timer_list *timer) { }
static inline void debug_timer_assert_init(struct timer_list *timer) { }
#endif

static inline void debug_init(struct timer_list *timer)
{
	debug_timer_init(timer);
	trace_timer_init(timer);
}

static inline void
debug_activate(struct timer_list *timer, unsigned long expires)
{
	debug_timer_activate(timer);
	trace_timer_start(timer, expires, timer->flags);
}

static inline void debug_deactivate(struct timer_list *timer)
{
	debug_timer_deactivate(timer);
	trace_timer_cancel(timer);
}

static inline void debug_assert_init(struct timer_list *timer)
{
	debug_timer_assert_init(timer);
}

static void do_init_timer(struct timer_list *timer, unsigned int flags,
			  const char *name, struct lock_class_key *key)
{
	timer->entry.pprev = NULL;
	timer->flags = flags | raw_smp_processor_id();
	lockdep_init_map(&timer->lockdep_map, name, key, 0);
}

/**
 * init_timer_key - initialize a timer
 * @timer: the timer to be initialized
 * @flags: timer flags
 * @name: name of the timer
 * @key: lockdep class key of the fake lock used for tracking timer
 *       sync lock dependencies
 *
 * init_timer_key() must be done to a timer prior to calling *any* of the
 * other timer functions.
 */
void init_timer_key(struct timer_list *timer, unsigned int flags,
		    const char *name, struct lock_class_key *key)
{
	debug_init(timer);
	do_init_timer(timer, flags, name, key);
}
EXPORT_SYMBOL(init_timer_key);

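/*
 * Illustrative usage (a sketch, not part of this file): most callers go
 * through the init_timer()/setup_timer() wrappers, which expand to
 * init_timer_key() with an appropriate lockdep key:
 *
 *	static void my_timeout(unsigned long data);
 *	static struct timer_list my_timer;
 *
 *	setup_timer(&my_timer, my_timeout, 0);
 *
 * my_timeout() and my_timer are hypothetical names; the timer still has
 * to be armed with add_timer() or mod_timer() afterwards.
 */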
static inline void detach_timer(struct timer_list *timer, bool clear_pending)
{
	struct hlist_node *entry = &timer->entry;

	debug_deactivate(timer);

	__hlist_del(entry);
	if (clear_pending)
		entry->pprev = NULL;
	entry->next = LIST_POISON2;
}

static int detach_if_pending(struct timer_list *timer, struct timer_base *base,
			     bool clear_pending)
{
	unsigned idx = timer_get_idx(timer);

	if (!timer_pending(timer))
		return 0;

	if (hlist_is_singular_node(&timer->entry, base->vectors + idx))
		__clear_bit(idx, base->pending_map);

	detach_timer(timer, clear_pending);
	return 1;
}

static inline struct timer_base *get_timer_cpu_base(u32 tflags, u32 cpu)
{
	struct timer_base *base = per_cpu_ptr(&timer_bases[BASE_STD], cpu);

	/*
	 * If the timer is deferrable and NO_HZ_COMMON is set then we need
	 * to use the deferrable base.
	 */
	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE)) {
		base = &timer_base_deferrable;
		if (tflags & TIMER_PINNED)
			base = per_cpu_ptr(&timer_bases[BASE_DEF], cpu);
	}
	return base;
}

static inline struct timer_base *get_timer_this_cpu_base(u32 tflags)
{
	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);

	/*
	 * If the timer is deferrable and NO_HZ_COMMON is set then we need
	 * to use the deferrable base.
	 */
	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE)) {
		base = &timer_base_deferrable;
		if (tflags & TIMER_PINNED)
			base = this_cpu_ptr(&timer_bases[BASE_DEF]);
	}
	return base;
}

static inline struct timer_base *get_timer_base(u32 tflags)
{
	return get_timer_cpu_base(tflags, tflags & TIMER_CPUMASK);
}

#ifdef CONFIG_NO_HZ_COMMON
static inline struct timer_base *
get_target_base(struct timer_base *base, unsigned tflags)
{
#ifdef CONFIG_SMP
	if ((tflags & TIMER_PINNED) || !base->migration_enabled)
		return get_timer_this_cpu_base(tflags);
	return get_timer_cpu_base(tflags, get_nohz_timer_target());
#else
	return get_timer_this_cpu_base(tflags);
#endif
}

static inline void forward_timer_base(struct timer_base *base)
{
	unsigned long jnow;

	/*
	 * We only forward the base when we are idle or have just come out of
	 * idle (must_forward_clk logic), and have a delta between base clock
	 * and jiffies. In the common case, run_timers will take care of it.
	 */
	if (likely(!base->must_forward_clk))
		return;

	jnow = READ_ONCE(jiffies);
	base->must_forward_clk = base->is_idle;
	if ((long)(jnow - base->clk) < 2)
		return;

	/*
	 * If the next expiry value is > jiffies, then we fast forward to
	 * jiffies otherwise we forward to the next expiry value.
	 */
	if (time_after(base->next_expiry, jnow))
		base->clk = jnow;
	else
		base->clk = base->next_expiry;
}
#else
static inline struct timer_base *
get_target_base(struct timer_base *base, unsigned tflags)
{
	return get_timer_this_cpu_base(tflags);
}

static inline void forward_timer_base(struct timer_base *base) { }
#endif

/*
 * We are using hashed locking: Holding per_cpu(timer_bases[x]).lock means
 * that all timers which are tied to this base are locked, and the base itself
 * is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found in the base->vectors array.
 *
 * When a timer is migrating then the TIMER_MIGRATING flag is set and we need
 * to wait until the migration is done.
 */
static struct timer_base *lock_timer_base(struct timer_list *timer,
					  unsigned long *flags)
	__acquires(timer->base->lock)
{
	for (;;) {
		struct timer_base *base;
		u32 tf;

		/*
		 * We need to use READ_ONCE() here, otherwise the compiler
		 * might re-read @tf between the check for TIMER_MIGRATING
		 * and spin_lock().
		 */
		tf = READ_ONCE(timer->flags);

		if (!(tf & TIMER_MIGRATING)) {
			base = get_timer_base(tf);
			spin_lock_irqsave(&base->lock, *flags);
			if (timer->flags == tf)
				return base;
			spin_unlock_irqrestore(&base->lock, *flags);
		}
		cpu_relax();
	}
}

static inline int
__mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
{
	struct timer_base *base, *new_base;
	unsigned int idx = UINT_MAX;
	unsigned long clk = 0, flags;
	int ret = 0;

	BUG_ON(!timer->function);

	/*
	 * This is a common optimization triggered by the networking code - if
	 * the timer is re-modified to have the same timeout or ends up in the
	 * same array bucket then just return:
	 */
	if (timer_pending(timer)) {
		/*
		 * The downside of this optimization is that it can result in
		 * larger granularity than you would get from adding a new
		 * timer with this expiry.
		 */
		if (timer->expires == expires)
			return 1;

		/*
		 * We lock the timer base and calculate the bucket index right
		 * here. If the timer ends up in the same bucket, then we
		 * just update the expiry time and avoid the whole
		 * dequeue/enqueue dance.
		 */
		base = lock_timer_base(timer, &flags);
		forward_timer_base(base);

		clk = base->clk;
		idx = calc_wheel_index(expires, clk);

		/*
		 * Retrieve and compare the array index of the pending
		 * timer. If it matches set the expiry to the new value so a
		 * subsequent call will exit in the expires check above.
		 */
		if (idx == timer_get_idx(timer)) {
			timer->expires = expires;
			ret = 1;
			goto out_unlock;
		}
	} else {
		base = lock_timer_base(timer, &flags);
		forward_timer_base(base);
	}

	ret = detach_if_pending(timer, base, false);
	if (!ret && pending_only)
		goto out_unlock;

	new_base = get_target_base(base, timer->flags);

	if (base != new_base) {
		/*
		 * We are trying to schedule the timer on the new base.
		 * However we can't change timer's base while it is running,
		 * otherwise del_timer_sync() can't detect that the timer's
		 * handler has not yet finished. This also guarantees that the
		 * timer is serialized wrt itself.
		 */
		if (likely(base->running_timer != timer)) {
			/* See the comment in lock_timer_base() */
			timer->flags |= TIMER_MIGRATING;

			spin_unlock(&base->lock);
			base = new_base;
			spin_lock(&base->lock);
			WRITE_ONCE(timer->flags,
				   (timer->flags & ~TIMER_BASEMASK) | base->cpu);
			forward_timer_base(base);
		}
	}

	debug_activate(timer, expires);

	timer->expires = expires;
	/*
	 * If 'idx' was calculated above and the base time did not advance
	 * between calculating 'idx' and possibly switching the base, only
	 * enqueue_timer() and trigger_dyntick_cpu() are required. Otherwise
	 * we need to (re)calculate the wheel index via
	 * internal_add_timer().
	 */
	if (idx != UINT_MAX && clk == base->clk) {
		enqueue_timer(base, timer, idx);
		trigger_dyntick_cpu(base, timer);
	} else {
		internal_add_timer(base, timer);
	}

out_unlock:
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}

1048
Ingo Molnar74019222009-02-18 12:23:29 +01001049/**
1050 * mod_timer_pending - modify a pending timer's timeout
1051 * @timer: the pending timer to be modified
1052 * @expires: new timeout in jiffies
1053 *
1054 * mod_timer_pending() is the same for pending timers as mod_timer(),
1055 * but will not re-activate and modify already deleted timers.
1056 *
1057 * It is useful for unserialized use of timers.
1058 */
1059int mod_timer_pending(struct timer_list *timer, unsigned long expires)
1060{
Thomas Gleixner177ec0a2016-07-04 09:50:24 +00001061 return __mod_timer(timer, expires, true);
Ingo Molnar74019222009-02-18 12:23:29 +01001062}
1063EXPORT_SYMBOL(mod_timer_pending);
1064
/**
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer() is a more efficient way to update the expire field of an
 * active timer (if the timer is inactive it will be activated).
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * The function returns whether it has modified a pending timer or not.
 * (i.e. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
	return __mod_timer(timer, expires, false);
}
EXPORT_SYMBOL(mod_timer);

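/*
 * Illustrative rearm pattern (a sketch, not part of this file): a
 * watchdog-style callback that pushes its own deadline out on every
 * activity event typically does:
 *
 *	mod_timer(&wd_timer, jiffies + WD_TIMEOUT);
 *
 * wd_timer and WD_TIMEOUT are hypothetical; the call works whether or
 * not the timer is currently pending, which is why it is the safe choice
 * when several contexts race to update the same timeout.
 */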
/**
 * add_timer - start a timer
 * @timer: the timer to be added
 *
 * The kernel will do a ->function(->data) callback from the
 * timer interrupt at the ->expires point in the future. The
 * current time is 'jiffies'.
 *
 * The timer's ->expires, ->function (and if the handler uses it, ->data)
 * fields must be set prior to calling this function.
 *
 * Timers with an ->expires field in the past will be executed in the next
 * timer tick.
 */
void add_timer(struct timer_list *timer)
{
	BUG_ON(timer_pending(timer));
	mod_timer(timer, timer->expires);
}
EXPORT_SYMBOL(add_timer);

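/*
 * Illustrative usage (a sketch with hypothetical names): after
 * initialization, arming a one-shot timer is just setting ->expires and
 * calling add_timer():
 *
 *	setup_timer(&probe_timer, probe_timeout_fn, 0);
 *	probe_timer.expires = jiffies + 2 * HZ;
 *	add_timer(&probe_timer);
 *
 * Unlike mod_timer(), add_timer() must only be used on a timer that is
 * not already pending, hence the BUG_ON() above.
 */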
/**
 * add_timer_on - start a timer on a particular CPU
 * @timer: the timer to be added
 * @cpu: the CPU to start it on
 *
 * This is not very scalable on SMP. Double adds are not possible.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
	struct timer_base *new_base, *base;
	unsigned long flags;

	BUG_ON(timer_pending(timer) || !timer->function);

	new_base = get_timer_cpu_base(timer->flags, cpu);

	/*
	 * If @timer was on a different CPU, it should be migrated with the
	 * old base locked to prevent other operations proceeding with the
	 * wrong base locked. See lock_timer_base().
	 */
	base = lock_timer_base(timer, &flags);
	if (base != new_base) {
		timer->flags |= TIMER_MIGRATING;

		spin_unlock(&base->lock);
		base = new_base;
		spin_lock(&base->lock);
		WRITE_ONCE(timer->flags,
			   (timer->flags & ~TIMER_BASEMASK) | cpu);
	}
	forward_timer_base(base);

	debug_activate(timer, timer->expires);
	internal_add_timer(base, timer);
	spin_unlock_irqrestore(&base->lock, flags);
}
EXPORT_SYMBOL_GPL(add_timer_on);

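/*
 * Illustrative usage (a sketch with hypothetical names): per-CPU
 * housekeeping work sometimes arms one timer per CPU so the callback
 * runs with CPU-local data:
 *
 *	for_each_online_cpu(cpu) {
 *		struct timer_list *t = per_cpu_ptr(&poll_timers, cpu);
 *
 *		t->expires = jiffies + HZ;
 *		add_timer_on(t, cpu);
 *	}
 *
 * poll_timers is a hypothetical DEFINE_PER_CPU(struct timer_list, ...)
 * variable; each timer must already be initialized and not pending.
 */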
/**
 * del_timer - deactivate a timer.
 * @timer: the timer to be deactivated
 *
 * del_timer() deactivates a timer - this works on both active and inactive
 * timers.
 *
 * The function returns whether it has deactivated a pending timer or not.
 * (i.e. del_timer() of an inactive timer returns 0, del_timer() of an
 * active timer returns 1.)
 */
int del_timer(struct timer_list *timer)
{
	struct timer_base *base;
	unsigned long flags;
	int ret = 0;

	debug_assert_init(timer);

	if (timer_pending(timer)) {
		base = lock_timer_base(timer, &flags);
		ret = detach_if_pending(timer, base, true);
		spin_unlock_irqrestore(&base->lock, flags);
	}

	return ret;
}
EXPORT_SYMBOL(del_timer);

/**
 * try_to_del_timer_sync - Try to deactivate a timer
 * @timer: the timer to be deactivated
 *
 * This function tries to deactivate a timer. Upon successful (ret >= 0)
 * exit the timer is not queued and the handler is not running on any CPU.
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
	struct timer_base *base;
	unsigned long flags;
	int ret = -1;

	debug_assert_init(timer);

	base = lock_timer_base(timer, &flags);

	if (base->running_timer != timer)
		ret = detach_if_pending(timer, base, true);

	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}
EXPORT_SYMBOL(try_to_del_timer_sync);

Yong Zhang6f1bc452010-10-20 15:57:31 -07001206#ifdef CONFIG_SMP
Rolf Eike Beer2aae4a12006-09-29 01:59:46 -07001207/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001208 * del_timer_sync - deactivate a timer and wait for the handler to finish.
1209 * @timer: the timer to be deactivated
1210 *
1211 * This function only differs from del_timer() on SMP: besides deactivating
1212 * the timer it also makes sure the handler has finished executing on other
1213 * CPUs.
1214 *
Robert P. J. Day72fd4a32007-02-10 01:45:59 -08001215 * Synchronization rules: Callers must prevent restarting of the timer,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001216 * otherwise this function is meaningless. It must not be called from
Tejun Heoc5f66e92012-08-08 11:10:28 -07001217 * interrupt contexts unless the timer is an irqsafe one. The caller must
1218 * not hold locks which would prevent completion of the timer's
1219 * handler. The timer's handler must not call add_timer_on(). Upon exit the
1220 * timer is not queued and the handler is not running on any CPU.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001221 *
Tejun Heoc5f66e92012-08-08 11:10:28 -07001222 * Note: For !irqsafe timers, you must not hold locks that are held in
1223 * interrupt context while calling this function. Even if the lock has
1224 * nothing to do with the timer in question. Here's why:
Steven Rostedt48228f72011-02-08 12:39:54 -05001225 *
1226 * CPU0 CPU1
1227 * ---- ----
1228 * <SOFTIRQ>
1229 * call_timer_fn();
1230 * base->running_timer = mytimer;
1231 * spin_lock_irq(somelock);
1232 * <IRQ>
1233 * spin_lock(somelock);
1234 * del_timer_sync(mytimer);
1235 * while (base->running_timer == mytimer);
1236 *
1237 * Now del_timer_sync() will never return and never release somelock.
1238 * The interrupt on the other CPU is waiting to grab somelock but
1239 * it has interrupted the softirq that CPU0 is waiting to finish.
1240 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07001241 * The function returns whether it has deactivated a pending timer or not.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001242 */
1243int del_timer_sync(struct timer_list *timer)
1244{
Johannes Berg6f2b9b92009-01-29 16:03:20 +01001245#ifdef CONFIG_LOCKDEP
Peter Zijlstraf266a512011-02-03 15:09:41 +01001246 unsigned long flags;
1247
Steven Rostedt48228f72011-02-08 12:39:54 -05001248 /*
1249 * If lockdep gives a backtrace here, please reference
1250 * the synchronization rules above.
1251 */
Peter Zijlstra7ff20792011-02-08 15:18:00 +01001252 local_irq_save(flags);
Johannes Berg6f2b9b92009-01-29 16:03:20 +01001253 lock_map_acquire(&timer->lockdep_map);
1254 lock_map_release(&timer->lockdep_map);
Peter Zijlstra7ff20792011-02-08 15:18:00 +01001255 local_irq_restore(flags);
Johannes Berg6f2b9b92009-01-29 16:03:20 +01001256#endif
Yong Zhang466bd302010-10-20 15:57:33 -07001257 /*
1258	 * Don't call it from hardirq context: unless the timer is
1259	 * irqsafe, waiting for a running handler could deadlock.
1260 */
Thomas Gleixner0eeda712015-05-26 22:50:29 +00001261 WARN_ON(in_irq() && !(timer->flags & TIMER_IRQSAFE));
Oleg Nesterovfd450b72005-06-23 00:08:59 -07001262 for (;;) {
1263 int ret = try_to_del_timer_sync(timer);
1264 if (ret >= 0)
1265 return ret;
Andrew Mortona0009652006-07-14 00:24:06 -07001266 cpu_relax();
Oleg Nesterovfd450b72005-06-23 00:08:59 -07001267 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001268}
1269EXPORT_SYMBOL(del_timer_sync);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001270#endif
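
/*
 * Illustrative sketch, not part of this file: a teardown that follows
 * the synchronization rules documented above. sketch_timer_fn() takes
 * sketch_lock, so the cancelling path must not hold sketch_lock across
 * del_timer_sync(). All sketch_* names are hypothetical.
 */
static DEFINE_SPINLOCK(sketch_lock);
static struct timer_list sketch_timer;

static void __maybe_unused sketch_timer_fn(unsigned long data)
{
	spin_lock(&sketch_lock);
	/* ... update shared state ... */
	spin_unlock(&sketch_lock);
}

static void __maybe_unused sketch_teardown(void)
{
	/* Correct: no locks held that sketch_timer_fn() might take. */
	del_timer_sync(&sketch_timer);

	/*
	 * Wrong, risks the CPU0/CPU1 deadlock drawn above:
	 *
	 *	spin_lock_irq(&sketch_lock);
	 *	del_timer_sync(&sketch_timer);
	 *	spin_unlock_irq(&sketch_lock);
	 */
}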
1271
Thomas Gleixner576da122010-03-12 21:10:29 +01001272static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
1273 unsigned long data)
1274{
Peter Zijlstra4a2b4b22013-08-14 14:55:24 +02001275 int count = preempt_count();
Thomas Gleixner576da122010-03-12 21:10:29 +01001276
1277#ifdef CONFIG_LOCKDEP
1278 /*
1279 * It is permissible to free the timer from inside the
1280 * function that is called from it; we need to take that into
1281 * account for lockdep too. To avoid bogus "held lock freed"
1282 * warnings as well as problems when looking into
1283 * timer->lockdep_map, make a copy and use that here.
1284 */
Peter Zijlstra4d82a1d2012-05-15 08:06:19 -07001285 struct lockdep_map lockdep_map;
1286
1287 lockdep_copy_map(&lockdep_map, &timer->lockdep_map);
Thomas Gleixner576da122010-03-12 21:10:29 +01001288#endif
1289 /*
1290 * Couple the lock chain with the lock chain at
1291 * del_timer_sync() by acquiring the lock_map around the fn()
1292 * call here and in del_timer_sync().
1293 */
1294 lock_map_acquire(&lockdep_map);
1295
1296 trace_timer_expire_entry(timer);
1297 fn(data);
1298 trace_timer_expire_exit(timer);
1299
1300 lock_map_release(&lockdep_map);
1301
Peter Zijlstra4a2b4b22013-08-14 14:55:24 +02001302 if (count != preempt_count()) {
Thomas Gleixner802702e2010-03-12 20:13:23 +01001303 WARN_ONCE(1, "timer: %pF preempt leak: %08x -> %08x\n",
Peter Zijlstra4a2b4b22013-08-14 14:55:24 +02001304 fn, count, preempt_count());
Thomas Gleixner802702e2010-03-12 20:13:23 +01001305 /*
1306 * Restore the preempt count. That gives us a decent
1307 * chance to survive and extract information. If the
1308 * callback kept a lock held, bad luck, but not worse
1309 * than the BUG() we had.
1310 */
Peter Zijlstra4a2b4b22013-08-14 14:55:24 +02001311 preempt_count_set(count);
Thomas Gleixner576da122010-03-12 21:10:29 +01001312 }
1313}
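
/*
 * Illustrative sketch, not part of this file: the bug class the
 * preempt count check above catches. On a kernel with preempt
 * counting, returning from a handler with a spinlock still held
 * leaves preempt_count() elevated and fires the WARN_ONCE(). The
 * leaky_* names are hypothetical.
 */
static DEFINE_SPINLOCK(leaky_lock);

static void __maybe_unused leaky_timer_fn(unsigned long data)
{
	spin_lock(&leaky_lock);
	if (!data)
		return;		/* BUG: leaks leaky_lock and the preempt count */
	spin_unlock(&leaky_lock);
}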
1314
Thomas Gleixner500462a2016-07-04 09:50:30 +00001315static void expire_timers(struct timer_base *base, struct hlist_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001316{
Thomas Gleixner500462a2016-07-04 09:50:30 +00001317 while (!hlist_empty(head)) {
1318 struct timer_list *timer;
1319 void (*fn)(unsigned long);
1320 unsigned long data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001321
Thomas Gleixner500462a2016-07-04 09:50:30 +00001322 timer = hlist_entry(head->first, struct timer_list, entry);
Thomas Gleixner3bb475a2015-05-26 22:50:24 +00001323
Thomas Gleixner500462a2016-07-04 09:50:30 +00001324 base->running_timer = timer;
1325 detach_timer(timer, true);
Thomas Gleixner3bb475a2015-05-26 22:50:24 +00001326
Thomas Gleixner500462a2016-07-04 09:50:30 +00001327 fn = timer->function;
1328 data = timer->data;
Thomas Gleixner3bb475a2015-05-26 22:50:24 +00001329
Thomas Gleixner500462a2016-07-04 09:50:30 +00001330 if (timer->flags & TIMER_IRQSAFE) {
1331 spin_unlock(&base->lock);
1332 call_timer_fn(timer, fn, data);
1333 spin_lock(&base->lock);
1334 } else {
1335 spin_unlock_irq(&base->lock);
1336 call_timer_fn(timer, fn, data);
1337 spin_lock_irq(&base->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001338 }
1339 }
Thomas Gleixner500462a2016-07-04 09:50:30 +00001340}
1341
Anna-Maria Gleixner23696832016-07-04 09:50:34 +00001342static int __collect_expired_timers(struct timer_base *base,
1343 struct hlist_head *heads)
Thomas Gleixner500462a2016-07-04 09:50:30 +00001344{
1345 unsigned long clk = base->clk;
1346 struct hlist_head *vec;
1347 int i, levels = 0;
1348 unsigned int idx;
1349
1350 for (i = 0; i < LVL_DEPTH; i++) {
1351 idx = (clk & LVL_MASK) + i * LVL_SIZE;
1352
1353 if (__test_and_clear_bit(idx, base->pending_map)) {
1354 vec = base->vectors + idx;
1355 hlist_move_list(vec, heads++);
1356 levels++;
1357 }
1358 /* Is it time to look at the next level? */
1359 if (clk & LVL_CLK_MASK)
1360 break;
1361 /* Shift clock for the next level granularity */
1362 clk >>= LVL_CLK_SHIFT;
1363 }
1364 return levels;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001365}
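
/*
 * Worked example for the scan above, assuming the LVL_* constants
 * defined earlier in this file (LVL_BITS == 6, so LVL_SIZE == 64 and
 * LVL_MASK == 63; LVL_CLK_SHIFT == 3, so LVL_CLK_MASK == 7). With
 * base->clk == 200 (binary 11001000):
 *
 *  level 0: idx = (200 & 63) + 0 * 64 =  8; 200 & 7 == 0, so go up
 *  level 1: clk >>= 3 -> 25; idx = (25 & 63) + 1 * 64 = 89;
 *           25 & 7 == 1, so stop - higher levels cannot be due yet.
 */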
1366
Frederic Weisbecker3451d022011-08-10 23:21:01 +02001367#ifdef CONFIG_NO_HZ_COMMON
Linus Torvalds1da177e2005-04-16 15:20:36 -07001368/*
Anna-Maria Gleixner23696832016-07-04 09:50:34 +00001369 * Find the next pending bucket of a level. Search from level start (@offset)
1370 * + @clk upwards and if nothing there, search from start of the level
1371 * (@offset) up to @offset + clk.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001372 */
Thomas Gleixner500462a2016-07-04 09:50:30 +00001373static int next_pending_bucket(struct timer_base *base, unsigned offset,
Channagoud Kadabice49c272017-08-18 13:22:34 -07001374 unsigned int clk, int lvl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001375{
Channagoud Kadabice49c272017-08-18 13:22:34 -07001376 unsigned int pos_up = -1, pos_down, start = offset + clk;
Thomas Gleixner500462a2016-07-04 09:50:30 +00001377 unsigned end = offset + LVL_SIZE;
Channagoud Kadabice49c272017-08-18 13:22:34 -07001378 unsigned int pos;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001379
Thomas Gleixner500462a2016-07-04 09:50:30 +00001380 pos = find_next_bit(base->pending_map, end, start);
1381 if (pos < end)
Channagoud Kadabice49c272017-08-18 13:22:34 -07001382 pos_up = pos - start;
Venki Pallipadi6e453a62007-05-08 00:27:44 -07001383
Thomas Gleixner500462a2016-07-04 09:50:30 +00001384 pos = find_next_bit(base->pending_map, start, offset);
Channagoud Kadabice49c272017-08-18 13:22:34 -07001385 pos_down = pos < start ? pos + LVL_SIZE - start : -1;
Maria Yu219fe502017-10-25 14:26:07 +08001386 if (((pos_up + (u64)base->clk) << LVL_SHIFT(lvl)) >
1387 ((pos_down + (u64)base->clk) << LVL_SHIFT(lvl)))
Channagoud Kadabice49c272017-08-18 13:22:34 -07001388 return pos_down;
1389 return pos_up;
Thomas Gleixner500462a2016-07-04 09:50:30 +00001390}
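
/*
 * Worked example for next_pending_bucket(), assuming LVL_SIZE == 64:
 * with @offset == 0, @clk == 8 and pending bits set at 5 and 20, the
 * forward search yields pos_up == 20 - 8 == 12 and the wrapped search
 * yields pos_down == 5 + 64 - 8 == 61. The comparison above scales
 * both by base->clk and LVL_SHIFT(lvl) and returns whichever bucket
 * expires first (here pos_up).
 */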
1391
1392/*
Anna-Maria Gleixner23696832016-07-04 09:50:34 +00001393 * Search the first expiring timer in the various clock levels. Caller must
1394 * hold base->lock.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001395 */
Thomas Gleixner494af3e2016-07-04 09:50:28 +00001396static unsigned long __next_timer_interrupt(struct timer_base *base)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001397{
Thomas Gleixner500462a2016-07-04 09:50:30 +00001398 unsigned long clk, next, adj;
1399 unsigned lvl, offset = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001400
Thomas Gleixner500462a2016-07-04 09:50:30 +00001401 next = base->clk + NEXT_TIMER_MAX_DELTA;
1402 clk = base->clk;
1403 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) {
Channagoud Kadabice49c272017-08-18 13:22:34 -07001404 int pos = next_pending_bucket(base, offset, clk & LVL_MASK,
1405 lvl);
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001406
Thomas Gleixner500462a2016-07-04 09:50:30 +00001407 if (pos >= 0) {
1408 unsigned long tmp = clk + (unsigned long) pos;
1409
1410 tmp <<= LVL_SHIFT(lvl);
1411 if (time_before(tmp, next))
1412 next = tmp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001413 }
Thomas Gleixner500462a2016-07-04 09:50:30 +00001414 /*
1415 * Clock for the next level. If the current level clock lower
1416 * bits are zero, we look at the next level as is. If not we
1417 * need to advance it by one because that's going to be the
1418 * next expiring bucket in that level. base->clk is the next
1419 * expiring jiffie. So in case of:
1420 *
1421 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
1422 * 0 0 0 0 0 0
1423 *
1424 * we have to look at all levels @index 0. With
1425 *
1426 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
1427 * 0 0 0 0 0 2
1428 *
1429 * LVL0 has the next expiring bucket @index 2. The upper
1430 * levels have the next expiring bucket @index 1.
1431 *
1432 * In case that the propagation wraps the next level the same
1433 * rules apply:
1434 *
1435 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
1436 * 0 0 0 0 F 2
1437 *
1438 * So after looking at LVL0 we get:
1439 *
1440 * LVL5 LVL4 LVL3 LVL2 LVL1
1441 * 0 0 0 1 0
1442 *
1443 * So no propagation from LVL1 to LVL2 because that happened
1444 * with the add already, but then we need to propagate further
1445 * from LVL2 to LVL3.
1446 *
1447 * So the simple check whether the lower bits of the current
1448 * level are 0 or not is sufficient for all cases.
1449 */
1450 adj = clk & LVL_CLK_MASK ? 1 : 0;
1451 clk >>= LVL_CLK_SHIFT;
1452 clk += adj;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001453 }
Thomas Gleixner500462a2016-07-04 09:50:30 +00001454 return next;
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001455}
1456
1457/*
1458 * Check if the next hrtimer event is before the next timer wheel
1459 * event:
1460 */
Thomas Gleixnerc1ad3482015-04-14 21:08:58 +00001461static u64 cmp_next_hrtimer_event(u64 basem, u64 expires)
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001462{
Thomas Gleixnerc1ad3482015-04-14 21:08:58 +00001463 u64 nextevt = hrtimer_get_next_event();
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001464
Thomas Gleixnerc1ad3482015-04-14 21:08:58 +00001465 /*
1466	 * If high resolution timers are enabled, hrtimer_get_next_event()
1467	 * returns KTIME_MAX: hrtimers then expire from their own interrupt.
1468 */
1469 if (expires <= nextevt)
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001470 return expires;
1471
Thomas Gleixner9501b6c2007-03-25 14:31:17 +02001472 /*
Thomas Gleixnerc1ad3482015-04-14 21:08:58 +00001473 * If the next timer is already expired, return the tick base
1474 * time so the tick is fired immediately.
Thomas Gleixner9501b6c2007-03-25 14:31:17 +02001475 */
Thomas Gleixnerc1ad3482015-04-14 21:08:58 +00001476 if (nextevt <= basem)
1477 return basem;
Thomas Gleixnereaad0842007-05-29 23:47:39 +02001478
1479 /*
Thomas Gleixnerc1ad3482015-04-14 21:08:58 +00001480 * Round up to the next jiffie. High resolution timers are
1481 * off, so the hrtimers are expired in the tick and we need to
1482 * make sure that this tick really expires the timer to avoid
1483 * a ping pong of the nohz stop code.
1484 *
1485 * Use DIV_ROUND_UP_ULL to prevent gcc calling __divdi3
Thomas Gleixnereaad0842007-05-29 23:47:39 +02001486 */
Thomas Gleixnerc1ad3482015-04-14 21:08:58 +00001487 return DIV_ROUND_UP_ULL(nextevt, TICK_NSEC) * TICK_NSEC;
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001488}
1489
Prasad Sodagudi602c4e22017-05-17 23:26:09 -07001490
1491#ifdef CONFIG_SMP
1492/*
1493 * check_pending_deferrable_timers - Check for unbound deferrable timer expiry
1494 * @cpu: Current CPU
1495 *
1496 * The function checks whether any global deferrable pending timers
1497 * have expired. It does not check the expiry of CPU-bound
1498 * deferrable pending timers.
1499 *
1500 * The function returns true when a CPU-unbound deferrable timer has expired.
1501 */
1502bool check_pending_deferrable_timers(int cpu)
1503{
1504 if (cpu == tick_do_timer_cpu ||
1505 tick_do_timer_cpu == TICK_DO_TIMER_NONE) {
1506 if (time_after_eq(jiffies, timer_base_deferrable.clk)
1507 && !atomic_cmpxchg(&deferrable_pending, 0, 1)) {
1508 return true;
1509 }
1510 }
1511 return false;
1512}
1513#endif
1514
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001515/**
Thomas Gleixnerc1ad3482015-04-14 21:08:58 +00001516 * get_next_timer_interrupt - return the time (clock mono) of the next timer
1517 * @basej: base time jiffies
1518 * @basem: base time clock monotonic
1519 *
1520 * Returns the tick aligned clock monotonic time of the next pending
1521 * timer or KTIME_MAX if no timer is pending.
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001522 */
Thomas Gleixnerc1ad3482015-04-14 21:08:58 +00001523u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001524{
Thomas Gleixner500462a2016-07-04 09:50:30 +00001525 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
Thomas Gleixnerc1ad3482015-04-14 21:08:58 +00001526 u64 expires = KTIME_MAX;
1527 unsigned long nextevt;
Chris Metcalf46c8f0b2016-08-08 16:29:07 -04001528 bool is_max_delta;
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001529
Heiko Carstensdbd87b52010-12-01 10:11:09 +01001530 /*
1531 * Pretend that there is no timer pending if the cpu is offline.
1532 * Possible pending timers will be migrated later to an active cpu.
1533 */
1534 if (cpu_is_offline(smp_processor_id()))
Thomas Gleixnere40468a2012-05-25 22:08:59 +00001535 return expires;
1536
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001537 spin_lock(&base->lock);
Thomas Gleixner500462a2016-07-04 09:50:30 +00001538 nextevt = __next_timer_interrupt(base);
Chris Metcalf46c8f0b2016-08-08 16:29:07 -04001539 is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA);
Thomas Gleixnera683f392016-07-04 09:50:36 +00001540 base->next_expiry = nextevt;
1541 /*
Thomas Gleixner041ad7b2016-10-22 11:07:35 +00001542 * We have a fresh next event. Check whether we can forward the
1543 * base. We can only do that when @basej is past base->clk
1544 * otherwise we might rewind base->clk.
Thomas Gleixnera683f392016-07-04 09:50:36 +00001545 */
Thomas Gleixner041ad7b2016-10-22 11:07:35 +00001546 if (time_after(basej, base->clk)) {
1547 if (time_after(nextevt, basej))
1548 base->clk = basej;
1549 else if (time_after(nextevt, base->clk))
1550 base->clk = nextevt;
1551 }
Thomas Gleixnera683f392016-07-04 09:50:36 +00001552
1553 if (time_before_eq(nextevt, basej)) {
1554 expires = basem;
1555 base->is_idle = false;
1556 } else {
Chris Metcalf46c8f0b2016-08-08 16:29:07 -04001557 if (!is_max_delta)
Matija Glavinic Pecotic9ef8b232017-08-01 09:11:52 +02001558 expires = basem + (u64)(nextevt - basej) * TICK_NSEC;
Thomas Gleixnera683f392016-07-04 09:50:36 +00001559 /*
Nicholas Piggin70b3fd52017-08-22 18:43:48 +10001560 * If we expect to sleep more than a tick, mark the base idle.
1561 * Also the tick is stopped so any added timer must forward
1562 * the base clk itself to keep granularity small. This idle
1563 * logic is only maintained for the BASE_STD base, deferrable
1564 * timers may still see large granularity skew (by design).
Thomas Gleixnera683f392016-07-04 09:50:36 +00001565 */
Nicholas Piggin70b3fd52017-08-22 18:43:48 +10001566 if ((expires - basem) > TICK_NSEC) {
1567 base->must_forward_clk = true;
Thomas Gleixnera683f392016-07-04 09:50:36 +00001568 base->is_idle = true;
Nicholas Piggin70b3fd52017-08-22 18:43:48 +10001569 }
Thomas Gleixnere40468a2012-05-25 22:08:59 +00001570 }
Oleg Nesterov3691c512006-03-31 02:30:30 -08001571 spin_unlock(&base->lock);
Tony Lindgren69239742006-03-06 15:42:45 -08001572
Thomas Gleixnerc1ad3482015-04-14 21:08:58 +00001573 return cmp_next_hrtimer_event(basem, expires);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001574}
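
/*
 * Illustrative sketch, not part of this file: the call pattern used by
 * the nohz idle code. The real caller (kernel/time/tick-sched.c)
 * samples jiffies and clock monotonic consistently under jiffies_lock;
 * this simplified sketch does not.
 */
static u64 __maybe_unused example_next_wakeup(void)
{
	unsigned long basejiff = jiffies;
	u64 basemono = ktime_get_ns();

	/* KTIME_MAX means no pending timer: the tick may stop indefinitely. */
	return get_next_timer_interrupt(basejiff, basemono);
}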
Anna-Maria Gleixner23696832016-07-04 09:50:34 +00001575
Thomas Gleixnera683f392016-07-04 09:50:36 +00001576/**
1577 * timer_clear_idle - Clear the idle state of the timer base
1578 *
1579 * Called with interrupts disabled
1580 */
1581void timer_clear_idle(void)
1582{
1583 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
1584
1585 /*
1586 * We do this unlocked. The worst outcome is a remote enqueue sending
1587 * a pointless IPI, but taking the lock would just make the window for
1588 * sending the IPI a few instructions smaller for the cost of taking
1589 * the lock in the exit from idle path.
1590 */
1591 base->is_idle = false;
1592}
1593
Anna-Maria Gleixner23696832016-07-04 09:50:34 +00001594static int collect_expired_timers(struct timer_base *base,
1595 struct hlist_head *heads)
1596{
1597 /*
1598 * NOHZ optimization. After a long idle sleep we need to forward the
1599 * base to current jiffies. Avoid a loop by searching the bitfield for
1600 * the next expiring timer.
1601 */
1602 if ((long)(jiffies - base->clk) > 2) {
1603 unsigned long next = __next_timer_interrupt(base);
1604
1605 /*
1606 * If the next timer is ahead of time forward to current
Thomas Gleixnera683f392016-07-04 09:50:36 +00001607 * jiffies, otherwise forward to the next expiry time:
Anna-Maria Gleixner23696832016-07-04 09:50:34 +00001608 */
1609 if (time_after(next, jiffies)) {
1610 /* The call site will increment clock! */
1611 base->clk = jiffies - 1;
1612 return 0;
1613 }
1614 base->clk = next;
1615 }
1616 return __collect_expired_timers(base, heads);
1617}
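
/*
 * Worked example for the forwarding above: with base->clk == 1000 and
 * jiffies == 5000, a next expiry at 4000 forwards base->clk to 4000 so
 * that bucket is collected right away; a next expiry at 6000 forwards
 * base->clk to jiffies - 1 (the call site increments it) and nothing
 * is collected on this run.
 */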
1618#else
1619static inline int collect_expired_timers(struct timer_base *base,
1620 struct hlist_head *heads)
1621{
1622 return __collect_expired_timers(base, heads);
1623}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001624#endif
1625
Linus Torvalds1da177e2005-04-16 15:20:36 -07001626/*
Daniel Walker5b4db0c2007-10-18 03:06:11 -07001627 * Called from the timer interrupt handler to charge one tick to the current
Linus Torvalds1da177e2005-04-16 15:20:36 -07001628 * process. user_tick is 1 if the tick is user time, 0 for system.
1629 */
1630void update_process_times(int user_tick)
1631{
1632 struct task_struct *p = current;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001633
1634 /* Note: this timer irq context must be accounted for as well. */
Paul Mackerrasfa13a5a2007-11-09 22:39:38 +01001635 account_process_tick(p, user_tick);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001636 run_local_timers();
Paul E. McKenneyc3377c2d2014-10-21 07:53:02 -07001637 rcu_check_callbacks(user_tick);
Peter Zijlstrae360adb2010-10-14 14:01:34 +08001638#ifdef CONFIG_IRQ_WORK
1639 if (in_irq())
Frederic Weisbecker76a33062014-08-16 18:37:19 +02001640 irq_work_tick();
Peter Zijlstrae360adb2010-10-14 14:01:34 +08001641#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001642 scheduler_tick();
Thomas Gleixner68194572007-07-19 01:49:16 -07001643 run_posix_cpu_timers(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001644}
1645
Anna-Maria Gleixner73420fe2016-07-04 09:50:33 +00001646/**
1647 * __run_timers - run all expired timers (if any) on this CPU.
1648 * @base: the timer vector to be processed.
1649 */
1650static inline void __run_timers(struct timer_base *base)
1651{
1652 struct hlist_head heads[LVL_DEPTH];
1653 int levels;
1654
1655 if (!time_after_eq(jiffies, base->clk))
1656 return;
1657
1658 spin_lock_irq(&base->lock);
1659
1660 while (time_after_eq(jiffies, base->clk)) {
1661
1662 levels = collect_expired_timers(base, heads);
1663 base->clk++;
1664
1665 while (levels--)
1666 expire_timers(base, heads + levels);
1667 }
1668 base->running_timer = NULL;
1669 spin_unlock_irq(&base->lock);
1670}
1671
Linus Torvalds1da177e2005-04-16 15:20:36 -07001672/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001673 * This function runs expired timers in bottom half (softirq) context.
1674 */
Emese Revfy0766f782016-06-20 20:42:34 +02001675static __latent_entropy void run_timer_softirq(struct softirq_action *h)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001676{
Thomas Gleixner500462a2016-07-04 09:50:30 +00001677 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001678
Nicholas Piggin70b3fd52017-08-22 18:43:48 +10001679 /*
1680 * must_forward_clk must be cleared before running timers so that any
1681 * timer functions that call mod_timer will not try to forward the
1682 * base. Idle tracking / clock forwarding logic is only used with
1683 * BASE_STD timers.
1684 *
1685 * The deferrable base does not do idle tracking at all, so we do
1686 * not forward it. This can result in very large variations in
1687 * granularity for deferrable timers, but they can be deferred for
1688 * long periods due to idle.
1689 */
1690 base->must_forward_clk = false;
1691
Thomas Gleixner500462a2016-07-04 09:50:30 +00001692 __run_timers(base);
Anna-Maria Gleixnerd8406872017-12-22 15:51:12 +01001693 if (IS_ENABLED(CONFIG_NO_HZ_COMMON))
Thomas Gleixner500462a2016-07-04 09:50:30 +00001694 __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
Prasad Sodagudi602c4e22017-05-17 23:26:09 -07001695
Lingutla Chandrasekharea6a3242018-03-01 20:41:57 +05301696 if ((atomic_cmpxchg(&deferrable_pending, 1, 0) &&
1697 tick_do_timer_cpu == TICK_DO_TIMER_NONE) ||
1698 tick_do_timer_cpu == smp_processor_id())
Prasad Sodagudi602c4e22017-05-17 23:26:09 -07001699 __run_timers(&timer_base_deferrable);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001700}
1701
1702/*
1703 * Called by the local, per-CPU timer interrupt on SMP.
1704 */
1705void run_local_timers(void)
1706{
Thomas Gleixner4e858762016-07-04 09:50:37 +00001707 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
1708
Peter Zijlstrad3d74452008-01-25 21:08:31 +01001709 hrtimer_run_queues();
Thomas Gleixner4e858762016-07-04 09:50:37 +00001710 /* Raise the softirq only if required. */
1711 if (time_before(jiffies, base->clk)) {
Thomas Gleixner676109b2018-01-14 23:19:49 +01001712 if (!IS_ENABLED(CONFIG_NO_HZ_COMMON))
Thomas Gleixner4e858762016-07-04 09:50:37 +00001713 return;
1714 /* CPU is awake, so check the deferrable base. */
1715 base++;
1716 if (time_before(jiffies, base->clk))
1717 return;
1718 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001719 raise_softirq(TIMER_SOFTIRQ);
1720}
1721
Linus Torvalds1da177e2005-04-16 15:20:36 -07001722#ifdef __ARCH_WANT_SYS_ALARM
1723
1724/*
1725 * For backwards compatibility? This can be done in libc so Alpha
1726 * and all newer ports shouldn't need it.
1727 */
Heiko Carstens58fd3aa2009-01-14 14:14:03 +01001728SYSCALL_DEFINE1(alarm, unsigned int, seconds)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001729{
Thomas Gleixnerc08b8a42006-03-25 03:06:33 -08001730 return alarm_setitimer(seconds);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001731}
1732
1733#endif
1734
Linus Torvalds1da177e2005-04-16 15:20:36 -07001735static void process_timeout(unsigned long __data)
1736{
Ingo Molnar36c8b582006-07-03 00:25:41 -07001737 wake_up_process((struct task_struct *)__data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001738}
1739
1740/**
1741 * schedule_timeout - sleep until timeout
1742 * @timeout: timeout value in jiffies
1743 *
1744 * Make the current task sleep until @timeout jiffies have
1745 * elapsed. The routine will return immediately unless
1746 * the current task state has been set (see set_current_state()).
1747 *
1748 * You can set the task state as follows -
1749 *
1750 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
1751 * pass before the routine returns. The routine will return 0
1752 *
1753 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
1754 * delivered to the current task. In this case the remaining time
1755 * in jiffies will be returned, or 0 if the timer expired in time
1756 *
1757 * The current task state is guaranteed to be TASK_RUNNING when this
1758 * routine returns.
1759 *
1760 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
1761 * the CPU away without a bound on the timeout. In this case the return
1762 * value will be %MAX_SCHEDULE_TIMEOUT.
1763 *
1764 * In all cases the return value is guaranteed to be non-negative.
1765 */
Harvey Harrison7ad5b3a2008-02-08 04:19:53 -08001766signed long __sched schedule_timeout(signed long timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001767{
1768 struct timer_list timer;
1769 unsigned long expire;
1770
1771 switch (timeout)
1772 {
1773 case MAX_SCHEDULE_TIMEOUT:
1774 /*
1775		 * These two special cases are useful for the caller's
1776		 * convenience. Nothing more. We could take
1777		 * MAX_SCHEDULE_TIMEOUT from one of the negative values,
1778		 * but I'd like to return a valid offset (>=0) to allow
1779		 * the caller to do everything it wants with the retval.
1780 */
1781 schedule();
1782 goto out;
1783 default:
1784 /*
1785		 * Another bit of paranoia. Note that the retval will be
1786		 * 0 since no piece of the kernel is supposed to check
1787		 * for a negative retval of schedule_timeout() (it
1788		 * should never happen anyway). You just have the printk()
1789		 * that will tell you if something has gone wrong, and where.
1790 */
Andrew Morton5b149bc2006-12-22 01:10:14 -08001791 if (timeout < 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001792 printk(KERN_ERR "schedule_timeout: wrong timeout "
Andrew Morton5b149bc2006-12-22 01:10:14 -08001793 "value %lx\n", timeout);
1794 dump_stack();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001795 current->state = TASK_RUNNING;
1796 goto out;
1797 }
1798 }
1799
1800 expire = timeout + jiffies;
1801
Thomas Gleixnerc6f3a972008-04-30 00:55:03 -07001802 setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
Thomas Gleixner177ec0a2016-07-04 09:50:24 +00001803 __mod_timer(&timer, expire, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001804 schedule();
1805 del_singleshot_timer_sync(&timer);
1806
Thomas Gleixnerc6f3a972008-04-30 00:55:03 -07001807 /* Remove the timer from the object tracker */
1808 destroy_timer_on_stack(&timer);
1809
Linus Torvalds1da177e2005-04-16 15:20:36 -07001810 timeout = expire - jiffies;
1811
1812 out:
1813 return timeout < 0 ? 0 : timeout;
1814}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001815EXPORT_SYMBOL(schedule_timeout);
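
/*
 * Illustrative sketch, not part of this file: the canonical
 * schedule_timeout() pattern. The task state must be set first or the
 * call returns immediately, as the kernel-doc above explains.
 */
static long __maybe_unused example_sleep_100ms(void)
{
	set_current_state(TASK_INTERRUPTIBLE);
	/* Returns the jiffies remaining if a signal woke us early. */
	return schedule_timeout(msecs_to_jiffies(100));
}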
1816
Andrew Morton8a1c1752005-09-13 01:25:15 -07001817/*
1818 * We can use __set_current_state() here because schedule_timeout() calls
1819 * schedule() unconditionally.
1820 */
Nishanth Aravamudan64ed93a2005-09-10 00:27:21 -07001821signed long __sched schedule_timeout_interruptible(signed long timeout)
1822{
Andrew Mortona5a0d522005-10-30 15:01:42 -08001823 __set_current_state(TASK_INTERRUPTIBLE);
1824 return schedule_timeout(timeout);
Nishanth Aravamudan64ed93a2005-09-10 00:27:21 -07001825}
1826EXPORT_SYMBOL(schedule_timeout_interruptible);
1827
Matthew Wilcox294d5cc2007-12-06 11:59:46 -05001828signed long __sched schedule_timeout_killable(signed long timeout)
1829{
1830 __set_current_state(TASK_KILLABLE);
1831 return schedule_timeout(timeout);
1832}
1833EXPORT_SYMBOL(schedule_timeout_killable);
1834
Nishanth Aravamudan64ed93a2005-09-10 00:27:21 -07001835signed long __sched schedule_timeout_uninterruptible(signed long timeout)
1836{
Andrew Mortona5a0d522005-10-30 15:01:42 -08001837 __set_current_state(TASK_UNINTERRUPTIBLE);
1838 return schedule_timeout(timeout);
Nishanth Aravamudan64ed93a2005-09-10 00:27:21 -07001839}
1840EXPORT_SYMBOL(schedule_timeout_uninterruptible);
1841
Andrew Morton69b27ba2016-03-25 14:20:21 -07001842/*
1843 * Like schedule_timeout_uninterruptible(), except this task will not contribute
1844 * to load average.
1845 */
1846signed long __sched schedule_timeout_idle(signed long timeout)
1847{
1848 __set_current_state(TASK_IDLE);
1849 return schedule_timeout(timeout);
1850}
1851EXPORT_SYMBOL(schedule_timeout_idle);
1852
Linus Torvalds1da177e2005-04-16 15:20:36 -07001853#ifdef CONFIG_HOTPLUG_CPU
Viresh Kumar9536efe2015-03-25 11:47:53 +05301854static void migrate_timer_list(struct timer_base *new_base,
1855 struct hlist_head *head, bool remove_pinned)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001856{
1857 struct timer_list *timer;
Thomas Gleixner0eeda712015-05-26 22:50:29 +00001858 int cpu = new_base->cpu;
Viresh Kumar9536efe2015-03-25 11:47:53 +05301859 struct hlist_node *n;
1860 int is_pinned;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001861
Viresh Kumar9536efe2015-03-25 11:47:53 +05301862 hlist_for_each_entry_safe(timer, n, head, entry) {
1863 is_pinned = timer->flags & TIMER_PINNED;
1864 if (!remove_pinned && is_pinned)
1865 continue;
1866
1867 detach_if_pending(timer, get_timer_base(timer->flags), false);
Thomas Gleixner0eeda712015-05-26 22:50:29 +00001868 timer->flags = (timer->flags & ~TIMER_BASEMASK) | cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001869 internal_add_timer(new_base, timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001870 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001871}
1872
Thomas Gleixner249d4a92017-12-27 21:37:25 +01001873int timers_prepare_cpu(unsigned int cpu)
1874{
1875 struct timer_base *base;
1876 int b;
1877
1878 for (b = 0; b < NR_BASES; b++) {
1879 base = per_cpu_ptr(&timer_bases[b], cpu);
1880 base->clk = jiffies;
1881 base->next_expiry = base->clk + NEXT_TIMER_MAX_DELTA;
1882 base->is_idle = false;
1883 base->must_forward_clk = true;
1884 }
1885 return 0;
1886}
1887
Viresh Kumar9536efe2015-03-25 11:47:53 +05301888static void __migrate_timers(unsigned int cpu, bool remove_pinned)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001889{
Thomas Gleixner494af3e2016-07-04 09:50:28 +00001890 struct timer_base *old_base;
1891 struct timer_base *new_base;
Viresh Kumar9536efe2015-03-25 11:47:53 +05301892 unsigned long flags;
Thomas Gleixner500462a2016-07-04 09:50:30 +00001893 int b, i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001894
Thomas Gleixner500462a2016-07-04 09:50:30 +00001895 for (b = 0; b < NR_BASES; b++) {
1896 old_base = per_cpu_ptr(&timer_bases[b], cpu);
1897 new_base = get_cpu_ptr(&timer_bases[b]);
1898 /*
1899 * The caller is globally serialized and nobody else
1900		 * takes two locks at once, so deadlock is not possible.
1901 */
Viresh Kumar9536efe2015-03-25 11:47:53 +05301902 spin_lock_irqsave(&new_base->lock, flags);
Thomas Gleixner500462a2016-07-04 09:50:30 +00001903 spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
Oleg Nesterov3691c512006-03-31 02:30:30 -08001904
Vikram Mulukutla78a643e2017-04-20 17:12:58 -07001905 if (!cpu_online(cpu))
1906 BUG_ON(old_base->running_timer);
Thomas Gleixner500462a2016-07-04 09:50:30 +00001907
1908 for (i = 0; i < WHEEL_SIZE; i++)
Viresh Kumar9536efe2015-03-25 11:47:53 +05301909 migrate_timer_list(new_base, old_base->vectors + i,
1910 remove_pinned);
Thomas Gleixner500462a2016-07-04 09:50:30 +00001911
1912 spin_unlock(&old_base->lock);
Viresh Kumar9536efe2015-03-25 11:47:53 +05301913 spin_unlock_irqrestore(&new_base->lock, flags);
Thomas Gleixner500462a2016-07-04 09:50:30 +00001914 put_cpu_ptr(&timer_bases);
Oleg Nesterov55c888d2005-06-23 00:08:56 -07001915 }
Viresh Kumar9536efe2015-03-25 11:47:53 +05301916}
1917
1918int timers_dead_cpu(unsigned int cpu)
1919{
1920 BUG_ON(cpu_online(cpu));
1921 __migrate_timers(cpu, true);
Richard Cochran24f73b92016-07-13 17:16:59 +00001922 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001923}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001924
Santosh Shuklae92935e22015-03-25 16:09:32 +05301925void timer_quiesce_cpu(void *cpup)
1926{
1927 __migrate_timers(*(unsigned int *)cpup, false);
1928}
Santosh Shuklae92935e22015-03-25 16:09:32 +05301929
Peter Zijlstra3650b572015-03-31 20:49:02 +05301930#endif /* CONFIG_HOTPLUG_CPU */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001931
Thomas Gleixner0eeda712015-05-26 22:50:29 +00001932static void __init init_timer_cpu(int cpu)
Viresh Kumar8def9062015-03-31 20:49:01 +05301933{
Thomas Gleixner500462a2016-07-04 09:50:30 +00001934 struct timer_base *base;
1935 int i;
Peter Zijlstra3650b572015-03-31 20:49:02 +05301936
Thomas Gleixner500462a2016-07-04 09:50:30 +00001937 for (i = 0; i < NR_BASES; i++) {
1938 base = per_cpu_ptr(&timer_bases[i], cpu);
1939 base->cpu = cpu;
1940 spin_lock_init(&base->lock);
1941 base->clk = jiffies;
1942 }
Viresh Kumar8def9062015-03-31 20:49:01 +05301943}
1944
Kyle Yane980f1e2017-03-07 11:51:38 -08001945static inline void init_timer_deferrable_global(void)
1946{
1947 timer_base_deferrable.cpu = nr_cpu_ids;
1948 spin_lock_init(&timer_base_deferrable.lock);
1949 timer_base_deferrable.clk = jiffies;
1950}
1951
Viresh Kumar8def9062015-03-31 20:49:01 +05301952static void __init init_timer_cpus(void)
1953{
Viresh Kumar8def9062015-03-31 20:49:01 +05301954 int cpu;
1955
Kyle Yane980f1e2017-03-07 11:51:38 -08001956 init_timer_deferrable_global();
1957
Thomas Gleixner0eeda712015-05-26 22:50:29 +00001958 for_each_possible_cpu(cpu)
1959 init_timer_cpu(cpu);
Viresh Kumar8def9062015-03-31 20:49:01 +05301960}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001961
1962void __init init_timers(void)
1963{
Viresh Kumar8def9062015-03-31 20:49:01 +05301964 init_timer_cpus();
Carlos R. Mafra962cf362008-05-15 11:15:37 -03001965 open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001966}
1967
Linus Torvalds1da177e2005-04-16 15:20:36 -07001968/**
1969 * msleep - sleep safely even with waitqueue interruptions
1970 * @msecs: Time in milliseconds to sleep for
1971 */
1972void msleep(unsigned int msecs)
1973{
1974 unsigned long timeout = msecs_to_jiffies(msecs) + 1;
1975
Nishanth Aravamudan75bcc8c2005-09-10 00:27:24 -07001976 while (timeout)
1977 timeout = schedule_timeout_uninterruptible(timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001978}
1979
1980EXPORT_SYMBOL(msleep);
1981
1982/**
Domen Puncer96ec3ef2005-06-25 14:58:43 -07001983 * msleep_interruptible - sleep waiting for signals
Linus Torvalds1da177e2005-04-16 15:20:36 -07001984 * @msecs: Time in milliseconds to sleep for
1985 */
1986unsigned long msleep_interruptible(unsigned int msecs)
1987{
1988 unsigned long timeout = msecs_to_jiffies(msecs) + 1;
1989
Nishanth Aravamudan75bcc8c2005-09-10 00:27:24 -07001990 while (timeout && !signal_pending(current))
1991 timeout = schedule_timeout_interruptible(timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001992 return jiffies_to_msecs(timeout);
1993}
1994
1995EXPORT_SYMBOL(msleep_interruptible);
Patrick Pannuto5e7f5a12010-08-02 15:01:04 -07001996
Thomas Gleixner6deba082015-04-14 21:09:28 +00001997static void __sched do_usleep_range(unsigned long min, unsigned long max)
Patrick Pannuto5e7f5a12010-08-02 15:01:04 -07001998{
1999 ktime_t kmin;
John Stultzda8b44d2016-03-17 14:20:51 -07002000 u64 delta;
Patrick Pannuto5e7f5a12010-08-02 15:01:04 -07002001
2002 kmin = ktime_set(0, min * NSEC_PER_USEC);
John Stultzda8b44d2016-03-17 14:20:51 -07002003 delta = (u64)(max - min) * NSEC_PER_USEC;
Thomas Gleixner6deba082015-04-14 21:09:28 +00002004 schedule_hrtimeout_range(&kmin, delta, HRTIMER_MODE_REL);
Patrick Pannuto5e7f5a12010-08-02 15:01:04 -07002005}
2006
2007/**
Bjorn Helgaasb5227d02016-05-31 16:23:02 -05002008 * usleep_range - Sleep for an approximate time
Patrick Pannuto5e7f5a12010-08-02 15:01:04 -07002009 * @min: Minimum time in usecs to sleep
2010 * @max: Maximum time in usecs to sleep
Bjorn Helgaasb5227d02016-05-31 16:23:02 -05002011 *
2012 * In non-atomic context where the exact wakeup time is flexible, use
2013 * usleep_range() instead of udelay(). The sleep improves responsiveness
2014 * by avoiding the CPU-hogging busy-wait of udelay(), and the range reduces
2015 * power usage by allowing hrtimers to take advantage of an already-
2016 * scheduled interrupt instead of scheduling a new one just for this sleep.
Patrick Pannuto5e7f5a12010-08-02 15:01:04 -07002017 */
Thomas Gleixner2ad5d322015-04-14 21:09:30 +00002018void __sched usleep_range(unsigned long min, unsigned long max)
Patrick Pannuto5e7f5a12010-08-02 15:01:04 -07002019{
2020 __set_current_state(TASK_UNINTERRUPTIBLE);
2021 do_usleep_range(min, max);
2022}
2023EXPORT_SYMBOL(usleep_range);
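
/*
 * Illustrative sketch, not part of this file: picking between the
 * sleep primitives above, following the guidance in
 * Documentation/timers/timers-howto.txt.
 */
static void __maybe_unused example_sleeps(void)
{
	usleep_range(50, 100);	/* usecs to small msecs: hrtimer-backed */
	msleep(20);		/* ~10ms and up: jiffy resolution is fine */
}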