/*
 * linux/kernel/time/tick-broadcast.c
 *
 * This file contains functions which emulate a local clock-event
 * device via a broadcast event source.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licensed under the GPL version 2. For details see
 * kernel-base/COPYING.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/smp.h>

#include "tick-internal.h"

/*
 * Broadcast support for broken x86 hardware, where the local apic
 * timer stops in C3 state.
 */

static struct tick_device tick_broadcast_device;
static cpumask_var_t tick_broadcast_mask;
static cpumask_var_t tmpmask;
static DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
static int tick_broadcast_force;

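/*
 * All of the state above is serialized by tick_broadcast_lock. It is a
 * raw spinlock because it is taken from interrupt and deep idle
 * context, where sleeping locks are not allowed.
 */
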
#ifdef CONFIG_TICK_ONESHOT
static void tick_broadcast_clear_oneshot(int cpu);
#else
static inline void tick_broadcast_clear_oneshot(int cpu) { }
#endif

/*
 * Debugging: see timer_list.c
 */
struct tick_device *tick_get_broadcast_device(void)
{
	return &tick_broadcast_device;
}

struct cpumask *tick_get_broadcast_mask(void)
{
	return tick_broadcast_mask;
}

/*
 * Start the device in periodic mode
 */
static void tick_broadcast_start_periodic(struct clock_event_device *bc)
{
	if (bc)
		tick_setup_periodic(bc, 1);
}

/*
 * Check if the device can be utilized as the broadcast device:
 */
int tick_check_broadcast_device(struct clock_event_device *dev)
{
	if ((tick_broadcast_device.evtdev &&
	     tick_broadcast_device.evtdev->rating >= dev->rating) ||
	     (dev->features & CLOCK_EVT_FEAT_C3STOP))
		return 0;

	clockevents_exchange_device(tick_broadcast_device.evtdev, dev);
	tick_broadcast_device.evtdev = dev;
	if (!cpumask_empty(tick_broadcast_mask))
		tick_broadcast_start_periodic(dev);
	return 1;
}

/*
 * Check if the device is the broadcast device
 */
int tick_is_broadcast_device(struct clock_event_device *dev)
{
	return (dev && tick_broadcast_device.evtdev == dev);
}

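/*
 * Fallback broadcast function, used when a device depends on broadcast
 * but no real broadcast function is available.
 */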
static void err_broadcast(const struct cpumask *mask)
{
	pr_crit_once("Failed to broadcast timer tick. Some CPUs may be unresponsive.\n");
}

static void tick_device_setup_broadcast_func(struct clock_event_device *dev)
{
	if (!dev->broadcast)
		dev->broadcast = tick_broadcast;
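	/*
	 * tick_broadcast itself resolves to NULL when the architecture
	 * provides no generic broadcast implementation, hence the
	 * second check below.
	 */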
	if (!dev->broadcast) {
		pr_warn_once("%s depends on broadcast, but no broadcast function available\n",
			     dev->name);
		dev->broadcast = err_broadcast;
	}
}

/*
 * Check if the device is dysfunctional and a placeholder, which
 * needs to be handled by the broadcast device.
 */
int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
{
	unsigned long flags;
	int ret = 0;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Devices might be registered with both periodic and oneshot
	 * mode disabled. This signals that the device needs to be
	 * operated from the broadcast device and is a placeholder for
	 * the cpu local device.
	 */
	if (!tick_device_is_functional(dev)) {
		dev->event_handler = tick_handle_periodic;
		tick_device_setup_broadcast_func(dev);
		cpumask_set_cpu(cpu, tick_broadcast_mask);
		tick_broadcast_start_periodic(tick_broadcast_device.evtdev);
		ret = 1;
	} else {
		/*
		 * When the new device is not affected by the stop
		 * feature and the cpu is marked in the broadcast mask,
		 * then clear the broadcast bit.
		 */
		if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
			int cpu = smp_processor_id();
			cpumask_clear_cpu(cpu, tick_broadcast_mask);
			tick_broadcast_clear_oneshot(cpu);
		} else {
			tick_device_setup_broadcast_func(dev);
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
	return ret;
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
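/*
 * Called from the broadcast IPI handler on the target cpu to invoke the
 * cpu local clockevent handler, as if the local timer had fired.
 */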
int tick_receive_broadcast(void)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
	struct clock_event_device *evt = td->evtdev;

	if (!evt)
		return -ENODEV;

	if (!evt->event_handler)
		return -EINVAL;

	evt->event_handler(evt);
	return 0;
}
#endif

/*
 * Broadcast the event to the cpus which are set in the mask. The mask
 * is mangled in the process.
 */
static void tick_do_broadcast(struct cpumask *mask)
{
	int cpu = smp_processor_id();
	struct tick_device *td;

	/*
	 * Check if the current cpu is in the mask. If so, handle its
	 * event directly instead of sending an IPI to ourselves.
	 */
	if (cpumask_test_cpu(cpu, mask)) {
		cpumask_clear_cpu(cpu, mask);
		td = &per_cpu(tick_cpu_device, cpu);
		td->evtdev->event_handler(td->evtdev);
	}

	if (!cpumask_empty(mask)) {
		/*
		 * It might be necessary to actually check whether the devices
		 * have different broadcast functions. For now, just use the
		 * broadcast function of the first device. This works as long
		 * as we have this misfeature only on x86 (lapic).
		 */
		td = &per_cpu(tick_cpu_device, cpumask_first(mask));
		td->evtdev->broadcast(mask);
	}
}

/*
 * Periodic broadcast:
 * - invoke the broadcast handlers
 */
static void tick_do_periodic_broadcast(void)
{
	raw_spin_lock(&tick_broadcast_lock);

	cpumask_and(tmpmask, cpu_online_mask, tick_broadcast_mask);
	tick_do_broadcast(tmpmask);

	raw_spin_unlock(&tick_broadcast_lock);
}

/*
 * Event handler for periodic broadcast ticks
 */
static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
{
	ktime_t next;

	tick_do_periodic_broadcast();

	/*
	 * The device is in periodic mode. No reprogramming necessary:
	 */
	if (dev->mode == CLOCK_EVT_MODE_PERIODIC)
		return;

	/*
	 * Set up the next period for devices which do not have
	 * periodic mode. We read dev->next_event first and add to it
	 * when the event has already expired. clockevents_program_event()
	 * sets dev->next_event only when the event is really
	 * programmed to the device.
	 */
	for (next = dev->next_event; ;) {
		next = ktime_add(next, tick_period);

		if (!clockevents_program_event(dev, next, false))
			return;
		tick_do_periodic_broadcast();
	}
}

/*
 * Powerstate information: The system enters/leaves a state where
 * affected devices might stop
 */
static void tick_do_broadcast_on_off(unsigned long *reason)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	unsigned long flags;
	int cpu, bc_stopped;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	dev = td->evtdev;
	bc = tick_broadcast_device.evtdev;

	/*
	 * Is the device not affected by the powerstate?
	 */
	if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP))
		goto out;

	if (!tick_device_is_functional(dev))
		goto out;

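	/*
	 * Sample whether the broadcast device was stopped before the
	 * mask is modified, so it is started or stopped below only on
	 * an empty <-> non-empty transition.
	 */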
	bc_stopped = cpumask_empty(tick_broadcast_mask);

	switch (*reason) {
	case CLOCK_EVT_NOTIFY_BROADCAST_ON:
	case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) {
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				clockevents_shutdown(dev);
		}
		if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE)
			tick_broadcast_force = 1;
		break;
	case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
		if (!tick_broadcast_force &&
		    cpumask_test_and_clear_cpu(cpu, tick_broadcast_mask)) {
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				tick_setup_periodic(dev, 0);
		}
		break;
	}

	if (cpumask_empty(tick_broadcast_mask)) {
		if (!bc_stopped)
			clockevents_shutdown(bc);
	} else if (bc_stopped) {
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
			tick_broadcast_start_periodic(bc);
		else
			tick_broadcast_setup_oneshot(bc);
	}
out:
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Powerstate information: The system enters/leaves a state where
 * affected devices might stop.
 */
void tick_broadcast_on_off(unsigned long reason, int *oncpu)
{
	if (!cpumask_test_cpu(*oncpu, cpu_online_mask))
		printk(KERN_ERR "tick-broadcast: ignoring broadcast for "
		       "offline CPU #%d\n", *oncpu);
	else
		tick_do_broadcast_on_off(&reason);
}

/*
 * Set the periodic handler depending on broadcast on/off
 */
void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
{
	if (!broadcast)
		dev->event_handler = tick_handle_periodic;
	else
		dev->event_handler = tick_handle_periodic_broadcast;
}

/*
 * Remove a CPU from broadcasting
 */
void tick_shutdown_broadcast(unsigned int *cpup)
{
	struct clock_event_device *bc;
	unsigned long flags;
	unsigned int cpu = *cpup;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	cpumask_clear_cpu(cpu, tick_broadcast_mask);

	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
		if (bc && cpumask_empty(tick_broadcast_mask))
			clockevents_shutdown(bc);
	}

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

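/*
 * Shut the broadcast device down on system suspend; it is restarted by
 * tick_resume_broadcast() on resume.
 */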
void tick_suspend_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	if (bc)
		clockevents_shutdown(bc);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

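/*
 * Restart the broadcast device on resume. The return value tells the
 * caller whether the current cpu is handled by the broadcast device,
 * so it can decide whether the cpu local device needs to be resumed
 * as well.
 */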
int tick_resume_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;
	int broadcast = 0;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;

	if (bc) {
		clockevents_set_mode(bc, CLOCK_EVT_MODE_RESUME);

		switch (tick_broadcast_device.mode) {
		case TICKDEV_MODE_PERIODIC:
			if (!cpumask_empty(tick_broadcast_mask))
				tick_broadcast_start_periodic(bc);
			broadcast = cpumask_test_cpu(smp_processor_id(),
						     tick_broadcast_mask);
			break;
		case TICKDEV_MODE_ONESHOT:
			if (!cpumask_empty(tick_broadcast_mask))
				broadcast = tick_resume_broadcast_oneshot(bc);
			break;
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);

	return broadcast;
}


#ifdef CONFIG_TICK_ONESHOT

static cpumask_var_t tick_broadcast_oneshot_mask;
static cpumask_var_t tick_broadcast_pending_mask;

/*
 * Exposed for debugging: see timer_list.c
 */
struct cpumask *tick_get_broadcast_oneshot_mask(void)
{
	return tick_broadcast_oneshot_mask;
}

/*
 * Set broadcast interrupt affinity
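 *
 * For CLOCK_EVT_FEAT_DYNIRQ capable devices the broadcast interrupt is
 * steered to the cpu whose event expires first, so that cpu is woken
 * by the interrupt itself and no extra IPI is needed.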
 */
static void tick_broadcast_set_affinity(struct clock_event_device *bc,
					const struct cpumask *cpumask)
{
	if (!(bc->features & CLOCK_EVT_FEAT_DYNIRQ))
		return;

	if (cpumask_equal(bc->cpumask, cpumask))
		return;

	bc->cpumask = cpumask;
	irq_set_affinity(bc->irq, bc->cpumask);
}

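/*
 * Program the broadcast device to fire at @expires on behalf of @cpu
 * and, on success, steer the broadcast interrupt towards that cpu.
 */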
static int tick_broadcast_set_event(struct clock_event_device *bc, int cpu,
				    ktime_t expires, int force)
{
	int ret;

	if (bc->mode != CLOCK_EVT_MODE_ONESHOT)
		clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);

	ret = clockevents_program_event(bc, expires, force);
	if (!ret)
		tick_broadcast_set_affinity(bc, cpumask_of(cpu));
	return ret;
}

int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
{
	clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
	return 0;
}

/*
 * Called from irq_enter() when idle was interrupted to re-enable the
 * per cpu device.
 */
void tick_check_oneshot_broadcast(int cpu)
{
	if (cpumask_test_cpu(cpu, tick_broadcast_oneshot_mask)) {
		struct tick_device *td = &per_cpu(tick_cpu_device, cpu);

		clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_ONESHOT);
	}
}

/*
 * Handle oneshot mode broadcasting
 */
static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
{
	struct tick_device *td;
	ktime_t now, next_event;
	int cpu, next_cpu = 0;

	raw_spin_lock(&tick_broadcast_lock);
again:
	dev->next_event.tv64 = KTIME_MAX;
	next_event.tv64 = KTIME_MAX;
	cpumask_clear(tmpmask);
	now = ktime_get();
	/* Find all expired events */
	for_each_cpu(cpu, tick_broadcast_oneshot_mask) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev->next_event.tv64 <= now.tv64) {
			cpumask_set_cpu(cpu, tmpmask);
			/*
			 * Mark the remote cpu in the pending mask, so
			 * it can avoid reprogramming the cpu local
			 * timer in tick_broadcast_oneshot_control().
			 */
			cpumask_set_cpu(cpu, tick_broadcast_pending_mask);
		} else if (td->evtdev->next_event.tv64 < next_event.tv64) {
			next_event.tv64 = td->evtdev->next_event.tv64;
			next_cpu = cpu;
		}
	}

	/*
	 * Wake up the cpus which have an expired event.
	 */
	tick_do_broadcast(tmpmask);

	/*
	 * Two reasons for reprogramming:
	 *
	 * - The global event did not expire any CPU local
	 * events. This happens in dyntick mode, as the maximum PIT
	 * delta is quite small.
	 *
	 * - There are pending events on sleeping CPUs which were not
	 * in the event mask
	 */
	if (next_event.tv64 != KTIME_MAX) {
		/*
		 * Rearm the broadcast device. If the event has already
		 * expired, repeat the above.
		 */
		if (tick_broadcast_set_event(dev, next_cpu, next_event, 0))
			goto again;
	}
	raw_spin_unlock(&tick_broadcast_lock);
}

/*
 * Powerstate information: The system enters/leaves a state where
 * affected devices might stop
 */
void tick_broadcast_oneshot_control(unsigned long reason)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	unsigned long flags;
	int cpu;

	/*
	 * Periodic mode does not care about the enter/exit of power
	 * states
	 */
	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
		return;

	/*
	 * We are called with preemption disabled from the depth of the
	 * idle code, so we can't be moved away.
	 */
	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	dev = td->evtdev;

	if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
		return;

	bc = tick_broadcast_device.evtdev;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
	if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) {
		WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));
		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) {
			clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
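			/*
			 * The local device stops in deep idle. Pull the
			 * broadcast event forward when this cpu's next
			 * event is due before the currently programmed
			 * broadcast event.
			 */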
			if (dev->next_event.tv64 < bc->next_event.tv64)
				tick_broadcast_set_event(bc, cpu, dev->next_event, 1);
		}
	} else {
		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {
			clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
			if (dev->next_event.tv64 == KTIME_MAX)
				goto out;
			/*
			 * The cpu which was handling the broadcast
			 * timer marked this cpu in the broadcast
			 * pending mask and fired the broadcast
			 * IPI. So we are going to handle the expired
			 * event anyway via the broadcast IPI
			 * handler. No need to reprogram the timer
			 * with an already expired event.
			 */
			if (cpumask_test_and_clear_cpu(cpu,
						       tick_broadcast_pending_mask))
				goto out;

			tick_program_event(dev->next_event, 1);
		}
	}
out:
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Reset the one shot broadcast for a cpu
 *
 * Called with tick_broadcast_lock held
 */
static void tick_broadcast_clear_oneshot(int cpu)
{
	cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
}

static void tick_broadcast_init_next_event(struct cpumask *mask,
					   ktime_t expires)
{
	struct tick_device *td;
	int cpu;

	for_each_cpu(cpu, mask) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev)
			td->evtdev->next_event = expires;
	}
}

/**
 * tick_broadcast_setup_oneshot - setup the broadcast device
 */
void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
	int cpu = smp_processor_id();

	/* Set it up only once! */
	if (bc->event_handler != tick_handle_oneshot_broadcast) {
		int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC;

		bc->event_handler = tick_handle_oneshot_broadcast;

		/* Take the do_timer update */
		tick_do_timer_cpu = cpu;

		/*
		 * We must be careful here. There might be other CPUs
		 * waiting for periodic broadcast. We need to set the
		 * oneshot_mask bits for those and program the
		 * broadcast device to fire.
		 */
		cpumask_copy(tmpmask, tick_broadcast_mask);
		cpumask_clear_cpu(cpu, tmpmask);
		cpumask_or(tick_broadcast_oneshot_mask,
			   tick_broadcast_oneshot_mask, tmpmask);

		if (was_periodic && !cpumask_empty(tmpmask)) {
			clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
			tick_broadcast_init_next_event(tmpmask,
						       tick_next_period);
			tick_broadcast_set_event(bc, cpu, tick_next_period, 1);
		} else
			bc->next_event.tv64 = KTIME_MAX;
	} else {
		/*
		 * The first cpu which switches to oneshot mode sets
		 * the bit for all other cpus which are in the general
		 * (periodic) broadcast mask. So the bit is set and
		 * would prevent the first broadcast enter after this
		 * from programming the bc device.
		 */
		tick_broadcast_clear_oneshot(cpu);
	}
}

/*
 * Select oneshot operating mode for the broadcast device
 */
void tick_broadcast_switch_to_oneshot(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
	bc = tick_broadcast_device.evtdev;
	if (bc)
		tick_broadcast_setup_oneshot(bc);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}


/*
 * Remove a dead CPU from broadcasting
 */
void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
{
	unsigned long flags;
	unsigned int cpu = *cpup;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Clear the broadcast mask flag for the dead cpu, but do not
	 * stop the broadcast device!
	 */
	cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Check whether the broadcast device is in oneshot mode
 */
int tick_broadcast_oneshot_active(void)
{
	return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
}

/*
 * Check whether the broadcast device supports oneshot.
 */
bool tick_broadcast_oneshot_available(void)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;

	return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
}

#endif

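/*
 * Allocate the cpumasks at early boot. GFP_NOWAIT is used because this
 * runs during early init, where sleeping allocations are not allowed.
 */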
void __init tick_broadcast_init(void)
{
	alloc_cpumask_var(&tick_broadcast_mask, GFP_NOWAIT);
	alloc_cpumask_var(&tmpmask, GFP_NOWAIT);
#ifdef CONFIG_TICK_ONESHOT
	alloc_cpumask_var(&tick_broadcast_oneshot_mask, GFP_NOWAIT);
	alloc_cpumask_var(&tick_broadcast_pending_mask, GFP_NOWAIT);
#endif
}