/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */
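
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): a caller embeds a struct irq_work, points it at a callback and
 * queues it from NMI or hardirq context; the callback later runs from the
 * irq_work interrupt, or from the timer tick on architectures without one.
 * The struct and irq_work_queue()/irq_work_sync() come from
 * <linux/irq_work.h>; "report_event" and "my_nmi_handler" are made-up names.
 *
 *	static void report_event(struct irq_work *work)
 *	{
 *		pr_info("deferred from NMI\n");
 *	}
 *
 *	static struct irq_work report_work = {
 *		.func = report_event,		// or init_irq_work() at runtime
 *	};
 *
 *	static void my_nmi_handler(void)
 *	{
 *		irq_work_queue(&report_work);	// NMI-safe; no-op if already pending
 *	}
 */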

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <asm/processor.h>


static DEFINE_PER_CPU(struct llist_head, irq_work_list);
static DEFINE_PER_CPU(int, irq_work_raised);

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{
	unsigned long flags, oflags, nflags;

	/*
	 * Start from the current flags as our best guess, but only
	 * trust a flag value once cmpxchg() has confirmed it.
	 */
	flags = work->flags & ~IRQ_WORK_PENDING;
	for (;;) {
		nflags = flags | IRQ_WORK_FLAGS;
		oflags = cmpxchg(&work->flags, flags, nflags);
		if (oflags == flags)
			break;
		if (oflags & IRQ_WORK_PENDING)
			return false;
		flags = oflags;
		cpu_relax();
	}

	return true;
}
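
/*
 * Editor's sketch of the work->flags life cycle implied by irq_work_claim()
 * above and __irq_work_run() below.  The PENDING/BUSY/LAZY bits and the
 * IRQ_WORK_FLAGS (= PENDING | BUSY) mask come from <linux/irq_work.h>;
 * IRQ_WORK_LAZY, if the caller set it, is left untouched throughout:
 *
 *	0 (free)
 *	  -- irq_work_claim(): cmpxchg() sets PENDING | BUSY -->
 *	PENDING | BUSY (queued; further claims fail while PENDING is set,
 *	                which is what makes double-queueing harmless)
 *	  -- __irq_work_run(): xchg() clears PENDING just before ->func() -->
 *	BUSY (callback running; the work may be claimed and queued again)
 *	  -- __irq_work_run(): cmpxchg() clears BUSY, unless re-claimed -->
 *	0 (free)
 */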

void __weak arch_irq_work_raise(void)
{
	/*
	 * Lame architectures will get the timer tick callback
	 */
}

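/*
 * Editor's note: architectures with a usable self-interrupt override the
 * weak hook above.  A hypothetical override might look like the sketch
 * below ("trigger_self_interrupt" and "my_arch_irq_work_handler" are
 * made-up names); the only hard requirement visible from this file is that
 * the handler ends up calling irq_work_run() from hardirq context with
 * local IRQs disabled:
 *
 *	void arch_irq_work_raise(void)
 *	{
 *		trigger_self_interrupt();
 *	}
 *
 *	void my_arch_irq_work_handler(void)	// hardirq context
 *	{
 *		irq_work_run();
 *	}
 *
 * Architectures without such an interrupt fall back to running pending
 * work from the timer tick, which is why irq_work_queue() below may simply
 * wait for the next tick.
 */
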
/*
 * Enqueue the irq_work @work unless it's already pending
 * somewhere.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue(struct irq_work *work)
{
	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	/* Queue the entry and raise the IPI if needed. */
	preempt_disable();

	llist_add(&work->llnode, &__get_cpu_var(irq_work_list));

	/*
	 * If the work is not "lazy" or the tick is stopped, raise the irq
	 * work interrupt (if supported by the arch), otherwise, just wait
	 * for the next tick.
	 */
	if (!(work->flags & IRQ_WORK_LAZY) || tick_nohz_tick_stopped()) {
		if (!this_cpu_cmpxchg(irq_work_raised, 0, 1))
			arch_irq_work_raise();
	}

	preempt_enable();

	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);
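
/*
 * Editor's sketch of a "lazy" user (not from the original file): setting
 * IRQ_WORK_LAZY in the initial flags asks irq_work_queue() to skip the
 * arch interrupt and let the work ride on the next timer tick, unless the
 * tick is already stopped.  "my_lazy_callback" is a made-up name:
 *
 *	static struct irq_work lazy_work = {
 *		.func	= my_lazy_callback,
 *		.flags	= IRQ_WORK_LAZY,
 *	};
 *
 *	irq_work_queue(&lazy_work);	// no arch_irq_work_raise() while the tick runs
 */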

bool irq_work_needs_cpu(void)
{
	struct llist_head *this_list;

	this_list = &__get_cpu_var(irq_work_list);
	if (llist_empty(this_list))
		return false;

	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));

	return true;
}
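
/*
 * Editor's note: irq_work_needs_cpu() lets code outside this file (the
 * nohz tick-stop path, judging by the tick_nohz_tick_stopped() coupling in
 * irq_work_queue() above) check whether this CPU still has irq work
 * queued, so the tick is not stopped while lazy work is waiting for it.
 */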

static void __irq_work_run(void)
{
	unsigned long flags;
	struct irq_work *work;
	struct llist_head *this_list;
	struct llist_node *llnode;


	/*
	 * Reset the "raised" state right before we check the list because
	 * an NMI may enqueue after we find the list empty from the runner.
	 */
	__this_cpu_write(irq_work_raised, 0);
	barrier();

	this_list = &__get_cpu_var(irq_work_list);
	if (llist_empty(this_list))
		return;

	BUG_ON(!irqs_disabled());

	llnode = llist_del_all(this_list);
	while (llnode != NULL) {
		work = llist_entry(llnode, struct irq_work, llnode);

		llnode = llist_next(llnode);

		/*
		 * Clear the PENDING bit; after this point the @work
		 * can be re-used.
		 * Make it immediately visible so that other CPUs trying
		 * to claim that work don't rely on us to handle their data
		 * while we are in the middle of the func.
		 */
		flags = work->flags & ~IRQ_WORK_PENDING;
		xchg(&work->flags, flags);

		work->func(work);
		/*
		 * Clear the BUSY bit and return to the free state if
		 * no-one else claimed it meanwhile.
		 */
		(void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
	}
}

/*
 * Run the irq_work entries on this cpu. Must be called from hardirq
 * context with local IRQs disabled.
 */
void irq_work_run(void)
{
	BUG_ON(!in_irq());
	__irq_work_run();
}
EXPORT_SYMBOL_GPL(irq_work_run);

/*
 * Synchronize against the irq_work @work: ensure the work is not
 * currently in use.
 */
void irq_work_sync(struct irq_work *work)
{
	WARN_ON_ONCE(irqs_disabled());

	while (work->flags & IRQ_WORK_BUSY)
		cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);
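
/*
 * Editor's sketch (not from the original file) of the typical reason to
 * call irq_work_sync(): before freeing or reusing the memory that embeds
 * the work, make sure no CPU is still using it.  It busy-waits, so it must
 * be called with IRQs enabled, per the WARN_ON_ONCE() above.  "my_dev" and
 * its "work" member are made-up names:
 *
 *	static void my_dev_teardown(struct my_dev *dev)
 *	{
 *		irq_work_sync(&dev->work);	// wait for a queued/running callback
 *		kfree(dev);
 *	}
 */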

#ifdef CONFIG_HOTPLUG_CPU
static int irq_work_cpu_notify(struct notifier_block *self,
			       unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_DYING:
		/* Called from stop_machine */
		if (WARN_ON_ONCE(cpu != smp_processor_id()))
			break;
		__irq_work_run();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}
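
/*
 * Editor's note: CPU_DYING is delivered on the dying CPU itself (hence the
 * smp_processor_id() check above) while stop_machine keeps interrupts off,
 * so __irq_work_run() can be called directly here to flush anything still
 * queued before the CPU goes away.  irq_work_needs_cpu() above warns if
 * work is somehow found queued after the CPU has gone offline.
 */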

static struct notifier_block cpu_notify;

static __init int irq_work_init_cpu_notifier(void)
{
	cpu_notify.notifier_call = irq_work_cpu_notify;
	cpu_notify.priority = 0;
	register_cpu_notifier(&cpu_notify);
	return 0;
}
device_initcall(irq_work_init_cpu_notifier);

#endif /* CONFIG_HOTPLUG_CPU */