/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

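/*
 * Example usage (an illustrative sketch; assumes init_irq_work() from
 * <linux/irq_work.h> and a callback that is safe to run from hardirq
 * context):
 *
 *	static void my_callback(struct irq_work *work)
 *	{
 *		pr_info("irq_work callback ran\n");
 *	}
 *
 *	static struct irq_work my_work;
 *
 *	init_irq_work(&my_work, my_callback);
 *	irq_work_queue(&my_work);
 *
 * irq_work_queue() is safe to call even from NMI context.
 */
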
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <asm/processor.h>

/*
 * An entry can be in one of four states:
 *
 * free      NULL, 0 -> {claimed}       : free to be used
 * claimed   NULL, 3 -> {pending}       : claimed to be enqueued
 * pending   next, 3 -> {busy}          : queued, pending callback
 * busy      NULL, 2 -> {free, claimed} : callback in progress, can be claimed
 */

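/* Flag bits; IRQ_WORK_FLAGS is PENDING | BUSY, the "claimed" combination. */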
#define IRQ_WORK_PENDING	1UL
#define IRQ_WORK_BUSY		2UL
#define IRQ_WORK_FLAGS		3UL

static DEFINE_PER_CPU(struct llist_head, irq_work_list);

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{
	unsigned long flags, nflags;

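	/*
	 * Try to move the entry to the claimed state by setting PENDING
	 * and BUSY together; a PENDING bit that is already set means the
	 * entry is owned by someone else and is, or will be, queued.
	 */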
	for (;;) {
		flags = work->flags;
		if (flags & IRQ_WORK_PENDING)
			return false;
		nflags = flags | IRQ_WORK_FLAGS;
		if (cmpxchg(&work->flags, flags, nflags) == flags)
			break;
		cpu_relax();
	}

	return true;
}

void __weak arch_irq_work_raise(void)
{
	/*
	 * Lame architectures will get the timer tick callback instead:
	 * without a self-interrupt, queued work is only picked up on the
	 * next timer tick.
	 */
}

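/*
 * A minimal sketch of an architecture override (illustrative only;
 * send_self_ipi() and IRQ_WORK_VECTOR stand in for whatever self-IPI
 * primitive the architecture actually provides):
 *
 *	void arch_irq_work_raise(void)
 *	{
 *		send_self_ipi(IRQ_WORK_VECTOR);
 *	}
 */
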
/*
 * Queue the entry and raise the IPI if needed.
 */
static void __irq_work_queue(struct irq_work *work)
{
	bool empty;

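	/*
	 * Disable preemption so the list we add to and the CPU we raise
	 * the self-interrupt on are guaranteed to be the same.
	 */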
	preempt_disable();

	empty = llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
	/* The list was empty, raise self-interrupt to start processing. */
	if (empty)
		arch_irq_work_raise();

	preempt_enable();
}

/*
 * Enqueue the irq_work @entry; returns true on success, false when the
 * @entry was already enqueued by someone else.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue(struct irq_work *work)
{
	if (!irq_work_claim(work)) {
		/*
		 * Already enqueued, can't do!
		 */
		return false;
	}

	__irq_work_queue(work);
	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);

/*
 * Run the irq_work entries on this cpu. Must be called from hardirq
 * context with local IRQs disabled.
 */
void irq_work_run(void)
{
	struct irq_work *work;
	struct llist_head *this_list;
	struct llist_node *llnode;

	this_list = &__get_cpu_var(irq_work_list);
	if (llist_empty(this_list))
		return;

	BUG_ON(!in_irq());
	BUG_ON(!irqs_disabled());

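	/*
	 * Detach the whole list at once; work queued after this point
	 * finds an empty list again and raises a fresh self-interrupt.
	 */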
	llnode = llist_del_all(this_list);
	while (llnode != NULL) {
		work = llist_entry(llnode, struct irq_work, llnode);

		llnode = llist_next(llnode);

		/*
		 * Clear the PENDING bit, after this point the @work
		 * can be re-used.
		 */
		work->flags = IRQ_WORK_BUSY;
		work->func(work);
		/*
		 * Clear the BUSY bit and return to the free state if
		 * no-one else claimed it meanwhile.
		 */
		(void)cmpxchg(&work->flags, IRQ_WORK_BUSY, 0);
	}
}
EXPORT_SYMBOL_GPL(irq_work_run);

/*
 * Synchronize against the irq_work @entry; ensures the entry is not
 * currently in use.
 */
void irq_work_sync(struct irq_work *work)
{
	WARN_ON_ONCE(irqs_disabled());

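	/*
	 * Busy-wait for the callback to complete; with IRQs disabled the
	 * work could never run on this CPU, hence the warning above.
	 */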
	while (work->flags & IRQ_WORK_BUSY)
		cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);