/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/processor.h>

/*
 * An entry can be in one of four states. Each line below gives the
 * state's (llnode.next, flags) pair and the states it can move to:
 *
 * free	     NULL, 0 -> {claimed}       : free to be used
 * claimed   NULL, 3 -> {pending}       : claimed to be enqueued
 * pending   next, 3 -> {busy}          : queued, pending callback
 * busy      NULL, 2 -> {free, claimed} : callback in progress, can be claimed
 */

#define IRQ_WORK_PENDING	1UL
#define IRQ_WORK_BUSY		2UL
#define IRQ_WORK_FLAGS		3UL

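/*
 * Typical flags lifecycle, traced from the code below: irq_work_claim()
 * moves a free entry from 0 to 3 (PENDING | BUSY); irq_work_run() drops
 * PENDING (3 -> 2) before invoking the callback, then clears BUSY
 * (2 -> 0) afterwards, unless the callback re-claimed the entry.
 */
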
static DEFINE_PER_CPU(struct llist_head, irq_work_list);

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{
	unsigned long flags, nflags;

	for (;;) {
		flags = work->flags;
		if (flags & IRQ_WORK_PENDING)
			return false;
		nflags = flags | IRQ_WORK_FLAGS;
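		/* Atomically mark it PENDING and BUSY; retry on a race. */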
		if (cmpxchg(&work->flags, flags, nflags) == flags)
			break;
		cpu_relax();
	}

	return true;
}

void __weak arch_irq_work_raise(void)
{
	/*
	 * Architectures that cannot raise a self-interrupt fall back to
	 * having the queued work run from the timer tick.
	 */
}

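/*
 * A sketch of what an architecture override might look like (illustrative
 * only; the IPI helper and vector name below are made up, not real APIs):
 *
 *	void arch_irq_work_raise(void)
 *	{
 *		arch_send_self_ipi(IRQ_WORK_VECTOR);
 *	}
 *
 * The handler for that self-interrupt then calls irq_work_run().
 */
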
/*
 * Queue the entry and raise the IPI if needed.
 */
static void __irq_work_queue(struct irq_work *work)
{
	bool empty;

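	/*
	 * Disabling preemption pins us to one CPU, so the list we add to
	 * and the interrupt we raise both target the same CPU.
	 */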
	preempt_disable();

	empty = llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
	/* The list was empty, raise self-interrupt to start processing. */
	if (empty)
		arch_irq_work_raise();

	preempt_enable();
}

/*
 * Enqueue the irq_work @work. Returns true on success, false when
 * @work was already enqueued by someone else.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue(struct irq_work *work)
{
	if (!irq_work_claim(work)) {
		/*
		 * Already enqueued, can't do!
		 */
		return false;
	}

	__irq_work_queue(work);
	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);
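
/*
 * Example usage (an illustrative sketch, assuming the init_irq_work()
 * helper from <linux/irq_work.h>; my_work and my_func are made-up names):
 *
 *	static void my_func(struct irq_work *work)
 *	{
 *		pr_info("irq_work callback ran\n");
 *	}
 *
 *	static struct irq_work my_work;
 *
 *	init_irq_work(&my_work, my_func);
 *	irq_work_queue(&my_work);	// NMI-safe enqueue
 */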

/*
 * Run the irq_work entries on this cpu. Must be called from hardirq
 * context with local IRQs disabled.
 */
void irq_work_run(void)
{
	struct irq_work *work;
	struct llist_head *this_list;
	struct llist_node *llnode;

	this_list = &__get_cpu_var(irq_work_list);
	if (llist_empty(this_list))
		return;

	BUG_ON(!in_irq());
	BUG_ON(!irqs_disabled());

	llnode = llist_del_all(this_list);
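	/*
	 * The list is now detached: entries queued from here on start a
	 * fresh list and raise a new self-interrupt of their own.
	 */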
	while (llnode != NULL) {
		work = llist_entry(llnode, struct irq_work, llnode);

		llnode = llist_next(llnode);

		/*
		 * Clear the PENDING bit, after this point the @work
		 * can be re-used.
		 */
		work->flags = IRQ_WORK_BUSY;
		work->func(work);
		/*
		 * Clear the BUSY bit and return to the free state if
		 * no-one else claimed it meanwhile.
		 */
		(void)cmpxchg(&work->flags, IRQ_WORK_BUSY, 0);
	}
}
EXPORT_SYMBOL_GPL(irq_work_run);

/*
 * Synchronize against the irq_work @work; ensures @work is not
 * currently in use.
 */
void irq_work_sync(struct irq_work *work)
{
	WARN_ON_ONCE(irqs_disabled());

	while (work->flags & IRQ_WORK_BUSY)
		cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);
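
/*
 * Example (illustrative; obj, its work member and the kfree() of the
 * embedding structure are made-up context): wait for a work item to go
 * idle before freeing the object that embeds it:
 *
 *	irq_work_sync(&obj->work);
 *	kfree(obj);
 */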