/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/irq_work.h>
#include <linux/hardirq.h>

/*
 * An entry can be in one of four states:
 *
 * free	     NULL, 0 -> {claimed}       : free to be used
 * claimed   NULL, 3 -> {pending}       : claimed to be enqueued
 * pending   next, 3 -> {busy}          : queued, pending callback
 * busy      NULL, 2 -> {free, claimed} : callback in progress, can be claimed
 */

#define IRQ_WORK_PENDING	1UL
#define IRQ_WORK_BUSY		2UL
#define IRQ_WORK_FLAGS		3UL

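/*
 * Example of a normal queue/run cycle in terms of the flag values defined
 * above (a restatement of the state table, nothing more):
 *
 *	0 (free)    --irq_work_claim(): cmpxchg(0, 3)--------> 3 (claimed)
 *	3 (claimed) --__irq_work_queue(): llist_add()--------> 3 (pending)
 *	3 (pending) --irq_work_run(): flags = IRQ_WORK_BUSY--> 2 (busy)
 *	2 (busy)    --after ->func(): cmpxchg(2, 0)----------> 0 (free)
 */
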
static DEFINE_PER_CPU(struct llist_head, irq_work_list);

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{
	unsigned long flags, nflags;

	for (;;) {
		flags = work->flags;
		if (flags & IRQ_WORK_PENDING)
			return false;	/* already claimed or queued */
		nflags = flags | IRQ_WORK_FLAGS;
		/* Set both PENDING and BUSY; retry if flags changed under us. */
		if (cmpxchg(&work->flags, flags, nflags) == flags)
			break;
		cpu_relax();
	}

	return true;
}
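
/*
 * Note the busy -> claimed transition this loop allows: while the callback
 * runs, work->flags == IRQ_WORK_BUSY, so IRQ_WORK_PENDING is clear and a
 * concurrent irq_work_claim() can cmpxchg() 2 -> 3. The final cmpxchg(2, 0)
 * in irq_work_run() then fails, leaving the work claimed instead of freeing
 * it; this is what makes re-enqueueing while the callback is in progress
 * safe.
 */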

void __weak arch_irq_work_raise(void)
{
	/*
	 * Lame architectures will get the timer tick callback instead of
	 * a real self-interrupt.
	 */
}

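/*
 * An override typically raises a self-interrupt. For illustration, a sketch
 * in the spirit of the x86 implementation (from memory; the exact guards in
 * arch/x86 may differ):
 *
 *	void arch_irq_work_raise(void)
 *	{
 *		apic->send_IPI_self(IRQ_WORK_VECTOR);
 *		apic_wait_icr_idle();
 *	}
 */
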
/*
 * Queue the entry and raise the IPI if needed.
 */
static void __irq_work_queue(struct irq_work *work)
{
	bool empty;

	/* Keep us on this CPU while we touch its irq_work_list. */
	preempt_disable();

	empty = llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
	/* The list was empty, raise self-interrupt to start processing. */
	if (empty)
		arch_irq_work_raise();

	preempt_enable();
}

/*
 * Enqueue the irq_work @work, returns true on success, false when
 * @work was already enqueued by someone else.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue(struct irq_work *work)
{
	if (!irq_work_claim(work)) {
		/*
		 * Already enqueued, nothing to do.
		 */
		return false;
	}

	__irq_work_queue(work);
	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);
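
/*
 * Usage sketch (hypothetical caller, not part of this file): a callback
 * plus a statically allocated work item, queued from NMI or hardirq
 * context and torn down with irq_work_sync(). init_irq_work() is the
 * initializer provided by <linux/irq_work.h>.
 *
 *	static void my_func(struct irq_work *work)
 *	{
 *		pr_info("deferred out of NMI context\n");
 *	}
 *	static struct irq_work my_work;
 *
 *	init_irq_work(&my_work, my_func);	- once, at setup
 *	irq_work_queue(&my_work);		- safe from NMI
 *	irq_work_sync(&my_work);		- process context, before freeing
 */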

/*
 * Run the irq_work entries on this CPU. Must be run from hardirq context
 * with local IRQs disabled.
 */
void irq_work_run(void)
{
	struct irq_work *work;
	struct llist_head *this_list;
	struct llist_node *llnode;

	this_list = &__get_cpu_var(irq_work_list);
	if (llist_empty(this_list))
		return;

	BUG_ON(!in_irq());
	BUG_ON(!irqs_disabled());

	llnode = llist_del_all(this_list);
	while (llnode != NULL) {
		work = llist_entry(llnode, struct irq_work, llnode);

		llnode = llist_next(llnode);

		/*
		 * Clear the PENDING bit; after this point the @work
		 * can be re-used.
		 */
		work->flags = IRQ_WORK_BUSY;
		work->func(work);
		/*
		 * Clear the BUSY bit and return to the free state if
		 * no-one else claimed it meanwhile.
		 */
		(void)cmpxchg(&work->flags, IRQ_WORK_BUSY, 0);
	}
}
EXPORT_SYMBOL_GPL(irq_work_run);
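
/*
 * irq_work_run() is typically called from the architecture's self-interrupt
 * handler when arch_irq_work_raise() is implemented, and from the timer
 * tick otherwise (see the comment in arch_irq_work_raise() above).
 */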

/*
 * Synchronize against the irq_work @work, ensuring @work is not
 * currently in use.
 */
void irq_work_sync(struct irq_work *work)
{
	/* Busy-waiting with IRQs disabled is prone to deadlock. */
	WARN_ON_ONCE(irqs_disabled());

	while (work->flags & IRQ_WORK_BUSY)
		cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);