#ifndef _LINUX_IRQ_WORK_H
#define _LINUX_IRQ_WORK_H

#include <linux/llist.h>

/*
 * An entry can be in one of four states:
 *
 * free      NULL, 0 -> {claimed}       : free to be used
 * claimed   NULL, 3 -> {pending}       : claimed to be enqueued
 * pending   next, 3 -> {busy}          : queued, pending callback
 * busy      NULL, 2 -> {free, claimed} : callback in progress, can be claimed
 */
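/*
 * The numbers above are values of the flags word, built from the
 * IRQ_WORK_* bits defined below: 0 = no bits set, 3 = PENDING | BUSY,
 * 2 = BUSY only.
 */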

#define IRQ_WORK_PENDING	1UL
#define IRQ_WORK_BUSY		2UL
#define IRQ_WORK_FLAGS		3UL
#define IRQ_WORK_LAZY		4UL /* Doesn't want IPI, wait for tick */

struct irq_work {
	unsigned long flags;
	struct llist_node llnode;
	void (*func)(struct irq_work *);
};

static inline
void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *))
{
	work->flags = 0;
	work->func = func;
}

#define DEFINE_IRQ_WORK(name, _f) struct irq_work name = { .func = (_f), }
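
/*
 * Minimal usage sketch (the names my_work/my_func are illustrative,
 * not part of this API). The callback runs from hard interrupt
 * context once the work is raised, so it must not sleep:
 *
 *	static void my_func(struct irq_work *work)
 *	{
 *		pr_info("irq_work ran\n");
 *	}
 *	static DEFINE_IRQ_WORK(my_work, my_func);
 *
 * The work can then be raised from any context, including NMI:
 *
 *	irq_work_queue(&my_work);
 */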

bool irq_work_queue(struct irq_work *work);

#ifdef CONFIG_SMP
bool irq_work_queue_on(struct irq_work *work, int cpu);
#endif
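
/*
 * Both queueing helpers return true when @work was newly claimed and
 * queued, and false when it was already pending; a pending entry is
 * not queued twice.
 */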

void irq_work_tick(void);
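/*
 * irq_work_sync() waits until a queued callback has finished running;
 * typically used before freeing the object containing the irq_work.
 */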
void irq_work_sync(struct irq_work *work);

#ifdef CONFIG_IRQ_WORK
#include <asm/irq_work.h>

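/*
 * irq_work_run() is called from the arch irq_work IPI handler to run
 * pending callbacks; irq_work_needs_cpu() lets the nohz tick code
 * check for pending lazy work before stopping the tick.
 */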
void irq_work_run(void);
bool irq_work_needs_cpu(void);
#else
static inline bool irq_work_needs_cpu(void) { return false; }
static inline void irq_work_run(void) { }
#endif

#endif /* _LINUX_IRQ_WORK_H */