/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_IRQ_WORK_H
#define _LINUX_IRQ_WORK_H

#include <linux/llist.h>

/*
 * An entry can be in one of four states:
 *
 * free	     NULL, 0 -> {claimed}       : free to be used
 * claimed   NULL, 3 -> {pending}       : claimed to be enqueued
 * pending   next, 3 -> {busy}          : queued, pending callback
 * busy      NULL, 2 -> {free, claimed} : callback in progress, can be claimed
 */

#define IRQ_WORK_PENDING	1UL
#define IRQ_WORK_BUSY		2UL
#define IRQ_WORK_FLAGS		3UL	/* mask: PENDING | BUSY */
#define IRQ_WORK_LAZY		4UL	/* Doesn't want IPI, wait for tick */
| 20 | |
/*
 * A deferred-work item: @func is invoked with the item itself as its
 * only argument once the queued work is run.
 */
struct irq_work {
	unsigned long flags;		/* IRQ_WORK_* state bits (see table above) */
	struct llist_node llnode;	/* linkage on the lock-free work list */
	void (*func)(struct irq_work *);	/* callback to run for this item */
};
| 26 | |
| 27 | static inline |
Huang Ying | 38aaf80 | 2011-09-08 14:00:46 +0800 | [diff] [blame] | 28 | void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *)) |
Peter Zijlstra | e360adb | 2010-10-14 14:01:34 +0800 | [diff] [blame] | 29 | { |
Huang Ying | 38aaf80 | 2011-09-08 14:00:46 +0800 | [diff] [blame] | 30 | work->flags = 0; |
| 31 | work->func = func; |
Peter Zijlstra | e360adb | 2010-10-14 14:01:34 +0800 | [diff] [blame] | 32 | } |
| 33 | |
Peter Zijlstra | 6a02ad66 | 2014-02-03 18:11:08 +0100 | [diff] [blame] | 34 | #define DEFINE_IRQ_WORK(name, _f) struct irq_work name = { .func = (_f), } |
| 35 | |
/*
 * Queue @work for execution. Return value presumably indicates whether
 * the item was newly queued (vs. already pending) — confirm against
 * the implementation in kernel/irq_work.c.
 */
bool irq_work_queue(struct irq_work *work);
/* As above, but target a specific @cpu. */
bool irq_work_queue_on(struct irq_work *work, int cpu);

void irq_work_tick(void);
/* Wait until a pending/running @work item has finished executing. */
void irq_work_sync(struct irq_work *work);
#ifdef CONFIG_IRQ_WORK
#include <asm/irq_work.h>

void irq_work_run(void);
bool irq_work_needs_cpu(void);
#else
/* !CONFIG_IRQ_WORK: static no-op stubs so callers need no #ifdefs. */
static inline bool irq_work_needs_cpu(void) { return false; }
static inline void irq_work_run(void) { }
#endif

#endif /* _LINUX_IRQ_WORK_H */