/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KTHREAD_H
#define _LINUX_KTHREAD_H
/* Simple interface for creating and stopping kernel threads without mess. */
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/cgroup.h>

__printf(4, 5)
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data,
					   int node,
					   const char namefmt[], ...);

/**
 * kthread_create - create a kthread on the current node
 * @threadfn: the function to run in the thread
 * @data: data pointer for @threadfn()
 * @namefmt: printf-style format string for the thread name
 * @arg...: arguments for @namefmt.
 *
 * This macro will create a kthread on the current node, leaving it in
 * the stopped state.  This is just a helper for kthread_create_on_node();
 * see the documentation there for more details.
 */
#define kthread_create(threadfn, data, namefmt, arg...) \
	kthread_create_on_node(threadfn, data, NUMA_NO_NODE, namefmt, ##arg)


struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
					  void *data,
					  unsigned int cpu,
					  const char *namefmt);

/**
 * kthread_run - create and wake a thread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @namefmt: printf-style name for the thread.
 *
 * Description: Convenient wrapper for kthread_create() followed by
 * wake_up_process().  Returns the kthread or ERR_PTR(-ENOMEM).
 */
#define kthread_run(threadfn, data, namefmt, ...)			   \
({									   \
	struct task_struct *__k						   \
		= kthread_create(threadfn, data, namefmt, ## __VA_ARGS__); \
	if (!IS_ERR(__k))						   \
		wake_up_process(__k);					   \
	__k;								   \
})

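/*
 * Example sketch (illustrative only; my_loop_fn and "my_loop" are made-up
 * names, and msleep_interruptible() needs <linux/delay.h>): a minimal
 * thread function paired with kthread_run()/kthread_stop().
 *
 *	static int my_loop_fn(void *data)
 *	{
 *		while (!kthread_should_stop()) {
 *			// do one unit of work, then sleep a bit
 *			msleep_interruptible(100);
 *		}
 *		return 0;	// returned to the caller of kthread_stop()
 *	}
 *
 *	struct task_struct *tsk = kthread_run(my_loop_fn, NULL, "my_loop");
 *	...
 *	int ret = kthread_stop(tsk);	// wakes the thread, waits for exit
 */
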
void free_kthread_struct(struct task_struct *k);
void kthread_bind(struct task_struct *k, unsigned int cpu);
void kthread_bind_mask(struct task_struct *k, const struct cpumask *mask);
int kthread_stop(struct task_struct *k);
bool kthread_should_stop(void);
bool kthread_should_park(void);
bool kthread_freezable_should_stop(bool *was_frozen);
void *kthread_data(struct task_struct *k);
void *kthread_probe_data(struct task_struct *k);
int kthread_park(struct task_struct *k);
void kthread_unpark(struct task_struct *k);
void kthread_parkme(void);

int kthreadd(void *unused);
extern struct task_struct *kthreadd_task;
extern int tsk_fork_get_node(struct task_struct *tsk);

/*
 * Simple work processor based on kthread.
 *
 * This provides an easier way to make use of kthreads.  A kthread_work
 * can be queued and flushed using kthread_queue_work() and
 * kthread_flush_work() respectively.  Queued kthread_works are processed
 * by a kthread running kthread_worker_fn().
 */
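
/*
 * Example sketch of the worker API declared below (illustrative only;
 * my_work_fn and the worker/work variables are hypothetical):
 *
 *	static void my_work_fn(struct kthread_work *work)
 *	{
 *		// runs in the worker's kthread context
 *	}
 *
 *	struct kthread_worker *worker;
 *	struct kthread_work work;
 *
 *	worker = kthread_create_worker(0, "my_worker");
 *	if (IS_ERR(worker))
 *		return PTR_ERR(worker);
 *	kthread_init_work(&work, my_work_fn);
 *	kthread_queue_work(worker, &work);
 *	kthread_flush_work(&work);		// wait for it to finish
 *	kthread_destroy_worker(worker);
 */
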
struct kthread_work;
typedef void (*kthread_work_func_t)(struct kthread_work *work);
void kthread_delayed_work_timer_fn(struct timer_list *t);

enum {
	KTW_FREEZABLE		= 1 << 0,	/* freeze during suspend */
};

struct kthread_worker {
	unsigned int		flags;
	spinlock_t		lock;
	struct list_head	work_list;
	struct list_head	delayed_work_list;
	struct task_struct	*task;
	struct kthread_work	*current_work;
};

struct kthread_work {
	struct list_head	node;
	kthread_work_func_t	func;
	struct kthread_worker	*worker;
	/* Number of canceling calls that are running at the moment. */
	int			canceling;
};

struct kthread_delayed_work {
	struct kthread_work work;
	struct timer_list timer;
};

#define KTHREAD_WORKER_INIT(worker)	{				\
	.lock = __SPIN_LOCK_UNLOCKED((worker).lock),			\
	.work_list = LIST_HEAD_INIT((worker).work_list),		\
	.delayed_work_list = LIST_HEAD_INIT((worker).delayed_work_list),\
	}

#define KTHREAD_WORK_INIT(work, fn)	{				\
	.node = LIST_HEAD_INIT((work).node),				\
	.func = (fn),							\
	}

#define KTHREAD_DELAYED_WORK_INIT(dwork, fn) {				\
	.work = KTHREAD_WORK_INIT((dwork).work, (fn)),			\
	.timer = __TIMER_INITIALIZER(kthread_delayed_work_timer_fn,	\
				     TIMER_IRQSAFE),			\
	}

#define DEFINE_KTHREAD_WORKER(worker)					\
	struct kthread_worker worker = KTHREAD_WORKER_INIT(worker)

#define DEFINE_KTHREAD_WORK(work, fn)					\
	struct kthread_work work = KTHREAD_WORK_INIT(work, fn)

#define DEFINE_KTHREAD_DELAYED_WORK(dwork, fn)				\
	struct kthread_delayed_work dwork =				\
		KTHREAD_DELAYED_WORK_INIT(dwork, fn)

/*
 * kthread_worker.lock needs its own lockdep class key when defined on
 * stack with lockdep enabled.  Use the following macros in such cases.
 */
#ifdef CONFIG_LOCKDEP
# define KTHREAD_WORKER_INIT_ONSTACK(worker)				\
	({ kthread_init_worker(&worker); worker; })
# define DEFINE_KTHREAD_WORKER_ONSTACK(worker)				\
	struct kthread_worker worker = KTHREAD_WORKER_INIT_ONSTACK(worker)
#else
# define DEFINE_KTHREAD_WORKER_ONSTACK(worker) DEFINE_KTHREAD_WORKER(worker)
#endif
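
/*
 * Example sketch (illustrative only): a worker defined on the stack should
 * use the _ONSTACK variant so its lock gets its own lockdep class, while a
 * worker embedded in another structure (my_dev here is hypothetical) can be
 * set up with kthread_init_worker():
 *
 *	DEFINE_KTHREAD_WORKER_ONSTACK(worker);
 *
 *	kthread_init_worker(&my_dev->worker);
 */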

extern void __kthread_init_worker(struct kthread_worker *worker,
			const char *name, struct lock_class_key *key);

#define kthread_init_worker(worker)					\
	do {								\
		static struct lock_class_key __key;			\
		__kthread_init_worker((worker), "("#worker")->lock", &__key); \
	} while (0)

#define kthread_init_work(work, fn)					\
	do {								\
		memset((work), 0, sizeof(struct kthread_work));	\
		INIT_LIST_HEAD(&(work)->node);				\
		(work)->func = (fn);					\
	} while (0)

#define kthread_init_delayed_work(dwork, fn)				\
	do {								\
		kthread_init_work(&(dwork)->work, (fn));		\
		__init_timer(&(dwork)->timer,				\
			     kthread_delayed_work_timer_fn,		\
			     TIMER_IRQSAFE);				\
	} while (0)
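
/*
 * Example sketch (illustrative only; my_work_fn and worker are hypothetical,
 * and msecs_to_jiffies() comes from <linux/jiffies.h>): initialize a delayed
 * work item and queue it to run on a worker after roughly 100ms.
 *
 *	struct kthread_delayed_work dwork;
 *
 *	kthread_init_delayed_work(&dwork, my_work_fn);
 *	kthread_queue_delayed_work(worker, &dwork, msecs_to_jiffies(100));
 *
 * A later kthread_mod_delayed_work() call can shorten or extend the delay,
 * and kthread_cancel_delayed_work_sync() cancels and waits if needed.
 */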

int kthread_worker_fn(void *worker_ptr);

__printf(2, 3)
struct kthread_worker *
kthread_create_worker(unsigned int flags, const char namefmt[], ...);

__printf(3, 4) struct kthread_worker *
kthread_create_worker_on_cpu(int cpu, unsigned int flags,
			     const char namefmt[], ...);

bool kthread_queue_work(struct kthread_worker *worker,
			struct kthread_work *work);

bool kthread_queue_delayed_work(struct kthread_worker *worker,
				struct kthread_delayed_work *dwork,
				unsigned long delay);

bool kthread_mod_delayed_work(struct kthread_worker *worker,
			      struct kthread_delayed_work *dwork,
			      unsigned long delay);

void kthread_flush_work(struct kthread_work *work);
void kthread_flush_worker(struct kthread_worker *worker);

bool kthread_cancel_work_sync(struct kthread_work *work);
bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *work);

void kthread_destroy_worker(struct kthread_worker *worker);

#ifdef CONFIG_BLK_CGROUP
void kthread_associate_blkcg(struct cgroup_subsys_state *css);
struct cgroup_subsys_state *kthread_blkcg(void);
#else
static inline void kthread_associate_blkcg(struct cgroup_subsys_state *css) { }
static inline struct cgroup_subsys_state *kthread_blkcg(void)
{
	return NULL;
}
#endif
#endif /* _LINUX_KTHREAD_H */