/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 * Copyright (C) 2014 Fujitsu.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include "async-thread.h"
#include "ctree.h"

#define WORK_DONE_BIT 0
#define WORK_ORDER_DONE_BIT 1
#define WORK_HIGH_PRIO_BIT 2

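/*
 * Threshold handling: a btrfs workqueue can grow or shrink its effective
 * max_active on demand.  A requested thresh below DFT_THRESHOLD disables
 * this, and the queue then runs at limit_active from the start (see
 * __btrfs_alloc_workqueue() below).
 */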
#define NO_THRESHOLD (-1)
#define DFT_THRESHOLD (32)

struct __btrfs_workqueue {
	struct workqueue_struct *normal_wq;
	/* List head pointing to ordered work list */
	struct list_head ordered_list;

	/* Spinlock for ordered_list */
	spinlock_t list_lock;

	/* Thresholding related variables */
	atomic_t pending;

	/* Upper limit of concurrent workers */
	int limit_active;

	/* Current number of concurrent workers */
	int current_active;

	/* Threshold to change current_active */
	int thresh;
	unsigned int count;
	spinlock_t thres_lock;
};

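/*
 * Handle exposed to callers: the normal queue always exists, plus an
 * optional WQ_HIGHPRI twin that work items flagged with
 * WORK_HIGH_PRIO_BIT are routed to (see btrfs_queue_work()).
 */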
struct btrfs_workqueue {
	struct __btrfs_workqueue *normal;
	struct __btrfs_workqueue *high;
};

static void normal_work_helper(struct btrfs_work *work);

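/*
 * Generate a trivial wrapper per work type: recover the btrfs_work from
 * the embedded work_struct and hand it to normal_work_helper().  Each
 * work type thus gets its own entry point, keeping the types
 * distinguishable (e.g. in stack traces and tracepoints).
 */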
#define BTRFS_WORK_HELPER(name)					\
void btrfs_##name(struct work_struct *arg)				\
{									\
	struct btrfs_work *work = container_of(arg, struct btrfs_work,	\
					       normal_work);		\
	normal_work_helper(work);					\
}

BTRFS_WORK_HELPER(worker_helper);
BTRFS_WORK_HELPER(delalloc_helper);
BTRFS_WORK_HELPER(flush_delalloc_helper);
BTRFS_WORK_HELPER(cache_helper);
BTRFS_WORK_HELPER(submit_helper);
BTRFS_WORK_HELPER(fixup_helper);
BTRFS_WORK_HELPER(endio_helper);
BTRFS_WORK_HELPER(endio_meta_helper);
BTRFS_WORK_HELPER(endio_meta_write_helper);
BTRFS_WORK_HELPER(endio_raid56_helper);
BTRFS_WORK_HELPER(endio_repair_helper);
BTRFS_WORK_HELPER(rmw_helper);
BTRFS_WORK_HELPER(endio_write_helper);
BTRFS_WORK_HELPER(freespace_write_helper);
BTRFS_WORK_HELPER(delayed_meta_helper);
BTRFS_WORK_HELPER(readahead_helper);
BTRFS_WORK_HELPER(qgroup_rescan_helper);
BTRFS_WORK_HELPER(extent_refs_helper);
BTRFS_WORK_HELPER(scrub_helper);
BTRFS_WORK_HELPER(scrubwrc_helper);
BTRFS_WORK_HELPER(scrubnc_helper);
BTRFS_WORK_HELPER(scrubparity_helper);

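/*
 * Allocate one __btrfs_workqueue.  @limit_active caps the number of
 * concurrent workers; @thresh selects the threshold behaviour described
 * above (0 means DFT_THRESHOLD).  Returns NULL on allocation failure.
 */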
static struct __btrfs_workqueue *
__btrfs_alloc_workqueue(const char *name, unsigned int flags, int limit_active,
			int thresh)
{
	struct __btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL);

	if (!ret)
		return NULL;

	ret->limit_active = limit_active;
	atomic_set(&ret->pending, 0);
	if (thresh == 0)
		thresh = DFT_THRESHOLD;
	/* For low threshold, disabling threshold is a better choice */
	if (thresh < DFT_THRESHOLD) {
		ret->current_active = limit_active;
		ret->thresh = NO_THRESHOLD;
	} else {
		/*
		 * For threshold-able wq, let its concurrency grow on demand.
		 * Use minimal max_active at alloc time to reduce resource
		 * usage.
		 */
		ret->current_active = 1;
		ret->thresh = thresh;
	}

	if (flags & WQ_HIGHPRI)
		ret->normal_wq = alloc_workqueue("%s-%s-high", flags,
						 ret->current_active, "btrfs",
						 name);
	else
		ret->normal_wq = alloc_workqueue("%s-%s", flags,
						 ret->current_active, "btrfs",
						 name);
	if (!ret->normal_wq) {
		kfree(ret);
		return NULL;
	}

	INIT_LIST_HEAD(&ret->ordered_list);
	spin_lock_init(&ret->list_lock);
	spin_lock_init(&ret->thres_lock);
	trace_btrfs_workqueue_alloc(ret, name, flags & WQ_HIGHPRI);
	return ret;
}

static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq);

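/*
 * Allocate the pair handed out to callers: the normal queue always
 * exists; a second high priority queue is created only when WQ_HIGHPRI
 * is set in @flags.
 */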
struct btrfs_workqueue *btrfs_alloc_workqueue(const char *name,
					      unsigned int flags,
					      int limit_active,
					      int thresh)
{
	struct btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL);

	if (!ret)
		return NULL;

	ret->normal = __btrfs_alloc_workqueue(name, flags & ~WQ_HIGHPRI,
					      limit_active, thresh);
	if (!ret->normal) {
		kfree(ret);
		return NULL;
	}

	if (flags & WQ_HIGHPRI) {
		ret->high = __btrfs_alloc_workqueue(name, flags, limit_active,
						    thresh);
		if (!ret->high) {
			__btrfs_destroy_workqueue(ret->normal);
			kfree(ret);
			return NULL;
		}
	}
	return ret;
}

/*
 * Threshold hook, called from btrfs_queue_work().  This hook WILL be
 * called in IRQ handler context, so workqueue_set_max_active() MUST NOT
 * be called here.
 */
static inline void thresh_queue_hook(struct __btrfs_workqueue *wq)
{
	if (wq->thresh == NO_THRESHOLD)
		return;
	atomic_inc(&wq->pending);
}

/*
 * Threshold hook, called before executing the work.  This hook runs in
 * kthread context, so it is safe to call workqueue_set_max_active() here.
 */
static inline void thresh_exec_hook(struct __btrfs_workqueue *wq)
{
	int new_current_active;
	long pending;
	int need_change = 0;

	if (wq->thresh == NO_THRESHOLD)
		return;

	atomic_dec(&wq->pending);
	spin_lock(&wq->thres_lock);
	/*
	 * Use wq->count to limit the calling frequency of
	 * workqueue_set_max_active.
	 */
	wq->count++;
	wq->count %= (wq->thresh / 4);
	if (!wq->count)
		goto out;
	new_current_active = wq->current_active;

	/*
	 * pending may change later, but that's OK since we don't need
	 * it to be accurate to calculate new_current_active.
	 */
	pending = atomic_read(&wq->pending);
	if (pending > wq->thresh)
		new_current_active++;
	if (pending < wq->thresh / 2)
		new_current_active--;
	new_current_active = clamp_val(new_current_active, 1, wq->limit_active);
	if (new_current_active != wq->current_active) {
		need_change = 1;
		wq->current_active = new_current_active;
	}
out:
	spin_unlock(&wq->thres_lock);

	if (need_change) {
		workqueue_set_max_active(wq->normal_wq, wq->current_active);
	}
}

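/*
 * Run the ordered_func of every completed work item at the head of the
 * ordered list, in queueing order.  An unfinished item blocks everything
 * behind it, so ordered_func calls never overtake each other.
 */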
static void run_ordered_work(struct __btrfs_workqueue *wq)
{
	struct list_head *list = &wq->ordered_list;
	struct btrfs_work *work;
	spinlock_t *lock = &wq->list_lock;
	unsigned long flags;

	while (1) {
		spin_lock_irqsave(lock, flags);
		if (list_empty(list))
			break;
		work = list_entry(list->next, struct btrfs_work,
				  ordered_list);
		if (!test_bit(WORK_DONE_BIT, &work->flags))
			break;

		/*
		 * we are going to call the ordered done function, but
		 * we leave the work item on the list as a barrier so
		 * that later work items that are done don't have their
		 * functions called before this one returns
		 */
		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
			break;
		trace_btrfs_ordered_sched(work);
		spin_unlock_irqrestore(lock, flags);
		work->ordered_func(work);

		/* now take the lock again and drop our item from the list */
		spin_lock_irqsave(lock, flags);
		list_del(&work->ordered_list);
		spin_unlock_irqrestore(lock, flags);

		/*
		 * we don't want to call the ordered free functions
		 * with the lock held though
		 */
		work->ordered_free(work);
		trace_btrfs_all_work_done(work);
	}
	spin_unlock_irqrestore(lock, flags);
}

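/*
 * Common body of all BTRFS_WORK_HELPER() wrappers: run the main work
 * function, then, for ordered work, set WORK_DONE_BIT and try to drain
 * the ordered list.
 */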
static void normal_work_helper(struct btrfs_work *work)
{
	struct __btrfs_workqueue *wq;
	int need_order = 0;

	/*
	 * We should not touch things inside work in the following cases:
	 * 1) after work->func() if it has no ordered_free
	 *    Since the struct is freed in work->func().
	 * 2) after setting WORK_DONE_BIT
	 *    The work may be freed in other threads almost instantly.
	 * So we save the needed things here.
	 */
	if (work->ordered_func)
		need_order = 1;
	wq = work->wq;

	trace_btrfs_work_sched(work);
	thresh_exec_hook(wq);
	work->func(work);
	if (need_order) {
		set_bit(WORK_DONE_BIT, &work->flags);
		run_ordered_work(wq);
	}
	if (!need_order)
		trace_btrfs_all_work_done(work);
}

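/*
 * Initialize a work item.  By convention @uniq_func is the
 * BTRFS_WORK_HELPER() wrapper matching the queue this work will be
 * submitted to; @ordered_func and @ordered_free may be NULL for
 * unordered work.
 */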
void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t uniq_func,
		     btrfs_func_t func,
		     btrfs_func_t ordered_func,
		     btrfs_func_t ordered_free)
{
	work->func = func;
	work->ordered_func = ordered_func;
	work->ordered_free = ordered_free;
	INIT_WORK(&work->normal_work, uniq_func);
	INIT_LIST_HEAD(&work->ordered_list);
	work->flags = 0;
}

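/*
 * Submit to one underlying queue: run the queue-side threshold hook,
 * link ordered work into the ordered list, then hand the item to the
 * kernel workqueue.  Callers may be in IRQ context, hence the irqsave
 * locking.
 */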
static inline void __btrfs_queue_work(struct __btrfs_workqueue *wq,
				      struct btrfs_work *work)
{
	unsigned long flags;

	work->wq = wq;
	thresh_queue_hook(wq);
	if (work->ordered_func) {
		spin_lock_irqsave(&wq->list_lock, flags);
		list_add_tail(&work->ordered_list, &wq->ordered_list);
		spin_unlock_irqrestore(&wq->list_lock, flags);
	}
	trace_btrfs_work_queued(work);
	queue_work(wq->normal_wq, &work->normal_work);
}

void btrfs_queue_work(struct btrfs_workqueue *wq,
		      struct btrfs_work *work)
{
	struct __btrfs_workqueue *dest_wq;

	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags) && wq->high)
		dest_wq = wq->high;
	else
		dest_wq = wq->normal;
	__btrfs_queue_work(dest_wq, work);
}

static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq)
{
	destroy_workqueue(wq->normal_wq);
	trace_btrfs_workqueue_destroy(wq);
	kfree(wq);
}

void btrfs_destroy_workqueue(struct btrfs_workqueue *wq)
{
	if (!wq)
		return;
	if (wq->high)
		__btrfs_destroy_workqueue(wq->high);
	__btrfs_destroy_workqueue(wq->normal);
	kfree(wq);
}

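/*
 * Only the cap is updated here; for thresholded queues the effective
 * max_active is adjusted lazily by thresh_exec_hook() as work items are
 * executed.
 */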
void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int limit_active)
{
	if (!wq)
		return;
	wq->normal->limit_active = limit_active;
	if (wq->high)
		wq->high->limit_active = limit_active;
}

void btrfs_set_work_high_priority(struct btrfs_work *work)
{
	set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
}