/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include "async-thread.h"

#define WORK_QUEUED_BIT 0
#define WORK_DONE_BIT 1
#define WORK_ORDER_DONE_BIT 2
#define WORK_HIGH_PRIO_BIT 3

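/*
 * Lifecycle of a work item, as tracked by the bits above: queueing a
 * struct btrfs_work sets WORK_QUEUED_BIT, and a worker clears it again
 * when it pulls the item off a pending list.  On ordered queues,
 * WORK_DONE_BIT records that ->func has run and WORK_ORDER_DONE_BIT
 * that ->ordered_func has been called.  WORK_HIGH_PRIO_BIT routes the
 * item onto the high priority lists.
 */
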
/*
 * container for the kthread task pointer and the list of pending work.
 * One of these is allocated per thread.
 */
struct btrfs_worker_thread {
	/* pool we belong to */
	struct btrfs_workers *workers;

	/* list of struct btrfs_work that are waiting for service */
	struct list_head pending;
	struct list_head prio_pending;

	/* list of worker threads from struct btrfs_workers */
	struct list_head worker_list;

	/* kthread */
	struct task_struct *task;

	/* number of things on the pending list */
	atomic_t num_pending;

	/* reference counter for this struct */
	atomic_t refs;

	unsigned long sequence;

	/* protects the pending list. */
	spinlock_t lock;

	/* set to non-zero when this thread is already awake and kicking */
	int working;

	/* are we currently idle */
	int idle;
};

static int __btrfs_start_workers(struct btrfs_workers *workers);

/*
 * btrfs_start_workers uses kthread_run, which can block waiting for memory
 * for a very long time.  It will actually throttle on page writeback,
 * and so it may not make progress until after our btrfs worker threads
 * process all of the pending work structs in their queue.
 *
 * This means we can't use btrfs_start_workers from inside a btrfs worker
 * thread that is used as part of cleaning dirty memory, which pretty much
 * involves all of the worker threads.
 *
 * Instead we have a helper queue that never has more than one thread,
 * where we schedule thread start operations.  This worker_start struct
 * is used to contain the work and hold a pointer to the queue that needs
 * another worker.
 */
struct worker_start {
	struct btrfs_work work;
	struct btrfs_workers *queue;
};

static void start_new_worker_func(struct btrfs_work *work)
{
	struct worker_start *start;
	start = container_of(work, struct worker_start, work);
	__btrfs_start_workers(start->queue);
	kfree(start);
}

/*
 * helper function to move a thread onto the idle list after it
 * has finished some requests.
 */
static void check_idle_worker(struct btrfs_worker_thread *worker)
{
	if (!worker->idle && atomic_read(&worker->num_pending) <
	    worker->workers->idle_thresh / 2) {
		unsigned long flags;
		spin_lock_irqsave(&worker->workers->lock, flags);
		worker->idle = 1;

		/* the list may be empty if the worker is just starting */
		if (!list_empty(&worker->worker_list)) {
			list_move(&worker->worker_list,
				  &worker->workers->idle_list);
		}
		spin_unlock_irqrestore(&worker->workers->lock, flags);
	}
}

/*
 * helper function to move a thread off the idle list after new
 * pending work is added.
 */
static void check_busy_worker(struct btrfs_worker_thread *worker)
{
	if (worker->idle && atomic_read(&worker->num_pending) >=
	    worker->workers->idle_thresh) {
		unsigned long flags;
		spin_lock_irqsave(&worker->workers->lock, flags);
		worker->idle = 0;

		if (!list_empty(&worker->worker_list)) {
			list_move_tail(&worker->worker_list,
				       &worker->workers->worker_list);
		}
		spin_unlock_irqrestore(&worker->workers->lock, flags);
	}
}

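/*
 * Note the hysteresis in the two helpers above: a worker moves to the
 * idle list only once its backlog drops below half of idle_thresh, but
 * is not considered busy again until the backlog reaches the full
 * idle_thresh.  That gap keeps workers from bouncing between the busy
 * and idle lists on every queued item.
 */
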
static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
{
	struct btrfs_workers *workers = worker->workers;
	struct worker_start *start;
	unsigned long flags;

	rmb();
	if (!workers->atomic_start_pending)
		return;

	start = kzalloc(sizeof(*start), GFP_NOFS);
	if (!start)
		return;

	start->work.func = start_new_worker_func;
	start->queue = workers;

	spin_lock_irqsave(&workers->lock, flags);
	if (!workers->atomic_start_pending)
		goto out;

	workers->atomic_start_pending = 0;
	if (workers->num_workers + workers->num_workers_starting >=
	    workers->max_workers)
		goto out;

	workers->num_workers_starting += 1;
	spin_unlock_irqrestore(&workers->lock, flags);
	btrfs_queue_worker(workers->atomic_worker_start, &start->work);
	return;

out:
	kfree(start);
	spin_unlock_irqrestore(&workers->lock, flags);
}

static noinline void run_ordered_completions(struct btrfs_workers *workers,
					     struct btrfs_work *work)
{
	if (!workers->ordered)
		return;

	set_bit(WORK_DONE_BIT, &work->flags);

	spin_lock(&workers->order_lock);

	while (1) {
		if (!list_empty(&workers->prio_order_list)) {
			work = list_entry(workers->prio_order_list.next,
					  struct btrfs_work, order_list);
		} else if (!list_empty(&workers->order_list)) {
			work = list_entry(workers->order_list.next,
					  struct btrfs_work, order_list);
		} else {
			break;
		}
		if (!test_bit(WORK_DONE_BIT, &work->flags))
			break;

		/* we are going to call the ordered done function, but
		 * we leave the work item on the list as a barrier so
		 * that later work items that are done don't have their
		 * functions called before this one returns
		 */
		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
			break;

		spin_unlock(&workers->order_lock);

		work->ordered_func(work);

		/* now take the lock again and call the freeing code */
		spin_lock(&workers->order_lock);
		list_del(&work->order_list);
		work->ordered_free(work);
	}

	spin_unlock(&workers->order_lock);
}

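/*
 * Sketch of how an ordered queue is typically used (the callback names
 * below are illustrative, not from this file).  The caller fills in
 * all three hooks before queueing: ->func may run on any worker as
 * soon as the item is picked up, ->ordered_func runs strictly in queue
 * order, and only ->ordered_free may release the work struct:
 *
 *	work->func = csum_stage;
 *	work->ordered_func = submit_in_order;
 *	work->ordered_free = free_async_work;
 *	btrfs_queue_worker(ordered_workers, work);
 */
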
static void put_worker(struct btrfs_worker_thread *worker)
{
	if (atomic_dec_and_test(&worker->refs))
		kfree(worker);
}

static int try_worker_shutdown(struct btrfs_worker_thread *worker)
{
	int freeit = 0;

	spin_lock_irq(&worker->lock);
	spin_lock(&worker->workers->lock);
	if (worker->workers->num_workers > 1 &&
	    worker->idle &&
	    !worker->working &&
	    !list_empty(&worker->worker_list) &&
	    list_empty(&worker->prio_pending) &&
	    list_empty(&worker->pending) &&
	    atomic_read(&worker->num_pending) == 0) {
		freeit = 1;
		list_del_init(&worker->worker_list);
		worker->workers->num_workers--;
	}
	spin_unlock(&worker->workers->lock);
	spin_unlock_irq(&worker->lock);

	if (freeit)
		put_worker(worker);
	return freeit;
}

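/*
 * grab work for worker_loop.  The prio_head/head lists are private to
 * the calling worker, so they can be scanned without taking any locks;
 * worker->lock is only taken to refill them from the shared pending
 * lists when they run dry or new high priority work has arrived.
 */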
static struct btrfs_work *get_next_work(struct btrfs_worker_thread *worker,
					struct list_head *prio_head,
					struct list_head *head)
{
	struct btrfs_work *work = NULL;
	struct list_head *cur = NULL;

	if (!list_empty(prio_head))
		cur = prio_head->next;

	smp_mb();
	if (!list_empty(&worker->prio_pending))
		goto refill;

	if (!list_empty(head))
		cur = head->next;

	if (cur)
		goto out;

refill:
	spin_lock_irq(&worker->lock);
	list_splice_tail_init(&worker->prio_pending, prio_head);
	list_splice_tail_init(&worker->pending, head);

	if (!list_empty(prio_head))
		cur = prio_head->next;
	else if (!list_empty(head))
		cur = head->next;
	spin_unlock_irq(&worker->lock);

	if (!cur)
		goto out_fail;

out:
	work = list_entry(cur, struct btrfs_work, list);

out_fail:
	return work;
}

/*
 * main loop for servicing work items
 */
static int worker_loop(void *arg)
{
	struct btrfs_worker_thread *worker = arg;
	struct list_head head;
	struct list_head prio_head;
	struct btrfs_work *work;

	INIT_LIST_HEAD(&head);
	INIT_LIST_HEAD(&prio_head);

	do {
again:
		while (1) {
			work = get_next_work(worker, &prio_head, &head);
			if (!work)
				break;

			list_del(&work->list);
			clear_bit(WORK_QUEUED_BIT, &work->flags);

			work->worker = worker;

			work->func(work);

			atomic_dec(&worker->num_pending);
			/*
			 * unless this is an ordered work queue,
			 * 'work' was probably freed by func above.
			 */
			run_ordered_completions(worker->workers, work);

			check_pending_worker_creates(worker);
			cond_resched();
		}

		spin_lock_irq(&worker->lock);
		check_idle_worker(worker);

		if (freezing(current)) {
			worker->working = 0;
			spin_unlock_irq(&worker->lock);
			try_to_freeze();
		} else {
			spin_unlock_irq(&worker->lock);
			if (!kthread_should_stop()) {
				cpu_relax();
				/*
				 * we've dropped the lock, did someone else
				 * jump in?
				 */
				smp_mb();
				if (!list_empty(&worker->pending) ||
				    !list_empty(&worker->prio_pending))
					continue;

				/*
				 * this short schedule allows more work to
				 * come in without the queue functions
				 * needing to go through wake_up_process()
				 *
				 * worker->working is still 1, so nobody
				 * is going to try and wake us up
				 */
				schedule_timeout(1);
				smp_mb();
				if (!list_empty(&worker->pending) ||
				    !list_empty(&worker->prio_pending))
					continue;

				if (kthread_should_stop())
					break;

				/* still no more work? sleep for real */
				spin_lock_irq(&worker->lock);
				set_current_state(TASK_INTERRUPTIBLE);
				if (!list_empty(&worker->pending) ||
				    !list_empty(&worker->prio_pending)) {
					spin_unlock_irq(&worker->lock);
					set_current_state(TASK_RUNNING);
					goto again;
				}

				/*
				 * this makes sure we get a wakeup when someone
				 * adds something new to the queue
				 */
				worker->working = 0;
				spin_unlock_irq(&worker->lock);

				if (!kthread_should_stop()) {
					schedule_timeout(HZ * 120);
					if (!worker->working &&
					    try_worker_shutdown(worker)) {
						return 0;
					}
				}
			}
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());
	return 0;
}

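/*
 * The sleep/wakeup handshake above: the queue functions set
 * worker->working under worker->lock and only call wake_up_process()
 * when it was previously clear, while worker_loop rechecks the pending
 * lists and clears worker->working (again under the lock) before
 * sleeping.  Either the worker sees a newly queued item before it
 * sleeps, or the queuer sees working == 0 and issues the wakeup, so
 * queued work is never missed.
 */
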
/*
 * this will wait for all the worker threads to shutdown
 */
void btrfs_stop_workers(struct btrfs_workers *workers)
{
	struct list_head *cur;
	struct btrfs_worker_thread *worker;
	int can_stop;

	spin_lock_irq(&workers->lock);
	list_splice_init(&workers->idle_list, &workers->worker_list);
	while (!list_empty(&workers->worker_list)) {
		cur = workers->worker_list.next;
		worker = list_entry(cur, struct btrfs_worker_thread,
				    worker_list);

		atomic_inc(&worker->refs);
		workers->num_workers -= 1;
		if (!list_empty(&worker->worker_list)) {
			list_del_init(&worker->worker_list);
			put_worker(worker);
			can_stop = 1;
		} else
			can_stop = 0;
		spin_unlock_irq(&workers->lock);
		if (can_stop)
			kthread_stop(worker->task);
		spin_lock_irq(&workers->lock);
		put_worker(worker);
	}
	spin_unlock_irq(&workers->lock);
}

/*
 * simple init on struct btrfs_workers
 */
void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
			struct btrfs_workers *async_helper)
{
	workers->num_workers = 0;
	workers->num_workers_starting = 0;
	INIT_LIST_HEAD(&workers->worker_list);
	INIT_LIST_HEAD(&workers->idle_list);
	INIT_LIST_HEAD(&workers->order_list);
	INIT_LIST_HEAD(&workers->prio_order_list);
	spin_lock_init(&workers->lock);
	spin_lock_init(&workers->order_lock);
	workers->max_workers = max;
	workers->idle_thresh = 32;
	workers->name = name;
	workers->ordered = 0;
	workers->atomic_start_pending = 0;
	workers->atomic_worker_start = async_helper;
}

/*
 * starts new worker threads.  This does not enforce the max worker
 * count in case you need to temporarily go past it.
 */
static int __btrfs_start_workers(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	int ret = 0;

	worker = kzalloc(sizeof(*worker), GFP_NOFS);
	if (!worker) {
		ret = -ENOMEM;
		goto fail;
	}

	INIT_LIST_HEAD(&worker->pending);
	INIT_LIST_HEAD(&worker->prio_pending);
	INIT_LIST_HEAD(&worker->worker_list);
	spin_lock_init(&worker->lock);

	atomic_set(&worker->num_pending, 0);
	atomic_set(&worker->refs, 1);
	worker->workers = workers;
	worker->task = kthread_run(worker_loop, worker,
				   "btrfs-%s-%d", workers->name,
				   workers->num_workers + 1);
	if (IS_ERR(worker->task)) {
		ret = PTR_ERR(worker->task);
		kfree(worker);
		goto fail;
	}
	spin_lock_irq(&workers->lock);
	list_add_tail(&worker->worker_list, &workers->idle_list);
	worker->idle = 1;
	workers->num_workers++;
	workers->num_workers_starting--;
	WARN_ON(workers->num_workers_starting < 0);
	spin_unlock_irq(&workers->lock);

	return 0;
fail:
	spin_lock_irq(&workers->lock);
	workers->num_workers_starting--;
	spin_unlock_irq(&workers->lock);
	return ret;
}

int btrfs_start_workers(struct btrfs_workers *workers)
{
	spin_lock_irq(&workers->lock);
	workers->num_workers_starting++;
	spin_unlock_irq(&workers->lock);
	return __btrfs_start_workers(workers);
}

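/*
 * Setup sketch (the fs_info fields named here are illustrative of a
 * typical caller, not defined in this file):
 *
 *	btrfs_init_workers(&fs_info->workers, "worker",
 *			   fs_info->thread_pool_size,
 *			   &fs_info->generic_worker);
 *	ret = btrfs_start_workers(&fs_info->workers);
 *	if (ret)
 *		goto fail;
 */
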
/*
 * run through the list and find a worker thread that doesn't have a lot
 * to do right now.  This can return null if we aren't yet at the thread
 * count limit and all of the threads are busy.
 */
static struct btrfs_worker_thread *next_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	struct list_head *next;
	int enforce_min;

	enforce_min = (workers->num_workers + workers->num_workers_starting) <
		workers->max_workers;

	/*
	 * if we find an idle thread, don't move it to the end of the
	 * idle list.  This improves the chance that the next submission
	 * will reuse the same thread, and maybe catch it while it is still
	 * working
	 */
	if (!list_empty(&workers->idle_list)) {
		next = workers->idle_list.next;
		worker = list_entry(next, struct btrfs_worker_thread,
				    worker_list);
		return worker;
	}
	if (enforce_min || list_empty(&workers->worker_list))
		return NULL;

	/*
	 * if we pick a busy task, move the task to the end of the list.
	 * hopefully this will keep things somewhat evenly balanced.
	 * Do the move in batches based on the sequence number.  This groups
	 * requests submitted at roughly the same time onto the same worker.
	 */
	next = workers->worker_list.next;
	worker = list_entry(next, struct btrfs_worker_thread, worker_list);
	worker->sequence++;

	if (worker->sequence % workers->idle_thresh == 0)
		list_move_tail(next, &workers->worker_list);
	return worker;
}

/*
 * selects a worker thread to take the next job.  This will either find
 * an idle worker, start a new worker up to the max count, or just return
 * one of the existing busy workers.
 */
static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;
	struct list_head *fallback;
	int ret;

	spin_lock_irqsave(&workers->lock, flags);
again:
	worker = next_worker(workers);

	if (!worker) {
		if (workers->num_workers + workers->num_workers_starting >=
		    workers->max_workers) {
			goto fallback;
		} else if (workers->atomic_worker_start) {
			workers->atomic_start_pending = 1;
			goto fallback;
		} else {
			workers->num_workers_starting++;
			spin_unlock_irqrestore(&workers->lock, flags);
			/* we're below the limit, start another worker */
			ret = __btrfs_start_workers(workers);
			spin_lock_irqsave(&workers->lock, flags);
			if (ret)
				goto fallback;
			goto again;
		}
	}
	goto found;

fallback:
	fallback = NULL;
	/*
	 * we have failed to find any workers, just
	 * return the first one we can find.
	 */
	if (!list_empty(&workers->worker_list))
		fallback = workers->worker_list.next;
	if (!list_empty(&workers->idle_list))
		fallback = workers->idle_list.next;
	BUG_ON(!fallback);
	worker = list_entry(fallback,
			    struct btrfs_worker_thread, worker_list);
found:
	/*
	 * this makes sure the worker doesn't exit before it is placed
	 * onto a busy/idle list
	 */
	atomic_inc(&worker->num_pending);
	spin_unlock_irqrestore(&workers->lock, flags);
	return worker;
}

/*
 * btrfs_requeue_work just puts the work item back on the tail of the list
 * it was taken from.  It is intended for use with long running work functions
 * that make some progress and want to give the cpu up for others.
 */
void btrfs_requeue_work(struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker = work->worker;
	unsigned long flags;
	int wake = 0;

	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
		return;

	spin_lock_irqsave(&worker->lock, flags);
	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags))
		list_add_tail(&work->list, &worker->prio_pending);
	else
		list_add_tail(&work->list, &worker->pending);
	atomic_inc(&worker->num_pending);

	/* by definition we're busy, take ourselves off the idle
	 * list
	 */
	if (worker->idle) {
		spin_lock(&worker->workers->lock);
		worker->idle = 0;
		list_move_tail(&worker->worker_list,
			       &worker->workers->worker_list);
		spin_unlock(&worker->workers->lock);
	}
	if (!worker->working) {
		wake = 1;
		worker->working = 1;
	}

	if (wake)
		wake_up_process(worker->task);
	spin_unlock_irqrestore(&worker->lock, flags);
}

void btrfs_set_work_high_prio(struct btrfs_work *work)
{
	set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
}

/*
 * places a struct btrfs_work into the pending queue of one of the kthreads
 */
void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;
	int wake = 0;

	/* don't requeue something already on a list */
	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
		return;

	worker = find_worker(workers);
	if (workers->ordered) {
		/*
		 * you're not allowed to do ordered queues from an
		 * interrupt handler
		 */
		spin_lock(&workers->order_lock);
		if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags)) {
			list_add_tail(&work->order_list,
				      &workers->prio_order_list);
		} else {
			list_add_tail(&work->order_list, &workers->order_list);
		}
		spin_unlock(&workers->order_lock);
	} else {
		INIT_LIST_HEAD(&work->order_list);
	}

	spin_lock_irqsave(&worker->lock, flags);

	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags))
		list_add_tail(&work->list, &worker->prio_pending);
	else
		list_add_tail(&work->list, &worker->pending);
	check_busy_worker(worker);

	/*
	 * avoid calling into wake_up_process if this thread has already
	 * been kicked
	 */
	if (!worker->working)
		wake = 1;
	worker->working = 1;

	if (wake)
		wake_up_process(worker->task);
	spin_unlock_irqrestore(&worker->lock, flags);
}
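
/*
 * Minimal queueing sketch (the container struct and callback below are
 * illustrative, not from this file).  A caller embeds a btrfs_work in
 * its own struct, points ->func at a handler, and hands the work to a
 * pool; the handler owns the container from then on:
 *
 *	static void run_async(struct btrfs_work *work)
 *	{
 *		struct my_async *a = container_of(work, struct my_async,
 *						  work);
 *		... do the deferred work ...
 *		kfree(a);
 *	}
 *
 *	a->work.func = run_async;
 *	a->work.flags = 0;
 *	btrfs_queue_worker(workers, &a->work);
 */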