/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include <linux/ftrace.h>
#include "async-thread.h"

#define WORK_QUEUED_BIT 0
#define WORK_DONE_BIT 1
#define WORK_ORDER_DONE_BIT 2
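
/*
 * Lifecycle of the flag bits on a struct btrfs_work:
 *
 * WORK_QUEUED_BIT is set (test_and_set) when an item goes onto a
 * pending list and cleared when a worker pulls it off, so queueing the
 * same item twice is a harmless no-op.  On ordered pools,
 * WORK_DONE_BIT is set once work->func() has run, and
 * WORK_ORDER_DONE_BIT is set (test_and_set again) by whichever thread
 * wins the right to call ordered_func(), so it runs exactly once.
 */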

/*
 * container for the kthread task pointer and the list of pending work
 * One of these is allocated per thread.
 */
struct btrfs_worker_thread {
	/* pool we belong to */
	struct btrfs_workers *workers;

	/* list of struct btrfs_work that are waiting for service */
	struct list_head pending;

	/* list of worker threads from struct btrfs_workers */
	struct list_head worker_list;

	/* kthread */
	struct task_struct *task;

	/* number of things on the pending list */
	atomic_t num_pending;

	/* count of submissions, used by next_worker() to batch requests */
	unsigned long sequence;

	/* protects the pending list. */
	spinlock_t lock;

	/* set to non-zero when this thread is already awake and kicking */
	int working;

	/* are we currently idle */
	int idle;
};

/*
 * helper function to move a thread onto the idle list after it
 * has finished some requests.
 */
static void check_idle_worker(struct btrfs_worker_thread *worker)
{
	if (!worker->idle && atomic_read(&worker->num_pending) <
	    worker->workers->idle_thresh / 2) {
		unsigned long flags;
		spin_lock_irqsave(&worker->workers->lock, flags);
		worker->idle = 1;
		list_move(&worker->worker_list, &worker->workers->idle_list);
		spin_unlock_irqrestore(&worker->workers->lock, flags);
	}
}

/*
 * helper function to move a thread off the idle list after new
 * pending work is added.
 */
static void check_busy_worker(struct btrfs_worker_thread *worker)
{
	if (worker->idle && atomic_read(&worker->num_pending) >=
	    worker->workers->idle_thresh) {
		unsigned long flags;
		spin_lock_irqsave(&worker->workers->lock, flags);
		worker->idle = 0;
		list_move_tail(&worker->worker_list,
			       &worker->workers->worker_list);
		spin_unlock_irqrestore(&worker->workers->lock, flags);
	}
}
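
/*
 * Note the hysteresis between the two helpers above: a thread only goes
 * idle once its backlog drops below idle_thresh / 2, but it doesn't
 * count as busy again until the backlog reaches idle_thresh.  The gap
 * keeps workers from bouncing between the two lists on every request.
 */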

static noinline int run_ordered_completions(struct btrfs_workers *workers,
					    struct btrfs_work *work)
{
	unsigned long flags;

	if (!workers->ordered)
		return 0;

	set_bit(WORK_DONE_BIT, &work->flags);

	spin_lock_irqsave(&workers->lock, flags);

	while (!list_empty(&workers->order_list)) {
		work = list_entry(workers->order_list.next,
				  struct btrfs_work, order_list);

		if (!test_bit(WORK_DONE_BIT, &work->flags))
			break;

		/* we are going to call the ordered done function, but
		 * we leave the work item on the list as a barrier so
		 * that later work items that are done don't have their
		 * functions called before this one returns
		 */
		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
			break;

		spin_unlock_irqrestore(&workers->lock, flags);

		work->ordered_func(work);

		/* now take the lock again and call the freeing code */
		spin_lock_irqsave(&workers->lock, flags);
		list_del(&work->order_list);
		work->ordered_free(work);
	}

	spin_unlock_irqrestore(&workers->lock, flags);
	return 0;
}
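
/*
 * Example: if items A, B and C are queued in that order on an ordered
 * pool and C's func() finishes first, C only gets WORK_DONE_BIT set.
 * Its ordered_func() isn't called until A and B have both finished and
 * been popped off the front of order_list, so the completion callbacks
 * always run in submission order even though the func() calls run in
 * parallel.
 */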

/*
 * main loop for servicing work items
 */
static int worker_loop(void *arg)
{
	struct btrfs_worker_thread *worker = arg;
	struct list_head *cur;
	struct btrfs_work *work;
	do {
		spin_lock_irq(&worker->lock);
again_locked:
		while (!list_empty(&worker->pending)) {
			cur = worker->pending.next;
			work = list_entry(cur, struct btrfs_work, list);
			list_del(&work->list);
			clear_bit(WORK_QUEUED_BIT, &work->flags);

			work->worker = worker;
			spin_unlock_irq(&worker->lock);

			work->func(work);

			atomic_dec(&worker->num_pending);
			/*
			 * unless this is an ordered work queue,
			 * 'work' was probably freed by func above.
			 */
			run_ordered_completions(worker->workers, work);

			spin_lock_irq(&worker->lock);
			check_idle_worker(worker);
		}
		if (freezing(current)) {
			worker->working = 0;
			spin_unlock_irq(&worker->lock);
			refrigerator();
		} else {
			spin_unlock_irq(&worker->lock);
			if (!kthread_should_stop()) {
				cpu_relax();
				/*
				 * we've dropped the lock, did someone else
				 * jump in?
				 */
				smp_mb();
				if (!list_empty(&worker->pending))
					continue;

				/*
				 * this short schedule allows more work to
				 * come in without the queue functions
				 * needing to go through wake_up_process()
				 *
				 * worker->working is still 1, so nobody
				 * is going to try and wake us up
				 */
				schedule_timeout(1);
				smp_mb();
				if (!list_empty(&worker->pending))
					continue;

				/* still no more work? sleep for real */
				spin_lock_irq(&worker->lock);
				set_current_state(TASK_INTERRUPTIBLE);
				if (!list_empty(&worker->pending))
					goto again_locked;

				/*
				 * this makes sure we get a wakeup when someone
				 * adds something new to the queue
				 */
				worker->working = 0;
				spin_unlock_irq(&worker->lock);

				schedule();
			}
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());
	return 0;
}
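
/*
 * The worker->working handshake above pairs with btrfs_queue_worker():
 * the queueing side sets working = 1 under worker->lock and only calls
 * wake_up_process() when it observed 0, while the worker clears it back
 * to 0 under the same lock after set_current_state() and a final
 * re-check of the pending list.  A wakeup therefore can't be lost
 * between the emptiness check and schedule().
 */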

/*
 * this will wait for all the worker threads to shut down
 */
int btrfs_stop_workers(struct btrfs_workers *workers)
{
	struct list_head *cur;
	struct btrfs_worker_thread *worker;

	list_splice_init(&workers->idle_list, &workers->worker_list);
	while (!list_empty(&workers->worker_list)) {
		cur = workers->worker_list.next;
		worker = list_entry(cur, struct btrfs_worker_thread,
				    worker_list);
		kthread_stop(worker->task);
		list_del(&worker->worker_list);
		kfree(worker);
	}
	return 0;
}

/*
 * simple init on struct btrfs_workers
 */
void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max)
{
	workers->num_workers = 0;
	INIT_LIST_HEAD(&workers->worker_list);
	INIT_LIST_HEAD(&workers->idle_list);
	INIT_LIST_HEAD(&workers->order_list);
	spin_lock_init(&workers->lock);
	workers->max_workers = max;
	workers->idle_thresh = 32;
	workers->name = name;
	workers->ordered = 0;
}
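
/*
 * Typical pool setup, as a sketch (the fs_info field names below are
 * illustrative, not taken from this file):
 *
 *	btrfs_init_workers(&fs_info->endio_workers, "endio",
 *			   fs_info->thread_pool_size);
 *	fs_info->endio_workers.idle_thresh = 4;
 *	btrfs_start_workers(&fs_info->endio_workers, 1);
 *
 * callers may override idle_thresh and ordered between init and start.
 */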

/*
 * starts new worker threads.  This does not enforce the max worker
 * count in case you need to temporarily go past it.
 */
int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
{
	struct btrfs_worker_thread *worker;
	int ret = 0;
	int i;

	for (i = 0; i < num_workers; i++) {
		worker = kzalloc(sizeof(*worker), GFP_NOFS);
		if (!worker) {
			ret = -ENOMEM;
			goto fail;
		}

		INIT_LIST_HEAD(&worker->pending);
		INIT_LIST_HEAD(&worker->worker_list);
		spin_lock_init(&worker->lock);
		atomic_set(&worker->num_pending, 0);
		/* the thread may run as soon as kthread_run returns, so
		 * the pool pointer must be set before it starts
		 */
		worker->workers = workers;
		worker->task = kthread_run(worker_loop, worker,
					   "btrfs-%s-%d", workers->name,
					   workers->num_workers + i);
		if (IS_ERR(worker->task)) {
			kfree(worker);
			ret = PTR_ERR(worker->task);
			goto fail;
		}

		spin_lock_irq(&workers->lock);
		list_add_tail(&worker->worker_list, &workers->idle_list);
		worker->idle = 1;
		workers->num_workers++;
		spin_unlock_irq(&workers->lock);
	}
	return 0;
fail:
	btrfs_stop_workers(workers);
	return ret;
}

/*
 * run through the list and find a worker thread that doesn't have a lot
 * to do right now.  This can return null if we aren't yet at the thread
 * count limit and all of the threads are busy.
 */
static struct btrfs_worker_thread *next_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	struct list_head *next;
	int enforce_min = workers->num_workers < workers->max_workers;

	/*
	 * if we find an idle thread, don't move it to the end of the
	 * idle list.  This improves the chance that the next submission
	 * will reuse the same thread, and maybe catch it while it is still
	 * working
	 */
	if (!list_empty(&workers->idle_list)) {
		next = workers->idle_list.next;
		worker = list_entry(next, struct btrfs_worker_thread,
				    worker_list);
		return worker;
	}
	if (enforce_min || list_empty(&workers->worker_list))
		return NULL;

	/*
	 * if we pick a busy task, move the task to the end of the list.
	 * hopefully this will keep things somewhat evenly balanced.
	 * Do the move in batches based on the sequence number.  This groups
	 * requests submitted at roughly the same time onto the same worker.
	 */
	next = workers->worker_list.next;
	worker = list_entry(next, struct btrfs_worker_thread, worker_list);
	atomic_inc(&worker->num_pending);
	worker->sequence++;

	if (worker->sequence % workers->idle_thresh == 0)
		list_move_tail(next, &workers->worker_list);
	return worker;
}

/*
 * selects a worker thread to take the next job.  This will either find
 * an idle worker, start a new worker up to the max count, or just return
 * one of the existing busy workers.
 */
static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;

again:
	spin_lock_irqsave(&workers->lock, flags);
	worker = next_worker(workers);
	spin_unlock_irqrestore(&workers->lock, flags);

	if (!worker) {
		spin_lock_irqsave(&workers->lock, flags);
		if (workers->num_workers >= workers->max_workers) {
			struct list_head *fallback = NULL;
			/*
			 * we have failed to find an idle worker and can't
			 * start a new one, so fall back to whichever
			 * thread we can find
			 */
			if (!list_empty(&workers->worker_list))
				fallback = workers->worker_list.next;
			if (!list_empty(&workers->idle_list))
				fallback = workers->idle_list.next;
			BUG_ON(!fallback);
			worker = list_entry(fallback,
				  struct btrfs_worker_thread, worker_list);
			spin_unlock_irqrestore(&workers->lock, flags);
		} else {
			spin_unlock_irqrestore(&workers->lock, flags);
			/* we're below the limit, start another worker */
			btrfs_start_workers(workers, 1);
			goto again;
		}
	}
	return worker;
}

/*
 * btrfs_requeue_work just puts the work item back on the tail of the list
 * it was taken from.  It is intended for use with long running work functions
 * that make some progress and want to give the cpu up for others.
 */
int btrfs_requeue_work(struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker = work->worker;
	unsigned long flags;
	int wake = 0;

	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
		goto out;

	spin_lock_irqsave(&worker->lock, flags);
	list_add_tail(&work->list, &worker->pending);
	atomic_inc(&worker->num_pending);

	/* by definition we're busy, take ourselves off the idle
	 * list
	 */
	if (worker->idle) {
		unsigned long pool_flags;
		/* use a separate flags word for the nested irqsave so we
		 * don't clobber the state saved by the outer one
		 */
		spin_lock_irqsave(&worker->workers->lock, pool_flags);
		worker->idle = 0;
		list_move_tail(&worker->worker_list,
			       &worker->workers->worker_list);
		spin_unlock_irqrestore(&worker->workers->lock, pool_flags);
	}
	if (!worker->working) {
		wake = 1;
		worker->working = 1;
	}

	spin_unlock_irqrestore(&worker->lock, flags);
	if (wake)
		wake_up_process(worker->task);
out:
	return 0;
}
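
/*
 * Sketch of a long running work function using btrfs_requeue_work to
 * yield the cpu (struct my_work and its helpers are hypothetical, not
 * part of this file):
 *
 *	static void my_func(struct btrfs_work *work)
 *	{
 *		struct my_work *w = container_of(work, struct my_work, work);
 *
 *		if (!make_some_progress(w)) {
 *			btrfs_requeue_work(work);	(runs again later)
 *			return;
 *		}
 *		finish_and_free(w);
 *	}
 */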

/*
 * places a struct btrfs_work into the pending queue of one of the kthreads
 */
int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;
	int wake = 0;

	/* don't requeue something already on a list */
	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
		goto out;

	worker = find_worker(workers);
	if (workers->ordered) {
		spin_lock_irqsave(&workers->lock, flags);
		list_add_tail(&work->order_list, &workers->order_list);
		spin_unlock_irqrestore(&workers->lock, flags);
	} else {
		INIT_LIST_HEAD(&work->order_list);
	}

	spin_lock_irqsave(&worker->lock, flags);

	list_add_tail(&work->list, &worker->pending);
	atomic_inc(&worker->num_pending);
	check_busy_worker(worker);

	/*
	 * avoid calling into wake_up_process if this thread has already
	 * been kicked
	 */
	if (!worker->working)
		wake = 1;
	worker->working = 1;

	spin_unlock_irqrestore(&worker->lock, flags);

	if (wake)
		wake_up_process(worker->task);
out:
	return 0;
}
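
/*
 * End-to-end sketch of queueing a work item (identifiers other than the
 * btrfs_work/btrfs_workers API are made up for illustration):
 *
 *	struct my_job {
 *		struct btrfs_work work;
 *		(payload fields)
 *	};
 *
 *	static void my_job_func(struct btrfs_work *work)
 *	{
 *		struct my_job *job = container_of(work, struct my_job, work);
 *		(do the actual work, then free job)
 *	}
 *
 *	job->work.func = my_job_func;
 *	btrfs_queue_worker(&fs_info->workers, &job->work);
 *
 * the embedded btrfs_work should start zeroed, and on ordered pools
 * ordered_func and ordered_free must also be set, since
 * run_ordered_completions() calls both unconditionally.
 */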