/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include "async-thread.h"

#define WORK_QUEUED_BIT 0
#define WORK_DONE_BIT 1
#define WORK_ORDER_DONE_BIT 2
#define WORK_HIGH_PRIO_BIT 3
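
/*
 * A work item moves through these bits in order: WORK_QUEUED_BIT is set
 * when the item goes onto a worker's pending list, WORK_DONE_BIT when
 * work->func has run, and WORK_ORDER_DONE_BIT once its ordered
 * completion has been called.  WORK_HIGH_PRIO_BIT routes the item to
 * the high priority pending and order lists instead of the normal ones.
 */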

/*
 * container for the kthread task pointer and the lists of pending work.
 * One of these is allocated per thread.
 */
struct btrfs_worker_thread {
	/* pool we belong to */
	struct btrfs_workers *workers;

	/* list of struct btrfs_work that are waiting for service */
	struct list_head pending;
	struct list_head prio_pending;

	/* list of worker threads from struct btrfs_workers */
	struct list_head worker_list;

	/* kthread */
	struct task_struct *task;

	/* number of things on the pending list */
	atomic_t num_pending;

	/* reference counter for this struct */
	atomic_t refs;

	unsigned long sequence;

	/* protects the pending list. */
	spinlock_t lock;

	/* set to non-zero when this thread is already awake and kicking */
	int working;

	/* are we currently idle */
	int idle;
};

/*
 * helper function to move a thread onto the idle list after it
 * has finished some requests.
 */
static void check_idle_worker(struct btrfs_worker_thread *worker)
{
	if (!worker->idle && atomic_read(&worker->num_pending) <
	    worker->workers->idle_thresh / 2) {
		unsigned long flags;
		spin_lock_irqsave(&worker->workers->lock, flags);
		worker->idle = 1;

		/* the list may be empty if the worker is just starting */
		if (!list_empty(&worker->worker_list)) {
			list_move(&worker->worker_list,
				  &worker->workers->idle_list);
		}
		spin_unlock_irqrestore(&worker->workers->lock, flags);
	}
}
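
/*
 * Note the hysteresis between check_idle_worker() and
 * check_busy_worker(): a thread only counts as idle once it drops below
 * idle_thresh / 2 pending items, but is not busy again until it climbs
 * back to idle_thresh.  That keeps a worker from bouncing between the
 * idle and busy lists as single items come and go.
 */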

/*
 * helper function to move a thread off the idle list after new
 * pending work is added.
 */
static void check_busy_worker(struct btrfs_worker_thread *worker)
{
	if (worker->idle && atomic_read(&worker->num_pending) >=
	    worker->workers->idle_thresh) {
		unsigned long flags;
		spin_lock_irqsave(&worker->workers->lock, flags);
		worker->idle = 0;

		if (!list_empty(&worker->worker_list)) {
			list_move_tail(&worker->worker_list,
				       &worker->workers->worker_list);
		}
		spin_unlock_irqrestore(&worker->workers->lock, flags);
	}
}

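/*
 * find_worker() can't call btrfs_start_workers() directly when the pool
 * was set up with atomic_worker_start (queueing may happen in atomic
 * context).  It sets atomic_start_pending instead, and the worker
 * threads call this between jobs to do the kthread creation from a
 * context where it is safe.
 */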
static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
{
	struct btrfs_workers *workers = worker->workers;
	unsigned long flags;

	rmb();
	if (!workers->atomic_start_pending)
		return;

	spin_lock_irqsave(&workers->lock, flags);
	if (!workers->atomic_start_pending)
		goto out;

	workers->atomic_start_pending = 0;
	if (workers->num_workers >= workers->max_workers)
		goto out;

	spin_unlock_irqrestore(&workers->lock, flags);
	btrfs_start_workers(workers, 1);
	return;

out:
	spin_unlock_irqrestore(&workers->lock, flags);
}

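/*
 * For ordered queues, work->func may run on any worker in any order,
 * but work->ordered_func and work->ordered_free are called in the order
 * the items were queued.  A finished item stays on the order list as a
 * barrier until everything queued before it has finished too.
 */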
static noinline int run_ordered_completions(struct btrfs_workers *workers,
					    struct btrfs_work *work)
{
	if (!workers->ordered)
		return 0;

	set_bit(WORK_DONE_BIT, &work->flags);

	spin_lock(&workers->order_lock);

	while (1) {
		if (!list_empty(&workers->prio_order_list)) {
			work = list_entry(workers->prio_order_list.next,
					  struct btrfs_work, order_list);
		} else if (!list_empty(&workers->order_list)) {
			work = list_entry(workers->order_list.next,
					  struct btrfs_work, order_list);
		} else {
			break;
		}
		if (!test_bit(WORK_DONE_BIT, &work->flags))
			break;

		/* we are going to call the ordered done function, but
		 * we leave the work item on the list as a barrier so
		 * that later work items that are done don't have their
		 * functions called before this one returns
		 */
		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
			break;

		spin_unlock(&workers->order_lock);

		work->ordered_func(work);

		/* now take the lock again and call the freeing code */
		spin_lock(&workers->order_lock);
		list_del(&work->order_list);
		work->ordered_free(work);
	}

	spin_unlock(&workers->order_lock);
	return 0;
}

static void put_worker(struct btrfs_worker_thread *worker)
{
	if (atomic_dec_and_test(&worker->refs))
		kfree(worker);
}

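/*
 * called after a worker has gone without new work for a while.  With
 * both the worker's own lock and the pool lock held, recheck that
 * nothing is pending or running, unhook the thread from the pool and
 * drop its reference; the struct itself is freed by the last
 * put_worker() call.
 */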
static int try_worker_shutdown(struct btrfs_worker_thread *worker)
{
	int freeit = 0;

	spin_lock_irq(&worker->lock);
	spin_lock(&worker->workers->lock);
	if (worker->workers->num_workers > 1 &&
	    worker->idle &&
	    !worker->working &&
	    !list_empty(&worker->worker_list) &&
	    list_empty(&worker->prio_pending) &&
	    list_empty(&worker->pending) &&
	    atomic_read(&worker->num_pending) == 0) {
		freeit = 1;
		list_del_init(&worker->worker_list);
		worker->workers->num_workers--;
	}
	spin_unlock(&worker->workers->lock);
	spin_unlock_irq(&worker->lock);

	if (freeit)
		put_worker(worker);
	return freeit;
}

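/*
 * grab the next item for a worker.  To cut down on lock traffic, the
 * worker-local pending lists are spliced in bulk (under worker->lock)
 * onto private list heads owned by worker_loop() and then consumed
 * without any locking.  New high priority work forces a refill so it
 * is picked up ahead of the normal backlog.
 */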
static struct btrfs_work *get_next_work(struct btrfs_worker_thread *worker,
					struct list_head *prio_head,
					struct list_head *head)
{
	struct btrfs_work *work = NULL;
	struct list_head *cur = NULL;

	if (!list_empty(prio_head))
		cur = prio_head->next;

	smp_mb();
	if (!list_empty(&worker->prio_pending))
		goto refill;

	if (!list_empty(head))
		cur = head->next;

	if (cur)
		goto out;

refill:
	spin_lock_irq(&worker->lock);
	list_splice_tail_init(&worker->prio_pending, prio_head);
	list_splice_tail_init(&worker->pending, head);

	if (!list_empty(prio_head))
		cur = prio_head->next;
	else if (!list_empty(head))
		cur = head->next;
	spin_unlock_irq(&worker->lock);

	if (!cur)
		goto out_fail;

out:
	work = list_entry(cur, struct btrfs_work, list);

out_fail:
	return work;
}

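/*
 * a worker that runs out of work does not sleep right away.  It spins
 * briefly, then does a one jiffy schedule_timeout() with
 * worker->working still set, so work queued in that window avoids the
 * cost of wake_up_process().  Only after that does it clear ->working
 * and sleep (for up to two minutes at a time) until the queue
 * functions wake it or try_worker_shutdown() reaps it.
 */
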
/*
 * main loop for servicing work items
 */
static int worker_loop(void *arg)
{
	struct btrfs_worker_thread *worker = arg;
	struct list_head head;
	struct list_head prio_head;
	struct btrfs_work *work;

	INIT_LIST_HEAD(&head);
	INIT_LIST_HEAD(&prio_head);

	do {
again:
		while (1) {
			work = get_next_work(worker, &prio_head, &head);
			if (!work)
				break;

			list_del(&work->list);
			clear_bit(WORK_QUEUED_BIT, &work->flags);

			work->worker = worker;

			work->func(work);

			atomic_dec(&worker->num_pending);
			/*
			 * unless this is an ordered work queue,
			 * 'work' was probably freed by func above.
			 */
			run_ordered_completions(worker->workers, work);

			check_pending_worker_creates(worker);
		}

		spin_lock_irq(&worker->lock);
		check_idle_worker(worker);

		if (freezing(current)) {
			worker->working = 0;
			spin_unlock_irq(&worker->lock);
			refrigerator();
		} else {
			spin_unlock_irq(&worker->lock);
			if (!kthread_should_stop()) {
				cpu_relax();
				/*
				 * we've dropped the lock, did someone else
				 * jump in?
				 */
				smp_mb();
				if (!list_empty(&worker->pending) ||
				    !list_empty(&worker->prio_pending))
					continue;

				/*
				 * this short schedule allows more work to
				 * come in without the queue functions
				 * needing to go through wake_up_process()
				 *
				 * worker->working is still 1, so nobody
				 * is going to try and wake us up
				 */
				schedule_timeout(1);
				smp_mb();
				if (!list_empty(&worker->pending) ||
				    !list_empty(&worker->prio_pending))
					continue;

				if (kthread_should_stop())
					break;

				/* still no more work? sleep for real */
				spin_lock_irq(&worker->lock);
				set_current_state(TASK_INTERRUPTIBLE);
				if (!list_empty(&worker->pending) ||
				    !list_empty(&worker->prio_pending)) {
					spin_unlock_irq(&worker->lock);
					goto again;
				}

				/*
				 * this makes sure we get a wakeup when someone
				 * adds something new to the queue
				 */
				worker->working = 0;
				spin_unlock_irq(&worker->lock);

				if (!kthread_should_stop()) {
					schedule_timeout(HZ * 120);
					if (!worker->working &&
					    try_worker_shutdown(worker)) {
						return 0;
					}
				}
			}
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());
	return 0;
}

/*
 * this will wait for all the worker threads to shut down
 */
int btrfs_stop_workers(struct btrfs_workers *workers)
{
	struct list_head *cur;
	struct btrfs_worker_thread *worker;
	int can_stop;

	spin_lock_irq(&workers->lock);
	list_splice_init(&workers->idle_list, &workers->worker_list);
	while (!list_empty(&workers->worker_list)) {
		cur = workers->worker_list.next;
		worker = list_entry(cur, struct btrfs_worker_thread,
				    worker_list);

		atomic_inc(&worker->refs);
		workers->num_workers -= 1;
		if (!list_empty(&worker->worker_list)) {
			list_del_init(&worker->worker_list);
			put_worker(worker);
			can_stop = 1;
		} else
			can_stop = 0;
		spin_unlock_irq(&workers->lock);
		if (can_stop)
			kthread_stop(worker->task);
		spin_lock_irq(&workers->lock);
		put_worker(worker);
	}
	spin_unlock_irq(&workers->lock);
	return 0;
}

/*
 * simple init on struct btrfs_workers
 */
void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max)
{
	workers->num_workers = 0;
	INIT_LIST_HEAD(&workers->worker_list);
	INIT_LIST_HEAD(&workers->idle_list);
	INIT_LIST_HEAD(&workers->order_list);
	INIT_LIST_HEAD(&workers->prio_order_list);
	spin_lock_init(&workers->lock);
	spin_lock_init(&workers->order_lock);
	workers->max_workers = max;
	workers->idle_thresh = 32;
	workers->name = name;
	workers->ordered = 0;
	workers->atomic_start_pending = 0;
	workers->atomic_worker_start = 0;
}

/*
 * starts new worker threads.  This does not enforce the max worker
 * count in case you need to temporarily go past it.
 */
int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
{
	struct btrfs_worker_thread *worker;
	int ret = 0;
	int i;

	for (i = 0; i < num_workers; i++) {
		worker = kzalloc(sizeof(*worker), GFP_NOFS);
		if (!worker) {
			ret = -ENOMEM;
			goto fail;
		}

		INIT_LIST_HEAD(&worker->pending);
		INIT_LIST_HEAD(&worker->prio_pending);
		INIT_LIST_HEAD(&worker->worker_list);
		spin_lock_init(&worker->lock);

		atomic_set(&worker->num_pending, 0);
		atomic_set(&worker->refs, 1);
		worker->workers = workers;
		worker->task = kthread_run(worker_loop, worker,
					   "btrfs-%s-%d", workers->name,
					   workers->num_workers + i);
		if (IS_ERR(worker->task)) {
			ret = PTR_ERR(worker->task);
			kfree(worker);
			goto fail;
		}
		spin_lock_irq(&workers->lock);
		list_add_tail(&worker->worker_list, &workers->idle_list);
		worker->idle = 1;
		workers->num_workers++;
		spin_unlock_irq(&workers->lock);
	}
	return 0;
fail:
	btrfs_stop_workers(workers);
	return ret;
}

/*
 * run through the list and find a worker thread that doesn't have a lot
 * to do right now.  This can return NULL if we aren't yet at the thread
 * count limit and all of the threads are busy.
 */
static struct btrfs_worker_thread *next_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	struct list_head *next;
	int enforce_min = workers->num_workers < workers->max_workers;

	/*
	 * if we find an idle thread, don't move it to the end of the
	 * idle list.  This improves the chance that the next submission
	 * will reuse the same thread, and maybe catch it while it is still
	 * working
	 */
	if (!list_empty(&workers->idle_list)) {
		next = workers->idle_list.next;
		worker = list_entry(next, struct btrfs_worker_thread,
				    worker_list);
		return worker;
	}
	if (enforce_min || list_empty(&workers->worker_list))
		return NULL;

	/*
	 * if we pick a busy task, move the task to the end of the list.
	 * hopefully this will keep things somewhat evenly balanced.
	 * Do the move in batches based on the sequence number.  This groups
	 * requests submitted at roughly the same time onto the same worker.
	 */
	next = workers->worker_list.next;
	worker = list_entry(next, struct btrfs_worker_thread, worker_list);
	worker->sequence++;

	if (worker->sequence % workers->idle_thresh == 0)
		list_move_tail(next, &workers->worker_list);
	return worker;
}

/*
 * selects a worker thread to take the next job.  This will either find
 * an idle worker, start a new worker up to the max count, or just return
 * one of the existing busy workers.
 */
static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;
	struct list_head *fallback;

again:
	spin_lock_irqsave(&workers->lock, flags);
	worker = next_worker(workers);

	if (!worker) {
		if (workers->num_workers >= workers->max_workers) {
			goto fallback;
		} else if (workers->atomic_worker_start) {
			workers->atomic_start_pending = 1;
			goto fallback;
		} else {
			spin_unlock_irqrestore(&workers->lock, flags);
			/* we're below the limit, start another worker */
			btrfs_start_workers(workers, 1);
			goto again;
		}
	}
	goto found;

fallback:
	fallback = NULL;
	/*
	 * we have failed to find any workers, just
	 * return the first one we can find.
	 */
	if (!list_empty(&workers->worker_list))
		fallback = workers->worker_list.next;
	if (!list_empty(&workers->idle_list))
		fallback = workers->idle_list.next;
	BUG_ON(!fallback);
	worker = list_entry(fallback,
			    struct btrfs_worker_thread, worker_list);
found:
	/*
	 * this makes sure the worker doesn't exit before it is placed
	 * onto a busy/idle list
	 */
	atomic_inc(&worker->num_pending);
	spin_unlock_irqrestore(&workers->lock, flags);
	return worker;
}

/*
 * btrfs_requeue_work just puts the work item back on the tail of the list
 * it was taken from.  It is intended for use with long running work functions
 * that make some progress and want to give the cpu up for others.
 */
int btrfs_requeue_work(struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker = work->worker;
	unsigned long flags;
	int wake = 0;

	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
		goto out;

	spin_lock_irqsave(&worker->lock, flags);
	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags))
		list_add_tail(&work->list, &worker->prio_pending);
	else
		list_add_tail(&work->list, &worker->pending);
	atomic_inc(&worker->num_pending);

	/* by definition we're busy, take ourselves off the idle
	 * list
	 */
	if (worker->idle) {
		spin_lock(&worker->workers->lock);
		worker->idle = 0;
		list_move_tail(&worker->worker_list,
			       &worker->workers->worker_list);
		spin_unlock(&worker->workers->lock);
	}
	if (!worker->working) {
		wake = 1;
		worker->working = 1;
	}

	if (wake)
		wake_up_process(worker->task);
	spin_unlock_irqrestore(&worker->lock, flags);
out:
	return 0;
}

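/*
 * mark a work item as high priority.  The bit is only looked at when
 * the item is queued, so it has to be set before btrfs_queue_worker()
 * (or btrfs_requeue_work()) is called to have any effect.
 */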
void btrfs_set_work_high_prio(struct btrfs_work *work)
{
	set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
}

/*
 * places a struct btrfs_work into the pending queue of one of the kthreads
 */
int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;
	int wake = 0;

	/* don't requeue something already on a list */
	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
		goto out;

	worker = find_worker(workers);
	if (workers->ordered) {
		/*
		 * you're not allowed to do ordered queues from an
		 * interrupt handler
		 */
		spin_lock(&workers->order_lock);
		if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags)) {
			list_add_tail(&work->order_list,
				      &workers->prio_order_list);
		} else {
			list_add_tail(&work->order_list, &workers->order_list);
		}
		spin_unlock(&workers->order_lock);
	} else {
		INIT_LIST_HEAD(&work->order_list);
	}

	spin_lock_irqsave(&worker->lock, flags);

	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags))
		list_add_tail(&work->list, &worker->prio_pending);
	else
		list_add_tail(&work->list, &worker->pending);
	check_busy_worker(worker);

	/*
	 * avoid calling into wake_up_process if this thread has already
	 * been kicked
	 */
	if (!worker->working)
		wake = 1;
	worker->working = 1;

	if (wake)
		wake_up_process(worker->task);
	spin_unlock_irqrestore(&worker->lock, flags);

out:
	return 0;
}