/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/version.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/spinlock.h>

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
# include <linux/freezer.h>
#else
# include <linux/sched.h>
#endif

#include "async-thread.h"

/*
 * container for the kthread task pointer and the list of pending work.
 * One of these is allocated per thread.
 */
struct btrfs_worker_thread {
	/* pool we belong to */
	struct btrfs_workers *workers;

	/* list of struct btrfs_work that are waiting for service */
	struct list_head pending;

	/* list of worker threads from struct btrfs_workers */
	struct list_head worker_list;

	/* kthread */
	struct task_struct *task;

	/* number of things on the pending list */
	atomic_t num_pending;

	/* protects the pending list */
	spinlock_t lock;

	/* set to non-zero when this thread is already awake and kicking */
	int working;

	/* are we currently idle */
	int idle;
};

/*
 * helper function to move a thread onto the idle list after it
 * has finished some requests.
 */
static void check_idle_worker(struct btrfs_worker_thread *worker)
{
	if (!worker->idle && atomic_read(&worker->num_pending) <
	    worker->workers->idle_thresh / 2) {
		unsigned long flags;
		spin_lock_irqsave(&worker->workers->lock, flags);
		worker->idle = 1;
		list_move(&worker->worker_list, &worker->workers->idle_list);
		spin_unlock_irqrestore(&worker->workers->lock, flags);
	}
}

/*
 * helper function to move a thread off the idle list after new
 * pending work is added.
 */
static void check_busy_worker(struct btrfs_worker_thread *worker)
{
	if (worker->idle && atomic_read(&worker->num_pending) >=
	    worker->workers->idle_thresh) {
		unsigned long flags;
		spin_lock_irqsave(&worker->workers->lock, flags);
		worker->idle = 0;
		list_move_tail(&worker->worker_list,
			       &worker->workers->worker_list);
		spin_unlock_irqrestore(&worker->workers->lock, flags);
	}
}
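
/*
 * A worked example of the two helpers above, derived from the code
 * rather than the original comments: with the default idle_thresh of
 * 64 set in btrfs_init_workers(), check_busy_worker() takes a thread
 * off the idle list once it has 64 or more pending items, but
 * check_idle_worker() only puts it back after it drains below 32
 * (idle_thresh / 2).  The gap between the two limits gives some
 * hysteresis, so a thread hovering near the threshold doesn't bounce
 * between the two lists on every add and remove.
 */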

/*
 * main loop for servicing work items
 */
static int worker_loop(void *arg)
{
	struct btrfs_worker_thread *worker = arg;
	struct list_head *cur;
	struct btrfs_work *work;
	do {
		spin_lock_irq(&worker->lock);
		while (!list_empty(&worker->pending)) {
			cur = worker->pending.next;
			work = list_entry(cur, struct btrfs_work, list);
			list_del(&work->list);
			clear_bit(0, &work->flags);

			work->worker = worker;
			spin_unlock_irq(&worker->lock);

			work->func(work);

			atomic_dec(&worker->num_pending);
			spin_lock_irq(&worker->lock);
			check_idle_worker(worker);
		}
		worker->working = 0;
		if (freezing(current)) {
			/* drop the lock before sleeping in the freezer */
			spin_unlock_irq(&worker->lock);
			refrigerator();
		} else {
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(&worker->lock);
			schedule();
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());
	return 0;
}

/*
 * this will wait for all the worker threads to shut down
 */
int btrfs_stop_workers(struct btrfs_workers *workers)
{
	struct list_head *cur;
	struct btrfs_worker_thread *worker;

	list_splice_init(&workers->idle_list, &workers->worker_list);
	while (!list_empty(&workers->worker_list)) {
		cur = workers->worker_list.next;
		worker = list_entry(cur, struct btrfs_worker_thread,
				    worker_list);
		kthread_stop(worker->task);
		list_del(&worker->worker_list);
		kfree(worker);
	}
	return 0;
}

/*
 * simple init on struct btrfs_workers
 */
void btrfs_init_workers(struct btrfs_workers *workers, int max)
{
	workers->num_workers = 0;
	INIT_LIST_HEAD(&workers->worker_list);
	INIT_LIST_HEAD(&workers->idle_list);
	spin_lock_init(&workers->lock);
	workers->max_workers = max;
	workers->idle_thresh = 64;
}
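
/*
 * Example (a minimal sketch, not part of the original file): the
 * expected lifecycle of a pool.  The sizes below are arbitrary and
 * example_pool_lifecycle() is hypothetical; it is not wired into the
 * btrfs build.
 */
static int example_pool_lifecycle(void)
{
	struct btrfs_workers pool;
	int ret;

	/* cap the pool at 8 threads ... */
	btrfs_init_workers(&pool, 8);

	/* ... but start only 2; find_worker() spawns more on demand */
	ret = btrfs_start_workers(&pool, 2);
	if (ret)
		return ret;

	/* work items would be submitted here via btrfs_queue_worker() */

	/* stops each kthread and frees the per-thread structs */
	btrfs_stop_workers(&pool);
	return 0;
}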

/*
 * starts new worker threads.  This does not enforce the max worker
 * count in case you need to temporarily go past it.
 */
int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
{
	struct btrfs_worker_thread *worker;
	int ret = 0;
	int i;

	for (i = 0; i < num_workers; i++) {
		worker = kzalloc(sizeof(*worker), GFP_NOFS);
		if (!worker) {
			ret = -ENOMEM;
			goto fail;
		}

		INIT_LIST_HEAD(&worker->pending);
		INIT_LIST_HEAD(&worker->worker_list);
		spin_lock_init(&worker->lock);
		atomic_set(&worker->num_pending, 0);
		worker->workers = workers;
		worker->task = kthread_run(worker_loop, worker, "btrfs");
		if (IS_ERR(worker->task)) {
			ret = PTR_ERR(worker->task);
			/* this worker never made it onto a list, free it */
			kfree(worker);
			goto fail;
		}

		spin_lock_irq(&workers->lock);
		list_add_tail(&worker->worker_list, &workers->idle_list);
		worker->idle = 1;
		workers->num_workers++;
		spin_unlock_irq(&workers->lock);
	}
	return 0;
fail:
	btrfs_stop_workers(workers);
	return ret;
}

/*
 * run through the list and find a worker thread that doesn't have a lot
 * to do right now.  This can return NULL if we aren't yet at the thread
 * count limit and all of the threads are busy.
 */
static struct btrfs_worker_thread *next_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	struct list_head *next;
	int enforce_min = workers->num_workers < workers->max_workers;

	/*
	 * if we find an idle thread, don't move it to the end of the
	 * idle list.  This improves the chance that the next submission
	 * will reuse the same thread, and maybe catch it while it is still
	 * working.
	 */
	if (!list_empty(&workers->idle_list)) {
		next = workers->idle_list.next;
		worker = list_entry(next, struct btrfs_worker_thread,
				    worker_list);
		return worker;
	}
	if (enforce_min || list_empty(&workers->worker_list))
		return NULL;

	/*
	 * if we pick a busy task, move the task to the end of the list.
	 * hopefully this will keep things somewhat evenly balanced.
	 */
	next = workers->worker_list.next;
	worker = list_entry(next, struct btrfs_worker_thread, worker_list);
	list_move_tail(next, &workers->worker_list);
	return worker;
}

static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;

again:
	spin_lock_irqsave(&workers->lock, flags);
	worker = next_worker(workers);
	spin_unlock_irqrestore(&workers->lock, flags);

	if (!worker) {
		spin_lock_irqsave(&workers->lock, flags);
		if (workers->num_workers >= workers->max_workers) {
			struct list_head *fallback = NULL;
			/*
			 * we have failed to find an idle worker and we're
			 * already at the thread limit, so fall back to the
			 * first thread we can find
			 */
			if (!list_empty(&workers->worker_list))
				fallback = workers->worker_list.next;
			if (!list_empty(&workers->idle_list))
				fallback = workers->idle_list.next;
			BUG_ON(!fallback);
			worker = list_entry(fallback,
				   struct btrfs_worker_thread, worker_list);
			spin_unlock_irqrestore(&workers->lock, flags);
		} else {
			spin_unlock_irqrestore(&workers->lock, flags);
			/* we're below the limit, start another worker */
			btrfs_start_workers(workers, 1);
			goto again;
		}
	}
	return worker;
}

/*
 * btrfs_requeue_work just puts the work item back on the tail of the list
 * it was taken from.  It is intended for use with long-running work
 * functions that make some progress and want to give the cpu up for
 * others.
 */
int btrfs_requeue_work(struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker = work->worker;
	unsigned long flags;

	if (test_and_set_bit(0, &work->flags))
		goto out;

	spin_lock_irqsave(&worker->lock, flags);
	atomic_inc(&worker->num_pending);
	list_add_tail(&work->list, &worker->pending);
	check_busy_worker(worker);
	spin_unlock_irqrestore(&worker->lock, flags);
out:
	return 0;
}
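
/*
 * Example (a minimal sketch, not from the original file): a
 * long-running work function that handles one bounded chunk per pass
 * and requeues itself so other items on the same worker get a turn.
 * The example_* names are hypothetical.
 */
static int example_chunks_left; /* stand-in for real progress tracking */

static void example_long_running(struct btrfs_work *work)
{
	/* process one bounded chunk of the job here ... */

	/* ... then go back on the tail of the same worker's queue */
	if (example_chunks_left > 0) {
		example_chunks_left--;
		btrfs_requeue_work(work);
	}
}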

/*
 * places a struct btrfs_work into the pending queue of one of the kthreads
 */
int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;
	int wake = 0;

	/* don't requeue something already on a list */
	if (test_and_set_bit(0, &work->flags))
		goto out;

	worker = find_worker(workers);

	spin_lock_irqsave(&worker->lock, flags);
	atomic_inc(&worker->num_pending);
	check_busy_worker(worker);
	list_add_tail(&work->list, &worker->pending);

	/*
	 * avoid calling into wake_up_process if this thread has already
	 * been kicked
	 */
	if (!worker->working)
		wake = 1;
	worker->working = 1;

	spin_unlock_irqrestore(&worker->lock, flags);

	if (wake)
		wake_up_process(worker->task);
out:
	return 0;
}
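
/*
 * Example (a minimal sketch, hypothetical names throughout): embedding
 * a struct btrfs_work in a caller-defined job and submitting it.  This
 * assumes the func member declared in async-thread.h; kzalloc leaves
 * work.flags clear, which the test_and_set_bit() in btrfs_queue_worker()
 * requires before it will accept the item.
 */
struct example_job {
	struct btrfs_work work;
	int payload;
};

static void example_job_fn(struct btrfs_work *work)
{
	struct example_job *job = container_of(work, struct example_job,
					       work);

	/* consume job->payload here, then free the containing job */
	kfree(job);
}

static int example_submit(struct btrfs_workers *pool, int payload)
{
	struct example_job *job = kzalloc(sizeof(*job), GFP_NOFS);

	if (!job)
		return -ENOMEM;
	job->payload = payload;
	job->work.func = example_job_fn;
	return btrfs_queue_worker(pool, &job->work);
}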