/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include "async-thread.h"

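/*
 * bits in btrfs_work->flags: WORK_QUEUED_BIT is set while an item sits
 * on a pending list, WORK_DONE_BIT once its func has run, and
 * WORK_ORDER_DONE_BIT just before ordered_func is called so each item
 * is only completed once.
 */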
#define WORK_QUEUED_BIT 0
#define WORK_DONE_BIT 1
#define WORK_ORDER_DONE_BIT 2

/*
 * container for the kthread task pointer and the list of pending work
 * One of these is allocated per thread.
 */
struct btrfs_worker_thread {
	/* pool we belong to */
	struct btrfs_workers *workers;

	/* list of struct btrfs_work that are waiting for service */
	struct list_head pending;

	/* list of worker threads from struct btrfs_workers */
	struct list_head worker_list;

	/* kthread */
	struct task_struct *task;

	/* number of things on the pending list */
	atomic_t num_pending;

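	/*
	 * counts work handed to this thread from the busy list; used by
	 * next_worker() to rotate busy threads in batches of idle_thresh
	 */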
	unsigned long sequence;

	/* protects the pending list. */
	spinlock_t lock;

	/* set to non-zero when this thread is already awake and kicking */
	int working;

	/* are we currently idle */
	int idle;
};

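/*
 * threads move between the pool's idle_list and worker_list as their
 * num_pending count crosses idle_thresh (busy) or drops below half of
 * it (idle again); see check_idle_worker() and check_busy_worker().
 */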
/*
 * helper function to move a thread onto the idle list after it
 * has finished some requests.
 */
static void check_idle_worker(struct btrfs_worker_thread *worker)
{
	if (!worker->idle && atomic_read(&worker->num_pending) <
	    worker->workers->idle_thresh / 2) {
		unsigned long flags;
		spin_lock_irqsave(&worker->workers->lock, flags);
		worker->idle = 1;
		list_move(&worker->worker_list, &worker->workers->idle_list);
		spin_unlock_irqrestore(&worker->workers->lock, flags);
	}
}

/*
 * helper function to move a thread off the idle list after new
 * pending work is added.
 */
static void check_busy_worker(struct btrfs_worker_thread *worker)
{
	if (worker->idle && atomic_read(&worker->num_pending) >=
	    worker->workers->idle_thresh) {
		unsigned long flags;
		spin_lock_irqsave(&worker->workers->lock, flags);
		worker->idle = 0;
		list_move_tail(&worker->worker_list,
			       &worker->workers->worker_list);
		spin_unlock_irqrestore(&worker->workers->lock, flags);
	}
}

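/*
 * run the ordered completion callbacks for every finished work item at
 * the head of the pool's order_list.  The scan stops at the first item
 * whose func has not run yet, so ordered_func and ordered_free are
 * always called in submission order.
 */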
static noinline int run_ordered_completions(struct btrfs_workers *workers,
					    struct btrfs_work *work)
{
	unsigned long flags;

	if (!workers->ordered)
		return 0;

	set_bit(WORK_DONE_BIT, &work->flags);

	spin_lock_irqsave(&workers->lock, flags);

	while (!list_empty(&workers->order_list)) {
		work = list_entry(workers->order_list.next,
				  struct btrfs_work, order_list);

		if (!test_bit(WORK_DONE_BIT, &work->flags))
			break;

		/* we are going to call the ordered done function, but
		 * we leave the work item on the list as a barrier so
		 * that later work items that are done don't have their
		 * functions called before this one returns
		 */
		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
			break;

		spin_unlock_irqrestore(&workers->lock, flags);

		work->ordered_func(work);

		/* now take the lock again and call the freeing code */
		spin_lock_irqsave(&workers->lock, flags);
		list_del(&work->order_list);
		work->ordered_free(work);
	}

	spin_unlock_irqrestore(&workers->lock, flags);
	return 0;
}

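/*
 * when a worker runs out of work it backs off in three stages: a short
 * busy spin, a one-jiffy schedule_timeout() with worker->working left
 * set so submitters skip wake_up_process(), and finally a real
 * interruptible sleep that only a new submission or kthread_stop()
 * ends.
 */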
/*
 * main loop for servicing work items
 */
static int worker_loop(void *arg)
{
	struct btrfs_worker_thread *worker = arg;
	struct list_head *cur;
	struct btrfs_work *work;
	do {
		spin_lock_irq(&worker->lock);
again_locked:
		while (!list_empty(&worker->pending)) {
			cur = worker->pending.next;
			work = list_entry(cur, struct btrfs_work, list);
			list_del(&work->list);
			clear_bit(WORK_QUEUED_BIT, &work->flags);

			work->worker = worker;
			spin_unlock_irq(&worker->lock);

			work->func(work);

			atomic_dec(&worker->num_pending);
			/*
			 * unless this is an ordered work queue,
			 * 'work' was probably freed by func above.
			 */
			run_ordered_completions(worker->workers, work);

			spin_lock_irq(&worker->lock);
			check_idle_worker(worker);
		}
		if (freezing(current)) {
			worker->working = 0;
			spin_unlock_irq(&worker->lock);
			refrigerator();
		} else {
			spin_unlock_irq(&worker->lock);
			if (!kthread_should_stop()) {
				cpu_relax();
				/*
				 * we've dropped the lock, did someone else
				 * jump in?
				 */
				smp_mb();
				if (!list_empty(&worker->pending))
					continue;

				/*
				 * this short schedule allows more work to
				 * come in without the queue functions
				 * needing to go through wake_up_process()
				 *
				 * worker->working is still 1, so nobody
				 * is going to try and wake us up
				 */
				schedule_timeout(1);
				smp_mb();
				if (!list_empty(&worker->pending))
					continue;

				if (kthread_should_stop())
					break;

				/* still no more work? sleep for real */
				spin_lock_irq(&worker->lock);
				set_current_state(TASK_INTERRUPTIBLE);
				if (!list_empty(&worker->pending))
					goto again_locked;

				/*
				 * this makes sure we get a wakeup when someone
				 * adds something new to the queue
				 */
				worker->working = 0;
				spin_unlock_irq(&worker->lock);

				if (!kthread_should_stop())
					schedule();
			}
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());
	return 0;
}

/*
 * this will wait for all the worker threads to shut down
 */
int btrfs_stop_workers(struct btrfs_workers *workers)
{
	struct list_head *cur;
	struct btrfs_worker_thread *worker;

	list_splice_init(&workers->idle_list, &workers->worker_list);
	while (!list_empty(&workers->worker_list)) {
		cur = workers->worker_list.next;
		worker = list_entry(cur, struct btrfs_worker_thread,
				    worker_list);
		kthread_stop(worker->task);
		list_del(&worker->worker_list);
		kfree(worker);
	}
	return 0;
}

/*
 * simple init on struct btrfs_workers
 */
void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max)
{
	workers->num_workers = 0;
	INIT_LIST_HEAD(&workers->worker_list);
	INIT_LIST_HEAD(&workers->idle_list);
	INIT_LIST_HEAD(&workers->order_list);
	spin_lock_init(&workers->lock);
	workers->max_workers = max;
	workers->idle_thresh = 32;
	workers->name = name;
	workers->ordered = 0;
}

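/*
 * A minimal usage sketch of this pool API (my_func, demo_work and the
 * "demo" pool name are made up for illustration):
 *
 *	static struct btrfs_workers demo_workers;
 *	struct btrfs_work *demo_work;
 *
 *	btrfs_init_workers(&demo_workers, "demo", 4);
 *	btrfs_start_workers(&demo_workers, 1);
 *
 *	demo_work->func = my_func;
 *	btrfs_queue_worker(&demo_workers, demo_work);
 *
 *	btrfs_stop_workers(&demo_workers);
 */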
/*
 * starts new worker threads.  This does not enforce the max worker
 * count in case you need to temporarily go past it.
 */
int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
{
	struct btrfs_worker_thread *worker;
	int ret = 0;
	int i;

	for (i = 0; i < num_workers; i++) {
		worker = kzalloc(sizeof(*worker), GFP_NOFS);
		if (!worker) {
			ret = -ENOMEM;
			goto fail;
		}

		INIT_LIST_HEAD(&worker->pending);
		INIT_LIST_HEAD(&worker->worker_list);
		spin_lock_init(&worker->lock);
		atomic_set(&worker->num_pending, 0);
		worker->task = kthread_run(worker_loop, worker,
					   "btrfs-%s-%d", workers->name,
					   workers->num_workers + i);
		worker->workers = workers;
		if (IS_ERR(worker->task)) {
			kfree(worker);
			ret = PTR_ERR(worker->task);
			goto fail;
		}

		spin_lock_irq(&workers->lock);
		list_add_tail(&worker->worker_list, &workers->idle_list);
		worker->idle = 1;
		workers->num_workers++;
		spin_unlock_irq(&workers->lock);
	}
	return 0;
fail:
	btrfs_stop_workers(workers);
	return ret;
}

/*
 * run through the list and find a worker thread that doesn't have a lot
 * to do right now.  This can return null if we aren't yet at the thread
 * count limit and all of the threads are busy.
 */
static struct btrfs_worker_thread *next_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	struct list_head *next;
	int enforce_min = workers->num_workers < workers->max_workers;

	/*
	 * if we find an idle thread, don't move it to the end of the
	 * idle list.  This improves the chance that the next submission
	 * will reuse the same thread, and maybe catch it while it is still
	 * working
	 */
	if (!list_empty(&workers->idle_list)) {
		next = workers->idle_list.next;
		worker = list_entry(next, struct btrfs_worker_thread,
				    worker_list);
		return worker;
	}
	if (enforce_min || list_empty(&workers->worker_list))
		return NULL;

	/*
	 * if we pick a busy task, move the task to the end of the list.
	 * hopefully this will keep things somewhat evenly balanced.
	 * Do the move in batches based on the sequence number.  This groups
	 * requests submitted at roughly the same time onto the same worker.
	 */
	next = workers->worker_list.next;
	worker = list_entry(next, struct btrfs_worker_thread, worker_list);
	atomic_inc(&worker->num_pending);
	worker->sequence++;

	if (worker->sequence % workers->idle_thresh == 0)
		list_move_tail(next, &workers->worker_list);
	return worker;
}

/*
 * selects a worker thread to take the next job.  This will either find
 * an idle worker, start a new worker up to the max count, or just return
 * one of the existing busy workers.
 */
static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;

again:
	spin_lock_irqsave(&workers->lock, flags);
	worker = next_worker(workers);
	spin_unlock_irqrestore(&workers->lock, flags);

	if (!worker) {
		spin_lock_irqsave(&workers->lock, flags);
		if (workers->num_workers >= workers->max_workers) {
			struct list_head *fallback = NULL;
			/*
			 * we have failed to find any idle workers, just
			 * pick one by force from whichever list has them
			 */
			if (!list_empty(&workers->worker_list))
				fallback = workers->worker_list.next;
			if (!list_empty(&workers->idle_list))
				fallback = workers->idle_list.next;
			BUG_ON(!fallback);
			worker = list_entry(fallback,
				  struct btrfs_worker_thread, worker_list);
			spin_unlock_irqrestore(&workers->lock, flags);
		} else {
			spin_unlock_irqrestore(&workers->lock, flags);
			/* we're below the limit, start another worker */
			btrfs_start_workers(workers, 1);
			goto again;
		}
	}
	return worker;
}

/*
 * btrfs_requeue_work just puts the work item back on the tail of the list
 * it was taken from.  It is intended for use with long running work functions
 * that make some progress and want to give the cpu up for others.
 */
int btrfs_requeue_work(struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker = work->worker;
	unsigned long flags;
	int wake = 0;

	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
		goto out;

	spin_lock_irqsave(&worker->lock, flags);
	list_add_tail(&work->list, &worker->pending);
	atomic_inc(&worker->num_pending);

	/* by definition we're busy, take ourselves off the idle
	 * list
	 */
	if (worker->idle) {
		spin_lock_irqsave(&worker->workers->lock, flags);
		worker->idle = 0;
		list_move_tail(&worker->worker_list,
			       &worker->workers->worker_list);
		spin_unlock_irqrestore(&worker->workers->lock, flags);
	}
	if (!worker->working) {
		wake = 1;
		worker->working = 1;
	}

	spin_unlock_irqrestore(&worker->lock, flags);
	if (wake)
		wake_up_process(worker->task);
out:
	return 0;
}

/*
 * places a struct btrfs_work into the pending queue of one of the kthreads
 */
int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;
	int wake = 0;

	/* don't requeue something already on a list */
	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
		goto out;

	worker = find_worker(workers);
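	/*
	 * for ordered pools the item also goes on the pool-wide
	 * order_list so run_ordered_completions() can call ordered_func
	 * and ordered_free in submission order
	 */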
	if (workers->ordered) {
		spin_lock_irqsave(&workers->lock, flags);
		list_add_tail(&work->order_list, &workers->order_list);
		spin_unlock_irqrestore(&workers->lock, flags);
	} else {
		INIT_LIST_HEAD(&work->order_list);
	}

	spin_lock_irqsave(&worker->lock, flags);

	list_add_tail(&work->list, &worker->pending);
	atomic_inc(&worker->num_pending);
	check_busy_worker(worker);

	/*
	 * avoid calling into wake_up_process if this thread has already
	 * been kicked
	 */
	if (!worker->working)
		wake = 1;
	worker->working = 1;

	spin_unlock_irqrestore(&worker->lock, flags);

	if (wake)
		wake_up_process(worker->task);
out:
	return 0;
}
467}