/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include "async-thread.h"

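/*
 * bits set in struct btrfs_work->flags: QUEUED means the item is on a
 * worker's pending list, DONE means work->func has finished, and
 * ORDER_DONE means the ordered completion hooks have been called
 */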
#define WORK_QUEUED_BIT 0
#define WORK_DONE_BIT 1
#define WORK_ORDER_DONE_BIT 2

/*
 * container for the kthread task pointer and the list of pending work.
 * One of these is allocated per thread.
 */
struct btrfs_worker_thread {
	/* pool we belong to */
	struct btrfs_workers *workers;

	/* list of struct btrfs_work that are waiting for service */
	struct list_head pending;

	/* list of worker threads from struct btrfs_workers */
	struct list_head worker_list;

	/* kthread */
	struct task_struct *task;

	/* number of things on the pending list */
	atomic_t num_pending;

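	/*
	 * number of work items handed to this thread; used by
	 * next_worker() to rotate busy threads in batches of
	 * idle_thresh submissions
	 */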
	unsigned long sequence;

	/* protects the pending list. */
	spinlock_t lock;

	/* set to non-zero when this thread is already awake and kicking */
	int working;

	/* are we currently idle */
	int idle;
};

/*
 * helper function to move a thread onto the idle list after it
 * has finished some requests.
 */
static void check_idle_worker(struct btrfs_worker_thread *worker)
{
	if (!worker->idle && atomic_read(&worker->num_pending) <
	    worker->workers->idle_thresh / 2) {
		unsigned long flags;
		spin_lock_irqsave(&worker->workers->lock, flags);
		worker->idle = 1;
		list_move(&worker->worker_list, &worker->workers->idle_list);
		spin_unlock_irqrestore(&worker->workers->lock, flags);
	}
}

/*
 * helper function to move a thread off the idle list after new
 * pending work is added.
 */
static void check_busy_worker(struct btrfs_worker_thread *worker)
{
	if (worker->idle && atomic_read(&worker->num_pending) >=
	    worker->workers->idle_thresh) {
		unsigned long flags;
		spin_lock_irqsave(&worker->workers->lock, flags);
		worker->idle = 0;
		list_move_tail(&worker->worker_list,
			       &worker->workers->worker_list);
		spin_unlock_irqrestore(&worker->workers->lock, flags);
	}
}

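/*
 * for ordered work queues, the completion hooks must run in the order
 * the work was queued.  Walk the order list from the front and run
 * ordered_func/ordered_free for every item whose main function has
 * finished, stopping at the first one still in flight.
 */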
static noinline int run_ordered_completions(struct btrfs_workers *workers,
					    struct btrfs_work *work)
{
	unsigned long flags;

	if (!workers->ordered)
		return 0;

	set_bit(WORK_DONE_BIT, &work->flags);

	spin_lock_irqsave(&workers->lock, flags);

	while (!list_empty(&workers->order_list)) {
		work = list_entry(workers->order_list.next,
				  struct btrfs_work, order_list);

		if (!test_bit(WORK_DONE_BIT, &work->flags))
			break;

		/* we are going to call the ordered done function, but
		 * we leave the work item on the list as a barrier so
		 * that later work items that are done don't have their
		 * functions called before this one returns
		 */
		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
			break;

		spin_unlock_irqrestore(&workers->lock, flags);

		work->ordered_func(work);

		/* now take the lock again and call the freeing code */
		spin_lock_irqsave(&workers->lock, flags);
		list_del(&work->order_list);
		work->ordered_free(work);
	}

	spin_unlock_irqrestore(&workers->lock, flags);
	return 0;
}

/*
 * main loop for servicing work items
 */
static int worker_loop(void *arg)
{
	struct btrfs_worker_thread *worker = arg;
	struct list_head *cur;
	struct btrfs_work *work;
	do {
		spin_lock_irq(&worker->lock);
		while (!list_empty(&worker->pending)) {
			cur = worker->pending.next;
			work = list_entry(cur, struct btrfs_work, list);
			list_del(&work->list);
			clear_bit(WORK_QUEUED_BIT, &work->flags);

			work->worker = worker;
			spin_unlock_irq(&worker->lock);

			work->func(work);

			atomic_dec(&worker->num_pending);
			/*
			 * unless this is an ordered work queue,
			 * 'work' was probably freed by func above.
			 */
			run_ordered_completions(worker->workers, work);

			spin_lock_irq(&worker->lock);
			check_idle_worker(worker);
		}
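		/*
		 * we're out of work to do.  Clear the working flag so
		 * queuers know to wake us, then sleep (or hit the
		 * refrigerator during a freeze)
		 */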
		worker->working = 0;
		if (freezing(current)) {
			/* drop our lock before sleeping in the refrigerator;
			 * the top of the loop takes it again
			 */
			spin_unlock_irq(&worker->lock);
			refrigerator();
		} else {
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(&worker->lock);
			if (!kthread_should_stop())
				schedule();
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());
	return 0;
}

/*
 * this will wait for all the worker threads to shut down
 */
int btrfs_stop_workers(struct btrfs_workers *workers)
{
	struct list_head *cur;
	struct btrfs_worker_thread *worker;

	list_splice_init(&workers->idle_list, &workers->worker_list);
	while (!list_empty(&workers->worker_list)) {
		cur = workers->worker_list.next;
		worker = list_entry(cur, struct btrfs_worker_thread,
				    worker_list);
		kthread_stop(worker->task);
		list_del(&worker->worker_list);
		kfree(worker);
	}
	return 0;
}

/*
 * simple init on struct btrfs_workers
 */
void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max)
{
	workers->num_workers = 0;
	INIT_LIST_HEAD(&workers->worker_list);
	INIT_LIST_HEAD(&workers->idle_list);
	INIT_LIST_HEAD(&workers->order_list);
	spin_lock_init(&workers->lock);
	workers->max_workers = max;
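	/*
	 * threshold used by check_idle_worker/check_busy_worker to move
	 * threads between the idle and busy lists
	 */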
	workers->idle_thresh = 32;
	workers->name = name;
	workers->ordered = 0;
}

/*
 * starts new worker threads.  This does not enforce the max worker
 * count in case you need to temporarily go past it.
 */
int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
{
	struct btrfs_worker_thread *worker;
	int ret = 0;
	int i;

	for (i = 0; i < num_workers; i++) {
		worker = kzalloc(sizeof(*worker), GFP_NOFS);
		if (!worker) {
			ret = -ENOMEM;
			goto fail;
		}

		INIT_LIST_HEAD(&worker->pending);
		INIT_LIST_HEAD(&worker->worker_list);
		spin_lock_init(&worker->lock);
		atomic_set(&worker->num_pending, 0);
		/* point back at the pool before the new thread can run */
		worker->workers = workers;
		worker->task = kthread_run(worker_loop, worker,
					   "btrfs-%s-%d", workers->name,
					   workers->num_workers + i);
		if (IS_ERR(worker->task)) {
			kfree(worker);
			ret = PTR_ERR(worker->task);
			goto fail;
		}

		spin_lock_irq(&workers->lock);
		list_add_tail(&worker->worker_list, &workers->idle_list);
		worker->idle = 1;
		workers->num_workers++;
		spin_unlock_irq(&workers->lock);
	}
	return 0;
fail:
	btrfs_stop_workers(workers);
	return ret;
}

/*
 * run through the list and find a worker thread that doesn't have a lot
 * to do right now.  This can return NULL if we aren't yet at the thread
 * count limit and all of the threads are busy.
 */
static struct btrfs_worker_thread *next_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	struct list_head *next;
	int enforce_min = workers->num_workers < workers->max_workers;

	/*
	 * if we find an idle thread, don't move it to the end of the
	 * idle list.  This improves the chance that the next submission
	 * will reuse the same thread, and maybe catch it while it is still
	 * working
	 */
	if (!list_empty(&workers->idle_list)) {
		next = workers->idle_list.next;
		worker = list_entry(next, struct btrfs_worker_thread,
				    worker_list);
		return worker;
	}
	if (enforce_min || list_empty(&workers->worker_list))
		return NULL;

	/*
	 * if we pick a busy task, move the task to the end of the list.
	 * hopefully this will keep things somewhat evenly balanced.
	 * Do the move in batches based on the sequence number.  This groups
	 * requests submitted at roughly the same time onto the same worker.
	 */
	next = workers->worker_list.next;
	worker = list_entry(next, struct btrfs_worker_thread, worker_list);
	atomic_inc(&worker->num_pending);
	worker->sequence++;

	if (worker->sequence % workers->idle_thresh == 0)
		list_move_tail(next, &workers->worker_list);
	return worker;
}

/*
 * selects a worker thread to take the next job.  This will either find
 * an idle worker, start a new worker up to the max count, or just return
 * one of the existing busy workers.
 */
static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;

again:
	spin_lock_irqsave(&workers->lock, flags);
	worker = next_worker(workers);
	spin_unlock_irqrestore(&workers->lock, flags);

	if (!worker) {
		spin_lock_irqsave(&workers->lock, flags);
		if (workers->num_workers >= workers->max_workers) {
			struct list_head *fallback = NULL;
			/*
			 * we have failed to find an idle worker, so fall
			 * back to any thread we can find
			 */
			if (!list_empty(&workers->worker_list))
				fallback = workers->worker_list.next;
			if (!list_empty(&workers->idle_list))
				fallback = workers->idle_list.next;
			BUG_ON(!fallback);
			worker = list_entry(fallback,
				  struct btrfs_worker_thread, worker_list);
			spin_unlock_irqrestore(&workers->lock, flags);
		} else {
			spin_unlock_irqrestore(&workers->lock, flags);
			/* we're below the limit, start another worker */
			btrfs_start_workers(workers, 1);
			goto again;
		}
	}
	return worker;
}

/*
 * btrfs_requeue_work just puts the work item back on the tail of the list
 * it was taken from.  It is intended for use with long running work functions
 * that make some progress and want to give the cpu up for others.
 */
int btrfs_requeue_work(struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker = work->worker;
	unsigned long flags;
	int wake = 0;

	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
		goto out;

	spin_lock_irqsave(&worker->lock, flags);
	atomic_inc(&worker->num_pending);
	list_add_tail(&work->list, &worker->pending);

	/* by definition we're busy, take ourselves off the idle
	 * list
	 */
	if (worker->idle) {
		unsigned long pool_flags;

		/* use a second flags word so we don't clobber the irq
		 * state saved by the outer irqsave above
		 */
		spin_lock_irqsave(&worker->workers->lock, pool_flags);
		worker->idle = 0;
		list_move_tail(&worker->worker_list,
			       &worker->workers->worker_list);
		spin_unlock_irqrestore(&worker->workers->lock, pool_flags);
	}
	if (!worker->working) {
		wake = 1;
		worker->working = 1;
	}

	spin_unlock_irqrestore(&worker->lock, flags);
	if (wake)
		wake_up_process(worker->task);
out:
	return 0;
}

/*
 * places a struct btrfs_work into the pending queue of one of the kthreads
 */
int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;
	int wake = 0;

	/* don't requeue something already on a list */
	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
		goto out;

	worker = find_worker(workers);
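	/*
	 * ordered queues also track each item on a pool-wide list so the
	 * completion hooks can run in submission order
	 */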
	if (workers->ordered) {
		spin_lock_irqsave(&workers->lock, flags);
		list_add_tail(&work->order_list, &workers->order_list);
		spin_unlock_irqrestore(&workers->lock, flags);
	} else {
		INIT_LIST_HEAD(&work->order_list);
	}

	spin_lock_irqsave(&worker->lock, flags);

	atomic_inc(&worker->num_pending);
	check_busy_worker(worker);
	list_add_tail(&work->list, &worker->pending);

	/*
	 * avoid calling into wake_up_process if this thread has already
	 * been kicked
	 */
	if (!worker->working)
		wake = 1;
	worker->working = 1;

	spin_unlock_irqrestore(&worker->lock, flags);

	if (wake)
		wake_up_process(worker->task);
out:
	return 0;
}
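
/*
 * A rough sketch of typical usage, for orientation only; my_func and
 * my_workers are hypothetical names and error handling is omitted:
 *
 *	struct btrfs_workers my_workers;
 *
 *	btrfs_init_workers(&my_workers, "demo", 8);
 *	btrfs_start_workers(&my_workers, 1);
 *
 *	work->func = my_func;
 *	btrfs_queue_worker(&my_workers, work);
 *	...
 *	btrfs_stop_workers(&my_workers);
 *
 * ordered pools (workers->ordered = 1) must also set work->ordered_func
 * and work->ordered_free before queueing.
 */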