/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/version.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/spinlock.h>

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
# include <linux/freezer.h>
#else
# include <linux/sched.h>
#endif

#include "async-thread.h"

/*
 * container for the kthread task pointer and the list of pending work.
 * One of these is allocated per thread.
 */
struct btrfs_worker_thread {
	/* pool we belong to */
	struct btrfs_workers *workers;

	/* list of struct btrfs_work that are waiting for service */
	struct list_head pending;

	/* list of worker threads from struct btrfs_workers */
	struct list_head worker_list;

	/* kthread */
	struct task_struct *task;

	/* number of things on the pending list */
	atomic_t num_pending;

	/*
	 * incremented each time this thread is handed work while busy;
	 * used to rotate busy threads to the tail of the list in batches
	 */
	unsigned long sequence;

	/* protects the pending list. */
	spinlock_t lock;

	/* set to non-zero when this thread is already awake and kicking */
	int working;

	/* are we currently idle */
	int idle;
};
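
/*
 * Each worker owns its own pending list and pending-list lock; the
 * pool-wide workers->lock only protects membership on the idle and
 * busy lists.  A submission therefore contends on one worker's lock
 * rather than on a single global queue lock.
 */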

/*
 * helper function to move a thread onto the idle list after it
 * has finished some requests.
 */
static void check_idle_worker(struct btrfs_worker_thread *worker)
{
	if (!worker->idle && atomic_read(&worker->num_pending) <
	    worker->workers->idle_thresh / 2) {
		unsigned long flags;
		spin_lock_irqsave(&worker->workers->lock, flags);
		worker->idle = 1;
		list_move(&worker->worker_list, &worker->workers->idle_list);
		spin_unlock_irqrestore(&worker->workers->lock, flags);
	}
}

/*
 * helper function to move a thread off the idle list after new
 * pending work is added.
 */
static void check_busy_worker(struct btrfs_worker_thread *worker)
{
	if (worker->idle && atomic_read(&worker->num_pending) >=
	    worker->workers->idle_thresh) {
		unsigned long flags;
		spin_lock_irqsave(&worker->workers->lock, flags);
		worker->idle = 0;
		list_move_tail(&worker->worker_list,
			       &worker->workers->worker_list);
		spin_unlock_irqrestore(&worker->workers->lock, flags);
	}
}
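
/*
 * Note the asymmetry between the two helpers above: a thread goes idle
 * only once its pending count drops below idle_thresh / 2, but is
 * marked busy as soon as the count reaches idle_thresh.  The gap adds
 * hysteresis, so a thread hovering near the threshold doesn't bounce
 * between the idle and busy lists on every submission.
 */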

/*
 * main loop for servicing work items
 */
static int worker_loop(void *arg)
{
	struct btrfs_worker_thread *worker = arg;
	struct list_head *cur;
	struct btrfs_work *work;
	do {
		spin_lock_irq(&worker->lock);
		while (!list_empty(&worker->pending)) {
			cur = worker->pending.next;
			work = list_entry(cur, struct btrfs_work, list);
			list_del(&work->list);
			clear_bit(0, &work->flags);

			work->worker = worker;
			spin_unlock_irq(&worker->lock);

			work->func(work);

			atomic_dec(&worker->num_pending);
			spin_lock_irq(&worker->lock);
			check_idle_worker(worker);
		}
		worker->working = 0;
		if (freezing(current)) {
			/*
			 * drop the lock before going into the freezer;
			 * the top of the loop takes it again
			 */
			spin_unlock_irq(&worker->lock);
			refrigerator();
		} else {
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(&worker->lock);
			schedule();
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());
	return 0;
}
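
/*
 * The sleep path above sets TASK_INTERRUPTIBLE before dropping
 * worker->lock, and btrfs_queue_worker() adds work and calls
 * wake_up_process() only after taking that same lock, so a wakeup
 * cannot be lost between the list check and schedule(): either the
 * queuer sees working == 1 and the loop re-checks the pending list,
 * or wake_up_process() puts this thread back to TASK_RUNNING before
 * schedule() can block.
 */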

/*
 * this will wait for all the worker threads to shut down
 */
int btrfs_stop_workers(struct btrfs_workers *workers)
{
	struct list_head *cur;
	struct btrfs_worker_thread *worker;

	list_splice_init(&workers->idle_list, &workers->worker_list);
	while (!list_empty(&workers->worker_list)) {
		cur = workers->worker_list.next;
		worker = list_entry(cur, struct btrfs_worker_thread,
				    worker_list);
		kthread_stop(worker->task);
		list_del(&worker->worker_list);
		kfree(worker);
	}
	return 0;
}

/*
 * simple init on struct btrfs_workers
 */
void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max)
{
	workers->num_workers = 0;
	INIT_LIST_HEAD(&workers->worker_list);
	INIT_LIST_HEAD(&workers->idle_list);
	spin_lock_init(&workers->lock);
	workers->max_workers = max;
	workers->idle_thresh = 32;
	workers->name = name;
}
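
/*
 * Typical use of this pool, as a minimal sketch.  The function name
 * my_work_func and the pool name "demo" are hypothetical, and callers
 * normally embed struct btrfs_work inside a larger per-request struct
 * rather than allocating it bare:
 *
 *	static void my_work_func(struct btrfs_work *work)
 *	{
 *		... do one unit of work ...
 *	}
 *
 *	struct btrfs_workers pool;
 *	struct btrfs_work *work;
 *
 *	btrfs_init_workers(&pool, "demo", 4);
 *	btrfs_start_workers(&pool, 2);
 *
 *	work = kzalloc(sizeof(*work), GFP_NOFS);
 *	work->func = my_work_func;
 *	btrfs_queue_worker(&pool, work);
 *	...
 *	btrfs_stop_workers(&pool);
 *
 * kzalloc leaves work->flags clear, which the queued-bit test in
 * btrfs_queue_worker() relies on.
 */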

/*
 * starts new worker threads.  This does not enforce the max worker
 * count in case you need to temporarily go past it.
 */
int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
{
	struct btrfs_worker_thread *worker;
	int ret = 0;
	int i;

	for (i = 0; i < num_workers; i++) {
		worker = kzalloc(sizeof(*worker), GFP_NOFS);
		if (!worker) {
			ret = -ENOMEM;
			goto fail;
		}

		INIT_LIST_HEAD(&worker->pending);
		INIT_LIST_HEAD(&worker->worker_list);
		spin_lock_init(&worker->lock);
		atomic_set(&worker->num_pending, 0);
		/* set the pool pointer before the thread can start running */
		worker->workers = workers;
		worker->task = kthread_run(worker_loop, worker,
					   "btrfs-%s-%d", workers->name,
					   workers->num_workers + i);
		if (IS_ERR(worker->task)) {
			kfree(worker);
			ret = PTR_ERR(worker->task);
			goto fail;
		}

		spin_lock_irq(&workers->lock);
		list_add_tail(&worker->worker_list, &workers->idle_list);
		worker->idle = 1;
		workers->num_workers++;
		spin_unlock_irq(&workers->lock);
	}
	return 0;
fail:
	btrfs_stop_workers(workers);
	return ret;
}

/*
 * run through the list and find a worker thread that doesn't have a lot
 * to do right now.  This can return NULL if we aren't yet at the thread
 * count limit and all of the threads are busy.
 */
static struct btrfs_worker_thread *next_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	struct list_head *next;
	int enforce_min = workers->num_workers < workers->max_workers;

	/*
	 * if we find an idle thread, don't move it to the end of the
	 * idle list.  This improves the chance that the next submission
	 * will reuse the same thread, and maybe catch it while it is
	 * still working.
	 */
	if (!list_empty(&workers->idle_list)) {
		next = workers->idle_list.next;
		worker = list_entry(next, struct btrfs_worker_thread,
				    worker_list);
		return worker;
	}
	if (enforce_min || list_empty(&workers->worker_list))
		return NULL;

	/*
	 * if we pick a busy task, move it to the end of the list, but
	 * only once every idle_thresh picks.  hopefully this keeps things
	 * somewhat evenly balanced while still grouping requests that
	 * were submitted close together onto the same worker.
	 */
	next = workers->worker_list.next;
	worker = list_entry(next, struct btrfs_worker_thread, worker_list);
	atomic_inc(&worker->num_pending);
	worker->sequence++;
	if (worker->sequence % workers->idle_thresh == 0)
		list_move_tail(next, &workers->worker_list);
	return worker;
}
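
/*
 * The busy path above bumps num_pending under the pool lock, apparently
 * so racing submitters don't all land on the same thread, but
 * btrfs_queue_worker() increments it again while worker_loop() only
 * decrements once per item.  The counter can therefore drift above the
 * true queue length for threads handed work while busy; treat it as a
 * load heuristic rather than an exact count.
 */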

static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;

again:
	spin_lock_irqsave(&workers->lock, flags);
	worker = next_worker(workers);
	spin_unlock_irqrestore(&workers->lock, flags);

	if (!worker) {
		spin_lock_irqsave(&workers->lock, flags);
		if (workers->num_workers >= workers->max_workers) {
			struct list_head *fallback = NULL;
			/*
			 * we have failed to find any idle workers, just
			 * fall back to the first thread we can find
			 */
			if (!list_empty(&workers->worker_list))
				fallback = workers->worker_list.next;
			if (!list_empty(&workers->idle_list))
				fallback = workers->idle_list.next;
			BUG_ON(!fallback);
			worker = list_entry(fallback,
				  struct btrfs_worker_thread, worker_list);
			spin_unlock_irqrestore(&workers->lock, flags);
		} else {
			spin_unlock_irqrestore(&workers->lock, flags);
			/* we're below the limit, start another worker */
			btrfs_start_workers(workers, 1);
			goto again;
		}
	}
	return worker;
}
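
/*
 * The num_workers test and the btrfs_start_workers() call in
 * find_worker() are not atomic: the pool lock is dropped in between,
 * so two racing submitters may each start a thread and briefly push
 * the pool past max_workers.  That is consistent with
 * btrfs_start_workers(), which deliberately does not enforce the max.
 */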

/*
 * btrfs_requeue_work just puts the work item back on the tail of the
 * list it was taken from.  It is intended for use with long running
 * work functions that make some progress and want to give the cpu up
 * for others.
 */
int btrfs_requeue_work(struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker = work->worker;
	unsigned long flags;

	if (test_and_set_bit(0, &work->flags))
		goto out;

	spin_lock_irqsave(&worker->lock, flags);
	atomic_inc(&worker->num_pending);
	list_add_tail(&work->list, &worker->pending);
	check_busy_worker(worker);
	spin_unlock_irqrestore(&worker->lock, flags);
out:
	return 0;
}
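
/*
 * No wakeup is needed in btrfs_requeue_work(): it is meant to be
 * called from inside work->func, on the same thread that owns the
 * pending list, so worker_loop() will pick the re-added item up when
 * it re-checks the list after work->func returns.  Bit 0 of
 * work->flags is the "already queued" marker tested by both this
 * function and btrfs_queue_worker().
 */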

/*
 * places a struct btrfs_work into the pending queue of one of the
 * kthreads
 */
int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;
	int wake = 0;

	/* don't requeue something already on a list */
	if (test_and_set_bit(0, &work->flags))
		goto out;

	worker = find_worker(workers);

	spin_lock_irqsave(&worker->lock, flags);
	atomic_inc(&worker->num_pending);
	check_busy_worker(worker);
	list_add_tail(&work->list, &worker->pending);

	/*
	 * avoid calling into wake_up_process if this thread has already
	 * been kicked
	 */
	if (!worker->working)
		wake = 1;
	worker->working = 1;

	spin_unlock_irqrestore(&worker->lock, flags);

	if (wake)
		wake_up_process(worker->task);
out:
	return 0;
}