/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/version.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/spinlock.h>

/* on kernels older than 2.6.20 the freezer declarations live in sched.h */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
# include <linux/freezer.h>
#else
# include <linux/sched.h>
#endif

#include "async-thread.h"

/*
 * container for the kthread task pointer and the list of pending work.
 * One of these is allocated per thread.
 */
struct btrfs_worker_thread {
	/* pool we belong to */
	struct btrfs_workers *workers;

	/* list of struct btrfs_work that are waiting for service */
	struct list_head pending;

	/* list of worker threads from struct btrfs_workers */
	struct list_head worker_list;

	/* kthread */
	struct task_struct *task;

	/* number of things on the pending list */
	atomic_t num_pending;

	/* bumped each time next_worker() picks this busy thread; every
	 * fourth pick rotates it to the tail of the busy list */
	unsigned long sequence;

	/* protects the pending list. */
	spinlock_t lock;

	/* set to non-zero when this thread is already awake and kicking */
	int working;

	/* are we currently idle */
	int idle;
};

/*
 * helper function to move a thread onto the idle list after it
 * has finished some requests.
 */
static void check_idle_worker(struct btrfs_worker_thread *worker)
{
	if (!worker->idle && atomic_read(&worker->num_pending) <
	    worker->workers->idle_thresh / 2) {
		unsigned long flags;
		spin_lock_irqsave(&worker->workers->lock, flags);
		worker->idle = 1;
		list_move(&worker->worker_list, &worker->workers->idle_list);
		spin_unlock_irqrestore(&worker->workers->lock, flags);
	}
}

/*
 * helper function to move a thread off the idle list after new
 * pending work is added.
 */
static void check_busy_worker(struct btrfs_worker_thread *worker)
{
	if (worker->idle && atomic_read(&worker->num_pending) >=
	    worker->workers->idle_thresh) {
		unsigned long flags;
		spin_lock_irqsave(&worker->workers->lock, flags);
		worker->idle = 0;
		list_move_tail(&worker->worker_list,
			       &worker->workers->worker_list);
		spin_unlock_irqrestore(&worker->workers->lock, flags);
	}
}

/*
 * main loop for servicing work items
 */
static int worker_loop(void *arg)
{
	struct btrfs_worker_thread *worker = arg;
	struct list_head *cur;
	struct btrfs_work *work;
	do {
		spin_lock_irq(&worker->lock);
		while (!list_empty(&worker->pending)) {
			cur = worker->pending.next;
			work = list_entry(cur, struct btrfs_work, list);
			list_del(&work->list);

			/* bit 0 of work->flags marks the item as queued;
			 * clear it so the work can be requeued while it runs */
			clear_bit(0, &work->flags);

			work->worker = worker;
			spin_unlock_irq(&worker->lock);

			work->func(work);

			atomic_dec(&worker->num_pending);
			spin_lock_irq(&worker->lock);
			check_idle_worker(worker);
		}
		worker->working = 0;
		if (freezing(current)) {
			/* drop the lock before sleeping in the freezer */
			spin_unlock_irq(&worker->lock);
			refrigerator();
		} else {
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(&worker->lock);
			schedule();
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());
	return 0;
}

/*
 * this will wait for all the worker threads to shut down
 */
int btrfs_stop_workers(struct btrfs_workers *workers)
{
	struct list_head *cur;
	struct btrfs_worker_thread *worker;

	list_splice_init(&workers->idle_list, &workers->worker_list);
	while (!list_empty(&workers->worker_list)) {
		cur = workers->worker_list.next;
		worker = list_entry(cur, struct btrfs_worker_thread,
				    worker_list);
		kthread_stop(worker->task);
		list_del(&worker->worker_list);
		kfree(worker);
	}
	return 0;
}

/*
 * simple init on struct btrfs_workers
 */
void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max)
{
	workers->num_workers = 0;
	INIT_LIST_HEAD(&workers->worker_list);
	INIT_LIST_HEAD(&workers->idle_list);
	spin_lock_init(&workers->lock);
	workers->max_workers = max;
	workers->idle_thresh = 32;
	workers->name = name;
}

/*
 * starts new worker threads.  This does not enforce the max worker
 * count in case you need to temporarily go past it.
 */
int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
{
	struct btrfs_worker_thread *worker;
	int ret = 0;
	int i;

	for (i = 0; i < num_workers; i++) {
		worker = kzalloc(sizeof(*worker), GFP_NOFS);
		if (!worker) {
			ret = -ENOMEM;
			goto fail;
		}

		INIT_LIST_HEAD(&worker->pending);
		INIT_LIST_HEAD(&worker->worker_list);
		spin_lock_init(&worker->lock);
		atomic_set(&worker->num_pending, 0);

		/* set the pool pointer before the thread starts running */
		worker->workers = workers;

		worker->task = kthread_run(worker_loop, worker,
					   "btrfs-%s-%d", workers->name,
					   workers->num_workers + i);
		if (IS_ERR(worker->task)) {
			kfree(worker);
			ret = PTR_ERR(worker->task);
			goto fail;
		}

		spin_lock_irq(&workers->lock);
		list_add_tail(&worker->worker_list, &workers->idle_list);
		worker->idle = 1;
		workers->num_workers++;
		spin_unlock_irq(&workers->lock);
	}
	return 0;
fail:
	btrfs_stop_workers(workers);
	return ret;
}

/*
 * run through the list and find a worker thread that doesn't have a lot
 * to do right now.  This can return NULL if we aren't yet at the thread
 * count limit and all of the threads are busy.
 */
static struct btrfs_worker_thread *next_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	struct list_head *next;
	int enforce_min = workers->num_workers < workers->max_workers;

	/*
	 * if we find an idle thread, don't move it to the end of the
	 * idle list.  This improves the chance that the next submission
	 * will reuse the same thread, and maybe catch it while it is still
	 * working
	 */
	if (!list_empty(&workers->idle_list)) {
		next = workers->idle_list.next;
		worker = list_entry(next, struct btrfs_worker_thread,
				    worker_list);
		return worker;
	}
	if (enforce_min || list_empty(&workers->worker_list))
		return NULL;

	/*
	 * if we pick a busy task, move the task to the end of the list.
	 * hopefully this will keep things somewhat evenly balanced.
	 * The move is done in batches of four so that a burst of
	 * submissions tends to land on the same worker.
	 */
	next = workers->worker_list.next;
	worker = list_entry(next, struct btrfs_worker_thread, worker_list);
	atomic_inc(&worker->num_pending);
	worker->sequence++;
	if (worker->sequence % 4 == 0)
		list_move_tail(next, &workers->worker_list);
	return worker;
}

static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;

again:
	spin_lock_irqsave(&workers->lock, flags);
	worker = next_worker(workers);
	spin_unlock_irqrestore(&workers->lock, flags);

	if (!worker) {
		spin_lock_irqsave(&workers->lock, flags);
		if (workers->num_workers >= workers->max_workers) {
			struct list_head *fallback = NULL;
			/*
			 * we have failed to find an idle worker, just
			 * fall back to the first worker we can find
			 */
			if (!list_empty(&workers->worker_list))
				fallback = workers->worker_list.next;
			if (!list_empty(&workers->idle_list))
				fallback = workers->idle_list.next;
			BUG_ON(!fallback);
			worker = list_entry(fallback,
				       struct btrfs_worker_thread, worker_list);
			spin_unlock_irqrestore(&workers->lock, flags);
		} else {
			spin_unlock_irqrestore(&workers->lock, flags);
			/* we're below the limit, start another worker */
			btrfs_start_workers(workers, 1);
			goto again;
		}
	}
	return worker;
}

/*
 * btrfs_requeue_work just puts the work item back on the tail of the list
 * it was taken from.  It is intended for use with long running work functions
 * that make some progress and want to give the cpu up for others.
 */
int btrfs_requeue_work(struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker = work->worker;
	unsigned long flags;

	if (test_and_set_bit(0, &work->flags))
		goto out;

	spin_lock_irqsave(&worker->lock, flags);
	atomic_inc(&worker->num_pending);
	list_add_tail(&work->list, &worker->pending);
	check_busy_worker(worker);
	spin_unlock_irqrestore(&worker->lock, flags);
out:
	return 0;
}

/*
 * places a struct btrfs_work into the pending queue of one of the kthreads
 */
int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;
	int wake = 0;

	/* don't requeue something already on a list */
	if (test_and_set_bit(0, &work->flags))
		goto out;

	worker = find_worker(workers);

	spin_lock_irqsave(&worker->lock, flags);
	atomic_inc(&worker->num_pending);
	check_busy_worker(worker);
	list_add_tail(&work->list, &worker->pending);

	/*
	 * avoid calling into wake_up_process if this thread has already
	 * been kicked
	 */
	if (!worker->working)
		wake = 1;
	worker->working = 1;

	spin_unlock_irqrestore(&worker->lock, flags);

	if (wake)
		wake_up_process(worker->task);
out:
	return 0;
}
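
/*
 * Illustrative usage sketch, not part of the original file: how a caller
 * might drive this pool.  Everything prefixed example_ is hypothetical,
 * and it assumes struct btrfs_work exposes the func member invoked by
 * worker_loop() above.
 */
static void example_work_func(struct btrfs_work *work)
{
	/* runs in worker context; worker_loop() does not touch the item
	 * after func returns, so it can be freed here */
	kfree(work);
}

static int example_use_pool(void)
{
	static struct btrfs_workers pool;
	struct btrfs_work *work;
	int ret;

	/* a pool named "example", capped at four threads, one started now */
	btrfs_init_workers(&pool, "example", 4);
	ret = btrfs_start_workers(&pool, 1);
	if (ret)
		return ret;

	/* kzalloc zeroes work->flags, so the queued bit starts clear */
	work = kzalloc(sizeof(*work), GFP_NOFS);
	if (!work) {
		btrfs_stop_workers(&pool);
		return -ENOMEM;
	}
	work->func = example_work_func;
	btrfs_queue_worker(&pool, work);

	/* once all queued items have run, tear the pool down */
	btrfs_stop_workers(&pool);
	return 0;
}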