/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#ifndef __BTRFS_ASYNC_THREAD_
#define __BTRFS_ASYNC_THREAD_

struct btrfs_worker_thread;

/*
 * This is similar to a workqueue, but it is meant to spread the operations
 * across all available cpus instead of just the CPU that was used to
 * queue the work.  There is also some batching introduced to try and
 * cut down on context switches.
 *
 * By default threads are added on demand up to 2 * the number of cpus.
 * Changing struct btrfs_workers->max_workers is one way to prevent
 * demand creation of kthreads.
 *
 * The basic model of these worker threads is to embed a btrfs_work
 * structure in your own data struct, and use container_of in a
 * work function to get back to your data struct.
 */
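
/*
 * Illustrative sketch of that model (my_async_op, my_work_func and
 * example_workers are placeholder names, not part of this header):
 * embed a btrfs_work in your own structure, recover the structure with
 * container_of() inside the work function, and queue the work on a
 * btrfs_workers pool.
 *
 *	struct my_async_op {
 *		struct inode *inode;
 *		struct btrfs_work work;
 *	};
 *
 *	static void my_work_func(struct btrfs_work *work)
 *	{
 *		struct my_async_op *op;
 *
 *		op = container_of(work, struct my_async_op, work);
 *		... do the actual processing with op->inode ...
 *	}
 *
 *	op->work.func = my_work_func;
 *	op->work.flags = 0;
 *	btrfs_queue_worker(&example_workers, &op->work);
 */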
struct btrfs_work {
	/*
	 * func should be set to the function you want called;
	 * your work struct is passed as the only arg.
	 *
	 * ordered_func must be set for work sent to an ordered work queue,
	 * and it is called to complete work items in the same order they
	 * were sent to the queue.
	 */
	void (*func)(struct btrfs_work *work);
	void (*ordered_func)(struct btrfs_work *work);
	void (*ordered_free)(struct btrfs_work *work);

	/*
	 * flags should be set to zero.  It is used to make sure the
	 * struct is only inserted once into the list.
	 */
	unsigned long flags;

	/* don't touch these */
	struct btrfs_worker_thread *worker;
	struct list_head list;
	struct list_head order_list;
};
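
/*
 * Illustrative sketch for ordered queues (my_do_io, my_finish_in_order,
 * my_free_work and ordered_pool are placeholder names): on a pool with
 * ->ordered set, func may run on any worker as soon as the item is
 * picked up, ordered_func then runs strictly in queueing order, and
 * ordered_free runs last so the item can be released.
 *
 *	work->func = my_do_io;
 *	work->ordered_func = my_finish_in_order;
 *	work->ordered_free = my_free_work;
 *	work->flags = 0;
 *	btrfs_queue_worker(&ordered_pool, work);
 */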

struct btrfs_workers {
	/* current number of running workers */
	int num_workers;

	int num_workers_starting;

	/* max number of workers allowed.  changed by btrfs_start_workers */
	int max_workers;

	/* once a worker has this many requests or fewer, it is idle */
	int idle_thresh;

	/* force completions in the order they were queued */
	int ordered;

	/* more workers required, but in an interrupt handler */
	int atomic_start_pending;

	/*
	 * are we allowed to sleep while starting workers or are we required
	 * to start them at a later time?  If we can't sleep, this indicates
	 * which queue we need to use to schedule thread creation.
	 */
	struct btrfs_workers *atomic_worker_start;

	/*
	 * list with all the worker threads.  The workers on the idle list
	 * may be actively servicing jobs, but they haven't yet hit the
	 * idle thresh limit above.
	 */
	struct list_head worker_list;
	struct list_head idle_list;

	/*
	 * when operating in ordered mode, this maintains the list
	 * of work items waiting for completion
	 */
	struct list_head order_list;
	struct list_head prio_order_list;

	/* lock for finding the next worker thread to queue on */
	spinlock_t lock;

	/* lock for the ordered lists */
	spinlock_t order_lock;

	/* extra name for this worker, used in the worker thread's name */
	char *name;

	int stopping;
};

void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work);
int btrfs_start_workers(struct btrfs_workers *workers);
void btrfs_stop_workers(struct btrfs_workers *workers);
void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
			struct btrfs_workers *async_starter);
void btrfs_requeue_work(struct btrfs_work *work);
void btrfs_set_work_high_prio(struct btrfs_work *work);
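
/*
 * Typical call sequence for this API (sketch only; example_workers and
 * the max/idle_thresh values are placeholders, error handling trimmed):
 *
 *	struct btrfs_workers example_workers;
 *
 *	btrfs_init_workers(&example_workers, "example", 8, NULL);
 *	example_workers.idle_thresh = 4;
 *	if (btrfs_start_workers(&example_workers))
 *		goto fail;
 *
 *	btrfs_queue_worker(&example_workers, &op->work);
 *
 *	btrfs_stop_workers(&example_workers);
 *
 * btrfs_set_work_high_prio(&op->work) may be called before queueing to
 * route an item to the priority list ahead of normal work.
 */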
#endif