/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 * Copyright (C) 2014 Fujitsu. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#ifndef __BTRFS_ASYNC_THREAD_
#define __BTRFS_ASYNC_THREAD_

struct btrfs_worker_thread;

/*
 * This is similar to a workqueue, but it is meant to spread the operations
 * across all available CPUs instead of just the CPU that was used to
 * queue the work. There is also some batching introduced to try and
 * cut down on context switches.
 *
 * By default, threads are added on demand up to 2 * the number of CPUs.
 * Changing struct btrfs_workers->max_workers is one way to prevent
 * on-demand creation of kthreads.
 *
 * The basic model of these worker threads is to embed a btrfs_work
 * structure in your own data struct and use container_of() in the
 * work function to get back to your data struct (see the sketch after
 * struct btrfs_work below).
 */
struct btrfs_work {
	/*
	 * func should be set to the function you want called;
	 * your work struct is passed as the only arg.
	 *
	 * ordered_func must be set for work sent to an ordered work queue,
	 * and it is called to complete a given work item in the same
	 * order the items were sent to the queue.
	 */
	void (*func)(struct btrfs_work *work);
	void (*ordered_func)(struct btrfs_work *work);
	void (*ordered_free)(struct btrfs_work *work);

	/*
	 * flags should be set to zero. It is used to make sure the
	 * struct is only inserted once into the list.
	 */
	unsigned long flags;

	/* don't touch these */
	struct btrfs_worker_thread *worker;
	struct list_head list;
	struct list_head order_list;
};
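
/*
 * Illustrative sketch, not part of the original header: the embedding and
 * container_of() pattern described above. The names my_compress_ctx and
 * my_work_fn are hypothetical, and the context is freed inside the work
 * function purely for the sake of the example.
 *
 *	struct my_compress_ctx {
 *		struct btrfs_work work;
 *		u64 start;
 *	};
 *
 *	static void my_work_fn(struct btrfs_work *work)
 *	{
 *		struct my_compress_ctx *ctx;
 *
 *		ctx = container_of(work, struct my_compress_ctx, work);
 *		kfree(ctx);
 *	}
 *
 * Before queueing, the caller sets ctx->work.func = my_work_fn and leaves
 * ctx->work.flags zeroed, per the comments above.
 */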

struct btrfs_workers {
	/* current number of running workers */
	int num_workers;

	/* number of workers currently being started */
	int num_workers_starting;

	/* max number of workers allowed. changed by btrfs_start_workers */
	int max_workers;

	/* once a worker has this many requests or fewer, it is idle */
	int idle_thresh;

	/* force completions in the order they were queued */
	int ordered;

	/* more workers required, but in an interrupt handler */
	int atomic_start_pending;

	/*
	 * are we allowed to sleep while starting workers or are we required
	 * to start them at a later time? If we can't sleep, this indicates
	 * which queue we need to use to schedule thread creation.
	 */
	struct btrfs_workers *atomic_worker_start;

	/* list with all the worker threads. The workers on the idle list
	 * may be actively servicing jobs, but they haven't yet hit the
	 * idle thresh limit above.
	 */
	struct list_head worker_list;
	struct list_head idle_list;

	/*
	 * when operating in ordered mode, this maintains the list
	 * of work items waiting for completion
	 */
	struct list_head order_list;
	struct list_head prio_order_list;

	/* lock for finding the next worker thread to queue on */
	spinlock_t lock;

	/* lock for the ordered lists */
	spinlock_t order_lock;

	/* extra name for this worker, used for current->name */
	char *name;

	/* set while the worker pool is being shut down */
	int stopping;
};

void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work);
int btrfs_start_workers(struct btrfs_workers *workers);
void btrfs_stop_workers(struct btrfs_workers *workers);
void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
			struct btrfs_workers *async_starter);
void btrfs_requeue_work(struct btrfs_work *work);
void btrfs_set_work_high_prio(struct btrfs_work *work);
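
/*
 * Illustrative sketch, not part of the original header: a minimal lifecycle
 * for the btrfs_workers interface declared above. The names dummy_workers,
 * dummy_work, dummy_fn and dummy_setup are hypothetical, and the "4" passed
 * as max is an arbitrary example value.
 *
 *	static struct btrfs_workers dummy_workers;
 *	static struct btrfs_work dummy_work;
 *
 *	static void dummy_fn(struct btrfs_work *work)
 *	{
 *		pr_info("work item %p ran\n", work);
 *	}
 *
 *	static int dummy_setup(void)
 *	{
 *		int ret;
 *
 *		btrfs_init_workers(&dummy_workers, "dummy", 4, NULL);
 *		ret = btrfs_start_workers(&dummy_workers);
 *		if (ret)
 *			return ret;
 *
 *		dummy_work.func = dummy_fn;
 *		btrfs_queue_worker(&dummy_workers, &dummy_work);
 *		return 0;
 *	}
 *
 * The pool is torn down later with btrfs_stop_workers(&dummy_workers).
 */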

struct btrfs_workqueue_struct;
/* Internal use only */
struct __btrfs_workqueue_struct;

struct btrfs_work_struct {
	void (*func)(struct btrfs_work_struct *arg);
	void (*ordered_func)(struct btrfs_work_struct *arg);
	void (*ordered_free)(struct btrfs_work_struct *arg);

	/* Don't touch things below */
	struct work_struct normal_work;
	struct list_head ordered_list;
	struct __btrfs_workqueue_struct *wq;
	unsigned long flags;
};

struct btrfs_workqueue_struct *btrfs_alloc_workqueue(char *name,
						     int flags,
						     int max_active);
void btrfs_init_work(struct btrfs_work_struct *work,
		     void (*func)(struct btrfs_work_struct *),
		     void (*ordered_func)(struct btrfs_work_struct *),
		     void (*ordered_free)(struct btrfs_work_struct *));
void btrfs_queue_work(struct btrfs_workqueue_struct *wq,
		      struct btrfs_work_struct *work);
void btrfs_destroy_workqueue(struct btrfs_workqueue_struct *wq);
void btrfs_workqueue_set_max(struct btrfs_workqueue_struct *wq, int max);
void btrfs_set_work_high_priority(struct btrfs_work_struct *work);
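
/*
 * Illustrative sketch, not part of the original header: a minimal lifecycle
 * for the kernel-workqueue backed interface declared above. The names
 * dummy_wq, dummy_work, dummy_fn, dummy_setup and dummy_teardown are
 * hypothetical, flags of 0 and max_active of 8 are arbitrary example values,
 * and the NULL check assumes btrfs_alloc_workqueue() returns NULL on
 * allocation failure.
 *
 *	static struct btrfs_workqueue_struct *dummy_wq;
 *	static struct btrfs_work_struct dummy_work;
 *
 *	static void dummy_fn(struct btrfs_work_struct *work)
 *	{
 *		pr_info("work item %p ran\n", work);
 *	}
 *
 *	static int dummy_setup(void)
 *	{
 *		dummy_wq = btrfs_alloc_workqueue("dummy", 0, 8);
 *		if (!dummy_wq)
 *			return -ENOMEM;
 *
 *		btrfs_init_work(&dummy_work, dummy_fn, NULL, NULL);
 *		btrfs_queue_work(dummy_wq, &dummy_work);
 *		return 0;
 *	}
 *
 *	static void dummy_teardown(void)
 *	{
 *		btrfs_destroy_workqueue(dummy_wq);
 *	}
 */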
#endif