/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#ifndef __BTRFS_ASYNC_THREAD_
#define __BTRFS_ASYNC_THREAD_

struct btrfs_worker_thread;

/*
 * This is similar to a workqueue, but it is meant to spread the operations
 * across all available CPUs instead of just the CPU that was used to
 * queue the work.  There is also some batching introduced to try to
 * cut down on context switches.
 *
 * By default, threads are added on demand up to 2 * the number of CPUs.
 * Lowering struct btrfs_workers->max_workers is one way to limit the
 * on-demand creation of kthreads.
 *
 * The basic model of these worker threads is to embed a btrfs_work
 * structure in your own data struct and to use container_of in the
 * work function to get back to your data struct (see the usage sketch
 * after struct btrfs_work below).
 */
struct btrfs_work {
	/*
	 * func should be set to the function you want called; your
	 * work struct is passed to it as the only argument.
	 *
	 * ordered_func must be set for work sent to an ordered work queue.
	 * It is called to complete work items in the same order they were
	 * sent to the queue.
	 */
	void (*func)(struct btrfs_work *work);
	void (*ordered_func)(struct btrfs_work *work);
	void (*ordered_free)(struct btrfs_work *work);

	/*
	 * flags should be set to zero.  It is used to make sure the
	 * struct is only inserted once into the list.
	 */
	unsigned long flags;

	/* don't touch these */
	struct btrfs_worker_thread *worker;
	struct list_head list;
	struct list_head order_list;
};
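
/*
 * Usage sketch (illustration only, not part of the btrfs sources; the names
 * my_csum_job, my_csum_worker, my_process_bio and the workers argument are
 * made up): embed a btrfs_work in your own struct, point ->func at a
 * handler, and use container_of in the handler to get back to the
 * containing struct.
 *
 *	struct my_csum_job {
 *		struct bio *bio;
 *		struct btrfs_work work;
 *	};
 *
 *	static void my_csum_worker(struct btrfs_work *work)
 *	{
 *		struct my_csum_job *job;
 *
 *		job = container_of(work, struct my_csum_job, work);
 *		my_process_bio(job->bio);
 *		kfree(job);
 *	}
 *
 *	static void my_queue_csum(struct btrfs_workers *workers,
 *				  struct my_csum_job *job)
 *	{
 *		job->work.func = my_csum_worker;
 *		job->work.flags = 0;
 *		btrfs_queue_worker(workers, &job->work);
 *	}
 */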

struct btrfs_workers {
	/* current number of running workers */
	int num_workers;

	/* number of workers currently being started */
	int num_workers_starting;

	/* max number of workers allowed.  Set by btrfs_init_workers */
	int max_workers;

	/* once a worker has this many requests or fewer, it is idle */
	int idle_thresh;

	/* force completions in the order they were queued */
	int ordered;

	/* more workers required, but in an interrupt handler */
	int atomic_start_pending;

	/*
	 * are we allowed to sleep while starting workers or are we required
	 * to start them at a later time?  If we can't sleep, this indicates
	 * which queue we need to use to schedule thread creation.
	 */
	struct btrfs_workers *atomic_worker_start;

	/*
	 * list with all the worker threads.  The workers on the idle list
	 * may be actively servicing jobs, but they haven't yet hit the
	 * idle thresh limit above.
	 */
	struct list_head worker_list;
	struct list_head idle_list;

	/*
	 * when operating in ordered mode, these maintain the lists of work
	 * items waiting for completion.  High priority work goes on
	 * prio_order_list.
	 */
	struct list_head order_list;
	struct list_head prio_order_list;

	/* lock for finding the next worker thread to queue on */
	spinlock_t lock;

	/* lock for the ordered lists */
	spinlock_t order_lock;

	/* extra name for these workers, used when naming the kthreads */
	char *name;

	/* set during shutdown so that no new worker threads are started */
	int stopping;
};
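
/*
 * Pool setup sketch (illustration only; the pool name, thread count and
 * field values below are made up): a pool is initialized with
 * btrfs_init_workers and then tuned, before any workers are started, by
 * assigning the public fields above directly.
 *
 *	struct btrfs_workers fs_workers;
 *
 *	btrfs_init_workers(&fs_workers, "example", 8, NULL);
 *	fs_workers.idle_thresh = 4;
 *	fs_workers.ordered = 1;
 */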

void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work);
int btrfs_start_workers(struct btrfs_workers *workers);
void btrfs_stop_workers(struct btrfs_workers *workers);
void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
			struct btrfs_workers *async_starter);
void btrfs_requeue_work(struct btrfs_work *work);
void btrfs_set_work_high_prio(struct btrfs_work *work);
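
/*
 * Queueing sketch (illustration only; fs_workers, my_csum_job and
 * my_csum_worker are the made-up names from the sketches above): a typical
 * flow starts a first worker, queues work items, and eventually tears the
 * pool down with btrfs_stop_workers.
 *
 *	struct my_csum_job *job;
 *	int ret;
 *
 *	ret = btrfs_start_workers(&fs_workers);
 *	if (ret)
 *		return ret;
 *
 *	job = kzalloc(sizeof(*job), GFP_NOFS);
 *	if (!job)
 *		return -ENOMEM;
 *	job->work.func = my_csum_worker;
 *	btrfs_queue_worker(&fs_workers, &job->work);
 *
 *	...
 *
 *	btrfs_stop_workers(&fs_workers);
 */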
#endif