#ifndef __LINUX_BACKING_DEV_DEFS_H
#define __LINUX_BACKING_DEV_DEFS_H

#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/percpu_counter.h>
#include <linux/percpu-refcount.h>
#include <linux/flex_proportions.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

struct page;
struct device;
struct dentry;

/*
 * Bits in bdi_writeback.state
 */
enum wb_state {
	WB_registered,		/* bdi_register() was done */
	WB_writeback_running,	/* Writeback is in progress */
};

enum wb_congested_state {
	WB_async_congested,	/* The async (write) queue is getting full */
	WB_sync_congested,	/* The sync queue is getting full */
};

typedef int (congested_fn)(void *, int);

enum wb_stat_item {
	WB_RECLAIMABLE,
	WB_WRITEBACK,
	WB_DIRTIED,
	WB_WRITTEN,
	NR_WB_STAT_ITEMS
};

#define WB_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))

/*
 * For cgroup writeback, multiple wb's may map to the same blkcg.  Those
 * wb's can operate mostly independently but should share the congested
 * state.  To facilitate such sharing, the congested state is tracked using
 * the following struct which is created on demand, indexed by blkcg ID on
 * its bdi, and refcounted.
 */
struct bdi_writeback_congested {
	unsigned long state;		/* WB_[a]sync_congested flags */

#ifdef CONFIG_CGROUP_WRITEBACK
	struct backing_dev_info *bdi;	/* the associated bdi */
	atomic_t refcnt;		/* nr of attached wb's and blkg */
	int blkcg_id;			/* ID of the associated blkcg */
	struct rb_node rb_node;		/* on bdi->cgwb_congested_tree */
#endif
};

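/*
 * Illustrative sketch (an assumption, not the real interface): with
 * CONFIG_CGROUP_WRITEBACK the struct above is created on demand, looked
 * up in bdi->cgwb_congested_tree by blkcg ID, and released once the last
 * wb or blkg drops it.  The lookup itself lives in mm/backing-dev.c; the
 * hypothetical helpers below only show the refcounting side.
 */
#ifdef CONFIG_CGROUP_WRITEBACK
static inline struct bdi_writeback_congested *
example_congested_get(struct bdi_writeback_congested *congested)
{
	atomic_inc(&congested->refcnt);	/* another wb or blkg now points here */
	return congested;
}

static inline bool
example_congested_put(struct bdi_writeback_congested *congested)
{
	/* returns true when the caller dropped the last reference */
	return atomic_dec_and_test(&congested->refcnt);
}
#endif
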
/*
 * Each wb (bdi_writeback) can perform writeback operations and is
 * measured and throttled independently.  Without cgroup writeback, each
 * bdi (backing_dev_info) is served by its embedded bdi->wb.
 *
 * On the default hierarchy, blkcg implicitly enables memcg.  This allows
 * using memcg's page ownership for attributing writeback IOs, and every
 * memcg - blkcg combination can be served by its own wb by assigning a
 * dedicated wb to each memcg, which enables isolation across different
 * cgroups and propagation of IO back pressure down from the IO layer up
 * to the tasks which are generating the dirty pages to be written back.
 *
 * A cgroup wb is indexed on its bdi by the ID of the associated memcg,
 * refcounted with the number of inodes attached to it, and pins the memcg
 * and the corresponding blkcg.  As the corresponding blkcg for a memcg
 * may change as blkcg is disabled and enabled higher up in the hierarchy,
 * a wb is tested for blkcg after lookup and removed from the index on
 * mismatch so that a new wb for the combination can be created.
 */
struct bdi_writeback {
	struct backing_dev_info *bdi;	/* our parent bdi */

	unsigned long state;		/* Always use atomic bitops on this */
	unsigned long last_old_flush;	/* last old data flush */

	struct list_head b_dirty;	/* dirty inodes */
	struct list_head b_io;		/* parked for writeback */
	struct list_head b_more_io;	/* parked for more writeback */
	struct list_head b_dirty_time;	/* time stamps are dirty */
	spinlock_t list_lock;		/* protects the b_* lists */

	struct percpu_counter stat[NR_WB_STAT_ITEMS];

	struct bdi_writeback_congested *congested;

	unsigned long bw_time_stamp;	/* last time write bw is updated */
	unsigned long dirtied_stamp;
	unsigned long written_stamp;	/* pages written at bw_time_stamp */
	unsigned long write_bandwidth;	/* the estimated write bandwidth */
	unsigned long avg_write_bandwidth; /* further smoothed write bw */
	/*
	 * The base dirty throttle rate, recalculated every 200ms.
	 * All of the bdi tasks' dirty rates are curbed under it.
	 * @dirty_ratelimit tracks the estimated @balanced_dirty_ratelimit
	 * in small steps and is much smoother and more stable than the
	 * latter.
	 */
	unsigned long dirty_ratelimit;
	unsigned long balanced_dirty_ratelimit;

	struct fprop_local_percpu completions;
	int dirty_exceeded;

	spinlock_t work_lock;		/* protects work_list & dwork scheduling */
	struct list_head work_list;
	struct delayed_work dwork;	/* work item used for writeback */

#ifdef CONFIG_CGROUP_WRITEBACK
	struct percpu_ref refcnt;	/* used only for !root wb's */
	struct cgroup_subsys_state *memcg_css; /* the associated memcg */
	struct cgroup_subsys_state *blkcg_css; /* and blkcg */
	struct list_head memcg_node;	/* anchored at memcg->cgwb_list */
	struct list_head blkcg_node;	/* anchored at blkcg->cgwb_list */

	union {
		struct work_struct release_work;
		struct rcu_head rcu;
	};
#endif
};
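
/*
 * Illustrative sketch, not part of the original header: stat[] above is
 * an array of percpu counters, one per wb_stat_item, and updates are
 * normally batched with WB_STAT_BATCH through a small helper along these
 * lines (the real helpers live in <linux/backing-dev.h>; the name here
 * is made up).
 */
static inline void example_add_wb_stat(struct bdi_writeback *wb,
				       enum wb_stat_item item, s64 amount)
{
	__percpu_counter_add(&wb->stat[item], amount, WB_STAT_BATCH);
}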

struct backing_dev_info {
	struct list_head bdi_list;
	unsigned long ra_pages;	/* max readahead in PAGE_CACHE_SIZE units */
	unsigned int capabilities; /* Device capabilities */
	congested_fn *congested_fn; /* Function pointer if device is md/dm */
	void *congested_data;	/* Pointer to aux data for congested func */

	char *name;

	unsigned int min_ratio;
	unsigned int max_ratio, max_prop_frac;

	struct bdi_writeback wb;  /* the root writeback info for this bdi */
	struct bdi_writeback_congested wb_congested; /* its congested state */
#ifdef CONFIG_CGROUP_WRITEBACK
	struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
	struct rb_root cgwb_congested_tree; /* their congested states */
	atomic_t usage_cnt; /* counts both cgwbs and cgwb_congested's */
#endif
	struct device *dev;

	struct timer_list laptop_mode_wb_timer;

#ifdef CONFIG_DEBUG_FS
	struct dentry *debug_dir;
	struct dentry *debug_stats;
#endif
};

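/*
 * Sketch of the lifetime rules described in the comment above struct
 * bdi_writeback (an assumption, not the real interface, which lives in
 * <linux/backing-dev.h>): with CONFIG_CGROUP_WRITEBACK a !root wb is
 * pinned via its percpu_ref by every inode attached to it, while the
 * root bdi->wb embedded in the bdi is never released this way.
 */
#ifdef CONFIG_CGROUP_WRITEBACK
static inline void example_wb_get(struct bdi_writeback *wb)
{
	if (wb != &wb->bdi->wb)			/* root wb is not refcounted */
		percpu_ref_get(&wb->refcnt);	/* e.g. an inode got attached */
}

static inline void example_wb_put(struct bdi_writeback *wb)
{
	if (wb != &wb->bdi->wb)
		percpu_ref_put(&wb->refcnt);	/* last put triggers release */
}
#endif
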
enum {
	BLK_RW_ASYNC	= 0,
	BLK_RW_SYNC	= 1,
};

void clear_bdi_congested(struct backing_dev_info *bdi, int sync);
void set_bdi_congested(struct backing_dev_info *bdi, int sync);
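
/*
 * Usage sketch (an assumption, not part of the original header): the
 * BLK_RW_* values are what callers pass as the "sync" argument above to
 * select the sync vs async congestion state, e.g. a driver marking its
 * async write queue full.  "example_mark_async_congested" is made up.
 */
static inline void example_mark_async_congested(struct backing_dev_info *bdi,
						bool congested)
{
	if (congested)
		set_bdi_congested(bdi, BLK_RW_ASYNC);
	else
		clear_bdi_congested(bdi, BLK_RW_ASYNC);
}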

#endif	/* __LINUX_BACKING_DEV_DEFS_H */