#ifndef __LINUX_BACKING_DEV_DEFS_H
#define __LINUX_BACKING_DEV_DEFS_H

#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/percpu_counter.h>
#include <linux/percpu-refcount.h>
#include <linux/flex_proportions.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kref.h>

struct page;
struct device;
struct dentry;

/*
 * Bits in bdi_writeback.state
 */
enum wb_state {
	WB_registered,		/* bdi_register() was done */
	WB_shutting_down,	/* wb_shutdown() in progress */
	WB_writeback_running,	/* Writeback is in progress */
	WB_has_dirty_io,	/* Dirty inodes on ->b_{dirty|io|more_io} */
};
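
/*
 * These bits live in bdi_writeback.state and are always manipulated with
 * atomic bitops, as they are updated concurrently by flusher work items
 * and other contexts. An illustrative sketch of typical use, loosely
 * modeled on the wakeup helpers in fs/fs-writeback.c (not part of this
 * header):
 *
 *	set_bit(WB_has_dirty_io, &wb->state);
 *	if (test_bit(WB_registered, &wb->state))
 *		mod_delayed_work(bdi_wq, &wb->dwork, 0);
 */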

enum wb_congested_state {
	WB_async_congested,	/* The async (write) queue is getting full */
	WB_sync_congested,	/* The sync queue is getting full */
};

typedef int (congested_fn)(void *, int);
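
/*
 * A congested_fn lets a stacked driver (md/dm) report congestion across
 * its component devices: it receives ->congested_data and a mask of
 * WB_[a]sync_congested bits, and returns the congested subset. A minimal
 * hedged sketch with a hypothetical driver context (example_dev and
 * example_backlogged are illustrative, not real kernel symbols):
 *
 *	static int example_congested(void *data, int bdi_bits)
 *	{
 *		struct example_dev *dev = data;
 *		int ret = 0;
 *
 *		if (example_backlogged(dev))
 *			ret |= bdi_bits & (1 << WB_async_congested);
 *		return ret;
 *	}
 */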

enum wb_stat_item {
	WB_RECLAIMABLE,
	WB_WRITEBACK,
	WB_DIRTIED,
	WB_WRITTEN,
	NR_WB_STAT_ITEMS
};

#define WB_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))
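
/*
 * WB_STAT_BATCH bounds how far the per-cpu deltas of the wb stat counters
 * may drift before being folded into the global count, scaling with the
 * number of possible CPUs. A hedged sketch of an updater using this batch,
 * resembling the stat helpers in <linux/backing-dev.h>:
 *
 *	static inline void example_add_wb_stat(struct bdi_writeback *wb,
 *					       enum wb_stat_item item, s64 amount)
 *	{
 *		__percpu_counter_add(&wb->stat[item], amount, WB_STAT_BATCH);
 *	}
 */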

/*
 * For cgroup writeback, multiple wb's may map to the same blkcg. Those
 * wb's can operate mostly independently but should share the congested
 * state. To facilitate such sharing, the congested state is tracked using
 * the following struct, which is created on demand, indexed by blkcg ID on
 * its bdi, and refcounted.
 */
struct bdi_writeback_congested {
	unsigned long state;		/* WB_[a]sync_congested flags */
	atomic_t refcnt;		/* nr of attached wb's and blkg */

#ifdef CONFIG_CGROUP_WRITEBACK
	struct backing_dev_info *__bdi;	/* the associated bdi, set to NULL
					 * on bdi unregistration. For memcg-wb
					 * internal use only! */
	int blkcg_id;			/* ID of the associated blkcg */
	struct rb_node rb_node;		/* on bdi->cgwb_congested_tree */
#endif
};
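
/*
 * Since the congested state is shared by all wb's attached to the same
 * blkcg on a bdi, its lifetime is managed by plain atomic refcounting. A
 * simplified sketch of the get/put discipline; the real helpers in
 * mm/backing-dev.c additionally take cgwb_lock and unlink the rb_node
 * before freeing:
 *
 *	atomic_inc(&congested->refcnt);
 *	...
 *	if (atomic_dec_and_test(&congested->refcnt))
 *		kfree(congested);
 */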

/*
 * Each wb (bdi_writeback) can perform writeback operations, is measured
 * and throttled, independently. Without cgroup writeback, each bdi
 * (backing_dev_info) is served by its embedded bdi->wb.
 *
 * On the default hierarchy, blkcg implicitly enables memcg. This allows
 * using memcg's page ownership for attributing writeback IOs, and every
 * memcg - blkcg combination can be served by its own wb by assigning a
 * dedicated wb to each memcg, which enables isolation across different
 * cgroups and propagation of IO back pressure down from the IO layer up
 * to the tasks which are generating the dirty pages to be written back.
 *
 * A cgroup wb is indexed on its bdi by the ID of the associated memcg,
 * refcounted with the number of inodes attached to it, and pins the memcg
 * and the corresponding blkcg. As the corresponding blkcg for a memcg may
 * change as blkcg is disabled and enabled higher up in the hierarchy, a wb
 * is tested for blkcg after lookup and removed from index on mismatch so
 * that a new wb for the combination can be created.
 */
struct bdi_writeback {
	struct backing_dev_info *bdi;	/* our parent bdi */

	unsigned long state;		/* Always use atomic bitops on this */
	unsigned long last_old_flush;	/* last old data flush */

	struct list_head b_dirty;	/* dirty inodes */
	struct list_head b_io;		/* parked for writeback */
	struct list_head b_more_io;	/* parked for more writeback */
	struct list_head b_dirty_time;	/* time stamps are dirty */
	spinlock_t list_lock;		/* protects the b_* lists */

	struct percpu_counter stat[NR_WB_STAT_ITEMS];

	struct bdi_writeback_congested *congested;

	unsigned long bw_time_stamp;	/* last time write bw was updated */
	unsigned long dirtied_stamp;
	unsigned long written_stamp;	/* pages written at bw_time_stamp */
	unsigned long write_bandwidth;	/* the estimated write bandwidth */
	unsigned long avg_write_bandwidth; /* further smoothed write bw, > 0 */

	/*
	 * The base dirty throttle rate, recalculated every 200ms.
	 * The dirty rates of all tasks writing to this bdi are curbed
	 * under it. @dirty_ratelimit tracks the estimated
	 * @balanced_dirty_ratelimit in small steps and is much smoother
	 * and more stable than the latter.
	 */
	unsigned long dirty_ratelimit;
	unsigned long balanced_dirty_ratelimit;

	struct fprop_local_percpu completions;
	int dirty_exceeded;

	spinlock_t work_lock;		/* protects work_list & dwork scheduling */
	struct list_head work_list;
	struct delayed_work dwork;	/* work item used for writeback */

	unsigned long dirty_sleep;	/* last wait in balance_dirty_pages() */

	struct list_head bdi_node;	/* anchored at bdi->wb_list */

#ifdef CONFIG_CGROUP_WRITEBACK
	struct percpu_ref refcnt;	/* used only for !root wb's */
	struct fprop_local_percpu memcg_completions;
	struct cgroup_subsys_state *memcg_css; /* the associated memcg */
	struct cgroup_subsys_state *blkcg_css; /* and blkcg */
	struct list_head memcg_node;	/* anchored at memcg->cgwb_list */
	struct list_head blkcg_node;	/* anchored at blkcg->cgwb_list */

	union {
		struct work_struct release_work;
		struct rcu_head rcu;
	};
#endif
};
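
/*
 * A hedged sketch of the lookup-and-validate cycle described above,
 * loosely modeled on wb_get_create() in mm/backing-dev.c (simplified;
 * locking, creation, and error handling elided). If the blkcg association
 * has changed, or the wb is already draining so wb_tryget() fails, the
 * stale wb is dropped and a fresh one is created for the combination:
 *
 *	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
 *	if (wb && (wb->blkcg_css != blkcg_css || !wb_tryget(wb)))
 *		wb = NULL;
 */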

struct backing_dev_info {
	struct list_head bdi_list;
	unsigned long ra_pages;	/* max readahead in PAGE_SIZE units */
	unsigned long io_pages;	/* max allowed IO size */
	congested_fn *congested_fn; /* Function pointer if device is md/dm */
	void *congested_data;	/* Pointer to aux data for congested func */

	const char *name;

	struct kref refcnt;	/* Reference counter for the structure */
	unsigned int capabilities; /* Device capabilities */
	unsigned int min_ratio;
	unsigned int max_ratio, max_prop_frac;

	/*
	 * Sum of avg_write_bw of wbs with dirty inodes. > 0 if there are
	 * any dirty wbs, which is depended upon by bdi_has_dirty_io().
	 */
	atomic_long_t tot_write_bandwidth;

	struct bdi_writeback wb;  /* the root writeback info for this bdi */
	struct list_head wb_list; /* list of all wbs */
#ifdef CONFIG_CGROUP_WRITEBACK
	struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
	struct rb_root cgwb_congested_tree; /* their congested states */
#else
	struct bdi_writeback_congested *wb_congested;
#endif
	wait_queue_head_t wb_waitq;

	struct device *dev;
	struct device *owner;

	struct timer_list laptop_mode_wb_timer;

#ifdef CONFIG_DEBUG_FS
	struct dentry *debug_dir;
	struct dentry *debug_stats;
#endif
};

enum {
	BLK_RW_ASYNC	= 0,
	BLK_RW_SYNC	= 1,
};

void clear_wb_congested(struct bdi_writeback_congested *congested, int sync);
void set_wb_congested(struct bdi_writeback_congested *congested, int sync);

static inline void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
{
	clear_wb_congested(bdi->wb.congested, sync);
}

static inline void set_bdi_congested(struct backing_dev_info *bdi, int sync)
{
	set_wb_congested(bdi->wb.congested, sync);
}
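
/*
 * Drivers flip the root wb's congested bits through these wrappers,
 * passing BLK_RW_ASYNC or BLK_RW_SYNC as @sync. A hedged usage sketch for
 * a driver whose async queue fills up (the queue-depth and watermark
 * fields are hypothetical):
 *
 *	if (dev->queue_depth > dev->high_watermark)
 *		set_bdi_congested(bdi, BLK_RW_ASYNC);
 *	else if (dev->queue_depth < dev->low_watermark)
 *		clear_bdi_congested(bdi, BLK_RW_ASYNC);
 */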

#ifdef CONFIG_CGROUP_WRITEBACK

/**
 * wb_tryget - try to increment a wb's refcount
 * @wb: bdi_writeback to get
 */
static inline bool wb_tryget(struct bdi_writeback *wb)
{
	if (wb != &wb->bdi->wb)
		return percpu_ref_tryget(&wb->refcnt);
	return true;
}

/**
 * wb_get - increment a wb's refcount
 * @wb: bdi_writeback to get
 */
static inline void wb_get(struct bdi_writeback *wb)
{
	if (wb != &wb->bdi->wb)
		percpu_ref_get(&wb->refcnt);
}

/**
 * wb_put - decrement a wb's refcount
 * @wb: bdi_writeback to put
 */
static inline void wb_put(struct bdi_writeback *wb)
{
	if (wb != &wb->bdi->wb)
		percpu_ref_put(&wb->refcnt);
}
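
/*
 * The root wb is embedded in its bdi and never goes away, so the helpers
 * above are no-ops for it; only cgroup wb's are refcounted via percpu_ref.
 * A hedged sketch of the usual pairing around an RCU-protected lookup,
 * where wb_tryget() fails once the wb is draining:
 *
 *	rcu_read_lock();
 *	wb = radix_tree_lookup(&bdi->cgwb_tree, id);
 *	if (wb && !wb_tryget(wb))
 *		wb = NULL;
 *	rcu_read_unlock();
 *	...
 *	if (wb)
 *		wb_put(wb);
 */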

/**
 * wb_dying - is a wb dying?
 * @wb: bdi_writeback of interest
 *
 * Returns whether @wb is unlinked and being drained.
 */
static inline bool wb_dying(struct bdi_writeback *wb)
{
	return percpu_ref_is_dying(&wb->refcnt);
}

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline bool wb_tryget(struct bdi_writeback *wb)
{
	return true;
}

static inline void wb_get(struct bdi_writeback *wb)
{
}

static inline void wb_put(struct bdi_writeback *wb)
{
}

static inline bool wb_dying(struct bdi_writeback *wb)
{
	return false;
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

#endif	/* __LINUX_BACKING_DEV_DEFS_H */