#ifndef __LINUX_BACKING_DEV_DEFS_H
#define __LINUX_BACKING_DEV_DEFS_H

#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/percpu_counter.h>
#include <linux/percpu-refcount.h>
#include <linux/flex_proportions.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

struct page;
struct device;
struct dentry;

/*
 * Bits in bdi_writeback.state
 */
enum wb_state {
	WB_registered,		/* bdi_register() was done */
	WB_writeback_running,	/* Writeback is in progress */
};
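
/*
 * These bits live in bdi_writeback.state and, per the note on that field
 * below, are only manipulated with atomic bitops.  A minimal sketch,
 * assuming a bdi_writeback pointer obtained elsewhere (illustrative only,
 * not part of this header's API):
 *
 *	set_bit(WB_writeback_running, &wb->state);
 *	if (test_bit(WB_registered, &wb->state))
 *		do_writeback_work(wb);		(hypothetical helper)
 *	clear_bit(WB_writeback_running, &wb->state);
 */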

enum wb_congested_state {
	WB_async_congested,	/* The async (write) queue is getting full */
	WB_sync_congested,	/* The sync queue is getting full */
};

typedef int (congested_fn)(void *, int);
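
/*
 * Stacking drivers (md/dm) supply a congested_fn so congestion queries
 * can be forwarded to their underlying devices.  A hedged sketch of such
 * a callback; struct my_dev and its members are assumptions made for
 * illustration, not part of this header:
 *
 *	static int my_dev_congested(void *data, int bdi_bits)
 *	{
 *		struct my_dev *dev = data;
 *
 *		return dev->lower_bdi->wb.congested->state & bdi_bits;
 *	}
 *
 * The driver would then point bdi->congested_fn at my_dev_congested and
 * bdi->congested_data at dev on its own backing_dev_info.
 */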

enum wb_stat_item {
	WB_RECLAIMABLE,
	WB_WRITEBACK,
	WB_DIRTIED,
	WB_WRITTEN,
	NR_WB_STAT_ITEMS
};

#define WB_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))
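
/*
 * The wb stats below are percpu counters; WB_STAT_BATCH bounds how far a
 * single CPU's delta may drift before it is folded into the shared count.
 * A minimal sketch of an update, assuming the caller already holds a
 * valid wb (illustrative only):
 *
 *	static inline void example_add_wb_stat(struct bdi_writeback *wb,
 *					       enum wb_stat_item item,
 *					       s64 amount)
 *	{
 *		__percpu_counter_add(&wb->stat[item], amount, WB_STAT_BATCH);
 *	}
 */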

/*
 * For cgroup writeback, multiple wb's may map to the same blkcg.  Those
 * wb's can operate mostly independently but should share the congested
 * state.  To facilitate such sharing, the congested state is tracked
 * using the following struct, which is created on demand, indexed by
 * blkcg ID on its bdi, and refcounted.
 */
struct bdi_writeback_congested {
	unsigned long state;		/* WB_[a]sync_congested flags */

#ifdef CONFIG_CGROUP_WRITEBACK
	struct backing_dev_info *bdi;	/* the associated bdi */
	atomic_t refcnt;		/* nr of attached wb's and blkg */
	int blkcg_id;			/* ID of the associated blkcg */
	struct rb_node rb_node;		/* on bdi->cgwb_congested_tree */
#endif
};
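
/*
 * Lookups on bdi->cgwb_congested_tree are keyed by blkcg_id; a hit takes
 * a reference, a miss allocates and inserts a new node.  A hedged sketch
 * of the lookup step only (locking, allocation and insertion omitted;
 * blkcg_id here is the caller's key, not this header's API):
 *
 *	struct rb_node *node = bdi->cgwb_congested_tree.rb_node;
 *	struct bdi_writeback_congested *congested;
 *
 *	while (node) {
 *		congested = rb_entry(node, struct bdi_writeback_congested,
 *				     rb_node);
 *		if (congested->blkcg_id < blkcg_id)
 *			node = node->rb_left;
 *		else if (congested->blkcg_id > blkcg_id)
 *			node = node->rb_right;
 *		else {
 *			atomic_inc(&congested->refcnt);
 *			return congested;
 *		}
 *	}
 */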

/*
 * Each wb (bdi_writeback) can perform writeback operations and is
 * measured and throttled independently.  Without cgroup writeback, each
 * bdi (backing_dev_info) is served by its embedded bdi->wb.
 *
 * On the default hierarchy, blkcg implicitly enables memcg.  This allows
 * using memcg's page ownership for attributing writeback IOs, and every
 * memcg - blkcg combination can be served by its own wb by assigning a
 * dedicated wb to each memcg, which enables isolation across different
 * cgroups and propagation of IO back pressure down from the IO layer up
 * to the tasks which are generating the dirty pages to be written back.
 *
 * A cgroup wb is indexed on its bdi by the ID of the associated memcg,
 * refcounted with the number of inodes attached to it, and pins the
 * memcg and the corresponding blkcg.  As the corresponding blkcg for a
 * memcg may change as blkcg is disabled and enabled higher up in the
 * hierarchy, a wb is tested for blkcg after lookup and removed from the
 * index on mismatch so that a new wb for the combination can be created.
 */
struct bdi_writeback {
	struct backing_dev_info *bdi;	/* our parent bdi */

	unsigned long state;		/* Always use atomic bitops on this */
	unsigned long last_old_flush;	/* last old data flush */

	struct list_head b_dirty;	/* dirty inodes */
	struct list_head b_io;		/* parked for writeback */
	struct list_head b_more_io;	/* parked for more writeback */
	struct list_head b_dirty_time;	/* time stamps are dirty */
	spinlock_t list_lock;		/* protects the b_* lists */

	struct percpu_counter stat[NR_WB_STAT_ITEMS];

	struct bdi_writeback_congested *congested;

	unsigned long bw_time_stamp;	/* last time write bw is updated */
	unsigned long dirtied_stamp;
	unsigned long written_stamp;	/* pages written at bw_time_stamp */
	unsigned long write_bandwidth;	/* the estimated write bandwidth */
	unsigned long avg_write_bandwidth; /* further smoothed write bw */

	/*
	 * The base dirty throttle rate, recalculated every 200ms.
	 * All of the bdi's dirtying tasks have their dirty rates curbed
	 * under it.  @dirty_ratelimit tracks the estimated
	 * @balanced_dirty_ratelimit in small steps and is much smoother
	 * and more stable than the latter.
	 */
	unsigned long dirty_ratelimit;
	unsigned long balanced_dirty_ratelimit;

	struct fprop_local_percpu completions;
	int dirty_exceeded;

	spinlock_t work_lock;		/* protects work_list & dwork scheduling */
	struct list_head work_list;
	struct delayed_work dwork;	/* work item used for writeback */

#ifdef CONFIG_CGROUP_WRITEBACK
	struct percpu_ref refcnt;	/* used only for !root wb's */
	struct cgroup_subsys_state *memcg_css; /* the associated memcg */
	struct cgroup_subsys_state *blkcg_css; /* and blkcg */
	struct list_head memcg_node;	/* anchored at memcg->cgwb_list */
	struct list_head blkcg_node;	/* anchored at blkcg->cgwb_list */

	union {
		struct work_struct release_work;
		struct rcu_head rcu;
	};
#endif
};
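
/*
 * A hedged sketch of the memcg-keyed lookup described above combined
 * with the blkcg mismatch test (only the struct members are real;
 * locking, wb creation and teardown are omitted):
 *
 *	struct bdi_writeback *wb;
 *
 *	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
 *	if (wb && wb->blkcg_css != blkcg_css) {
 *		remove the stale wb from the index so that a fresh wb
 *		for the current memcg - blkcg combination gets created
 *		wb = NULL;
 *	}
 */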

struct backing_dev_info {
	struct list_head bdi_list;
	unsigned long ra_pages;	/* max readahead in PAGE_CACHE_SIZE units */
	unsigned int capabilities; /* Device capabilities */
	congested_fn *congested_fn; /* Function pointer if device is md/dm */
	void *congested_data;	/* Pointer to aux data for congested func */

	char *name;

	unsigned int min_ratio;
	unsigned int max_ratio, max_prop_frac;

	struct bdi_writeback wb;  /* the root writeback info for this bdi */
	struct bdi_writeback_congested wb_congested; /* its congested state */
#ifdef CONFIG_CGROUP_WRITEBACK
	struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
	struct rb_root cgwb_congested_tree; /* their congested states */
	atomic_t usage_cnt; /* counts both cgwbs and cgwb_congested's */
#endif
	struct device *dev;

	struct timer_list laptop_mode_wb_timer;

#ifdef CONFIG_DEBUG_FS
	struct dentry *debug_dir;
	struct dentry *debug_stats;
#endif
};

enum {
	BLK_RW_ASYNC	= 0,
	BLK_RW_SYNC	= 1,
};

void clear_bdi_congested(struct backing_dev_info *bdi, int sync);
void set_bdi_congested(struct backing_dev_info *bdi, int sync);
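
/*
 * These helpers flip the WB_[a]sync_congested bits on the bdi's root wb.
 * A hedged usage sketch from a hypothetical driver (my_queue_busy() and
 * q->bdi are assumptions made for illustration):
 *
 *	if (my_queue_busy(q))
 *		set_bdi_congested(q->bdi, BLK_RW_ASYNC);
 *	else
 *		clear_bdi_congested(q->bdi, BLK_RW_ASYNC);
 *
 * The sync queue is handled the same way with BLK_RW_SYNC.
 */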

#endif	/* __LINUX_BACKING_DEV_DEFS_H */