/*
 * include/linux/writeback.h
 */
#ifndef WRITEBACK_H
#define WRITEBACK_H

#include <linux/sched.h>
#include <linux/fs.h>

/*
 * The 1/4 region under the global dirty thresh is for smooth dirty throttling:
 *
 *	(thresh - thresh/DIRTY_FULL_SCOPE, thresh)
 *
 * The 1/16 region above the global dirty limit will be put to maximum pauses:
 *
 *	(limit, limit + limit/DIRTY_MAXPAUSE_AREA)
 *
 * In the 1/16 region above the max-pause region, dirty-exceeded bdi's will be
 * put into loops:
 *
 *	(limit + limit/DIRTY_MAXPAUSE_AREA, limit + limit/DIRTY_PASSGOOD_AREA)
 *
 * Further beyond, all dirtier tasks will enter a loop waiting (possibly for a
 * long time) for the dirty pages to drop, unless they have written enough
 * pages.
 *
 * The global dirty threshold is normally equal to the global dirty limit,
 * except when the system suddenly allocates a lot of anonymous memory and
 * knocks down the global dirty threshold quickly, in which case the global
 * dirty limit will follow down slowly to prevent livelocking all dirtier tasks.
 */
#define DIRTY_SCOPE		8
#define DIRTY_FULL_SCOPE	(DIRTY_SCOPE / 2)
#define DIRTY_MAXPAUSE_AREA	16
#define DIRTY_PASSGOOD_AREA	8

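/*
 * Worked example (illustrative numbers only, not taken from kernel code):
 * with a global dirty limit of 1600 pages and thresh == limit, the regions
 * described above work out to:
 *
 *	smooth-throttling region: (1600 - 1600/4, 1600)  = (1200, 1600)
 *	max-pause region:         (1600, 1600 + 1600/16) = (1600, 1700)
 *	pass-good loop region:    (1600 + 1600/16, 1600 + 1600/8)
 *	                                                 = (1700, 1800)
 */
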
/*
 * 4MB minimal write chunk size
 */
#define MIN_WRITEBACK_PAGES	(4096UL >> (PAGE_CACHE_SHIFT - 10))

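/*
 * Illustration (assuming the common 4KB page size, i.e. PAGE_CACHE_SHIFT
 * == 12): 4096UL is the chunk size in KB, and the shift converts KB to
 * pages, so 4096UL >> (12 - 10) == 1024 pages, and 1024 pages * 4KB == 4MB,
 * matching the comment above.
 */
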
struct backing_dev_info;

/*
 * fs/fs-writeback.c
 */
enum writeback_sync_modes {
	WB_SYNC_NONE,	/* Don't wait on anything */
	WB_SYNC_ALL,	/* Wait on every mapping */
};

/*
 * A control structure which tells the writeback code what to do. These are
 * always on the stack, and hence need no locking. They are always initialised
 * in a manner such that unspecified fields are set to zero.
 */
struct writeback_control {
	enum writeback_sync_modes sync_mode;
	long nr_to_write;		/* Write this many pages, and decrement
					   this for each page written */
	long pages_skipped;		/* Pages which were not written */

	/*
	 * For a_ops->writepages(): if range_start or range_end is non-zero
	 * then this is a hint that the filesystem need only write out the
	 * pages inside that byte range. The byte at `range_end' is included
	 * in the writeout request.
	 */
	loff_t range_start;
	loff_t range_end;

	unsigned for_kupdate:1;		/* A kupdate writeback */
	unsigned for_background:1;	/* A background writeback */
	unsigned tagged_writepages:1;	/* tag-and-write to avoid livelock */
	unsigned for_reclaim:1;		/* Invoked from the page allocator */
	unsigned range_cyclic:1;	/* range_start is cyclic */
};

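/*
 * Illustrative sketch (not taken from this header): as the comment above
 * says, callers typically build a writeback_control on the stack, letting
 * designated initializers zero the unspecified fields. For example, to ask
 * for synchronous writeback of a whole mapping (`mapping' being some
 * struct address_space * the caller already holds):
 *
 *	struct writeback_control wbc = {
 *		.sync_mode	= WB_SYNC_ALL,
 *		.nr_to_write	= LONG_MAX,
 *		.range_start	= 0,
 *		.range_end	= LLONG_MAX,
 *	};
 *
 *	do_writepages(mapping, &wbc);
 */
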
/*
 * fs/fs-writeback.c
 */
struct bdi_writeback;
int inode_wait(void *);
void writeback_inodes_sb(struct super_block *);
void writeback_inodes_sb_nr(struct super_block *, unsigned long nr);
int writeback_inodes_sb_if_idle(struct super_block *);
int writeback_inodes_sb_nr_if_idle(struct super_block *, unsigned long nr);
void sync_inodes_sb(struct super_block *);
long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages);
long wb_do_writeback(struct bdi_writeback *wb, int force_wait);
void wakeup_flusher_threads(long nr_pages);

/*
 * The inline helpers below need struct inode from <linux/fs.h>, which is
 * included above.
 */
/* Wait for the I_NEW bit to clear, i.e. for inode initialisation to finish. */
static inline void wait_on_inode(struct inode *inode)
{
	might_sleep();
	wait_on_bit(&inode->i_state, __I_NEW, inode_wait, TASK_UNINTERRUPTIBLE);
}

/* Wait for any writeback in progress (I_SYNC) on the inode to complete. */
static inline void inode_sync_wait(struct inode *inode)
{
	might_sleep();
	wait_on_bit(&inode->i_state, __I_SYNC, inode_wait,
		    TASK_UNINTERRUPTIBLE);
}


/*
 * mm/page-writeback.c
 */
#ifdef CONFIG_BLOCK
void laptop_io_completion(struct backing_dev_info *info);
void laptop_sync_completion(void);
void laptop_mode_sync(struct work_struct *work);
void laptop_mode_timer_fn(unsigned long data);
#else
static inline void laptop_sync_completion(void) { }
#endif
void throttle_vm_writeout(gfp_t gfp_mask);

extern unsigned long global_dirty_limit;

/* These are exported to sysctl. */
extern int dirty_background_ratio;
extern unsigned long dirty_background_bytes;
extern int vm_dirty_ratio;
extern unsigned long vm_dirty_bytes;
extern unsigned int dirty_writeback_interval;
extern unsigned int dirty_expire_interval;
extern int vm_highmem_is_dirtyable;
extern int block_dump;
extern int laptop_mode;

extern unsigned long determine_dirtyable_memory(void);

struct ctl_table;
extern int dirty_background_ratio_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp, loff_t *ppos);
extern int dirty_background_bytes_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp, loff_t *ppos);
extern int dirty_ratio_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp, loff_t *ppos);
extern int dirty_bytes_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp, loff_t *ppos);

int dirty_writeback_centisecs_handler(struct ctl_table *, int,
				      void __user *, size_t *, loff_t *);

void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty);
unsigned long bdi_dirty_limit(struct backing_dev_info *bdi,
			      unsigned long dirty);

void __bdi_update_bandwidth(struct backing_dev_info *bdi,
			    unsigned long thresh,
			    unsigned long dirty,
			    unsigned long bdi_thresh,
			    unsigned long bdi_dirty,
			    unsigned long start_time);

void page_writeback_init(void);
void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
					unsigned long nr_pages_dirtied);

static inline void
balance_dirty_pages_ratelimited(struct address_space *mapping)
{
	balance_dirty_pages_ratelimited_nr(mapping, 1);
}

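/*
 * Illustrative usage note (not from this header): the generic write path
 * and filesystems typically call this once per newly dirtied page so that
 * the dirtying task is throttled as the dirty thresholds above are
 * approached, e.g.:
 *
 *	balance_dirty_pages_ratelimited(inode->i_mapping);
 */
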
typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
			   void *data);

int generic_writepages(struct address_space *mapping,
		       struct writeback_control *wbc);
void tag_pages_for_writeback(struct address_space *mapping,
			     pgoff_t start, pgoff_t end);
int write_cache_pages(struct address_space *mapping,
		      struct writeback_control *wbc, writepage_t writepage,
		      void *data);
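
/*
 * Illustrative sketch (hypothetical filesystem "foofs", not part of this
 * header): a ->writepages implementation can be layered on
 * write_cache_pages() by supplying a writepage_t callback plus a private
 * cookie that is handed back through @data:
 *
 *	static int foofs_writepage_cb(struct page *page,
 *				      struct writeback_control *wbc,
 *				      void *data)
 *	{
 *		struct foofs_write_ctx *ctx = data;
 *
 *		// write @page out, honouring wbc->sync_mode
 *		return 0;
 *	}
 *
 *	static int foofs_writepages(struct address_space *mapping,
 *				    struct writeback_control *wbc)
 *	{
 *		struct foofs_write_ctx ctx = { };
 *
 *		return write_cache_pages(mapping, wbc, foofs_writepage_cb,
 *					 &ctx);
 *	}
 */
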
int do_writepages(struct address_space *mapping, struct writeback_control *wbc);
void set_page_dirty_balance(struct page *page, int page_mkwrite);
void writeback_set_ratelimit(void);

/* pdflush.c */
extern int nr_pdflush_threads;	/* Global so it can be exported to sysctl
				   read-only. */


#endif		/* WRITEBACK_H */