/*
 * mm/page-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Contains functions related to writing back dirty pages at the
 * address_space level.
 *
 * 10Apr2002	akpm@zip.com.au
 *		Initial version
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/init.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/blkdev.h>
#include <linux/mpage.h>
#include <linux/rmap.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/buffer_head.h>
#include <linux/pagevec.h>

/*
 * The maximum number of pages to writeout in a single bdflush/kupdate
 * operation.  We do this so we don't hold I_SYNC against an inode for
 * enormous amounts of time, which would block a userspace task which has
 * been forced to throttle against that inode.  Also, the code reevaluates
 * the dirty limits each time it has written this many pages.
 */
#define MAX_WRITEBACK_PAGES	1024

/*
 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
 * will look to see if it needs to force writeback or throttling.
 */
static long ratelimit_pages = 32;

/*
 * When balance_dirty_pages decides that the caller needs to perform some
 * non-background writeback, this is how many pages it will attempt to write.
 * It should be somewhat larger than ratelimit_pages to ensure that reasonably
 * large amounts of I/O are submitted.
 */
static inline long sync_writeback_pages(void)
{
	return ratelimit_pages + ratelimit_pages / 2;
}

/* The following parameters are exported via /proc/sys/vm */

/*
 * Start background writeback (via pdflush) at this percentage
 */
int dirty_background_ratio = 5;

/*
 * free highmem will not be subtracted from the total free memory
 * for calculating free ratios if vm_highmem_is_dirtyable is true
 */
int vm_highmem_is_dirtyable;

/*
 * The generator of dirty data starts writeback at this percentage
 */
int vm_dirty_ratio = 10;

/*
 * The interval between `kupdate'-style writebacks, in jiffies
 */
int dirty_writeback_interval = 5 * HZ;

/*
 * The longest number of jiffies for which data is allowed to remain dirty
 */
int dirty_expire_interval = 30 * HZ;

/*
 * Flag that makes the machine dump writes/reads and block dirtyings.
 */
int block_dump;

/*
 * Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies:
 * a full sync is triggered after this time elapses without any disk activity.
 */
int laptop_mode;

EXPORT_SYMBOL(laptop_mode);

/* End of sysctl-exported parameters */


static void background_writeout(unsigned long _min_pages);

/*
 * Scale the writeback cache size proportional to the relative writeout speeds.
 *
 * We do this by keeping a floating proportion between BDIs, based on page
 * writeback completions [end_page_writeback()]. Those devices that write out
 * pages fastest will get the larger share, while the slower will get a smaller
 * share.
 *
 * We use page writeout completions because we are interested in getting rid of
 * dirty pages. Having them written out is the primary goal.
 *
 * We introduce a concept of time, a period over which we measure these events,
 * because demand can/will vary over time. The length of this period itself is
 * measured in page writeback completions.
 */
static struct prop_descriptor vm_completions;
static struct prop_descriptor vm_dirties;

static unsigned long determine_dirtyable_memory(void);

/*
 * couple the period to the dirty_ratio:
 *
 *   period/2 ~ roundup_pow_of_two(dirty limit)
 */
static int calc_period_shift(void)
{
	unsigned long dirty_total;

	dirty_total = (vm_dirty_ratio * determine_dirtyable_memory()) / 100;
	return 2 + ilog2(dirty_total - 1);
}

/*
 * update the period when the dirty ratio changes.
 */
int dirty_ratio_handler(struct ctl_table *table, int write,
		struct file *filp, void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int old_ratio = vm_dirty_ratio;
	int ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
	if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
		int shift = calc_period_shift();
		prop_change_shift(&vm_completions, shift);
		prop_change_shift(&vm_dirties, shift);
	}
	return ret;
}

/*
 * Increment the BDI's writeout completion count and the global writeout
 * completion count. Called from test_clear_page_writeback().
 */
static inline void __bdi_writeout_inc(struct backing_dev_info *bdi)
{
	__prop_inc_percpu(&vm_completions, &bdi->completions);
}

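/*
 * Increment the per-task dirty page count, used to scale this task's
 * share of the dirty limit in task_dirty_limit().
 */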
static inline void task_dirty_inc(struct task_struct *tsk)
{
	prop_inc_single(&vm_dirties, &tsk->dirties);
}

/*
 * Obtain an accurate fraction of the BDI's portion.
 */
static void bdi_writeout_fraction(struct backing_dev_info *bdi,
		long *numerator, long *denominator)
{
	if (bdi_cap_writeback_dirty(bdi)) {
		prop_fraction_percpu(&vm_completions, &bdi->completions,
				numerator, denominator);
	} else {
		*numerator = 0;
		*denominator = 1;
	}
}

/*
 * Clip the earned share of dirty pages to that which is actually available.
 * This avoids exceeding the total dirty_limit when the floating averages
 * fluctuate too quickly.
 */
static void
clip_bdi_dirty_limit(struct backing_dev_info *bdi, long dirty, long *pbdi_dirty)
{
	long avail_dirty;

	avail_dirty = dirty -
		(global_page_state(NR_FILE_DIRTY) +
		 global_page_state(NR_WRITEBACK) +
		 global_page_state(NR_UNSTABLE_NFS));

	if (avail_dirty < 0)
		avail_dirty = 0;

	avail_dirty += bdi_stat(bdi, BDI_RECLAIMABLE) +
		bdi_stat(bdi, BDI_WRITEBACK);

	*pbdi_dirty = min(*pbdi_dirty, avail_dirty);
}

static inline void task_dirties_fraction(struct task_struct *tsk,
		long *numerator, long *denominator)
{
	prop_fraction_single(&vm_dirties, &tsk->dirties,
				numerator, denominator);
}

/*
 * scale the dirty limit
 *
 * task specific dirty limit:
 *
 *   dirty -= (dirty/8) * p_{t}
 */
static void task_dirty_limit(struct task_struct *tsk, long *pdirty)
{
	long numerator, denominator;
	long dirty = *pdirty;
	u64 inv = dirty >> 3;

	task_dirties_fraction(tsk, &numerator, &denominator);
	inv *= numerator;
	do_div(inv, denominator);

	dirty -= inv;
	if (dirty < *pdirty/2)
		dirty = *pdirty/2;

	*pdirty = dirty;
}

/*
 * Per-BDI minimum dirty share.  bdi_lock protects bdi_min_ratio, the sum of
 * all per-BDI minimum shares, which must stay below 100%.
 */
static DEFINE_SPINLOCK(bdi_lock);
static unsigned int bdi_min_ratio;

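/*
 * Reserve a minimum share of the dirty limit for @bdi.  Fails with -EINVAL
 * if the combined minimum shares of all BDIs would reach 100%.
 */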
int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&bdi_lock, flags);
	min_ratio -= bdi->min_ratio;
	if (bdi_min_ratio + min_ratio < 100) {
		bdi_min_ratio += min_ratio;
		bdi->min_ratio += min_ratio;
	} else
		ret = -EINVAL;
	spin_unlock_irqrestore(&bdi_lock, flags);

	return ret;
}

/*
 * Work out the current dirty-memory clamping and background writeout
 * thresholds.
 *
 * The main aim here is to lower them aggressively if there is a lot of mapped
 * memory around.  To avoid stressing page reclaim with lots of unreclaimable
 * pages.  It is better to clamp down on writers than to start swapping, and
 * performing lots of scanning.
 *
 * We only allow 1/2 of the currently-unmapped memory to be dirtied.
 *
 * We don't permit the clamping level to fall below 5% - that is getting rather
 * excessive.
 *
 * We make sure that the background writeout level is below the adjusted
 * clamping level.
 */

static unsigned long highmem_dirtyable_memory(unsigned long total)
{
#ifdef CONFIG_HIGHMEM
	int node;
	unsigned long x = 0;

	for_each_node_state(node, N_HIGH_MEMORY) {
		struct zone *z =
			&NODE_DATA(node)->node_zones[ZONE_HIGHMEM];

		x += zone_page_state(z, NR_FREE_PAGES)
			+ zone_page_state(z, NR_INACTIVE)
			+ zone_page_state(z, NR_ACTIVE);
	}
	/*
	 * Make sure that the number of highmem pages is never larger
	 * than the number of the total dirtyable memory. This can only
	 * occur in very strange VM situations but we want to make sure
	 * that this does not occur.
	 */
	return min(x, total);
#else
	return 0;
#endif
}

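/*
 * Work out how much memory is eligible for dirtying: free pages plus the
 * pages on the active and inactive LRU lists, optionally excluding highmem.
 */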
static unsigned long determine_dirtyable_memory(void)
{
	unsigned long x;

	x = global_page_state(NR_FREE_PAGES)
		+ global_page_state(NR_INACTIVE)
		+ global_page_state(NR_ACTIVE);

	if (!vm_highmem_is_dirtyable)
		x -= highmem_dirtyable_memory(x);

	return x + 1;	/* Ensure that we never return 0 */
}

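/*
 * Compute the background and direct-throttling dirty thresholds and, when a
 * BDI is supplied, that BDI's share of the dirty limit (further scaled by the
 * calling task's recent dirtying).
 */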
void
get_dirty_limits(long *pbackground, long *pdirty, long *pbdi_dirty,
		 struct backing_dev_info *bdi)
{
	int background_ratio;		/* Percentages */
	int dirty_ratio;
	long background;
	long dirty;
	unsigned long available_memory = determine_dirtyable_memory();
	struct task_struct *tsk;

	dirty_ratio = vm_dirty_ratio;
	if (dirty_ratio < 5)
		dirty_ratio = 5;

	background_ratio = dirty_background_ratio;
	if (background_ratio >= dirty_ratio)
		background_ratio = dirty_ratio / 2;

	background = (background_ratio * available_memory) / 100;
	dirty = (dirty_ratio * available_memory) / 100;
	tsk = current;
	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
		background += background / 4;
		dirty += dirty / 4;
	}
	*pbackground = background;
	*pdirty = dirty;

	if (bdi) {
		u64 bdi_dirty;
		long numerator, denominator;

		/*
		 * Calculate this BDI's share of the dirty ratio.
		 */
		bdi_writeout_fraction(bdi, &numerator, &denominator);

		bdi_dirty = (dirty * (100 - bdi_min_ratio)) / 100;
		bdi_dirty *= numerator;
		do_div(bdi_dirty, denominator);
		bdi_dirty += (dirty * bdi->min_ratio) / 100;

		*pbdi_dirty = bdi_dirty;
		clip_bdi_dirty_limit(bdi, dirty, pbdi_dirty);
		task_dirty_limit(current, pbdi_dirty);
	}
}

/*
 * balance_dirty_pages() must be called by processes which are generating dirty
 * data.  It looks at the number of dirty pages in the machine and will force
 * the caller to perform writeback if the system is over `vm_dirty_ratio'.
 * If we're over `background_thresh' then pdflush is woken to perform some
 * writeout.
 */
static void balance_dirty_pages(struct address_space *mapping)
{
	long nr_reclaimable, bdi_nr_reclaimable;
	long nr_writeback, bdi_nr_writeback;
	long background_thresh;
	long dirty_thresh;
	long bdi_thresh;
	unsigned long pages_written = 0;
	unsigned long write_chunk = sync_writeback_pages();

	struct backing_dev_info *bdi = mapping->backing_dev_info;

	for (;;) {
		struct writeback_control wbc = {
			.bdi		= bdi,
			.sync_mode	= WB_SYNC_NONE,
			.older_than_this = NULL,
			.nr_to_write	= write_chunk,
			.range_cyclic	= 1,
		};

		get_dirty_limits(&background_thresh, &dirty_thresh,
				&bdi_thresh, bdi);

		nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
				global_page_state(NR_UNSTABLE_NFS);
		nr_writeback = global_page_state(NR_WRITEBACK);

		bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
		bdi_nr_writeback = bdi_stat(bdi, BDI_WRITEBACK);

		if (bdi_nr_reclaimable + bdi_nr_writeback <= bdi_thresh)
			break;

		/*
		 * Throttle it only when the background writeback cannot
		 * catch-up. This avoids (excessively) small writeouts
		 * when the bdi limits are ramping up.
		 */
		if (nr_reclaimable + nr_writeback <
				(background_thresh + dirty_thresh) / 2)
			break;

		if (!bdi->dirty_exceeded)
			bdi->dirty_exceeded = 1;

		/* Note: nr_reclaimable denotes nr_dirty + nr_unstable.
		 * Unstable writes are a feature of certain networked
		 * filesystems (i.e. NFS) in which data may have been
		 * written to the server's write cache, but has not yet
		 * been flushed to permanent storage.
		 */
		if (bdi_nr_reclaimable) {
			writeback_inodes(&wbc);
			pages_written += write_chunk - wbc.nr_to_write;
			get_dirty_limits(&background_thresh, &dirty_thresh,
					&bdi_thresh, bdi);
		}

		/*
		 * In order to avoid the stacked BDI deadlock we need
		 * to ensure we accurately count the 'dirty' pages when
		 * the threshold is low.
		 *
		 * Otherwise it would be possible to get thresh+n pages
		 * reported dirty, even though there are thresh-m pages
		 * actually dirty; with m+n sitting in the percpu
		 * deltas.
		 */
		if (bdi_thresh < 2*bdi_stat_error(bdi)) {
			bdi_nr_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE);
			bdi_nr_writeback = bdi_stat_sum(bdi, BDI_WRITEBACK);
		} else if (bdi_nr_reclaimable) {
			bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
			bdi_nr_writeback = bdi_stat(bdi, BDI_WRITEBACK);
		}

		if (bdi_nr_reclaimable + bdi_nr_writeback <= bdi_thresh)
			break;
		if (pages_written >= write_chunk)
			break;		/* We've done our duty */

		congestion_wait(WRITE, HZ/10);
	}

	if (bdi_nr_reclaimable + bdi_nr_writeback < bdi_thresh &&
			bdi->dirty_exceeded)
		bdi->dirty_exceeded = 0;

	if (writeback_in_progress(bdi))
		return;		/* pdflush is already working this queue */

	/*
	 * In laptop mode, we wait until hitting the higher threshold before
	 * starting background writeout, and then write out all the way down
	 * to the lower threshold.  So slow writers cause minimal disk activity.
	 *
	 * In normal mode, we start background writeout at the lower
	 * background_thresh, to keep the amount of dirty memory low.
	 */
	if ((laptop_mode && pages_written) ||
	    (!laptop_mode && (global_page_state(NR_FILE_DIRTY)
			      + global_page_state(NR_UNSTABLE_NFS)
			      > background_thresh)))
		pdflush_operation(background_writeout, 0);
}

void set_page_dirty_balance(struct page *page, int page_mkwrite)
{
	if (set_page_dirty(page) || page_mkwrite) {
		struct address_space *mapping = page_mapping(page);

		if (mapping)
			balance_dirty_pages_ratelimited(mapping);
	}
}

/**
 * balance_dirty_pages_ratelimited_nr - balance dirty memory state
 * @mapping: address_space which was dirtied
 * @nr_pages_dirtied: number of pages which the caller has just dirtied
 *
 * Processes which are dirtying memory should call in here once for each page
 * which was newly dirtied.  The function will periodically check the system's
 * dirty state and will initiate writeback if needed.
 *
 * On really big machines, get_writeback_state is expensive, so try to avoid
 * calling it too often (ratelimiting).  But once we're over the dirty memory
 * limit we decrease the ratelimiting by a lot, to prevent individual processes
 * from overshooting the limit by (ratelimit_pages) each.
 */
void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
					unsigned long nr_pages_dirtied)
{
	static DEFINE_PER_CPU(unsigned long, ratelimits) = 0;
	unsigned long ratelimit;
	unsigned long *p;

	ratelimit = ratelimit_pages;
	if (mapping->backing_dev_info->dirty_exceeded)
		ratelimit = 8;

	/*
	 * Check the rate limiting. Also, we do not want to throttle real-time
	 * tasks in balance_dirty_pages(). Period.
	 */
	preempt_disable();
	p = &__get_cpu_var(ratelimits);
	*p += nr_pages_dirtied;
	if (unlikely(*p >= ratelimit)) {
		*p = 0;
		preempt_enable();
		balance_dirty_pages(mapping);
		return;
	}
	preempt_enable();
}
EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);

void throttle_vm_writeout(gfp_t gfp_mask)
{
	long background_thresh;
	long dirty_thresh;

	for ( ; ; ) {
		get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);

		/*
		 * Boost the allowable dirty threshold a bit for page
		 * allocators so they don't get DoS'ed by heavy writers
		 */
		dirty_thresh += dirty_thresh / 10;      /* wheeee... */

		if (global_page_state(NR_UNSTABLE_NFS) +
			global_page_state(NR_WRITEBACK) <= dirty_thresh)
			break;
		congestion_wait(WRITE, HZ/10);

		/*
		 * The caller might hold locks which can prevent IO completion
		 * or progress in the filesystem.  So we cannot just sit here
		 * waiting for IO to complete.
		 */
		if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO))
			break;
	}
}

/*
 * writeback at least _min_pages, and keep writing until the amount of dirty
 * memory is less than the background threshold, or until we're all clean.
 */
static void background_writeout(unsigned long _min_pages)
{
	long min_pages = _min_pages;
	struct writeback_control wbc = {
		.bdi		= NULL,
		.sync_mode	= WB_SYNC_NONE,
		.older_than_this = NULL,
		.nr_to_write	= 0,
		.nonblocking	= 1,
		.range_cyclic	= 1,
	};

	for ( ; ; ) {
		long background_thresh;
		long dirty_thresh;

		get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);
		if (global_page_state(NR_FILE_DIRTY) +
			global_page_state(NR_UNSTABLE_NFS) < background_thresh
				&& min_pages <= 0)
			break;
		wbc.more_io = 0;
		wbc.encountered_congestion = 0;
		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
		wbc.pages_skipped = 0;
		writeback_inodes(&wbc);
		min_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
		if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) {
			/* Wrote less than expected */
			if (wbc.encountered_congestion || wbc.more_io)
				congestion_wait(WRITE, HZ/10);
			else
				break;
		}
	}
}

/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.  Returns 0 if a pdflush thread was dispatched.  Returns
 * -1 if all pdflush threads were busy.
 */
int wakeup_pdflush(long nr_pages)
{
	if (nr_pages == 0)
		nr_pages = global_page_state(NR_FILE_DIRTY) +
				global_page_state(NR_UNSTABLE_NFS);
	return pdflush_operation(background_writeout, nr_pages);
}

static void wb_timer_fn(unsigned long unused);
static void laptop_timer_fn(unsigned long unused);

static DEFINE_TIMER(wb_timer, wb_timer_fn, 0, 0);
static DEFINE_TIMER(laptop_mode_wb_timer, laptop_timer_fn, 0, 0);

/*
 * Periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space.  So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval.  But if a writeback event
 * takes longer than one dirty_writeback_interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static void wb_kupdate(unsigned long arg)
{
	unsigned long oldest_jif;
	unsigned long start_jif;
	unsigned long next_jif;
	long nr_to_write;
	struct writeback_control wbc = {
		.bdi		= NULL,
		.sync_mode	= WB_SYNC_NONE,
		.older_than_this = &oldest_jif,
		.nr_to_write	= 0,
		.nonblocking	= 1,
		.for_kupdate	= 1,
		.range_cyclic	= 1,
	};

	sync_supers();

	oldest_jif = jiffies - dirty_expire_interval;
	start_jif = jiffies;
	next_jif = start_jif + dirty_writeback_interval;
	nr_to_write = global_page_state(NR_FILE_DIRTY) +
			global_page_state(NR_UNSTABLE_NFS) +
			(inodes_stat.nr_inodes - inodes_stat.nr_unused);
	while (nr_to_write > 0) {
		wbc.more_io = 0;
		wbc.encountered_congestion = 0;
		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
		writeback_inodes(&wbc);
		if (wbc.nr_to_write > 0) {
			if (wbc.encountered_congestion || wbc.more_io)
				congestion_wait(WRITE, HZ/10);
			else
				break;	/* All the old data is written */
		}
		nr_to_write -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
	}
	if (time_before(next_jif, jiffies + HZ))
		next_jif = jiffies + HZ;
	if (dirty_writeback_interval)
		mod_timer(&wb_timer, next_jif);
}

/*
 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
 */
int dirty_writeback_centisecs_handler(ctl_table *table, int write,
	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_userhz_jiffies(table, write, file, buffer, length, ppos);
	if (dirty_writeback_interval)
		mod_timer(&wb_timer, jiffies + dirty_writeback_interval);
	else
		del_timer(&wb_timer);
	return 0;
}

static void wb_timer_fn(unsigned long unused)
{
	if (pdflush_operation(wb_kupdate, 0) < 0)
		mod_timer(&wb_timer, jiffies + HZ); /* delay 1 second */
}

static void laptop_flush(unsigned long unused)
{
	sys_sync();
}

static void laptop_timer_fn(unsigned long unused)
{
	pdflush_operation(laptop_flush, 0);
}

/*
 * We've spun up the disk and we're in laptop mode: schedule writeback
 * of all dirty data a few seconds from now.  If the flush is already scheduled
 * then push it back - the user is still using the disk.
 */
void laptop_io_completion(void)
{
	mod_timer(&laptop_mode_wb_timer, jiffies + laptop_mode);
}

/*
 * We're in laptop mode and we've just synced. The sync's writes will have
 * caused another writeback to be scheduled by laptop_io_completion.
 * Nothing needs to be written back anymore, so we unschedule the writeback.
 */
void laptop_sync_completion(void)
{
	del_timer(&laptop_mode_wb_timer);
}

/*
 * If ratelimit_pages is too high then we can get into dirty-data overload
 * if a large number of processes all perform writes at the same time.
 * If it is too low then SMP machines will call the (expensive)
 * get_writeback_state too often.
 *
 * Here we set ratelimit_pages to a level which ensures that when all CPUs are
 * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
 * thresholds before writeback cuts in.
 *
 * But the limit should not be set too high.  Because it also controls the
 * amount of memory which the balance_dirty_pages() caller has to write back.
 * If this is too large then the caller will block on the IO queue all the
 * time.  So limit it to four megabytes - the balance_dirty_pages() caller
 * will write six megabyte chunks, max.
 */

void writeback_set_ratelimit(void)
{
	ratelimit_pages = vm_total_pages / (num_online_cpus() * 32);
	if (ratelimit_pages < 16)
		ratelimit_pages = 16;
	if (ratelimit_pages * PAGE_CACHE_SIZE > 4096 * 1024)
		ratelimit_pages = (4096 * 1024) / PAGE_CACHE_SIZE;
}

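/* Recompute ratelimit_pages whenever the set of online CPUs changes. */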
static int __cpuinit
ratelimit_handler(struct notifier_block *self, unsigned long u, void *v)
{
	writeback_set_ratelimit();
	return NOTIFY_DONE;
}

static struct notifier_block __cpuinitdata ratelimit_nb = {
	.notifier_call	= ratelimit_handler,
	.next		= NULL,
};

/*
 * Called early on to tune the page writeback dirty limits.
 *
 * We used to scale dirty pages according to how total memory
 * related to pages that could be allocated for buffers (by
 * comparing nr_free_buffer_pages() to vm_total_pages).
 *
 * However, that was when we used "dirty_ratio" to scale with
 * all memory, and we don't do that any more. "dirty_ratio"
 * is now applied to total non-HIGHPAGE memory (by subtracting
 * totalhigh_pages from vm_total_pages), and as such we can't
 * get into the old insane situation any more where we had
 * large amounts of dirty pages compared to a small amount of
 * non-HIGHMEM memory.
 *
 * But we might still want to scale the dirty_ratio by how
 * much memory the box has..
 */
void __init page_writeback_init(void)
{
	int shift;

	mod_timer(&wb_timer, jiffies + dirty_writeback_interval);
	writeback_set_ratelimit();
	register_cpu_notifier(&ratelimit_nb);

	shift = calc_period_shift();
	prop_descriptor_init(&vm_completions, shift);
	prop_descriptor_init(&vm_dirties, shift);
}

| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 807 | /** | 
| Miklos Szeredi | 0ea9718 | 2007-05-10 22:22:51 -0700 | [diff] [blame] | 808 | * write_cache_pages - walk the list of dirty pages of the given address space and write all of them. | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 809 | * @mapping: address space structure to write | 
|  | 810 | * @wbc: subtract the number of written pages from *@wbc->nr_to_write | 
| Miklos Szeredi | 0ea9718 | 2007-05-10 22:22:51 -0700 | [diff] [blame] | 811 | * @writepage: function called for each page | 
|  | 812 | * @data: data passed to writepage function | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 813 | * | 
| Miklos Szeredi | 0ea9718 | 2007-05-10 22:22:51 -0700 | [diff] [blame] | 814 | * If a page is already under I/O, write_cache_pages() skips it, even | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 815 | * if it's dirty.  This is desirable behaviour for memory-cleaning writeback, | 
|  | 816 | * but it is INCORRECT for data-integrity system calls such as fsync().  fsync() | 
|  | 817 | * and msync() need to guarantee that all the data which was dirty at the time | 
|  | 818 | * the call was made get new I/O started against them.  If wbc->sync_mode is | 
|  | 819 | * WB_SYNC_ALL then we were called for data integrity and we must wait for | 
|  | 820 | * existing IO to complete. | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 821 | */ | 
| Miklos Szeredi | 0ea9718 | 2007-05-10 22:22:51 -0700 | [diff] [blame] | 822 | int write_cache_pages(struct address_space *mapping, | 
|  | 823 | struct writeback_control *wbc, writepage_t writepage, | 
|  | 824 | void *data) | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 825 | { | 
|  | 826 | struct backing_dev_info *bdi = mapping->backing_dev_info; | 
|  | 827 | int ret = 0; | 
|  | 828 | int done = 0; | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 829 | struct pagevec pvec; | 
|  | 830 | int nr_pages; | 
|  | 831 | pgoff_t index; | 
|  | 832 | pgoff_t end;		/* Inclusive */ | 
|  | 833 | int scanned = 0; | 
|  | 834 | int range_whole = 0; | 
|  | 835 |  | 
|  | 836 | if (wbc->nonblocking && bdi_write_congested(bdi)) { | 
|  | 837 | wbc->encountered_congestion = 1; | 
|  | 838 | return 0; | 
|  | 839 | } | 
|  | 840 |  | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 841 | pagevec_init(&pvec, 0); | 
|  | 842 | if (wbc->range_cyclic) { | 
|  | 843 | index = mapping->writeback_index; /* Start from prev offset */ | 
|  | 844 | end = -1; | 
|  | 845 | } else { | 
|  | 846 | index = wbc->range_start >> PAGE_CACHE_SHIFT; | 
|  | 847 | end = wbc->range_end >> PAGE_CACHE_SHIFT; | 
|  | 848 | if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) | 
|  | 849 | range_whole = 1; | 
|  | 850 | scanned = 1; | 
|  | 851 | } | 
|  | 852 | retry: | 
|  | 853 | while (!done && (index <= end) && | 
|  | 854 | (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, | 
|  | 855 | PAGECACHE_TAG_DIRTY, | 
|  | 856 | min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) { | 
|  | 857 | unsigned i; | 
|  | 858 |  | 
|  | 859 | scanned = 1; | 
|  | 860 | for (i = 0; i < nr_pages; i++) { | 
|  | 861 | struct page *page = pvec.pages[i]; | 
|  | 862 |  | 
|  | 863 | /* | 
|  | 864 | * At this point we hold neither mapping->tree_lock nor | 
|  | 865 | * lock on the page itself: the page may be truncated or | 
|  | 866 | * invalidated (changing page->mapping to NULL), or even | 
|  | 867 | * swizzled back from swapper_space to tmpfs file | 
|  | 868 | * mapping | 
|  | 869 | */ | 
|  | 870 | lock_page(page); | 
|  | 871 |  | 
|  | 872 | if (unlikely(page->mapping != mapping)) { | 
|  | 873 | unlock_page(page); | 
|  | 874 | continue; | 
|  | 875 | } | 
|  | 876 |  | 
|  | 877 | if (!wbc->range_cyclic && page->index > end) { | 
|  | 878 | done = 1; | 
|  | 879 | unlock_page(page); | 
|  | 880 | continue; | 
|  | 881 | } | 
|  | 882 |  | 
|  | 883 | if (wbc->sync_mode != WB_SYNC_NONE) | 
|  | 884 | wait_on_page_writeback(page); | 
|  | 885 |  | 
|  | 886 | if (PageWriteback(page) || | 
|  | 887 | !clear_page_dirty_for_io(page)) { | 
|  | 888 | unlock_page(page); | 
|  | 889 | continue; | 
|  | 890 | } | 
|  | 891 |  | 
| Miklos Szeredi | 0ea9718 | 2007-05-10 22:22:51 -0700 | [diff] [blame] | 892 | ret = (*writepage)(page, wbc, data); | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 893 |  | 
| Andrew Morton | e423003 | 2007-10-16 23:26:02 -0700 | [diff] [blame] | 894 | if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) { | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 895 | unlock_page(page); | 
| Andrew Morton | e423003 | 2007-10-16 23:26:02 -0700 | [diff] [blame] | 896 | ret = 0; | 
|  | 897 | } | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 898 | if (ret || (--(wbc->nr_to_write) <= 0)) | 
|  | 899 | done = 1; | 
|  | 900 | if (wbc->nonblocking && bdi_write_congested(bdi)) { | 
|  | 901 | wbc->encountered_congestion = 1; | 
|  | 902 | done = 1; | 
|  | 903 | } | 
|  | 904 | } | 
|  | 905 | pagevec_release(&pvec); | 
|  | 906 | cond_resched(); | 
|  | 907 | } | 
|  | 908 | if (!scanned && !done) { | 
|  | 909 | /* | 
|  | 910 | * We hit the last page and there is more work to be done: wrap | 
|  | 911 | * back to the start of the file | 
|  | 912 | */ | 
|  | 913 | scanned = 1; | 
|  | 914 | index = 0; | 
|  | 915 | goto retry; | 
|  | 916 | } | 
|  | 917 | if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) | 
|  | 918 | mapping->writeback_index = index; | 
|  | 919 | return ret; | 
|  | 920 | } | 
| Miklos Szeredi | 0ea9718 | 2007-05-10 22:22:51 -0700 | [diff] [blame] | 921 | EXPORT_SYMBOL(write_cache_pages); | 
|  | 922 |  | 
|  | 923 | /* | 
|  | 924 | * Function used by generic_writepages to call the real writepage | 
|  | 925 | * function and set the mapping flags on error | 
|  | 926 | */ | 
|  | 927 | static int __writepage(struct page *page, struct writeback_control *wbc, | 
|  | 928 | void *data) | 
|  | 929 | { | 
|  | 930 | struct address_space *mapping = data; | 
|  | 931 | int ret = mapping->a_ops->writepage(page, wbc); | 
|  | 932 | mapping_set_error(mapping, ret); | 
|  | 933 | return ret; | 
|  | 934 | } | 
|  | 935 |  | 
|  | 936 | /** | 
|  | 937 | * generic_writepages - walk the list of dirty pages of the given address space and writepage() all of them. | 
|  | 938 | * @mapping: address space structure to write | 
|  | 939 | * @wbc: subtract the number of written pages from *@wbc->nr_to_write | 
|  | 940 | * | 
|  | 941 | * This is a library function, which implements the writepages() | 
|  | 942 | * address_space_operation. | 
|  | 943 | */ | 
|  | 944 | int generic_writepages(struct address_space *mapping, | 
|  | 945 | struct writeback_control *wbc) | 
|  | 946 | { | 
|  | 947 | /* deal with chardevs and other special file */ | 
|  | 948 | if (!mapping->a_ops->writepage) | 
|  | 949 | return 0; | 
|  | 950 |  | 
|  | 951 | return write_cache_pages(mapping, wbc, __writepage, mapping); | 
|  | 952 | } | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 953 |  | 
|  | 954 | EXPORT_SYMBOL(generic_writepages); | 
|  | 955 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 956 | int do_writepages(struct address_space *mapping, struct writeback_control *wbc) | 
|  | 957 | { | 
| Andrew Morton | 22905f7 | 2005-11-16 15:07:01 -0800 | [diff] [blame] | 958 | int ret; | 
|  | 959 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 960 | if (wbc->nr_to_write <= 0) | 
|  | 961 | return 0; | 
| Andrew Morton | 22905f7 | 2005-11-16 15:07:01 -0800 | [diff] [blame] | 962 | wbc->for_writepages = 1; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 963 | if (mapping->a_ops->writepages) | 
| Peter Zijlstra | d08b385 | 2006-09-25 23:30:57 -0700 | [diff] [blame] | 964 | ret = mapping->a_ops->writepages(mapping, wbc); | 
| Andrew Morton | 22905f7 | 2005-11-16 15:07:01 -0800 | [diff] [blame] | 965 | else | 
|  | 966 | ret = generic_writepages(mapping, wbc); | 
|  | 967 | wbc->for_writepages = 0; | 
|  | 968 | return ret; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 969 | } | 
|  | 970 |  | 
|  | 971 | /** | 
|  | 972 | * write_one_page - write out a single page and optionally wait on I/O | 
| Martin Waitz | 67be2dd | 2005-05-01 08:59:26 -0700 | [diff] [blame] | 973 | * @page: the page to write | 
|  | 974 | * @wait: if true, wait on writeout | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 975 | * | 
|  | 976 | * The page must be locked by the caller and will be unlocked upon return. | 
|  | 977 | * | 
|  | 978 | * write_one_page() returns a negative error code if I/O failed. | 
|  | 979 | */ | 
|  | 980 | int write_one_page(struct page *page, int wait) | 
|  | 981 | { | 
|  | 982 | struct address_space *mapping = page->mapping; | 
|  | 983 | int ret = 0; | 
|  | 984 | struct writeback_control wbc = { | 
|  | 985 | .sync_mode = WB_SYNC_ALL, | 
|  | 986 | .nr_to_write = 1, | 
|  | 987 | }; | 
|  | 988 |  | 
|  | 989 | BUG_ON(!PageLocked(page)); | 
|  | 990 |  | 
|  | 991 | if (wait) | 
|  | 992 | wait_on_page_writeback(page); | 
|  | 993 |  | 
|  | 994 | if (clear_page_dirty_for_io(page)) { | 
|  | 995 | page_cache_get(page); | 
|  | 996 | ret = mapping->a_ops->writepage(page, &wbc); | 
|  | 997 | if (ret == 0 && wait) { | 
|  | 998 | wait_on_page_writeback(page); | 
|  | 999 | if (PageError(page)) | 
|  | 1000 | ret = -EIO; | 
|  | 1001 | } | 
|  | 1002 | page_cache_release(page); | 
|  | 1003 | } else { | 
|  | 1004 | unlock_page(page); | 
|  | 1005 | } | 
|  | 1006 | return ret; | 
|  | 1007 | } | 
|  | 1008 | EXPORT_SYMBOL(write_one_page); | 
|  | 1009 |  | 
|  | 1010 | /* | 
| Ken Chen | 7671932 | 2007-02-10 01:43:15 -0800 | [diff] [blame] | 1011 | * For address_spaces which do not use buffers nor write back. | 
|  | 1012 | */ | 
|  | 1013 | int __set_page_dirty_no_writeback(struct page *page) | 
|  | 1014 | { | 
|  | 1015 | if (!PageDirty(page)) | 
|  | 1016 | SetPageDirty(page); | 
|  | 1017 | return 0; | 
|  | 1018 | } | 
|  | 1019 |  | 
|  | 1020 | /* | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1021 | * For address_spaces which do not use buffers.  Just tag the page as dirty in | 
|  | 1022 | * its radix tree. | 
|  | 1023 | * | 
|  | 1024 | * This is also used when a single buffer is being dirtied: we want to set the | 
|  | 1025 | * page dirty in that case, but not all the buffers.  This is a "bottom-up" | 
|  | 1026 | * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying. | 
|  | 1027 | * | 
|  | 1028 | * Most callers have locked the page, which pins the address_space in memory. | 
|  | 1029 | * But zap_pte_range() does not lock the page; in that case the | 
|  | 1030 | * mapping is pinned by the vma's ->vm_file reference. | 
|  | 1031 | * | 
|  | 1032 | * We take care to handle the case where the page was truncated from the | 
| Simon Arlott | 183ff22 | 2007-10-20 01:27:18 +0200 | [diff] [blame] | 1033 | * mapping by re-checking page_mapping() inside tree_lock. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1034 | */ | 
|  | 1035 | int __set_page_dirty_nobuffers(struct page *page) | 
|  | 1036 | { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1037 | if (!TestSetPageDirty(page)) { | 
|  | 1038 | struct address_space *mapping = page_mapping(page); | 
|  | 1039 | struct address_space *mapping2; | 
|  | 1040 |  | 
| Andrew Morton | 8c08540 | 2006-12-10 02:19:24 -0800 | [diff] [blame] | 1041 | if (!mapping) | 
|  | 1042 | return 1; | 
|  | 1043 |  | 
|  | 1044 | write_lock_irq(&mapping->tree_lock); | 
|  | 1045 | mapping2 = page_mapping(page); | 
|  | 1046 | if (mapping2) { /* Race with truncate? */ | 
|  | 1047 | BUG_ON(mapping2 != mapping); | 
| Nick Piggin | 787d221 | 2007-07-17 04:03:34 -0700 | [diff] [blame] | 1048 | WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page)); | 
| Andrew Morton | 55e829a | 2006-12-10 02:19:27 -0800 | [diff] [blame] | 1049 | if (mapping_cap_account_dirty(mapping)) { | 
| Andrew Morton | 8c08540 | 2006-12-10 02:19:24 -0800 | [diff] [blame] | 1050 | __inc_zone_page_state(page, NR_FILE_DIRTY); | 
| Peter Zijlstra | c9e51e4 | 2007-10-16 23:25:47 -0700 | [diff] [blame] | 1051 | __inc_bdi_stat(mapping->backing_dev_info, | 
|  | 1052 | BDI_RECLAIMABLE); | 
| Andrew Morton | 55e829a | 2006-12-10 02:19:27 -0800 | [diff] [blame] | 1053 | task_io_account_write(PAGE_CACHE_SIZE); | 
|  | 1054 | } | 
| Andrew Morton | 8c08540 | 2006-12-10 02:19:24 -0800 | [diff] [blame] | 1055 | radix_tree_tag_set(&mapping->page_tree, | 
|  | 1056 | page_index(page), PAGECACHE_TAG_DIRTY); | 
|  | 1057 | } | 
|  | 1058 | write_unlock_irq(&mapping->tree_lock); | 
|  | 1059 | if (mapping->host) { | 
|  | 1060 | /* !PageAnon && !swapper_space */ | 
|  | 1061 | __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1062 | } | 
| Andrew Morton | 4741c9f | 2006-03-24 03:18:11 -0800 | [diff] [blame] | 1063 | return 1; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1064 | } | 
| Andrew Morton | 4741c9f | 2006-03-24 03:18:11 -0800 | [diff] [blame] | 1065 | return 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1066 | } | 
|  | 1067 | EXPORT_SYMBOL(__set_page_dirty_nobuffers); | 
|  | 1068 |  | 
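/*
 * Illustrative sketch (not part of the original file): a helper that
 * zeroes a locked pagecache page and dirties it.  For a mapping without
 * buffer_heads, the set_page_dirty() call lands in
 * __set_page_dirty_nobuffers() above, which tags the radix tree and
 * does the dirty accounting.  clear_highpage() and flush_dcache_page()
 * assume <linux/highmem.h>, which this file does not currently include;
 * the helper name is hypothetical.
 */
static void example_zero_and_dirty(struct page *page)
{
	BUG_ON(!PageLocked(page));	/* the page lock pins page->mapping */

	clear_highpage(page);
	flush_dcache_page(page);
	set_page_dirty(page);
}
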
|  | 1069 | /* | 
|  | 1070 | * When a writepage implementation decides that it doesn't want to write this | 
|  | 1071 | * page for some reason, it should redirty the locked page via | 
|  | 1072 | * redirty_page_for_writepage(), then unlock the page and return 0. | 
|  | 1073 | */ | 
|  | 1074 | int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page) | 
|  | 1075 | { | 
|  | 1076 | wbc->pages_skipped++; | 
|  | 1077 | return __set_page_dirty_nobuffers(page); | 
|  | 1078 | } | 
|  | 1079 | EXPORT_SYMBOL(redirty_page_for_writepage); | 
|  | 1080 |  | 
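/*
 * Illustrative sketch (not part of the original file): a ->writepage
 * that never performs I/O.  It hands the page back exactly as the
 * comment above describes, so no dirty data is lost.  A real
 * implementation would only take this path when it genuinely cannot
 * write the page (for example, it would have to block while called
 * for reclaim).  The function name is hypothetical.
 */
static int example_redirty_writepage(struct page *page,
				     struct writeback_control *wbc)
{
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}
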
|  | 1081 | /* | 
|  | 1082 | * If the mapping doesn't provide a set_page_dirty a_op, then | 
|  | 1083 | * just fall through and assume that it wants buffer_heads. | 
|  | 1084 | */ | 
| Peter Zijlstra | 3e26c14 | 2007-10-16 23:25:50 -0700 | [diff] [blame] | 1085 | static int __set_page_dirty(struct page *page) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1086 | { | 
|  | 1087 | struct address_space *mapping = page_mapping(page); | 
|  | 1088 |  | 
|  | 1089 | if (likely(mapping)) { | 
|  | 1090 | int (*spd)(struct page *) = mapping->a_ops->set_page_dirty; | 
| David Howells | 9361401 | 2006-09-30 20:45:40 +0200 | [diff] [blame] | 1091 | #ifdef CONFIG_BLOCK | 
|  | 1092 | if (!spd) | 
|  | 1093 | spd = __set_page_dirty_buffers; | 
|  | 1094 | #endif | 
|  | 1095 | return (*spd)(page); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1096 | } | 
| Andrew Morton | 4741c9f | 2006-03-24 03:18:11 -0800 | [diff] [blame] | 1097 | if (!PageDirty(page)) { | 
|  | 1098 | if (!TestSetPageDirty(page)) | 
|  | 1099 | return 1; | 
|  | 1100 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1101 | return 0; | 
|  | 1102 | } | 
| Peter Zijlstra | 3e26c14 | 2007-10-16 23:25:50 -0700 | [diff] [blame] | 1103 |  | 
| Harvey Harrison | 920c7a5 | 2008-02-04 22:29:26 -0800 | [diff] [blame] | 1104 | int set_page_dirty(struct page *page) | 
| Peter Zijlstra | 3e26c14 | 2007-10-16 23:25:50 -0700 | [diff] [blame] | 1105 | { | 
|  | 1106 | int ret = __set_page_dirty(page); | 
|  | 1107 | if (ret) | 
|  | 1108 | task_dirty_inc(current); | 
|  | 1109 | return ret; | 
|  | 1110 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1111 | EXPORT_SYMBOL(set_page_dirty); | 
|  | 1112 |  | 
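/*
 * Illustrative sketch (not part of the original file): a
 * filesystem-private ->set_page_dirty that does its own bookkeeping and
 * then reuses __set_page_dirty_nobuffers() for the generic tagging and
 * accounting.  If a mapping supplies such a method, set_page_dirty()
 * above calls it; otherwise it falls through to
 * __set_page_dirty_buffers() when CONFIG_BLOCK is set.  The function
 * name is hypothetical.
 */
static int example_set_page_dirty(struct page *page)
{
	/* hypothetical per-filesystem bookkeeping would go here */
	return __set_page_dirty_nobuffers(page);
}
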
|  | 1113 | /* | 
|  | 1114 | * set_page_dirty() is racy if the caller has no reference against | 
|  | 1115 | * page->mapping->host, and if the page is unlocked.  This is because another | 
|  | 1116 | * CPU could truncate the page off the mapping and then free the mapping. | 
|  | 1117 | * | 
|  | 1118 | * Usually, the page _is_ locked, or the caller is a user-space process which | 
|  | 1119 | * holds a reference on the inode by having an open file. | 
|  | 1120 | * | 
|  | 1121 | * In other cases, the page should be locked before running set_page_dirty(). | 
|  | 1122 | */ | 
|  | 1123 | int set_page_dirty_lock(struct page *page) | 
|  | 1124 | { | 
|  | 1125 | int ret; | 
|  | 1126 |  | 
| Nick Piggin | db37648 | 2006-09-25 23:31:24 -0700 | [diff] [blame] | 1127 | lock_page_nosync(page); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1128 | ret = set_page_dirty(page); | 
|  | 1129 | unlock_page(page); | 
|  | 1130 | return ret; | 
|  | 1131 | } | 
|  | 1132 | EXPORT_SYMBOL(set_page_dirty_lock); | 
|  | 1133 |  | 
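/*
 * Illustrative sketch (not part of the original file): the classic user
 * of set_page_dirty_lock() is a driver releasing pages it pinned with
 * get_user_pages() and then filled by DMA.  The caller holds a page
 * reference but not the page lock, so the locked variant above is the
 * safe way to dirty them.  The helper name and calling convention are
 * hypothetical.
 */
static void example_release_pinned_pages(struct page **pages, int npages,
					 int dirty)
{
	int i;

	for (i = 0; i < npages; i++) {
		if (dirty)
			set_page_dirty_lock(pages[i]);
		page_cache_release(pages[i]);
	}
}
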
|  | 1134 | /* | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1135 | * Clear a page's dirty flag, while caring for dirty memory accounting. | 
|  | 1136 | * Returns true if the page was previously dirty. | 
|  | 1137 | * | 
|  | 1138 | * This is for preparing to put the page under writeout.  We leave the page | 
|  | 1139 | * tagged as dirty in the radix tree so that a concurrent write-for-sync | 
|  | 1140 | * can discover it via a PAGECACHE_TAG_DIRTY walk.  The ->writepage | 
|  | 1141 | * implementation will run either set_page_writeback() or set_page_dirty(), | 
|  | 1142 | * at which stage we bring the page's dirty flag and radix-tree dirty tag | 
|  | 1143 | * back into sync. | 
|  | 1144 | * | 
|  | 1145 | * This incoherency between the page's dirty flag and radix-tree tag is | 
|  | 1146 | * unfortunate, but it only exists while the page is locked. | 
|  | 1147 | */ | 
|  | 1148 | int clear_page_dirty_for_io(struct page *page) | 
|  | 1149 | { | 
|  | 1150 | struct address_space *mapping = page_mapping(page); | 
|  | 1151 |  | 
| Nick Piggin | 7935289 | 2007-07-19 01:47:22 -0700 | [diff] [blame] | 1152 | BUG_ON(!PageLocked(page)); | 
|  | 1153 |  | 
| Fengguang Wu | fe3cba1 | 2007-07-19 01:48:07 -0700 | [diff] [blame] | 1154 | ClearPageReclaim(page); | 
| Linus Torvalds | 7658cc2 | 2006-12-29 10:00:58 -0800 | [diff] [blame] | 1155 | if (mapping && mapping_cap_account_dirty(mapping)) { | 
|  | 1156 | /* | 
|  | 1157 | * Yes, Virginia, this is indeed insane. | 
|  | 1158 | * | 
|  | 1159 | * We use this sequence to make sure that | 
|  | 1160 | *  (a) we account for dirty stats properly | 
|  | 1161 | *  (b) we tell the low-level filesystem to | 
|  | 1162 | *      mark the whole page dirty if it was | 
|  | 1163 | *      dirty in a pagetable. Only to then | 
|  | 1164 | *  (c) clean the page again and return 1 to | 
|  | 1165 | *      cause the writeback. | 
|  | 1166 | * | 
|  | 1167 | * This way we avoid all nasty races with the | 
|  | 1168 | * dirty bit in multiple places and clearing | 
|  | 1169 | * them concurrently from different threads. | 
|  | 1170 | * | 
|  | 1171 | * Note! Normally the "set_page_dirty(page)" | 
|  | 1172 | * has no effect on the actual dirty bit - since | 
|  | 1173 | * that will already usually be set. But we | 
|  | 1174 | * need the side effects, and it can help us | 
|  | 1175 | * avoid races. | 
|  | 1176 | * | 
|  | 1177 | * We basically use the page "master dirty bit" | 
|  | 1178 | * as a serialization point for all the different | 
|  | 1179 | * threads doing their things. | 
| Linus Torvalds | 7658cc2 | 2006-12-29 10:00:58 -0800 | [diff] [blame] | 1180 | */ | 
|  | 1181 | if (page_mkclean(page)) | 
|  | 1182 | set_page_dirty(page); | 
| Nick Piggin | 7935289 | 2007-07-19 01:47:22 -0700 | [diff] [blame] | 1183 | /* | 
|  | 1184 | * We carefully synchronise fault handlers against | 
|  | 1185 | * installing a dirty pte and marking the page dirty | 
|  | 1186 | * at this point. We do this by having them hold the | 
|  | 1187 | * page lock at some point after installing their | 
|  | 1188 | * pte, but before marking the page dirty. | 
|  | 1189 | * Pages are always locked coming in here, so we get | 
|  | 1190 | * the desired exclusion. See mm/memory.c:do_wp_page() | 
|  | 1191 | * for more comments. | 
|  | 1192 | */ | 
| Linus Torvalds | 7658cc2 | 2006-12-29 10:00:58 -0800 | [diff] [blame] | 1193 | if (TestClearPageDirty(page)) { | 
| Andrew Morton | 8c08540 | 2006-12-10 02:19:24 -0800 | [diff] [blame] | 1194 | dec_zone_page_state(page, NR_FILE_DIRTY); | 
| Peter Zijlstra | c9e51e4 | 2007-10-16 23:25:47 -0700 | [diff] [blame] | 1195 | dec_bdi_stat(mapping->backing_dev_info, | 
|  | 1196 | BDI_RECLAIMABLE); | 
| Linus Torvalds | 7658cc2 | 2006-12-29 10:00:58 -0800 | [diff] [blame] | 1197 | return 1; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1198 | } | 
| Linus Torvalds | 7658cc2 | 2006-12-29 10:00:58 -0800 | [diff] [blame] | 1199 | return 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1200 | } | 
| Linus Torvalds | 7658cc2 | 2006-12-29 10:00:58 -0800 | [diff] [blame] | 1201 | return TestClearPageDirty(page); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1202 | } | 
| Hans Reiser | 58bb01a | 2005-11-18 01:10:53 -0800 | [diff] [blame] | 1203 | EXPORT_SYMBOL(clear_page_dirty_for_io); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1204 |  | 
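/*
 * Illustrative sketch (not part of the original file): the canonical
 * ordering a writeout path follows around clear_page_dirty_for_io(), as
 * described in the comment above: clear the dirty flag (the radix-tree
 * dirty tag stays), mark the page under writeback, unlock, do the I/O,
 * then end writeback.  This sketch pretends the write completes
 * immediately; a real implementation would submit I/O and call
 * end_page_writeback() from its completion handler.  The function name
 * is hypothetical.
 */
static int example_writeout_one_page(struct page *page,
				     struct writeback_control *wbc)
{
	BUG_ON(!PageLocked(page));

	if (!clear_page_dirty_for_io(page)) {
		/* the page was no longer dirty; nothing to write */
		unlock_page(page);
		return 0;
	}

	set_page_writeback(page);	/* tags PAGECACHE_TAG_WRITEBACK */
	unlock_page(page);

	/* ... submit the I/O here ... */

	end_page_writeback(page);	/* normally from the I/O completion */
	return 0;
}
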
|  | 1205 | int test_clear_page_writeback(struct page *page) | 
|  | 1206 | { | 
|  | 1207 | struct address_space *mapping = page_mapping(page); | 
|  | 1208 | int ret; | 
|  | 1209 |  | 
|  | 1210 | if (mapping) { | 
| Peter Zijlstra | 69cb51d | 2007-10-16 23:25:48 -0700 | [diff] [blame] | 1211 | struct backing_dev_info *bdi = mapping->backing_dev_info; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1212 | unsigned long flags; | 
|  | 1213 |  | 
|  | 1214 | write_lock_irqsave(&mapping->tree_lock, flags); | 
|  | 1215 | ret = TestClearPageWriteback(page); | 
| Peter Zijlstra | 69cb51d | 2007-10-16 23:25:48 -0700 | [diff] [blame] | 1216 | if (ret) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1217 | radix_tree_tag_clear(&mapping->page_tree, | 
|  | 1218 | page_index(page), | 
|  | 1219 | PAGECACHE_TAG_WRITEBACK); | 
| Peter Zijlstra | 04fbfdc | 2007-10-16 23:25:50 -0700 | [diff] [blame] | 1220 | if (bdi_cap_writeback_dirty(bdi)) { | 
| Peter Zijlstra | 69cb51d | 2007-10-16 23:25:48 -0700 | [diff] [blame] | 1221 | __dec_bdi_stat(bdi, BDI_WRITEBACK); | 
| Peter Zijlstra | 04fbfdc | 2007-10-16 23:25:50 -0700 | [diff] [blame] | 1222 | __bdi_writeout_inc(bdi); | 
|  | 1223 | } | 
| Peter Zijlstra | 69cb51d | 2007-10-16 23:25:48 -0700 | [diff] [blame] | 1224 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1225 | write_unlock_irqrestore(&mapping->tree_lock, flags); | 
|  | 1226 | } else { | 
|  | 1227 | ret = TestClearPageWriteback(page); | 
|  | 1228 | } | 
| Andrew Morton | d688abf | 2007-07-19 01:49:17 -0700 | [diff] [blame] | 1229 | if (ret) | 
|  | 1230 | dec_zone_page_state(page, NR_WRITEBACK); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1231 | return ret; | 
|  | 1232 | } | 
|  | 1233 |  | 
|  | 1234 | int test_set_page_writeback(struct page *page) | 
|  | 1235 | { | 
|  | 1236 | struct address_space *mapping = page_mapping(page); | 
|  | 1237 | int ret; | 
|  | 1238 |  | 
|  | 1239 | if (mapping) { | 
| Peter Zijlstra | 69cb51d | 2007-10-16 23:25:48 -0700 | [diff] [blame] | 1240 | struct backing_dev_info *bdi = mapping->backing_dev_info; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1241 | unsigned long flags; | 
|  | 1242 |  | 
|  | 1243 | write_lock_irqsave(&mapping->tree_lock, flags); | 
|  | 1244 | ret = TestSetPageWriteback(page); | 
| Peter Zijlstra | 69cb51d | 2007-10-16 23:25:48 -0700 | [diff] [blame] | 1245 | if (!ret) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1246 | radix_tree_tag_set(&mapping->page_tree, | 
|  | 1247 | page_index(page), | 
|  | 1248 | PAGECACHE_TAG_WRITEBACK); | 
| Peter Zijlstra | 69cb51d | 2007-10-16 23:25:48 -0700 | [diff] [blame] | 1249 | if (bdi_cap_writeback_dirty(bdi)) | 
|  | 1250 | __inc_bdi_stat(bdi, BDI_WRITEBACK); | 
|  | 1251 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1252 | if (!PageDirty(page)) | 
|  | 1253 | radix_tree_tag_clear(&mapping->page_tree, | 
|  | 1254 | page_index(page), | 
|  | 1255 | PAGECACHE_TAG_DIRTY); | 
|  | 1256 | write_unlock_irqrestore(&mapping->tree_lock, flags); | 
|  | 1257 | } else { | 
|  | 1258 | ret = TestSetPageWriteback(page); | 
|  | 1259 | } | 
| Andrew Morton | d688abf | 2007-07-19 01:49:17 -0700 | [diff] [blame] | 1260 | if (!ret) | 
|  | 1261 | inc_zone_page_state(page, NR_WRITEBACK); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1262 | return ret; | 
|  | 1264 | } | 
|  | 1265 | EXPORT_SYMBOL(test_set_page_writeback); | 
|  | 1266 |  | 
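/*
 * Illustrative sketch (not part of the original file): an I/O
 * completion callback pairing with the submission side above.
 * end_page_writeback() uses test_clear_page_writeback() to drop the
 * writeback flag, the radix-tree tag and the BDI_WRITEBACK count, and
 * wakes anyone sleeping in wait_on_page_writeback().  Flagging the
 * mapping with AS_EIO on error follows the usual convention rather
 * than anything required by this file; the function name is
 * hypothetical.
 */
static void example_write_endio(struct page *page, int uptodate)
{
	if (!uptodate) {
		SetPageError(page);
		if (page->mapping)
			set_bit(AS_EIO, &page->mapping->flags);
	}
	end_page_writeback(page);
}
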
|  | 1267 | /* | 
| Nick Piggin | 0012818 | 2007-10-16 01:24:40 -0700 | [diff] [blame] | 1268 | * Return true if any of the pages in the mapping are marked with the | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1269 | * passed tag. | 
|  | 1270 | */ | 
|  | 1271 | int mapping_tagged(struct address_space *mapping, int tag) | 
|  | 1272 | { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1273 | int ret; | 
| Nick Piggin | 0012818 | 2007-10-16 01:24:40 -0700 | [diff] [blame] | 1274 | rcu_read_lock(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1275 | ret = radix_tree_tagged(&mapping->page_tree, tag); | 
| Nick Piggin | 0012818 | 2007-10-16 01:24:40 -0700 | [diff] [blame] | 1276 | rcu_read_unlock(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1277 | return ret; | 
|  | 1278 | } | 
|  | 1279 | EXPORT_SYMBOL(mapping_tagged); |
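
/*
 * Illustrative sketch (not part of the original file): mapping_tagged()
 * gives a cheap "is there anything left?" test.  A helper like this
 * (name hypothetical) could tell whether an inode still has dirty or
 * under-writeback pagecache without walking the pages themselves.
 */
static int example_mapping_needs_sync(struct address_space *mapping)
{
	return mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) ||
	       mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK);
}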