/*
 * mm/page-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Contains functions related to writing back dirty pages at the
 * address_space level.
 *
 * 10Apr2002	Andrew Morton
 *		Initial version
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/init.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/blkdev.h>
#include <linux/mpage.h>
#include <linux/rmap.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/buffer_head.h>
#include <linux/pagevec.h>

/*
 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
 * will look to see if it needs to force writeback or throttling.
 */
static long ratelimit_pages = 32;

/*
 * When balance_dirty_pages decides that the caller needs to perform some
 * non-background writeback, this is how many pages it will attempt to write.
 * It should be somewhat larger than ratelimit_pages to ensure that reasonably
 * large amounts of I/O are submitted.
 */
static inline long sync_writeback_pages(void)
{
	return ratelimit_pages + ratelimit_pages / 2;
}
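
/*
 * Example (illustrative numbers only, assuming the compile-time initial
 * value ratelimit_pages = 32 before writeback_set_ratelimit() retunes it):
 * sync_writeback_pages() returns 32 + 32/2 = 48 pages, i.e. roughly 192KB
 * of writeback per throttled caller with 4KB pages.
 */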

/* The following parameters are exported via /proc/sys/vm */

/*
 * Start background writeback (via pdflush) at this percentage
 */
int dirty_background_ratio = 10;

/*
 * dirty_background_bytes starts at 0 (disabled) so that it is a function of
 * dirty_background_ratio * the amount of dirtyable memory
 */
unsigned long dirty_background_bytes;

/*
 * free highmem will not be subtracted from the total free memory
 * for calculating free ratios if vm_highmem_is_dirtyable is true
 */
int vm_highmem_is_dirtyable;

/*
 * The generator of dirty data starts writeback at this percentage
 */
int vm_dirty_ratio = 20;

/*
 * vm_dirty_bytes starts at 0 (disabled) so that it is a function of
 * vm_dirty_ratio * the amount of dirtyable memory
 */
unsigned long vm_dirty_bytes;

/*
 * The interval between `kupdate'-style writebacks
 */
unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */

/*
 * The longest time for which data is allowed to remain dirty
 */
unsigned int dirty_expire_interval = 30 * 100; /* centiseconds */

/*
 * Flag that makes the machine dump writes/reads and block dirtyings.
 */
int block_dump;

/*
 * Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies:
 * a full sync is triggered after this time elapses without any disk activity.
 */
int laptop_mode;

EXPORT_SYMBOL(laptop_mode);

/* End of sysctl-exported parameters */


/*
 * Scale the writeback cache size proportional to the relative writeout speeds.
 *
 * We do this by keeping a floating proportion between BDIs, based on page
 * writeback completions [end_page_writeback()]. Those devices that write out
 * pages fastest will get the larger share, while the slower will get a smaller
 * share.
 *
 * We use page writeout completions because we are interested in getting rid of
 * dirty pages. Having them written out is the primary goal.
 *
 * We introduce a concept of time, a period over which we measure these events,
 * because demand can/will vary over time. The length of this period itself is
 * measured in page writeback completions.
 *
 */
static struct prop_descriptor vm_completions;
static struct prop_descriptor vm_dirties;

/*
 * couple the period to the dirty_ratio:
 *
 *   period/2 ~ roundup_pow_of_two(dirty limit)
 */
static int calc_period_shift(void)
{
	unsigned long dirty_total;

	if (vm_dirty_bytes)
		dirty_total = vm_dirty_bytes / PAGE_SIZE;
	else
		dirty_total = (vm_dirty_ratio * determine_dirtyable_memory()) /
				100;
	return 2 + ilog2(dirty_total - 1);
}
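
/*
 * Example (illustrative numbers only): with vm_dirty_ratio = 20 and
 * 1,048,576 pages of dirtyable memory (4GB at 4KB pages), dirty_total is
 * 209,715 pages, ilog2(209,714) = 17 and the shift is 19, so
 * period/2 = 2^18 = roundup_pow_of_two(209,715) as described above.
 */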

/*
 * update the period when the dirty threshold changes.
 */
static void update_completion_period(void)
{
	int shift = calc_period_shift();
	prop_change_shift(&vm_completions, shift);
	prop_change_shift(&vm_dirties, shift);
}

int dirty_background_ratio_handler(struct ctl_table *table, int write,
		struct file *filp, void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret;

	ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
	if (ret == 0 && write)
		dirty_background_bytes = 0;
	return ret;
}

int dirty_background_bytes_handler(struct ctl_table *table, int write,
		struct file *filp, void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, filp, buffer, lenp, ppos);
	if (ret == 0 && write)
		dirty_background_ratio = 0;
	return ret;
}

int dirty_ratio_handler(struct ctl_table *table, int write,
		struct file *filp, void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int old_ratio = vm_dirty_ratio;
	int ret;

	ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
	if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
		update_completion_period();
		vm_dirty_bytes = 0;
	}
	return ret;
}


int dirty_bytes_handler(struct ctl_table *table, int write,
		struct file *filp, void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	unsigned long old_bytes = vm_dirty_bytes;
	int ret;

	ret = proc_doulongvec_minmax(table, write, filp, buffer, lenp, ppos);
	if (ret == 0 && write && vm_dirty_bytes != old_bytes) {
		update_completion_period();
		vm_dirty_ratio = 0;
	}
	return ret;
}

/*
 * Increment the BDI's writeout completion count and the global writeout
 * completion count. Called from test_clear_page_writeback().
 */
static inline void __bdi_writeout_inc(struct backing_dev_info *bdi)
{
	__prop_inc_percpu_max(&vm_completions, &bdi->completions,
			      bdi->max_prop_frac);
}

void bdi_writeout_inc(struct backing_dev_info *bdi)
{
	unsigned long flags;

	local_irq_save(flags);
	__bdi_writeout_inc(bdi);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(bdi_writeout_inc);

void task_dirty_inc(struct task_struct *tsk)
{
	prop_inc_single(&vm_dirties, &tsk->dirties);
}

/*
 * Obtain an accurate fraction of the BDI's portion.
 */
static void bdi_writeout_fraction(struct backing_dev_info *bdi,
		long *numerator, long *denominator)
{
	if (bdi_cap_writeback_dirty(bdi)) {
		prop_fraction_percpu(&vm_completions, &bdi->completions,
				numerator, denominator);
	} else {
		*numerator = 0;
		*denominator = 1;
	}
}

/*
 * Clip the earned share of dirty pages to that which is actually available.
 * This avoids exceeding the total dirty_limit when the floating averages
 * fluctuate too quickly.
 */
static void clip_bdi_dirty_limit(struct backing_dev_info *bdi,
		unsigned long dirty, unsigned long *pbdi_dirty)
{
	unsigned long avail_dirty;

	avail_dirty = global_page_state(NR_FILE_DIRTY) +
		 global_page_state(NR_WRITEBACK) +
		 global_page_state(NR_UNSTABLE_NFS) +
		 global_page_state(NR_WRITEBACK_TEMP);

	if (avail_dirty < dirty)
		avail_dirty = dirty - avail_dirty;
	else
		avail_dirty = 0;

	avail_dirty += bdi_stat(bdi, BDI_RECLAIMABLE) +
		bdi_stat(bdi, BDI_WRITEBACK);

	*pbdi_dirty = min(*pbdi_dirty, avail_dirty);
}

static inline void task_dirties_fraction(struct task_struct *tsk,
		long *numerator, long *denominator)
{
	prop_fraction_single(&vm_dirties, &tsk->dirties,
				numerator, denominator);
}

/*
 * scale the dirty limit
 *
 * task specific dirty limit:
 *
 *   dirty -= (dirty/8) * p_{t}
 */
static void task_dirty_limit(struct task_struct *tsk, unsigned long *pdirty)
{
	long numerator, denominator;
	unsigned long dirty = *pdirty;
	u64 inv = dirty >> 3;

	task_dirties_fraction(tsk, &numerator, &denominator);
	inv *= numerator;
	do_div(inv, denominator);

	dirty -= inv;
	if (dirty < *pdirty/2)
		dirty = *pdirty/2;

	*pdirty = dirty;
}
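
/*
 * Example (illustrative numbers only): if *pdirty is 1000 pages and this
 * task has done half of the recently counted dirtyings (p_{t} = 1/2), the
 * limit is lowered by (1000/8) * 1/2 = 62 pages to 938; the final clamp
 * never lets it drop below *pdirty/2 = 500 pages.
 */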

/*
 * bdi_min_ratio is the sum of the minimum dirty shares (min_ratio) that
 * individual BDIs have reserved via bdi_set_min_ratio(); the total is
 * kept below 100%.
 */
static unsigned int bdi_min_ratio;

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
{
	int ret = 0;

	spin_lock_bh(&bdi_lock);
	if (min_ratio > bdi->max_ratio) {
		ret = -EINVAL;
	} else {
		min_ratio -= bdi->min_ratio;
		if (bdi_min_ratio + min_ratio < 100) {
			bdi_min_ratio += min_ratio;
			bdi->min_ratio += min_ratio;
		} else {
			ret = -EINVAL;
		}
	}
	spin_unlock_bh(&bdi_lock);

	return ret;
}

int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio)
{
	int ret = 0;

	if (max_ratio > 100)
		return -EINVAL;

	spin_lock_bh(&bdi_lock);
	if (bdi->min_ratio > max_ratio) {
		ret = -EINVAL;
	} else {
		bdi->max_ratio = max_ratio;
		bdi->max_prop_frac = (PROP_FRAC_BASE * max_ratio) / 100;
	}
	spin_unlock_bh(&bdi_lock);

	return ret;
}
EXPORT_SYMBOL(bdi_set_max_ratio);

/*
 * Work out the current dirty-memory clamping and background writeout
 * thresholds.
 *
 * The main aim here is to lower them aggressively if there is a lot of mapped
 * memory around, to avoid stressing page reclaim with lots of unreclaimable
 * pages.  It is better to clamp down on writers than to start swapping, and
 * performing lots of scanning.
 *
 * We only allow 1/2 of the currently-unmapped memory to be dirtied.
 *
 * We don't permit the clamping level to fall below 5% - that is getting rather
 * excessive.
 *
 * We make sure that the background writeout level is below the adjusted
 * clamping level.
 */

static unsigned long highmem_dirtyable_memory(unsigned long total)
{
#ifdef CONFIG_HIGHMEM
	int node;
	unsigned long x = 0;

	for_each_node_state(node, N_HIGH_MEMORY) {
		struct zone *z =
			&NODE_DATA(node)->node_zones[ZONE_HIGHMEM];

		x += zone_page_state(z, NR_FREE_PAGES) + zone_lru_pages(z);
	}
	/*
	 * Make sure that the number of highmem pages is never larger
	 * than the number of the total dirtyable memory. This can only
	 * occur in very strange VM situations but we want to make sure
	 * that this does not occur.
	 */
	return min(x, total);
#else
	return 0;
#endif
}

/**
 * determine_dirtyable_memory - amount of memory that may be used
 *
 * Returns the number of pages that can currently be freed and used
 * by the kernel for direct mappings.
 */
unsigned long determine_dirtyable_memory(void)
{
	unsigned long x;

	x = global_page_state(NR_FREE_PAGES) + global_lru_pages();

	if (!vm_highmem_is_dirtyable)
		x -= highmem_dirtyable_memory(x);

	return x + 1;	/* Ensure that we never return 0 */
}

void
get_dirty_limits(unsigned long *pbackground, unsigned long *pdirty,
		 unsigned long *pbdi_dirty, struct backing_dev_info *bdi)
{
	unsigned long background;
	unsigned long dirty;
	unsigned long available_memory = determine_dirtyable_memory();
	struct task_struct *tsk;

	if (vm_dirty_bytes)
		dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE);
	else {
		int dirty_ratio;

		dirty_ratio = vm_dirty_ratio;
		if (dirty_ratio < 5)
			dirty_ratio = 5;
		dirty = (dirty_ratio * available_memory) / 100;
	}

	if (dirty_background_bytes)
		background = DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE);
	else
		background = (dirty_background_ratio * available_memory) / 100;

	if (background >= dirty)
		background = dirty / 2;
	tsk = current;
	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
		background += background / 4;
		dirty += dirty / 4;
	}
	*pbackground = background;
	*pdirty = dirty;

	if (bdi) {
		u64 bdi_dirty;
		long numerator, denominator;

		/*
		 * Calculate this BDI's share of the dirty ratio.
		 */
		bdi_writeout_fraction(bdi, &numerator, &denominator);

		bdi_dirty = (dirty * (100 - bdi_min_ratio)) / 100;
		bdi_dirty *= numerator;
		do_div(bdi_dirty, denominator);
		bdi_dirty += (dirty * bdi->min_ratio) / 100;
		if (bdi_dirty > (dirty * bdi->max_ratio) / 100)
			bdi_dirty = dirty * bdi->max_ratio / 100;

		*pbdi_dirty = bdi_dirty;
		clip_bdi_dirty_limit(bdi, dirty, pbdi_dirty);
		task_dirty_limit(current, pbdi_dirty);
	}
}
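
/*
 * Example (illustrative numbers only): with a total dirty limit of 1000
 * pages, bdi_min_ratio = 0, a bdi whose writeout completion fraction is
 * 1/4 (numerator 1, denominator 4), min_ratio = 0 and max_ratio = 100,
 * the bdi share works out to 1000 * 1/4 = 250 pages before
 * clip_bdi_dirty_limit() and task_dirty_limit() adjust it further.
 */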

/*
 * balance_dirty_pages() must be called by processes which are generating dirty
 * data.  It looks at the number of dirty pages in the machine and will force
 * the caller to perform writeback if the system is over `vm_dirty_ratio'.
 * If we're over `background_thresh' then pdflush is woken to perform some
 * writeout.
 */
static void balance_dirty_pages(struct address_space *mapping)
{
	long nr_reclaimable, bdi_nr_reclaimable;
	long nr_writeback, bdi_nr_writeback;
	unsigned long background_thresh;
	unsigned long dirty_thresh;
	unsigned long bdi_thresh;
	unsigned long pages_written = 0;
	unsigned long write_chunk = sync_writeback_pages();

	struct backing_dev_info *bdi = mapping->backing_dev_info;

	for (;;) {
		struct writeback_control wbc = {
			.bdi		= bdi,
			.sync_mode	= WB_SYNC_NONE,
			.older_than_this = NULL,
			.nr_to_write	= write_chunk,
			.range_cyclic	= 1,
		};

		get_dirty_limits(&background_thresh, &dirty_thresh,
				&bdi_thresh, bdi);

		nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
					global_page_state(NR_UNSTABLE_NFS);
		nr_writeback = global_page_state(NR_WRITEBACK);

		bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
		bdi_nr_writeback = bdi_stat(bdi, BDI_WRITEBACK);

		if (bdi_nr_reclaimable + bdi_nr_writeback <= bdi_thresh)
			break;

		/*
		 * Throttle it only when the background writeback cannot
		 * catch-up. This avoids (excessively) small writeouts
		 * when the bdi limits are ramping up.
		 */
		if (nr_reclaimable + nr_writeback <
				(background_thresh + dirty_thresh) / 2)
			break;

		if (!bdi->dirty_exceeded)
			bdi->dirty_exceeded = 1;

		/* Note: nr_reclaimable denotes nr_dirty + nr_unstable.
		 * Unstable writes are a feature of certain networked
		 * filesystems (i.e. NFS) in which data may have been
		 * written to the server's write cache, but has not yet
		 * been flushed to permanent storage.
		 * Only move pages to writeback if this bdi is over its
		 * threshold otherwise wait until the disk writes catch
		 * up.
		 */
		if (bdi_nr_reclaimable > bdi_thresh) {
			writeback_inodes_wbc(&wbc);
			pages_written += write_chunk - wbc.nr_to_write;
			get_dirty_limits(&background_thresh, &dirty_thresh,
				       &bdi_thresh, bdi);
		}

		/*
		 * In order to avoid the stacked BDI deadlock we need
		 * to ensure we accurately count the 'dirty' pages when
		 * the threshold is low.
		 *
		 * Otherwise it would be possible to get thresh+n pages
		 * reported dirty, even though there are thresh-m pages
		 * actually dirty; with m+n sitting in the percpu
		 * deltas.
		 */
		if (bdi_thresh < 2*bdi_stat_error(bdi)) {
			bdi_nr_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE);
			bdi_nr_writeback = bdi_stat_sum(bdi, BDI_WRITEBACK);
		} else if (bdi_nr_reclaimable) {
			bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
			bdi_nr_writeback = bdi_stat(bdi, BDI_WRITEBACK);
		}

		if (bdi_nr_reclaimable + bdi_nr_writeback <= bdi_thresh)
			break;
		if (pages_written >= write_chunk)
			break;		/* We've done our duty */

		schedule_timeout(1);
	}

	if (bdi_nr_reclaimable + bdi_nr_writeback < bdi_thresh &&
			bdi->dirty_exceeded)
		bdi->dirty_exceeded = 0;

	if (writeback_in_progress(bdi))
		return;		/* pdflush is already working this queue */

	/*
	 * In laptop mode, we wait until hitting the higher threshold before
	 * starting background writeout, and then write out all the way down
	 * to the lower threshold.  So slow writers cause minimal disk activity.
	 *
	 * In normal mode, we start background writeout at the lower
	 * background_thresh, to keep the amount of dirty memory low.
	 */
	if ((laptop_mode && pages_written) ||
	    (!laptop_mode && ((nr_writeback = global_page_state(NR_FILE_DIRTY)
					  + global_page_state(NR_UNSTABLE_NFS))
					  > background_thresh)))
		bdi_start_writeback(bdi, nr_writeback);
}

void set_page_dirty_balance(struct page *page, int page_mkwrite)
{
	if (set_page_dirty(page) || page_mkwrite) {
		struct address_space *mapping = page_mapping(page);

		if (mapping)
			balance_dirty_pages_ratelimited(mapping);
	}
}

static DEFINE_PER_CPU(unsigned long, bdp_ratelimits) = 0;

/**
 * balance_dirty_pages_ratelimited_nr - balance dirty memory state
 * @mapping: address_space which was dirtied
 * @nr_pages_dirtied: number of pages which the caller has just dirtied
 *
 * Processes which are dirtying memory should call in here once for each page
 * which was newly dirtied.  The function will periodically check the system's
 * dirty state and will initiate writeback if needed.
 *
 * On really big machines, get_writeback_state is expensive, so try to avoid
 * calling it too often (ratelimiting).  But once we're over the dirty memory
 * limit we decrease the ratelimiting by a lot, to prevent individual processes
 * from overshooting the limit by (ratelimit_pages) each.
 */
void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
					unsigned long nr_pages_dirtied)
{
	unsigned long ratelimit;
	unsigned long *p;

	ratelimit = ratelimit_pages;
	if (mapping->backing_dev_info->dirty_exceeded)
		ratelimit = 8;

	/*
	 * Check the rate limiting. Also, we do not want to throttle real-time
	 * tasks in balance_dirty_pages(). Period.
	 */
	preempt_disable();
	p = &__get_cpu_var(bdp_ratelimits);
	*p += nr_pages_dirtied;
	if (unlikely(*p >= ratelimit)) {
		*p = 0;
		preempt_enable();
		balance_dirty_pages(mapping);
		return;
	}
	preempt_enable();
}
EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);
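
/*
 * Most callers use the balance_dirty_pages_ratelimited() wrapper declared
 * in <linux/writeback.h>, which simply calls the function above with
 * nr_pages_dirtied = 1 for each newly dirtied page (as set_page_dirty_balance()
 * does above).
 */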

void throttle_vm_writeout(gfp_t gfp_mask)
{
	unsigned long background_thresh;
	unsigned long dirty_thresh;

	for ( ; ; ) {
		get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);

		/*
		 * Boost the allowable dirty threshold a bit for page
		 * allocators so they don't get DoS'ed by heavy writers
		 */
		dirty_thresh += dirty_thresh / 10;      /* wheeee... */

		if (global_page_state(NR_UNSTABLE_NFS) +
			global_page_state(NR_WRITEBACK) <= dirty_thresh)
			break;
		congestion_wait(BLK_RW_ASYNC, HZ/10);

		/*
		 * The caller might hold locks which can prevent IO completion
		 * or progress in the filesystem.  So we cannot just sit here
		 * waiting for IO to complete.
		 */
		if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO))
			break;
	}
}

static void laptop_timer_fn(unsigned long unused);

static DEFINE_TIMER(laptop_mode_wb_timer, laptop_timer_fn, 0, 0);

/*
 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
 */
int dirty_writeback_centisecs_handler(ctl_table *table, int write,
	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec(table, write, file, buffer, length, ppos);
	return 0;
}

static void do_laptop_sync(struct work_struct *work)
{
	wakeup_flusher_threads(0);
	kfree(work);
}

static void laptop_timer_fn(unsigned long unused)
{
	struct work_struct *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		INIT_WORK(work, do_laptop_sync);
		schedule_work(work);
	}
}

/*
 * We've spun up the disk and we're in laptop mode: schedule writeback
 * of all dirty data a few seconds from now.  If the flush is already scheduled
 * then push it back - the user is still using the disk.
 */
void laptop_io_completion(void)
{
	mod_timer(&laptop_mode_wb_timer, jiffies + laptop_mode);
}

/*
 * We're in laptop mode and we've just synced. The sync's writes will have
 * caused another writeback to be scheduled by laptop_io_completion.
 * Nothing needs to be written back anymore, so we unschedule the writeback.
 */
void laptop_sync_completion(void)
{
	del_timer(&laptop_mode_wb_timer);
}

/*
 * If ratelimit_pages is too high then we can get into dirty-data overload
 * if a large number of processes all perform writes at the same time.
 * If it is too low then SMP machines will call the (expensive)
 * get_writeback_state too often.
 *
 * Here we set ratelimit_pages to a level which ensures that when all CPUs are
 * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
 * thresholds before writeback cuts in.
 *
 * But the limit should not be set too high, because it also controls the
 * amount of memory which the balance_dirty_pages() caller has to write back.
 * If this is too large then the caller will block on the IO queue all the
 * time.  So limit it to four megabytes - the balance_dirty_pages() caller
 * will write six megabyte chunks, max.
 */

void writeback_set_ratelimit(void)
{
	ratelimit_pages = vm_total_pages / (num_online_cpus() * 32);
	if (ratelimit_pages < 16)
		ratelimit_pages = 16;
	if (ratelimit_pages * PAGE_CACHE_SIZE > 4096 * 1024)
		ratelimit_pages = (4096 * 1024) / PAGE_CACHE_SIZE;
}
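
/*
 * Example (illustrative numbers only, assuming 4KB pages): on a 16-CPU
 * machine with vm_total_pages = 1,048,576 (4GB), the first line gives
 * 1,048,576 / (16 * 32) = 2048 pages; since 2048 pages is 8MB, which
 * exceeds the 4MB cap, ratelimit_pages is clamped down to 1024 pages.
 */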

static int __cpuinit
ratelimit_handler(struct notifier_block *self, unsigned long u, void *v)
{
	writeback_set_ratelimit();
	return NOTIFY_DONE;
}

static struct notifier_block __cpuinitdata ratelimit_nb = {
	.notifier_call	= ratelimit_handler,
	.next		= NULL,
};

/*
 * Called early on to tune the page writeback dirty limits.
 *
 * We used to scale dirty pages according to how total memory
 * related to pages that could be allocated for buffers (by
 * comparing nr_free_buffer_pages() to vm_total_pages).
 *
 * However, that was when we used "dirty_ratio" to scale with
 * all memory, and we don't do that any more. "dirty_ratio"
 * is now applied to total non-HIGHPAGE memory (by subtracting
 * totalhigh_pages from vm_total_pages), and as such we can't
 * get into the old insane situation any more where we had
 * large amounts of dirty pages compared to a small amount of
 * non-HIGHMEM memory.
 *
 * But we might still want to scale the dirty_ratio by how
 * much memory the box has..
 */
void __init page_writeback_init(void)
{
	int shift;

	writeback_set_ratelimit();
	register_cpu_notifier(&ratelimit_nb);

	shift = calc_period_shift();
	prop_descriptor_init(&vm_completions, shift);
	prop_descriptor_init(&vm_dirties, shift);
}

/**
 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 * @writepage: function called for each page
 * @data: data passed to writepage function
 *
 * If a page is already under I/O, write_cache_pages() skips it, even
 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
 * and msync() need to guarantee that all the data which was dirty at the time
 * the call was made get new I/O started against them.  If wbc->sync_mode is
 * WB_SYNC_ALL then we were called for data integrity and we must wait for
 * existing IO to complete.
 */
int write_cache_pages(struct address_space *mapping,
		      struct writeback_control *wbc, writepage_t writepage,
		      void *data)
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	long nr_to_write = wbc->nr_to_write;

	if (wbc->nonblocking && bdi_write_congested(bdi)) {
		wbc->encountered_congestion = 1;
		return 0;
	}

	pagevec_init(&pvec, 0);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
retry:
	done_index = index;
	while (!done && (index <= end)) {
		int i;

		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
			      PAGECACHE_TAG_DIRTY,
			      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * At this point, the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or
			 * even swizzled back from swapper_space to tmpfs file
			 * mapping. However, page->index will not change
			 * because we have a reference on the page.
			 */
			if (page->index > end) {
				/*
				 * can't be range_cyclic (1st pass) because
				 * end == -1 in that case.
				 */
				done = 1;
				break;
			}

			done_index = page->index + 1;

			lock_page(page);

			/*
			 * Page truncated or invalidated. We can freely skip it
			 * then, even for data integrity operations: the page
			 * has disappeared concurrently, so there could be no
			 * real expectation of this data integrity operation
 | 882 | 			 * even if there is now a new, dirty page at the same | 
 | 883 | 			 * pagecache address. | 
 | 884 | 			 */ | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 885 | 			if (unlikely(page->mapping != mapping)) { | 
| Nick Piggin | 5a3d5c9 | 2009-01-06 14:39:09 -0800 | [diff] [blame] | 886 | continue_unlock: | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 887 | 				unlock_page(page); | 
 | 888 | 				continue; | 
 | 889 | 			} | 
 | 890 |  | 
| Nick Piggin | 515f4a0 | 2009-01-06 14:39:10 -0800 | [diff] [blame] | 891 | 			if (!PageDirty(page)) { | 
 | 892 | 				/* someone wrote it for us */ | 
 | 893 | 				goto continue_unlock; | 
 | 894 | 			} | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 895 |  | 
| Nick Piggin | 515f4a0 | 2009-01-06 14:39:10 -0800 | [diff] [blame] | 896 | 			if (PageWriteback(page)) { | 
 | 897 | 				if (wbc->sync_mode != WB_SYNC_NONE) | 
 | 898 | 					wait_on_page_writeback(page); | 
 | 899 | 				else | 
 | 900 | 					goto continue_unlock; | 
 | 901 | 			} | 
 | 902 |  | 
 | 903 | 			BUG_ON(PageWriteback(page)); | 
 | 904 | 			if (!clear_page_dirty_for_io(page)) | 
| Nick Piggin | 5a3d5c9 | 2009-01-06 14:39:09 -0800 | [diff] [blame] | 905 | 				goto continue_unlock; | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 906 |  | 
| Miklos Szeredi | 0ea9718 | 2007-05-10 22:22:51 -0700 | [diff] [blame] | 907 | 			ret = (*writepage)(page, wbc, data); | 
| Nick Piggin | 0026677 | 2009-01-06 14:39:06 -0800 | [diff] [blame] | 908 | 			if (unlikely(ret)) { | 
 | 909 | 				if (ret == AOP_WRITEPAGE_ACTIVATE) { | 
 | 910 | 					unlock_page(page); | 
 | 911 | 					ret = 0; | 
 | 912 | 				} else { | 
 | 913 | 					/* | 
 | 914 | 					 * done_index is set past this page, | 
 | 915 | 					 * so media errors will not choke | 
 | 916 | 					 * background writeout for the entire | 
 | 917 | 					 * file. This has consequences for | 
 | 918 | 					 * range_cyclic semantics (ie. it may | 
 | 919 | 					 * not be suitable for data integrity | 
 | 920 | 					 * writeout). | 
 | 921 | 					 */ | 
 | 922 | 					done = 1; | 
 | 923 | 					break; | 
 | 924 | 				} | 
 | 925 |  			} | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 926 |  | 
| Federico Cuello | 89e1219 | 2009-02-11 13:04:39 -0800 | [diff] [blame] | 927 | 			if (nr_to_write > 0) { | 
| Artem Bityutskiy | dcf6a79 | 2009-02-02 18:33:49 +0200 | [diff] [blame] | 928 | 				nr_to_write--; | 
| Federico Cuello | 89e1219 | 2009-02-11 13:04:39 -0800 | [diff] [blame] | 929 | 				if (nr_to_write == 0 && | 
 | 930 | 				    wbc->sync_mode == WB_SYNC_NONE) { | 
 | 931 | 					/* | 
 | 932 | 					 * We stop writing back only if we are | 
 | 933 | 					 * not doing integrity sync. In case of | 
 | 934 | 					 * integrity sync we have to keep going | 
 | 935 | 					 * because someone may be concurrently | 
 | 936 | 					 * dirtying pages, and we might have | 
 | 937 | 					 * synced a lot of newly appeared dirty | 
 | 938 | 					 * pages, but have not synced all of the | 
 | 939 | 					 * old dirty pages. | 
 | 940 | 					 */ | 
 | 941 | 					done = 1; | 
 | 942 | 					break; | 
 | 943 | 				} | 
| Nick Piggin | 05fe478 | 2009-01-06 14:39:08 -0800 | [diff] [blame] | 944 | 			} | 
| Artem Bityutskiy | dcf6a79 | 2009-02-02 18:33:49 +0200 | [diff] [blame] | 945 |  | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 946 | 			if (wbc->nonblocking && bdi_write_congested(bdi)) { | 
 | 947 | 				wbc->encountered_congestion = 1; | 
 | 948 | 				done = 1; | 
| Andrew Morton | 82fd1a9 | 2009-01-06 14:39:11 -0800 | [diff] [blame] | 949 | 				break; | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 950 | 			} | 
 | 951 | 		} | 
 | 952 | 		pagevec_release(&pvec); | 
 | 953 | 		cond_resched(); | 
 | 954 | 	} | 
| Nick Piggin | 3a4c680 | 2009-02-12 04:34:23 +0100 | [diff] [blame] | 955 | 	if (!cycled && !done) { | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 956 | 		/* | 
| Nick Piggin | 31a1266 | 2009-01-06 14:39:04 -0800 | [diff] [blame] | 957 | 		 * range_cyclic: | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 958 | 		 * We hit the last page and there is more work to be done: wrap | 
 | 959 | 		 * back to the start of the file | 
 | 960 | 		 */ | 
| Nick Piggin | 31a1266 | 2009-01-06 14:39:04 -0800 | [diff] [blame] | 961 | 		cycled = 1; | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 962 | 		index = 0; | 
| Nick Piggin | 31a1266 | 2009-01-06 14:39:04 -0800 | [diff] [blame] | 963 | 		end = writeback_index - 1; | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 964 | 		goto retry; | 
 | 965 | 	} | 
| Aneesh Kumar K.V | 17bc6c3 | 2008-10-16 10:09:17 -0400 | [diff] [blame] | 966 | 	if (!wbc->no_nrwrite_index_update) { | 
 | 967 | 		if (wbc->range_cyclic || (range_whole && nr_to_write > 0)) | 
| Nick Piggin | bd19e01 | 2009-01-06 14:39:06 -0800 | [diff] [blame] | 968 | 			mapping->writeback_index = done_index; | 
| Aneesh Kumar K.V | 17bc6c3 | 2008-10-16 10:09:17 -0400 | [diff] [blame] | 969 | 		wbc->nr_to_write = nr_to_write; | 
 | 970 | 	} | 
| Aneesh Kumar K.V | 06d6cf6 | 2008-07-11 19:27:31 -0400 | [diff] [blame] | 971 |  | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 972 | 	return ret; | 
 | 973 | } | 
| Miklos Szeredi | 0ea9718 | 2007-05-10 22:22:51 -0700 | [diff] [blame] | 974 | EXPORT_SYMBOL(write_cache_pages); | 
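A minimal usage sketch, not part of this file: a filesystem that needs per-call state can drive write_cache_pages() with its own writepage_t callback, much as generic_writepages() below does with __writepage(). All "myfs" names and the myfs_write_ctx structure are hypothetical.

struct myfs_write_ctx {				/* hypothetical per-call state */
	struct address_space *mapping;
	int pages_submitted;
};

static int myfs_writepage_cb(struct page *page, struct writeback_control *wbc,
			     void *data)
{
	struct myfs_write_ctx *ctx = data;

	ctx->pages_submitted++;
	/* map blocks, build and submit a bio for this locked page, etc. */
	return myfs_do_writepage(page, wbc, ctx);	/* hypothetical helper */
}

static int myfs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct myfs_write_ctx ctx = { .mapping = mapping };

	return write_cache_pages(mapping, wbc, myfs_writepage_cb, &ctx);
}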
 | 975 |  | 
 | 976 | /* | 
 | 977 |  * Function used by generic_writepages to call the real writepage | 
 | 978 |  * function and set the mapping flags on error | 
 | 979 |  */ | 
 | 980 | static int __writepage(struct page *page, struct writeback_control *wbc, | 
 | 981 | 		       void *data) | 
 | 982 | { | 
 | 983 | 	struct address_space *mapping = data; | 
 | 984 | 	int ret = mapping->a_ops->writepage(page, wbc); | 
 | 985 | 	mapping_set_error(mapping, ret); | 
 | 986 | 	return ret; | 
 | 987 | } | 
 | 988 |  | 
 | 989 | /** | 
 | 990 |  * generic_writepages - walk the list of dirty pages of the given address space and writepage() all of them. | 
 | 991 |  * @mapping: address space structure to write | 
 | 992 |  * @wbc: subtract the number of written pages from @wbc->nr_to_write | 
 | 993 |  * | 
 | 994 |  * This is a library function, which implements the writepages() | 
 | 995 |  * address_space_operation. | 
 | 996 |  */ | 
 | 997 | int generic_writepages(struct address_space *mapping, | 
 | 998 | 		       struct writeback_control *wbc) | 
 | 999 | { | 
 | 1000 | 	/* deal with chardevs and other special files */ | 
 | 1001 | 	if (!mapping->a_ops->writepage) | 
 | 1002 | 		return 0; | 
 | 1003 |  | 
 | 1004 | 	return write_cache_pages(mapping, wbc, __writepage, mapping); | 
 | 1005 | } | 
| David Howells | 811d736 | 2006-08-29 19:06:09 +0100 | [diff] [blame] | 1006 |  | 
 | 1007 | EXPORT_SYMBOL(generic_writepages); | 
 | 1008 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1009 | int do_writepages(struct address_space *mapping, struct writeback_control *wbc) | 
 | 1010 | { | 
| Andrew Morton | 22905f7 | 2005-11-16 15:07:01 -0800 | [diff] [blame] | 1011 | 	int ret; | 
 | 1012 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1013 | 	if (wbc->nr_to_write <= 0) | 
 | 1014 | 		return 0; | 
 | 1015 | 	if (mapping->a_ops->writepages) | 
| Peter Zijlstra | d08b385 | 2006-09-25 23:30:57 -0700 | [diff] [blame] | 1016 | 		ret = mapping->a_ops->writepages(mapping, wbc); | 
| Andrew Morton | 22905f7 | 2005-11-16 15:07:01 -0800 | [diff] [blame] | 1017 | 	else | 
 | 1018 | 		ret = generic_writepages(mapping, wbc); | 
| Andrew Morton | 22905f7 | 2005-11-16 15:07:01 -0800 | [diff] [blame] | 1019 | 	return ret; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1020 | } | 
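A sketch of how the dispatcher above is typically entered, assuming a caller that just wants to push some dirty pages out without integrity guarantees; example_flush_mapping() is a hypothetical name and the page budget is arbitrary.

static void example_flush_mapping(struct address_space *mapping)
{
	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_NONE,	/* best-effort, not data integrity */
		.nr_to_write	= 1024,		/* arbitrary page budget */
		.range_cyclic	= 1,		/* resume from mapping->writeback_index */
	};

	do_writepages(mapping, &wbc);
	/* on return wbc.nr_to_write holds whatever budget was left unused */
}

If the mapping's a_ops provide no ->writepages method, the call falls through to generic_writepages() above.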
 | 1021 |  | 
 | 1022 | /** | 
 | 1023 |  * write_one_page - write out a single page and optionally wait on I/O | 
| Martin Waitz | 67be2dd | 2005-05-01 08:59:26 -0700 | [diff] [blame] | 1024 |  * @page: the page to write | 
 | 1025 |  * @wait: if true, wait on writeout | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1026 |  * | 
 | 1027 |  * The page must be locked by the caller and will be unlocked upon return. | 
 | 1028 |  * | 
 | 1029 |  * write_one_page() returns a negative error code if I/O failed. | 
 | 1030 |  */ | 
 | 1031 | int write_one_page(struct page *page, int wait) | 
 | 1032 | { | 
 | 1033 | 	struct address_space *mapping = page->mapping; | 
 | 1034 | 	int ret = 0; | 
 | 1035 | 	struct writeback_control wbc = { | 
 | 1036 | 		.sync_mode = WB_SYNC_ALL, | 
 | 1037 | 		.nr_to_write = 1, | 
 | 1038 | 	}; | 
 | 1039 |  | 
 | 1040 | 	BUG_ON(!PageLocked(page)); | 
 | 1041 |  | 
 | 1042 | 	if (wait) | 
 | 1043 | 		wait_on_page_writeback(page); | 
 | 1044 |  | 
 | 1045 | 	if (clear_page_dirty_for_io(page)) { | 
 | 1046 | 		page_cache_get(page); | 
 | 1047 | 		ret = mapping->a_ops->writepage(page, &wbc); | 
 | 1048 | 		if (ret == 0 && wait) { | 
 | 1049 | 			wait_on_page_writeback(page); | 
 | 1050 | 			if (PageError(page)) | 
 | 1051 | 				ret = -EIO; | 
 | 1052 | 		} | 
 | 1053 | 		page_cache_release(page); | 
 | 1054 | 	} else { | 
 | 1055 | 		unlock_page(page); | 
 | 1056 | 	} | 
 | 1057 | 	return ret; | 
 | 1058 | } | 
 | 1059 | EXPORT_SYMBOL(write_one_page); | 
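A hedged caller sketch for write_one_page(): the page must be locked on entry and is unlocked on return whether or not any I/O was issued. The function name is hypothetical.

static int example_sync_single_page(struct page *page)
{
	lock_page(page);
	/* write_one_page() drops the page lock on every path */
	return write_one_page(page, 1);		/* 1 == wait for writeout */
}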
 | 1060 |  | 
 | 1061 | /* | 
| Ken Chen | 7671932 | 2007-02-10 01:43:15 -0800 | [diff] [blame] | 1062 |  * For address_spaces which neither use buffers nor write back. | 
 | 1063 |  */ | 
 | 1064 | int __set_page_dirty_no_writeback(struct page *page) | 
 | 1065 | { | 
 | 1066 | 	if (!PageDirty(page)) | 
 | 1067 | 		SetPageDirty(page); | 
 | 1068 | 	return 0; | 
 | 1069 | } | 
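A sketch of where the helper above is meant to be wired in, assuming a RAM-backed filesystem in the style of ramfs whose pages never need writing back; example_ram_aops is a hypothetical name.

static const struct address_space_operations example_ram_aops = {
	.readpage	= simple_readpage,
	.write_begin	= simple_write_begin,
	.write_end	= simple_write_end,
	.set_page_dirty	= __set_page_dirty_no_writeback,
};

With this, set_page_dirty() below ends up only setting PG_dirty, so such pages never feed the dirty accounting or the writeback machinery.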
 | 1070 |  | 
 | 1071 | /* | 
| Edward Shishkin | e3a7cca | 2009-03-31 15:19:39 -0700 | [diff] [blame] | 1072 |  * Helper function for set_page_dirty family. | 
 | 1073 |  * NOTE: This relies on being atomic wrt interrupts. | 
 | 1074 |  */ | 
 | 1075 | void account_page_dirtied(struct page *page, struct address_space *mapping) | 
 | 1076 | { | 
 | 1077 | 	if (mapping_cap_account_dirty(mapping)) { | 
 | 1078 | 		__inc_zone_page_state(page, NR_FILE_DIRTY); | 
 | 1079 | 		__inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE); | 
 | 1080 | 		task_dirty_inc(current); | 
 | 1081 | 		task_io_account_write(PAGE_CACHE_SIZE); | 
 | 1082 | 	} | 
 | 1083 | } | 
 | 1084 |  | 
 | 1085 | /* | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1086 |  * For address_spaces which do not use buffers.  Just tag the page as dirty in | 
 | 1087 |  * its radix tree. | 
 | 1088 |  * | 
 | 1089 |  * This is also used when a single buffer is being dirtied: we want to set the | 
 | 1090 |  * page dirty in that case, but not all the buffers.  This is a "bottom-up" | 
 | 1091 |  * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying. | 
 | 1092 |  * | 
 | 1093 |  * Most callers have locked the page, which pins the address_space in memory. | 
 | 1094 |  * But zap_pte_range() does not lock the page, however in that case the | 
 | 1095 |  * mapping is pinned by the vma's ->vm_file reference. | 
 | 1096 |  * | 
 | 1097 |  * We take care to handle the case where the page was truncated from the | 
| Simon Arlott | 183ff22 | 2007-10-20 01:27:18 +0200 | [diff] [blame] | 1098 |  * mapping by re-checking page_mapping() inside tree_lock. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1099 |  */ | 
 | 1100 | int __set_page_dirty_nobuffers(struct page *page) | 
 | 1101 | { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1102 | 	if (!TestSetPageDirty(page)) { | 
 | 1103 | 		struct address_space *mapping = page_mapping(page); | 
 | 1104 | 		struct address_space *mapping2; | 
 | 1105 |  | 
| Andrew Morton | 8c08540 | 2006-12-10 02:19:24 -0800 | [diff] [blame] | 1106 | 		if (!mapping) | 
 | 1107 | 			return 1; | 
 | 1108 |  | 
| Nick Piggin | 19fd623 | 2008-07-25 19:45:32 -0700 | [diff] [blame] | 1109 | 		spin_lock_irq(&mapping->tree_lock); | 
| Andrew Morton | 8c08540 | 2006-12-10 02:19:24 -0800 | [diff] [blame] | 1110 | 		mapping2 = page_mapping(page); | 
 | 1111 | 		if (mapping2) { /* Race with truncate? */ | 
 | 1112 | 			BUG_ON(mapping2 != mapping); | 
| Nick Piggin | 787d221 | 2007-07-17 04:03:34 -0700 | [diff] [blame] | 1113 | 			WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page)); | 
| Edward Shishkin | e3a7cca | 2009-03-31 15:19:39 -0700 | [diff] [blame] | 1114 | 			account_page_dirtied(page, mapping); | 
| Andrew Morton | 8c08540 | 2006-12-10 02:19:24 -0800 | [diff] [blame] | 1115 | 			radix_tree_tag_set(&mapping->page_tree, | 
 | 1116 | 				page_index(page), PAGECACHE_TAG_DIRTY); | 
 | 1117 | 		} | 
| Nick Piggin | 19fd623 | 2008-07-25 19:45:32 -0700 | [diff] [blame] | 1118 | 		spin_unlock_irq(&mapping->tree_lock); | 
| Andrew Morton | 8c08540 | 2006-12-10 02:19:24 -0800 | [diff] [blame] | 1119 | 		if (mapping->host) { | 
 | 1120 | 			/* !PageAnon && !swapper_space */ | 
 | 1121 | 			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1122 | 		} | 
| Andrew Morton | 4741c9f | 2006-03-24 03:18:11 -0800 | [diff] [blame] | 1123 | 		return 1; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1124 | 	} | 
| Andrew Morton | 4741c9f | 2006-03-24 03:18:11 -0800 | [diff] [blame] | 1125 | 	return 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1126 | } | 
 | 1127 | EXPORT_SYMBOL(__set_page_dirty_nobuffers); | 
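Conversely, a filesystem that tracks dirtiness per page rather than per buffer_head would typically point its ->set_page_dirty at the function above; a hypothetical sketch, with the other methods as placeholders:

static const struct address_space_operations example_nobh_aops = {
	.writepage	= example_writepage,		/* hypothetical */
	.writepages	= example_writepages,		/* hypothetical */
	.set_page_dirty	= __set_page_dirty_nobuffers,
};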
 | 1128 |  | 
 | 1129 | /* | 
 | 1130 |  * When a writepage implementation decides that it doesn't want to write this | 
 | 1131 |  * page for some reason, it should redirty the locked page via | 
 | 1132 |  * redirty_page_for_writepage(), then unlock the page and return 0. | 
 | 1133 |  */ | 
 | 1134 | int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page) | 
 | 1135 | { | 
 | 1136 | 	wbc->pages_skipped++; | 
 | 1137 | 	return __set_page_dirty_nobuffers(page); | 
 | 1138 | } | 
 | 1139 | EXPORT_SYMBOL(redirty_page_for_writepage); | 
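A minimal sketch of the pattern described in the comment above, with hypothetical names: a ->writepage that cannot make progress re-dirties the page, unlocks it, and returns 0 so the caller simply moves on.

static int example_writepage(struct page *page, struct writeback_control *wbc)
{
	if (example_cannot_write_now(page)) {		/* hypothetical test */
		redirty_page_for_writepage(wbc, page);
		unlock_page(page);
		return 0;
	}
	/* normal path: mark writeback, unlock, submit the I/O ... */
	return example_do_writepage(page, wbc);		/* hypothetical */
}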
 | 1140 |  | 
 | 1141 | /* | 
 | 1142 |  * If the mapping doesn't provide a set_page_dirty a_op, then | 
 | 1143 |  * just fall through and assume that it wants buffer_heads. | 
 | 1144 |  */ | 
| Nick Piggin | 1cf6e7d | 2009-02-18 14:48:18 -0800 | [diff] [blame] | 1145 | int set_page_dirty(struct page *page) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1146 | { | 
 | 1147 | 	struct address_space *mapping = page_mapping(page); | 
 | 1148 |  | 
 | 1149 | 	if (likely(mapping)) { | 
 | 1150 | 		int (*spd)(struct page *) = mapping->a_ops->set_page_dirty; | 
| David Howells | 9361401 | 2006-09-30 20:45:40 +0200 | [diff] [blame] | 1151 | #ifdef CONFIG_BLOCK | 
 | 1152 | 		if (!spd) | 
 | 1153 | 			spd = __set_page_dirty_buffers; | 
 | 1154 | #endif | 
 | 1155 | 		return (*spd)(page); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1156 | 	} | 
| Andrew Morton | 4741c9f | 2006-03-24 03:18:11 -0800 | [diff] [blame] | 1157 | 	if (!PageDirty(page)) { | 
 | 1158 | 		if (!TestSetPageDirty(page)) | 
 | 1159 | 			return 1; | 
 | 1160 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1161 | 	return 0; | 
 | 1162 | } | 
 | 1163 | EXPORT_SYMBOL(set_page_dirty); | 
 | 1164 |  | 
 | 1165 | /* | 
 | 1166 |  * set_page_dirty() is racy if the caller has no reference against | 
 | 1167 |  * page->mapping->host, and if the page is unlocked.  This is because another | 
 | 1168 |  * CPU could truncate the page off the mapping and then free the mapping. | 
 | 1169 |  * | 
 | 1170 |  * Usually, the page _is_ locked, or the caller is a user-space process which | 
 | 1171 |  * holds a reference on the inode by having an open file. | 
 | 1172 |  * | 
 | 1173 |  * In other cases, the page should be locked before running set_page_dirty(). | 
 | 1174 |  */ | 
 | 1175 | int set_page_dirty_lock(struct page *page) | 
 | 1176 | { | 
 | 1177 | 	int ret; | 
 | 1178 |  | 
| Nick Piggin | db37648 | 2006-09-25 23:31:24 -0700 | [diff] [blame] | 1179 | 	lock_page_nosync(page); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1180 | 	ret = set_page_dirty(page); | 
 | 1181 | 	unlock_page(page); | 
 | 1182 | 	return ret; | 
 | 1183 | } | 
 | 1184 | EXPORT_SYMBOL(set_page_dirty_lock); | 
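A sketch of the kind of caller the comment above has in mind, assuming a driver that filled user pages obtained with get_user_pages() and so does not already hold the page lock; the function name is hypothetical.

static void example_release_user_pages(struct page **pages, int nr, int dirty)
{
	int i;

	for (i = 0; i < nr; i++) {
		if (dirty)
			set_page_dirty_lock(pages[i]);
		page_cache_release(pages[i]);
	}
}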
 | 1185 |  | 
 | 1186 | /* | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1187 |  * Clear a page's dirty flag, while caring for dirty memory accounting. | 
 | 1188 |  * Returns true if the page was previously dirty. | 
 | 1189 |  * | 
 | 1190 |  * This is for preparing to put the page under writeout.  We leave the page | 
 | 1191 |  * tagged as dirty in the radix tree so that a concurrent write-for-sync | 
 | 1192 |  * can discover it via a PAGECACHE_TAG_DIRTY walk.  The ->writepage | 
 | 1193 |  * implementation will run either set_page_writeback() or set_page_dirty(), | 
 | 1194 |  * at which stage we bring the page's dirty flag and radix-tree dirty tag | 
 | 1195 |  * back into sync. | 
 | 1196 |  * | 
 | 1197 |  * This incoherency between the page's dirty flag and radix-tree tag is | 
 | 1198 |  * unfortunate, but it only exists while the page is locked. | 
 | 1199 |  */ | 
 | 1200 | int clear_page_dirty_for_io(struct page *page) | 
 | 1201 | { | 
 | 1202 | 	struct address_space *mapping = page_mapping(page); | 
 | 1203 |  | 
| Nick Piggin | 7935289 | 2007-07-19 01:47:22 -0700 | [diff] [blame] | 1204 | 	BUG_ON(!PageLocked(page)); | 
 | 1205 |  | 
| Fengguang Wu | fe3cba1 | 2007-07-19 01:48:07 -0700 | [diff] [blame] | 1206 | 	ClearPageReclaim(page); | 
| Linus Torvalds | 7658cc2 | 2006-12-29 10:00:58 -0800 | [diff] [blame] | 1207 | 	if (mapping && mapping_cap_account_dirty(mapping)) { | 
 | 1208 | 		/* | 
 | 1209 | 		 * Yes, Virginia, this is indeed insane. | 
 | 1210 | 		 * | 
 | 1211 | 		 * We use this sequence to make sure that | 
 | 1212 | 		 *  (a) we account for dirty stats properly | 
 | 1213 | 		 *  (b) we tell the low-level filesystem to | 
 | 1214 | 		 *      mark the whole page dirty if it was | 
 | 1215 | 		 *      dirty in a pagetable. Only to then | 
 | 1216 | 		 *  (c) clean the page again and return 1 to | 
 | 1217 | 		 *      cause the writeback. | 
 | 1218 | 		 * | 
 | 1219 | 		 * This way we avoid all nasty races with the | 
 | 1220 | 		 * dirty bit in multiple places and clearing | 
 | 1221 | 		 * them concurrently from different threads. | 
 | 1222 | 		 * | 
 | 1223 | 		 * Note! Normally the "set_page_dirty(page)" | 
 | 1224 | 		 * has no effect on the actual dirty bit - since | 
 | 1225 | 		 * that will already usually be set. But we | 
 | 1226 | 		 * need the side effects, and it can help us | 
 | 1227 | 		 * avoid races. | 
 | 1228 | 		 * | 
 | 1229 | 		 * We basically use the page "master dirty bit" | 
 | 1230 | 		 * as a serialization point for all the different | 
 | 1231 | 		 * threads doing their things. | 
| Linus Torvalds | 7658cc2 | 2006-12-29 10:00:58 -0800 | [diff] [blame] | 1232 | 		 */ | 
 | 1233 | 		if (page_mkclean(page)) | 
 | 1234 | 			set_page_dirty(page); | 
| Nick Piggin | 7935289 | 2007-07-19 01:47:22 -0700 | [diff] [blame] | 1235 | 		/* | 
 | 1236 | 		 * We carefully synchronise fault handlers against | 
 | 1237 | 		 * installing a dirty pte and marking the page dirty | 
 | 1238 | 		 * at this point. We do this by having them hold the | 
 | 1239 | 		 * page lock at some point after installing their | 
 | 1240 | 		 * pte, but before marking the page dirty. | 
 | 1241 | 		 * Pages are always locked coming in here, so we get | 
 | 1242 | 		 * the desired exclusion. See mm/memory.c:do_wp_page() | 
 | 1243 | 		 * for more comments. | 
 | 1244 | 		 */ | 
| Linus Torvalds | 7658cc2 | 2006-12-29 10:00:58 -0800 | [diff] [blame] | 1245 | 		if (TestClearPageDirty(page)) { | 
| Andrew Morton | 8c08540 | 2006-12-10 02:19:24 -0800 | [diff] [blame] | 1246 | 			dec_zone_page_state(page, NR_FILE_DIRTY); | 
| Peter Zijlstra | c9e51e4 | 2007-10-16 23:25:47 -0700 | [diff] [blame] | 1247 | 			dec_bdi_stat(mapping->backing_dev_info, | 
 | 1248 | 					BDI_RECLAIMABLE); | 
| Linus Torvalds | 7658cc2 | 2006-12-29 10:00:58 -0800 | [diff] [blame] | 1249 | 			return 1; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1250 | 		} | 
| Linus Torvalds | 7658cc2 | 2006-12-29 10:00:58 -0800 | [diff] [blame] | 1251 | 		return 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1252 | 	} | 
| Linus Torvalds | 7658cc2 | 2006-12-29 10:00:58 -0800 | [diff] [blame] | 1253 | 	return TestClearPageDirty(page); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1254 | } | 
| Hans Reiser | 58bb01a | 2005-11-18 01:10:53 -0800 | [diff] [blame] | 1255 | EXPORT_SYMBOL(clear_page_dirty_for_io); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1256 |  | 
 | 1257 | int test_clear_page_writeback(struct page *page) | 
 | 1258 | { | 
 | 1259 | 	struct address_space *mapping = page_mapping(page); | 
 | 1260 | 	int ret; | 
 | 1261 |  | 
 | 1262 | 	if (mapping) { | 
| Peter Zijlstra | 69cb51d | 2007-10-16 23:25:48 -0700 | [diff] [blame] | 1263 | 		struct backing_dev_info *bdi = mapping->backing_dev_info; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1264 | 		unsigned long flags; | 
 | 1265 |  | 
| Nick Piggin | 19fd623 | 2008-07-25 19:45:32 -0700 | [diff] [blame] | 1266 | 		spin_lock_irqsave(&mapping->tree_lock, flags); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1267 | 		ret = TestClearPageWriteback(page); | 
| Peter Zijlstra | 69cb51d | 2007-10-16 23:25:48 -0700 | [diff] [blame] | 1268 | 		if (ret) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1269 | 			radix_tree_tag_clear(&mapping->page_tree, | 
 | 1270 | 						page_index(page), | 
 | 1271 | 						PAGECACHE_TAG_WRITEBACK); | 
| Miklos Szeredi | e4ad08f | 2008-04-30 00:54:37 -0700 | [diff] [blame] | 1272 | 			if (bdi_cap_account_writeback(bdi)) { | 
| Peter Zijlstra | 69cb51d | 2007-10-16 23:25:48 -0700 | [diff] [blame] | 1273 | 				__dec_bdi_stat(bdi, BDI_WRITEBACK); | 
| Peter Zijlstra | 04fbfdc | 2007-10-16 23:25:50 -0700 | [diff] [blame] | 1274 | 				__bdi_writeout_inc(bdi); | 
 | 1275 | 			} | 
| Peter Zijlstra | 69cb51d | 2007-10-16 23:25:48 -0700 | [diff] [blame] | 1276 | 		} | 
| Nick Piggin | 19fd623 | 2008-07-25 19:45:32 -0700 | [diff] [blame] | 1277 | 		spin_unlock_irqrestore(&mapping->tree_lock, flags); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1278 | 	} else { | 
 | 1279 | 		ret = TestClearPageWriteback(page); | 
 | 1280 | 	} | 
| Andrew Morton | d688abf | 2007-07-19 01:49:17 -0700 | [diff] [blame] | 1281 | 	if (ret) | 
 | 1282 | 		dec_zone_page_state(page, NR_WRITEBACK); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1283 | 	return ret; | 
 | 1284 | } | 
 | 1285 |  | 
 | 1286 | int test_set_page_writeback(struct page *page) | 
 | 1287 | { | 
 | 1288 | 	struct address_space *mapping = page_mapping(page); | 
 | 1289 | 	int ret; | 
 | 1290 |  | 
 | 1291 | 	if (mapping) { | 
| Peter Zijlstra | 69cb51d | 2007-10-16 23:25:48 -0700 | [diff] [blame] | 1292 | 		struct backing_dev_info *bdi = mapping->backing_dev_info; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1293 | 		unsigned long flags; | 
 | 1294 |  | 
| Nick Piggin | 19fd623 | 2008-07-25 19:45:32 -0700 | [diff] [blame] | 1295 | 		spin_lock_irqsave(&mapping->tree_lock, flags); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1296 | 		ret = TestSetPageWriteback(page); | 
| Peter Zijlstra | 69cb51d | 2007-10-16 23:25:48 -0700 | [diff] [blame] | 1297 | 		if (!ret) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1298 | 			radix_tree_tag_set(&mapping->page_tree, | 
 | 1299 | 						page_index(page), | 
 | 1300 | 						PAGECACHE_TAG_WRITEBACK); | 
| Miklos Szeredi | e4ad08f | 2008-04-30 00:54:37 -0700 | [diff] [blame] | 1301 | 			if (bdi_cap_account_writeback(bdi)) | 
| Peter Zijlstra | 69cb51d | 2007-10-16 23:25:48 -0700 | [diff] [blame] | 1302 | 				__inc_bdi_stat(bdi, BDI_WRITEBACK); | 
 | 1303 | 		} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1304 | 		if (!PageDirty(page)) | 
 | 1305 | 			radix_tree_tag_clear(&mapping->page_tree, | 
 | 1306 | 						page_index(page), | 
 | 1307 | 						PAGECACHE_TAG_DIRTY); | 
| Nick Piggin | 19fd623 | 2008-07-25 19:45:32 -0700 | [diff] [blame] | 1308 | 		spin_unlock_irqrestore(&mapping->tree_lock, flags); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1309 | 	} else { | 
 | 1310 | 		ret = TestSetPageWriteback(page); | 
 | 1311 | 	} | 
| Andrew Morton | d688abf | 2007-07-19 01:49:17 -0700 | [diff] [blame] | 1312 | 	if (!ret) | 
 | 1313 | 		inc_zone_page_state(page, NR_WRITEBACK); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1314 | 	return ret; | 
 | 1315 |  | 
 | 1316 | } | 
 | 1317 | EXPORT_SYMBOL(test_set_page_writeback); | 
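The two functions above are normally reached through the set_page_writeback() and end_page_writeback() wrappers rather than called directly. A hedged sketch of the usual ->writepage ordering, with hypothetical names and the actual I/O submission elided:

static int example_writepage_io(struct page *page, struct writeback_control *wbc)
{
	/*
	 * The caller (e.g. write_cache_pages() above) has already run
	 * clear_page_dirty_for_io(), so the page is clean but still locked.
	 */
	set_page_writeback(page);		/* wraps test_set_page_writeback() */
	unlock_page(page);

	example_submit_page_io(page);		/* hypothetical async submission */

	/*
	 * The I/O completion handler later calls end_page_writeback(page),
	 * which wraps test_clear_page_writeback() and wakes any waiters.
	 */
	return 0;
}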
 | 1318 |  | 
 | 1319 | /* | 
| Nick Piggin | 0012818 | 2007-10-16 01:24:40 -0700 | [diff] [blame] | 1320 |  * Return true if any of the pages in the mapping are marked with the | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1321 |  * passed tag. | 
 | 1322 |  */ | 
 | 1323 | int mapping_tagged(struct address_space *mapping, int tag) | 
 | 1324 | { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1325 | 	int ret; | 
| Nick Piggin | 0012818 | 2007-10-16 01:24:40 -0700 | [diff] [blame] | 1326 | 	rcu_read_lock(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1327 | 	ret = radix_tree_tagged(&mapping->page_tree, tag); | 
| Nick Piggin | 0012818 | 2007-10-16 01:24:40 -0700 | [diff] [blame] | 1328 | 	rcu_read_unlock(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1329 | 	return ret; | 
 | 1330 | } | 
 | 1331 | EXPORT_SYMBOL(mapping_tagged); |
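A one-line usage sketch with a hypothetical wrapper: callers pass one of the PAGECACHE_TAG_* constants to ask cheaply whether a mapping has any dirty or under-writeback pages at all.

static int example_mapping_needs_sync(struct address_space *mapping)
{
	return mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) ||
	       mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK);
}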