/*
 * mm/page-writeback.c.
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains functions related to writing back dirty pages at the
 * address_space level.
 *
 * 10Apr2002    akpm@zip.com.au
 *              Initial version
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/init.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/mpage.h>
#include <linux/rmap.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>

/*
 * The maximum number of pages to writeout in a single bdflush/kupdate
 * operation.  We do this so we don't hold I_LOCK against an inode for
 * enormous amounts of time, which would block a userspace task which has
 * been forced to throttle against that inode.  Also, the code reevaluates
 * the dirty limits each time it has written this many pages.
 */
#define MAX_WRITEBACK_PAGES     1024

/*
 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
 * will look to see if it needs to force writeback or throttling.
 */
static long ratelimit_pages = 32;

static long total_pages;        /* The total number of pages in the machine. */
static int dirty_exceeded __cacheline_aligned_in_smp;   /* Dirty mem may be over limit */

/*
 * When balance_dirty_pages decides that the caller needs to perform some
 * non-background writeback, this is how many pages it will attempt to write.
 * It should be somewhat larger than RATELIMIT_PAGES to ensure that reasonably
 * large amounts of I/O are submitted.
 */
static inline long sync_writeback_pages(void)
{
        return ratelimit_pages + ratelimit_pages / 2;
}
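
/*
 * Illustrative arithmetic (value assumed; ratelimit_pages is recomputed by
 * set_ratelimit() at boot): if ratelimit_pages ends up capped at 1024 pages
 * (4MB of 4K pages), sync_writeback_pages() asks balance_dirty_pages() to
 * write 1024 + 1024/2 = 1536 pages, roughly 6MB, per throttling pass.
 */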

/* The following parameters are exported via /proc/sys/vm */

/*
 * Start background writeback (via pdflush) at this percentage
 */
int dirty_background_ratio = 10;

/*
 * The generator of dirty data starts writeback at this percentage
 */
int vm_dirty_ratio = 40;

/*
 * The interval between `kupdate'-style writebacks, in jiffies
 */
int dirty_writeback_interval = 5 * HZ;

/*
 * The longest number of jiffies for which data is allowed to remain dirty
 */
int dirty_expire_interval = 30 * HZ;

/*
 * Flag that makes the machine dump writes/reads and block dirtyings.
 */
int block_dump;

/*
 * Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies:
 * a full sync is triggered after this time elapses without any disk activity.
 */
int laptop_mode;

EXPORT_SYMBOL(laptop_mode);

/* End of sysctl-exported parameters */


static void background_writeout(unsigned long _min_pages);

/*
 * Work out the current dirty-memory clamping and background writeout
 * thresholds.
 *
 * The main aim here is to lower them aggressively if there is a lot of mapped
 * memory around, to avoid stressing page reclaim with lots of unreclaimable
 * pages.  It is better to clamp down on writers than to start swapping and
 * performing lots of scanning.
 *
 * We only allow 1/2 of the currently-unmapped memory to be dirtied.
 *
 * We don't permit the clamping level to fall below 5% - that is getting rather
 * excessive.
 *
 * We make sure that the background writeout level is below the adjusted
 * clamping level.
 */
static void
get_dirty_limits(long *pbackground, long *pdirty,
                 struct address_space *mapping)
{
        int background_ratio;           /* Percentages */
        int dirty_ratio;
        int unmapped_ratio;
        long background;
        long dirty;
        unsigned long available_memory = total_pages;
        struct task_struct *tsk;

#ifdef CONFIG_HIGHMEM
        /*
         * If this mapping can only allocate from low memory,
         * we exclude high memory from our count.
         */
        if (mapping && !(mapping_gfp_mask(mapping) & __GFP_HIGHMEM))
                available_memory -= totalhigh_pages;
#endif

        unmapped_ratio = 100 - ((global_page_state(NR_FILE_MAPPED) +
                                global_page_state(NR_ANON_PAGES)) * 100) /
                                        total_pages;

        dirty_ratio = vm_dirty_ratio;
        if (dirty_ratio > unmapped_ratio / 2)
                dirty_ratio = unmapped_ratio / 2;

        if (dirty_ratio < 5)
                dirty_ratio = 5;

        background_ratio = dirty_background_ratio;
        if (background_ratio >= dirty_ratio)
                background_ratio = dirty_ratio / 2;

        background = (background_ratio * available_memory) / 100;
        dirty = (dirty_ratio * available_memory) / 100;
        tsk = current;
        if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
                background += background / 4;
                dirty += dirty / 4;
        }
        *pbackground = background;
        *pdirty = dirty;
}
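
/*
 * Worked example, purely illustrative (all numbers assumed): on a machine
 * with total_pages = 262144 (1GB of 4K pages) where 30% of memory is mapped,
 * unmapped_ratio is 70, so vm_dirty_ratio = 40 is clamped to 70/2 = 35.
 * With dirty_background_ratio = 10, the resulting thresholds are
 * dirty = (35 * 262144) / 100 = 91750 pages and
 * background = (10 * 262144) / 100 = 26214 pages.
 */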

/*
 * balance_dirty_pages() must be called by processes which are generating dirty
 * data.  It looks at the number of dirty pages in the machine and will force
 * the caller to perform writeback if the system is over `vm_dirty_ratio'.
 * If we're over `background_thresh' then pdflush is woken to perform some
 * writeout.
 */
static void balance_dirty_pages(struct address_space *mapping)
{
        long nr_reclaimable;
        long background_thresh;
        long dirty_thresh;
        unsigned long pages_written = 0;
        unsigned long write_chunk = sync_writeback_pages();

        struct backing_dev_info *bdi = mapping->backing_dev_info;

        for (;;) {
                struct writeback_control wbc = {
                        .bdi            = bdi,
                        .sync_mode      = WB_SYNC_NONE,
                        .older_than_this = NULL,
                        .nr_to_write    = write_chunk,
                        .range_cyclic   = 1,
                };

                get_dirty_limits(&background_thresh, &dirty_thresh, mapping);
                nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
                                        global_page_state(NR_UNSTABLE_NFS);
                if (nr_reclaimable + global_page_state(NR_WRITEBACK) <=
                                dirty_thresh)
                        break;

                if (!dirty_exceeded)
                        dirty_exceeded = 1;

                /* Note: nr_reclaimable denotes nr_dirty + nr_unstable.
                 * Unstable writes are a feature of certain networked
                 * filesystems (e.g. NFS) in which data may have been
                 * written to the server's write cache, but has not yet
                 * been flushed to permanent storage.
                 */
                if (nr_reclaimable) {
                        writeback_inodes(&wbc);
                        get_dirty_limits(&background_thresh,
                                        &dirty_thresh, mapping);
                        nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
                                        global_page_state(NR_UNSTABLE_NFS);
                        if (nr_reclaimable +
                                        global_page_state(NR_WRITEBACK) <=
                                        dirty_thresh)
                                break;
                        pages_written += write_chunk - wbc.nr_to_write;
                        if (pages_written >= write_chunk)
                                break;          /* We've done our duty */
                }
                blk_congestion_wait(WRITE, HZ/10);
        }

        if (nr_reclaimable + global_page_state(NR_WRITEBACK)
                        <= dirty_thresh && dirty_exceeded)
                dirty_exceeded = 0;

        if (writeback_in_progress(bdi))
                return;         /* pdflush is already working this queue */

        /*
         * In laptop mode, we wait until hitting the higher threshold before
         * starting background writeout, and then write out all the way down
         * to the lower threshold.  So slow writers cause minimal disk activity.
         *
         * In normal mode, we start background writeout at the lower
         * background_thresh, to keep the amount of dirty memory low.
         */
        if ((laptop_mode && pages_written) ||
             (!laptop_mode && (nr_reclaimable > background_thresh)))
                pdflush_operation(background_writeout, 0);
}

/**
 * balance_dirty_pages_ratelimited_nr - balance dirty memory state
 * @mapping: address_space which was dirtied
 * @nr_pages_dirtied: number of pages which the caller has just dirtied
 *
 * Processes which are dirtying memory should call in here once for each page
 * which was newly dirtied.  The function will periodically check the system's
 * dirty state and will initiate writeback if needed.
 *
 * On really big machines, working out the global dirty state is expensive, so
 * try to avoid doing it too often (ratelimiting).  But once we're over the
 * dirty memory limit we decrease the ratelimiting by a lot, to prevent
 * individual processes from overshooting the limit by (ratelimit_pages) each.
 */
void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
                                        unsigned long nr_pages_dirtied)
{
        static DEFINE_PER_CPU(unsigned long, ratelimits) = 0;
        unsigned long ratelimit;
        unsigned long *p;

        ratelimit = ratelimit_pages;
        if (dirty_exceeded)
                ratelimit = 8;

        /*
         * Check the rate limiting.  Also, we do not want to throttle real-time
         * tasks in balance_dirty_pages().  Period.
         */
        preempt_disable();
        p = &__get_cpu_var(ratelimits);
        *p += nr_pages_dirtied;
        if (unlikely(*p >= ratelimit)) {
                *p = 0;
                preempt_enable();
                balance_dirty_pages(mapping);
                return;
        }
        preempt_enable();
}
EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);
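
/*
 * Illustrative (commented-out) caller sketch, not part of this file: a
 * filesystem's buffered-write path typically dirties one page at a time and
 * then calls the single-page wrapper, assumed here to be the
 * balance_dirty_pages_ratelimited() inline from <linux/writeback.h>:
 *
 *      set_page_dirty(page);
 *      unlock_page(page);
 *      balance_dirty_pages_ratelimited(mapping);
 *
 * Callers which dirty several pages at once can pass the count directly via
 * balance_dirty_pages_ratelimited_nr(mapping, nr_dirtied).
 */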

void throttle_vm_writeout(void)
{
        long background_thresh;
        long dirty_thresh;

        for ( ; ; ) {
                get_dirty_limits(&background_thresh, &dirty_thresh, NULL);

                /*
                 * Boost the allowable dirty threshold a bit for page
                 * allocators so they don't get DoS'ed by heavy writers
                 */
                dirty_thresh += dirty_thresh / 10;      /* wheeee... */

                if (global_page_state(NR_UNSTABLE_NFS) +
                                global_page_state(NR_WRITEBACK) <= dirty_thresh)
                        break;
                blk_congestion_wait(WRITE, HZ/10);
        }
}


/*
 * writeback at least _min_pages, and keep writing until the amount of dirty
 * memory is less than the background threshold, or until we're all clean.
 */
static void background_writeout(unsigned long _min_pages)
{
        long min_pages = _min_pages;
        struct writeback_control wbc = {
                .bdi            = NULL,
                .sync_mode      = WB_SYNC_NONE,
                .older_than_this = NULL,
                .nr_to_write    = 0,
                .nonblocking    = 1,
                .range_cyclic   = 1,
        };

        for ( ; ; ) {
                long background_thresh;
                long dirty_thresh;

                get_dirty_limits(&background_thresh, &dirty_thresh, NULL);
                if (global_page_state(NR_FILE_DIRTY) +
                        global_page_state(NR_UNSTABLE_NFS) < background_thresh
                                && min_pages <= 0)
                        break;
                wbc.encountered_congestion = 0;
                wbc.nr_to_write = MAX_WRITEBACK_PAGES;
                wbc.pages_skipped = 0;
                writeback_inodes(&wbc);
                min_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
                if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) {
                        /* Wrote less than expected */
                        blk_congestion_wait(WRITE, HZ/10);
                        if (!wbc.encountered_congestion)
                                break;
                }
        }
}

/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.  Returns 0 if a pdflush thread was dispatched.  Returns
 * -1 if all pdflush threads were busy.
 */
int wakeup_pdflush(long nr_pages)
{
        if (nr_pages == 0)
                nr_pages = global_page_state(NR_FILE_DIRTY) +
                                global_page_state(NR_UNSTABLE_NFS);
        return pdflush_operation(background_writeout, nr_pages);
}

static void wb_timer_fn(unsigned long unused);
static void laptop_timer_fn(unsigned long unused);

static DEFINE_TIMER(wb_timer, wb_timer_fn, 0, 0);
static DEFINE_TIMER(laptop_mode_wb_timer, laptop_timer_fn, 0, 0);

/*
 * Periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space.  So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval.  But if a writeback event
 * takes longer than one dirty_writeback_interval, then leave a one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static void wb_kupdate(unsigned long arg)
{
        unsigned long oldest_jif;
        unsigned long start_jif;
        unsigned long next_jif;
        long nr_to_write;
        struct writeback_control wbc = {
                .bdi            = NULL,
                .sync_mode      = WB_SYNC_NONE,
                .older_than_this = &oldest_jif,
                .nr_to_write    = 0,
                .nonblocking    = 1,
                .for_kupdate    = 1,
                .range_cyclic   = 1,
        };

        sync_supers();

        oldest_jif = jiffies - dirty_expire_interval;
        start_jif = jiffies;
        next_jif = start_jif + dirty_writeback_interval;
        nr_to_write = global_page_state(NR_FILE_DIRTY) +
                        global_page_state(NR_UNSTABLE_NFS) +
                        (inodes_stat.nr_inodes - inodes_stat.nr_unused);
        while (nr_to_write > 0) {
                wbc.encountered_congestion = 0;
                wbc.nr_to_write = MAX_WRITEBACK_PAGES;
                writeback_inodes(&wbc);
                if (wbc.nr_to_write > 0) {
                        if (wbc.encountered_congestion)
                                blk_congestion_wait(WRITE, HZ/10);
                        else
                                break;  /* All the old data is written */
                }
                nr_to_write -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
        }
        if (time_before(next_jif, jiffies + HZ))
                next_jif = jiffies + HZ;
        if (dirty_writeback_interval)
                mod_timer(&wb_timer, next_jif);
}

/*
 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
 */
int dirty_writeback_centisecs_handler(ctl_table *table, int write,
        struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
        proc_dointvec_userhz_jiffies(table, write, file, buffer, length, ppos);
        if (dirty_writeback_interval) {
                mod_timer(&wb_timer,
                        jiffies + dirty_writeback_interval);
        } else {
                del_timer(&wb_timer);
        }
        return 0;
}
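
/*
 * Illustrative example (values assumed): echoing 500 into
 * /proc/sys/vm/dirty_writeback_centisecs stores 500 centiseconds, converted
 * to jiffies (5 * HZ), in dirty_writeback_interval and re-arms wb_timer five
 * seconds from now; echoing 0 disables periodic kupdate writeback by
 * deleting the timer.
 */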

static void wb_timer_fn(unsigned long unused)
{
        if (pdflush_operation(wb_kupdate, 0) < 0)
                mod_timer(&wb_timer, jiffies + HZ); /* delay 1 second */
}

static void laptop_flush(unsigned long unused)
{
        sys_sync();
}

static void laptop_timer_fn(unsigned long unused)
{
        pdflush_operation(laptop_flush, 0);
}

/*
 * We've spun up the disk and we're in laptop mode: schedule writeback
 * of all dirty data a few seconds from now.  If the flush is already scheduled
 * then push it back - the user is still using the disk.
 */
void laptop_io_completion(void)
{
        mod_timer(&laptop_mode_wb_timer, jiffies + laptop_mode);
}

/*
 * We're in laptop mode and we've just synced. The sync's writes will have
 * caused another writeback to be scheduled by laptop_io_completion.
 * Nothing needs to be written back anymore, so we unschedule the writeback.
 */
void laptop_sync_completion(void)
{
        del_timer(&laptop_mode_wb_timer);
}
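
/*
 * Illustrative timeline (numbers assumed, and it is assumed that the block
 * layer calls laptop_io_completion() on request completion when laptop_mode
 * is enabled): with laptop_mode holding a timeout of 5 * HZ jiffies, every
 * completed disk request pushes laptop_mode_wb_timer five seconds into the
 * future.  Once the disk has been idle for that long, laptop_timer_fn() has
 * pdflush run laptop_flush() -> sys_sync(), and laptop_sync_completion()
 * then cancels the timer that the sync's own I/O re-armed.
 */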

/*
 * If ratelimit_pages is too high then we can get into dirty-data overload
 * if a large number of processes all perform writes at the same time.
 * If it is too low then SMP machines will recalculate the (expensive)
 * dirty limits too often.
 *
 * Here we set ratelimit_pages to a level which ensures that when all CPUs are
 * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
 * thresholds before writeback cuts in.
 *
 * But the limit should not be set too high, because it also controls the
 * amount of memory which the balance_dirty_pages() caller has to write back.
 * If this is too large then the caller will block on the IO queue all the
 * time.  So limit it to four megabytes - the balance_dirty_pages() caller
 * will write six megabyte chunks, max.
 */

static void set_ratelimit(void)
{
        ratelimit_pages = total_pages / (num_online_cpus() * 32);
        if (ratelimit_pages < 16)
                ratelimit_pages = 16;
        if (ratelimit_pages * PAGE_CACHE_SIZE > 4096 * 1024)
                ratelimit_pages = (4096 * 1024) / PAGE_CACHE_SIZE;
}
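
/*
 * Illustrative arithmetic (machine size assumed): with total_pages = 262144
 * (1GB of 4K pages) and 4 online CPUs, ratelimit_pages = 262144 / (4 * 32) =
 * 2048 pages, i.e. 8MB, which exceeds the 4MB cap and is clamped to
 * (4096 * 1024) / 4096 = 1024 pages.  A very small machine would instead be
 * raised to the floor of 16 pages.
 */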

static int __cpuinit
ratelimit_handler(struct notifier_block *self, unsigned long u, void *v)
{
        set_ratelimit();
        return 0;
}

static struct notifier_block __cpuinitdata ratelimit_nb = {
        .notifier_call  = ratelimit_handler,
        .next           = NULL,
};

/*
 * If the machine has a large highmem:lowmem ratio then scale back the default
 * dirty memory thresholds: allowing too much dirty highmem pins an excessive
 * number of buffer_heads.
 */
void __init page_writeback_init(void)
{
        long buffer_pages = nr_free_buffer_pages();
        long correction;

        total_pages = nr_free_pagecache_pages();

        correction = (100 * 4 * buffer_pages) / total_pages;

        if (correction < 100) {
                dirty_background_ratio *= correction;
                dirty_background_ratio /= 100;
                vm_dirty_ratio *= correction;
                vm_dirty_ratio /= 100;

                if (dirty_background_ratio <= 0)
                        dirty_background_ratio = 1;
                if (vm_dirty_ratio <= 0)
                        vm_dirty_ratio = 1;
        }
        mod_timer(&wb_timer, jiffies + dirty_writeback_interval);
        set_ratelimit();
        register_cpu_notifier(&ratelimit_nb);
}
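
/*
 * Illustrative example (sizes assumed): on a highmem box where only about
 * 1/8 of the pagecache-capable pages live in lowmem, correction =
 * (100 * 4 * buffer_pages) / total_pages comes to roughly 50, so the default
 * ratios of 10%/40% are scaled down to 5% background and 20% dirty.  When
 * lowmem holds 1/4 or more of the page cache, correction >= 100 and the
 * defaults are left untouched.
 */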

int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
        int ret;

        if (wbc->nr_to_write <= 0)
                return 0;
        wbc->for_writepages = 1;
        if (mapping->a_ops->writepages)
                ret = mapping->a_ops->writepages(mapping, wbc);
        else
                ret = generic_writepages(mapping, wbc);
        wbc->for_writepages = 0;
        return ret;
}

/**
 * write_one_page - write out a single page and optionally wait on I/O
 *
 * @page: the page to write
 * @wait: if true, wait on writeout
 *
 * The page must be locked by the caller and will be unlocked upon return.
 *
 * write_one_page() returns a negative error code if I/O failed.
 */
int write_one_page(struct page *page, int wait)
{
        struct address_space *mapping = page->mapping;
        int ret = 0;
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_ALL,
                .nr_to_write = 1,
        };

        BUG_ON(!PageLocked(page));

        if (wait)
                wait_on_page_writeback(page);

        if (clear_page_dirty_for_io(page)) {
                page_cache_get(page);
                ret = mapping->a_ops->writepage(page, &wbc);
                if (ret == 0 && wait) {
                        wait_on_page_writeback(page);
                        if (PageError(page))
                                ret = -EIO;
                }
                page_cache_release(page);
        } else {
                unlock_page(page);
        }
        return ret;
}
EXPORT_SYMBOL(write_one_page);
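
/*
 * Illustrative (commented-out) caller sketch, not taken from any particular
 * filesystem: a path that wants a single already-dirty page written out
 * synchronously might do the following; write_one_page() unlocks the page
 * itself, whether or not the write succeeds:
 *
 *      lock_page(page);
 *      err = write_one_page(page, 1);
 *      if (err)
 *              handle the I/O error;
 */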

/*
 * For address_spaces which do not use buffers.  Just tag the page as dirty in
 * its radix tree.
 *
 * This is also used when a single buffer is being dirtied: we want to set the
 * page dirty in that case, but not all the buffers.  This is a "bottom-up"
 * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
 *
 * Most callers have locked the page, which pins the address_space in memory.
 * But zap_pte_range() does not lock the page; in that case the mapping is
 * pinned by the vma's ->vm_file reference.
 *
 * We take care to handle the case where the page was truncated from the
 * mapping by re-checking page_mapping() inside tree_lock.
 */
int __set_page_dirty_nobuffers(struct page *page)
{
        if (!TestSetPageDirty(page)) {
                struct address_space *mapping = page_mapping(page);
                struct address_space *mapping2;

                if (mapping) {
                        write_lock_irq(&mapping->tree_lock);
                        mapping2 = page_mapping(page);
                        if (mapping2) { /* Race with truncate? */
                                BUG_ON(mapping2 != mapping);
                                if (mapping_cap_account_dirty(mapping))
                                        __inc_zone_page_state(page,
                                                        NR_FILE_DIRTY);
                                radix_tree_tag_set(&mapping->page_tree,
                                        page_index(page), PAGECACHE_TAG_DIRTY);
                        }
                        write_unlock_irq(&mapping->tree_lock);
                        if (mapping->host) {
                                /* !PageAnon && !swapper_space */
                                __mark_inode_dirty(mapping->host,
                                                        I_DIRTY_PAGES);
                        }
                }
                return 1;
        }
        return 0;
}
EXPORT_SYMBOL(__set_page_dirty_nobuffers);
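
/*
 * Illustrative (commented-out) sketch, names assumed rather than taken from a
 * real filesystem: an address_space that keeps no buffer_heads can point its
 * ->set_page_dirty method straight at this helper:
 *
 *      static const struct address_space_operations foo_aops = {
 *              .set_page_dirty = __set_page_dirty_nobuffers,
 *              ...
 *      };
 */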

/*
 * When a writepage implementation decides that it doesn't want to write this
 * page for some reason, it should redirty the locked page via
 * redirty_page_for_writepage() and it should then unlock the page and return 0
 */
int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
{
        wbc->pages_skipped++;
        return __set_page_dirty_nobuffers(page);
}
EXPORT_SYMBOL(redirty_page_for_writepage);

/*
 * If the mapping doesn't provide a set_page_dirty a_op, then
 * just fall through and assume that it wants buffer_heads.
 */
int fastcall set_page_dirty(struct page *page)
{
        struct address_space *mapping = page_mapping(page);

        if (likely(mapping)) {
                int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
                if (spd)
                        return (*spd)(page);
                return __set_page_dirty_buffers(page);
        }
        if (!PageDirty(page)) {
                if (!TestSetPageDirty(page))
                        return 1;
        }
        return 0;
}
EXPORT_SYMBOL(set_page_dirty);

/*
 * set_page_dirty() is racy if the caller has no reference against
 * page->mapping->host, and if the page is unlocked.  This is because another
 * CPU could truncate the page off the mapping and then free the mapping.
 *
 * Usually, the page _is_ locked, or the caller is a user-space process which
 * holds a reference on the inode by having an open file.
 *
 * In other cases, the page should be locked before running set_page_dirty().
 */
int set_page_dirty_lock(struct page *page)
{
        int ret;

        lock_page(page);
        ret = set_page_dirty(page);
        unlock_page(page);
        return ret;
}
EXPORT_SYMBOL(set_page_dirty_lock);
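
/*
 * Illustrative (commented-out) sketch, not from any specific driver: code
 * which filled pages obtained via get_user_pages() and holds no page lock
 * typically marks them dirty through the locking variant before dropping
 * its references:
 *
 *      for (i = 0; i < nr_pages; i++) {
 *              set_page_dirty_lock(pages[i]);
 *              page_cache_release(pages[i]);
 *      }
 */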

/*
 * Clear a page's dirty flag, while caring for dirty memory accounting.
 * Returns true if the page was previously dirty.
 */
int test_clear_page_dirty(struct page *page)
{
        struct address_space *mapping = page_mapping(page);
        unsigned long flags;

        if (mapping) {
                write_lock_irqsave(&mapping->tree_lock, flags);
                if (TestClearPageDirty(page)) {
                        radix_tree_tag_clear(&mapping->page_tree,
                                                page_index(page),
                                                PAGECACHE_TAG_DIRTY);
                        write_unlock_irqrestore(&mapping->tree_lock, flags);
                        /*
                         * We can continue to use `mapping' here because the
                         * page is locked, which pins the address_space
                         */
                        if (mapping_cap_account_dirty(mapping)) {
                                page_mkclean(page);
                                dec_zone_page_state(page, NR_FILE_DIRTY);
                        }
                        return 1;
                }
                write_unlock_irqrestore(&mapping->tree_lock, flags);
                return 0;
        }
        return TestClearPageDirty(page);
}
EXPORT_SYMBOL(test_clear_page_dirty);

/*
 * Clear a page's dirty flag, while caring for dirty memory accounting.
 * Returns true if the page was previously dirty.
 *
 * This is for preparing to put the page under writeout.  We leave the page
 * tagged as dirty in the radix tree so that a concurrent write-for-sync
 * can discover it via a PAGECACHE_TAG_DIRTY walk.  The ->writepage
 * implementation will run either set_page_writeback() or set_page_dirty(),
 * at which stage we bring the page's dirty flag and radix-tree dirty tag
 * back into sync.
 *
 * This incoherency between the page's dirty flag and radix-tree tag is
 * unfortunate, but it only exists while the page is locked.
 */
int clear_page_dirty_for_io(struct page *page)
{
        struct address_space *mapping = page_mapping(page);

        if (mapping) {
                if (TestClearPageDirty(page)) {
                        if (mapping_cap_account_dirty(mapping)) {
                                page_mkclean(page);
                                dec_zone_page_state(page, NR_FILE_DIRTY);
                        }
                        return 1;
                }
                return 0;
        }
        return TestClearPageDirty(page);
}
EXPORT_SYMBOL(clear_page_dirty_for_io);

int test_clear_page_writeback(struct page *page)
{
        struct address_space *mapping = page_mapping(page);
        int ret;

        if (mapping) {
                unsigned long flags;

                write_lock_irqsave(&mapping->tree_lock, flags);
                ret = TestClearPageWriteback(page);
                if (ret)
                        radix_tree_tag_clear(&mapping->page_tree,
                                                page_index(page),
                                                PAGECACHE_TAG_WRITEBACK);
                write_unlock_irqrestore(&mapping->tree_lock, flags);
        } else {
                ret = TestClearPageWriteback(page);
        }
        return ret;
}

int test_set_page_writeback(struct page *page)
{
        struct address_space *mapping = page_mapping(page);
        int ret;

        if (mapping) {
                unsigned long flags;

                write_lock_irqsave(&mapping->tree_lock, flags);
                ret = TestSetPageWriteback(page);
                if (!ret)
                        radix_tree_tag_set(&mapping->page_tree,
                                                page_index(page),
                                                PAGECACHE_TAG_WRITEBACK);
                if (!PageDirty(page))
                        radix_tree_tag_clear(&mapping->page_tree,
                                                page_index(page),
                                                PAGECACHE_TAG_DIRTY);
                write_unlock_irqrestore(&mapping->tree_lock, flags);
        } else {
                ret = TestSetPageWriteback(page);
        }
        return ret;
}
EXPORT_SYMBOL(test_set_page_writeback);

/*
 * Wakes up tasks that are being throttled due to writeback congestion
 */
void writeback_congestion_end(void)
{
        blk_congestion_end(WRITE);
}
EXPORT_SYMBOL(writeback_congestion_end);

/*
 * Return true if any of the pages in the mapping are marked with the
 * passed tag.
 */
int mapping_tagged(struct address_space *mapping, int tag)
{
        unsigned long flags;
        int ret;

        read_lock_irqsave(&mapping->tree_lock, flags);
        ret = radix_tree_tagged(&mapping->page_tree, tag);
        read_unlock_irqrestore(&mapping->tree_lock, flags);
        return ret;
}
EXPORT_SYMBOL(mapping_tagged);