/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes.  ie: data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	Andrew Morton
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include "internal.h"

#define inode_to_bdi(inode)	((inode)->i_mapping->backing_dev_info)

/**
 * writeback_acquire - attempt to get exclusive writeback access to a device
 * @bdi: the device's backing_dev_info structure
 *
 * It is a waste of resources to have more than one pdflush thread blocked on
 * a single request queue.  Exclusion at the request_queue level is obtained
 * via a flag in the request_queue's backing_dev_info.state.
 *
 * Non-request_queue-backed address_spaces will share default_backing_dev_info,
 * unless they implement their own.  Which is somewhat inefficient, as this
 * may prevent concurrent writeback against multiple devices.
 */
static int writeback_acquire(struct backing_dev_info *bdi)
{
	return !test_and_set_bit(BDI_pdflush, &bdi->state);
}

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback in progress against a backing device.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
	return test_bit(BDI_pdflush, &bdi->state);
}

/**
 * writeback_release - relinquish exclusive writeback access against a device.
 * @bdi: the device's backing_dev_info structure
 */
static void writeback_release(struct backing_dev_info *bdi)
{
	BUG_ON(!writeback_in_progress(bdi));
	clear_bit(BDI_pdflush, &bdi->state);
}
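
/*
 * Usage sketch (illustrative, not part of the original file): the three
 * helpers above are meant to be paired the way the scan loop in
 * generic_sync_bdi_inodes() below pairs them, so that only one pdflush
 * thread works a given backing device at a time:
 *
 *	if (current_is_pdflush() && !writeback_acquire(bdi))
 *		return;		(another pdflush thread owns this bdi)
 *	... write back dirty inodes against bdi ...
 *	if (current_is_pdflush())
 *		writeback_release(bdi);
 *
 * Non-pdflush callers skip the exclusion entirely, exactly as the loop
 * further down does.
 */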

static noinline void block_dump___mark_inode_dirty(struct inode *inode)
{
	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
		struct dentry *dentry;
		const char *name = "?";

		dentry = d_find_alias(inode);
		if (dentry) {
			spin_lock(&dentry->d_lock);
			name = (const char *) dentry->d_name.name;
		}
		printk(KERN_DEBUG
		       "%s(%d): dirtied inode %lu (%s) on %s\n",
		       current->comm, task_pid_nr(current), inode->i_ino,
		       name, inode->i_sb->s_id);
		if (dentry) {
			spin_unlock(&dentry->d_lock);
			dput(dentry);
		}
	}
}

/**
 * __mark_inode_dirty - internal function
 * @inode: inode to mark
 * @flags: what kind of dirty (i.e. I_DIRTY_SYNC)
 *
 * Mark an inode as dirty. Callers should use mark_inode_dirty or
 * mark_inode_dirty_sync.
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * This function *must* be atomic for the I_DIRTY_PAGES case -
 * set_page_dirty() is called under spinlock in several places.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
	struct super_block *sb = inode->i_sb;

	/*
	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
	 * dirty the inode itself
	 */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		if (sb->s_op->dirty_inode)
			sb->s_op->dirty_inode(inode);
	}

	/*
	 * make sure that changes are seen by all cpus before we test i_state
	 * -- mikulas
	 */
	smp_mb();

	/* avoid the locking if we can */
	if ((inode->i_state & flags) == flags)
		return;

	if (unlikely(block_dump))
		block_dump___mark_inode_dirty(inode);

	spin_lock(&inode_lock);
	if ((inode->i_state & flags) != flags) {
		const int was_dirty = inode->i_state & I_DIRTY;

		inode->i_state |= flags;

		/*
		 * If the inode is being synced, just update its dirty state.
		 * The unlocker will place the inode on the appropriate
		 * superblock list, based upon its state.
		 */
		if (inode->i_state & I_SYNC)
			goto out;

		/*
		 * Only add valid (hashed) inodes to the superblock's
		 * dirty list.  Add blockdev inodes as well.
		 */
		if (!S_ISBLK(inode->i_mode)) {
			if (hlist_unhashed(&inode->i_hash))
				goto out;
		}
		if (inode->i_state & (I_FREEING|I_CLEAR))
			goto out;

		/*
		 * If the inode was already on b_dirty/b_io/b_more_io, don't
		 * reposition it (that would break b_dirty time-ordering).
		 */
		if (!was_dirty) {
			inode->dirtied_when = jiffies;
			list_move(&inode->i_list,
					&inode_to_bdi(inode)->b_dirty);
		}
	}
out:
	spin_unlock(&inode_lock);
}

EXPORT_SYMBOL(__mark_inode_dirty);
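
/*
 * For reference (a sketch of definitions that live in include/linux/fs.h,
 * not in this file): the wrappers the comment above tells callers to use
 * simply pass the appropriate dirty flags down to __mark_inode_dirty():
 *
 *	static inline void mark_inode_dirty(struct inode *inode)
 *	{
 *		__mark_inode_dirty(inode, I_DIRTY);
 *	}
 *
 *	static inline void mark_inode_dirty_sync(struct inode *inode)
 *	{
 *		__mark_inode_dirty(inode, I_DIRTY_SYNC);
 *	}
 */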

static int write_inode(struct inode *inode, int sync)
{
	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
		return inode->i_sb->s_op->write_inode(inode, sync);
	return 0;
}

/*
 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the b_dirty list.  If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	if (!list_empty(&bdi->b_dirty)) {
		struct inode *tail;

		tail = list_entry(bdi->b_dirty.next, struct inode, i_list);
		if (time_before(inode->dirtied_when, tail->dirtied_when))
			inode->dirtied_when = jiffies;
	}
	list_move(&inode->i_list, &bdi->b_dirty);
}

/*
 * requeue inode for re-scanning after bdi->b_io list is exhausted.
 */
static void requeue_io(struct inode *inode)
{
	list_move(&inode->i_list, &inode_to_bdi(inode)->b_more_io);
}

static void inode_sync_complete(struct inode *inode)
{
	/*
	 * Prevent speculative execution through spin_unlock(&inode_lock);
	 */
	smp_mb();
	wake_up_bit(&inode->i_state, __I_SYNC);
}

static bool inode_dirtied_after(struct inode *inode, unsigned long t)
{
	bool ret = time_after(inode->dirtied_when, t);
#ifndef CONFIG_64BIT
	/*
	 * For inodes being constantly redirtied, dirtied_when can get stuck.
	 * It _appears_ to be in the future, but is actually in distant past.
	 * This test is necessary to prevent such wrapped-around relative times
	 * from permanently stopping the whole pdflush writeback.
	 */
	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
	return ret;
}
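
/*
 * Illustrative note (not in the original source): on 32-bit systems with
 * HZ=1000, jiffies wraps roughly every 49.7 days, which is why the extra
 * time_before_eq() check above is needed.  A typical caller looks like:
 *
 *	unsigned long expire = jiffies - 30 * HZ;	(threshold is illustrative)
 *	if (!inode_dirtied_after(inode, expire))
 *		... inode has been dirty for at least ~30s, write it out ...
 *
 * Without the wraparound check, an inode whose dirtied_when landed just
 * before a wrap could look "dirtied in the future" for weeks and never
 * expire.
 */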

/*
 * Move expired dirty inodes from @delaying_queue to @dispatch_queue.
 */
static void move_expired_inodes(struct list_head *delaying_queue,
				struct list_head *dispatch_queue,
				unsigned long *older_than_this)
{
	while (!list_empty(delaying_queue)) {
		struct inode *inode = list_entry(delaying_queue->prev,
						struct inode, i_list);
		if (older_than_this &&
		    inode_dirtied_after(inode, *older_than_this))
			break;
		list_move(&inode->i_list, dispatch_queue);
	}
}

/*
 * Queue all expired dirty inodes for io, eldest first.
 */
static void queue_io(struct backing_dev_info *bdi,
		     unsigned long *older_than_this)
{
	list_splice_init(&bdi->b_more_io, bdi->b_io.prev);
	move_expired_inodes(&bdi->b_dirty, &bdi->b_io, older_than_this);
}

static int sb_on_inode_list(struct super_block *sb, struct list_head *list)
{
	struct inode *inode;
	int ret = 0;

	spin_lock(&inode_lock);
	list_for_each_entry(inode, list, i_list) {
		if (inode->i_sb == sb) {
			ret = 1;
			break;
		}
	}
	spin_unlock(&inode_lock);
	return ret;
}

int sb_has_dirty_inodes(struct super_block *sb)
{
	struct backing_dev_info *bdi;
	int ret = 0;

	/*
	 * This is REALLY expensive right now, but it'll go away
	 * when the bdi writeback is introduced
	 */
	mutex_lock(&bdi_lock);
	list_for_each_entry(bdi, &bdi_list, bdi_list) {
		if (sb_on_inode_list(sb, &bdi->b_dirty) ||
		    sb_on_inode_list(sb, &bdi->b_io) ||
		    sb_on_inode_list(sb, &bdi->b_more_io)) {
			ret = 1;
			break;
		}
	}
	mutex_unlock(&bdi_lock);

	return ret;
}
EXPORT_SYMBOL(sb_has_dirty_inodes);

/*
 * Wait for writeback on an inode to complete.
 */
static void inode_wait_for_writeback(struct inode *inode)
{
	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
	wait_queue_head_t *wqh;

	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	do {
		spin_unlock(&inode_lock);
		__wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
		spin_lock(&inode_lock);
	} while (inode->i_state & I_SYNC);
}

/*
 * Write out an inode's dirty pages.  Called under inode_lock.  Either the
 * caller has ref on the inode (either via __iget or via syscall against an fd)
 * or the inode has I_WILL_FREE set (via generic_forget_inode)
 *
 * If `wait' is set, wait on the writeout.
 *
 * The whole writeout design is quite complex and fragile.  We want to avoid
 * starvation of particular inodes when others are being redirtied, prevent
 * livelocks, etc.
 *
 * Called under inode_lock.
 */
static int
writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct address_space *mapping = inode->i_mapping;
	int wait = wbc->sync_mode == WB_SYNC_ALL;
	unsigned dirty;
	int ret;

	if (!atomic_read(&inode->i_count))
		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
	else
		WARN_ON(inode->i_state & I_WILL_FREE);

	if (inode->i_state & I_SYNC) {
		/*
		 * If this inode is locked for writeback and we are not doing
		 * writeback-for-data-integrity, move it to b_more_io so that
		 * writeback can proceed with the other inodes on s_io.
		 *
		 * We'll have another go at writing back this inode when we
		 * completed a full scan of b_io.
		 */
		if (!wait) {
			requeue_io(inode);
			return 0;
		}

		/*
		 * It's a data-integrity sync.  We must wait.
		 */
		inode_wait_for_writeback(inode);
	}

	BUG_ON(inode->i_state & I_SYNC);

	/* Set I_SYNC, reset I_DIRTY */
	dirty = inode->i_state & I_DIRTY;
	inode->i_state |= I_SYNC;
	inode->i_state &= ~I_DIRTY;

	spin_unlock(&inode_lock);

	ret = do_writepages(mapping, wbc);

	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		int err = write_inode(inode, wait);
		if (ret == 0)
			ret = err;
	}

	if (wait) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	spin_lock(&inode_lock);
	inode->i_state &= ~I_SYNC;
	if (!(inode->i_state & (I_FREEING | I_CLEAR))) {
		if (!(inode->i_state & I_DIRTY) &&
		    mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
			/*
			 * We didn't write back all the pages.  nfs_writepages()
			 * sometimes bales out without doing anything.  Redirty
			 * the inode; move it from b_io onto b_more_io/b_dirty.
			 */
			/*
			 * akpm: if the caller was the kupdate function we put
			 * this inode at the head of b_dirty so it gets first
			 * consideration.  Otherwise, move it to the tail, for
			 * the reasons described there.  I'm not really sure
			 * how much sense this makes.  Presumably I had good
			 * reasons for doing it this way, and I'd rather not
			 * muck with it at present.
			 */
			if (wbc->for_kupdate) {
				/*
				 * For the kupdate function we move the inode
				 * to b_more_io so it will get more writeout as
				 * soon as the queue becomes uncongested.
				 */
				inode->i_state |= I_DIRTY_PAGES;
				if (wbc->nr_to_write <= 0) {
					/*
					 * slice used up: queue for next turn
					 */
					requeue_io(inode);
				} else {
					/*
					 * somehow blocked: retry later
					 */
					redirty_tail(inode);
				}
			} else {
				/*
				 * Otherwise fully redirty the inode so that
				 * other inodes on this superblock will get some
				 * writeout.  Otherwise heavy writing to one
				 * file would indefinitely suspend writeout of
				 * all the other files.
				 */
				inode->i_state |= I_DIRTY_PAGES;
				redirty_tail(inode);
			}
		} else if (inode->i_state & I_DIRTY) {
			/*
			 * Someone redirtied the inode while we were writing
			 * back the pages.
			 */
			redirty_tail(inode);
		} else if (atomic_read(&inode->i_count)) {
			/*
			 * The inode is clean, inuse
			 */
			list_move(&inode->i_list, &inode_in_use);
		} else {
			/*
			 * The inode is clean, unused
			 */
			list_move(&inode->i_list, &inode_unused);
		}
	}
	inode_sync_complete(inode);
	return ret;
}

static void generic_sync_bdi_inodes(struct backing_dev_info *bdi,
				    struct writeback_control *wbc,
				    struct super_block *sb)
{
	const int is_blkdev_sb = sb_is_blkdev_sb(sb);
	const unsigned long start = jiffies;	/* livelock avoidance */

	spin_lock(&inode_lock);

	if (!wbc->for_kupdate || list_empty(&bdi->b_io))
		queue_io(bdi, wbc->older_than_this);

	while (!list_empty(&bdi->b_io)) {
		struct inode *inode = list_entry(bdi->b_io.prev,
						struct inode, i_list);
		long pages_skipped;

		/*
		 * super block given and doesn't match, skip this inode
		 */
		if (sb && sb != inode->i_sb) {
			redirty_tail(inode);
			continue;
		}

		if (!bdi_cap_writeback_dirty(bdi)) {
			redirty_tail(inode);
			if (is_blkdev_sb) {
				/*
				 * Dirty memory-backed blockdev: the ramdisk
				 * driver does this.  Skip just this inode
				 */
				continue;
			}
			/*
			 * Dirty memory-backed inode against a filesystem other
			 * than the kernel-internal bdev filesystem.  Skip the
			 * entire superblock.
			 */
			break;
		}

		if (inode->i_state & (I_NEW | I_WILL_FREE)) {
			requeue_io(inode);
			continue;
		}

		if (wbc->nonblocking && bdi_write_congested(bdi)) {
			wbc->encountered_congestion = 1;
			if (!is_blkdev_sb)
				break;		/* Skip a congested fs */
			requeue_io(inode);
			continue;		/* Skip a congested blockdev */
		}

		if (wbc->bdi && bdi != wbc->bdi) {
			if (!is_blkdev_sb)
				break;		/* fs has the wrong queue */
			requeue_io(inode);
			continue;		/* blockdev has wrong queue */
		}

		/*
		 * Was this inode dirtied after sync_sb_inodes was called?
		 * This keeps sync from extra jobs and livelock.
		 */
		if (inode_dirtied_after(inode, start))
			break;

		/* Is another pdflush already flushing this queue? */
		if (current_is_pdflush() && !writeback_acquire(bdi))
			break;

		BUG_ON(inode->i_state & (I_FREEING | I_CLEAR));
		__iget(inode);
		pages_skipped = wbc->pages_skipped;
		writeback_single_inode(inode, wbc);
		if (current_is_pdflush())
			writeback_release(bdi);
		if (wbc->pages_skipped != pages_skipped) {
			/*
			 * writeback is not making progress due to locked
			 * buffers.  Skip this inode for now.
			 */
			redirty_tail(inode);
		}
		spin_unlock(&inode_lock);
		iput(inode);
		cond_resched();
		spin_lock(&inode_lock);
		if (wbc->nr_to_write <= 0) {
			wbc->more_io = 1;
			break;
		}
		if (!list_empty(&bdi->b_more_io))
			wbc->more_io = 1;
	}

	spin_unlock(&inode_lock);
	/* Leave any unwritten inodes on b_io */
}
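
/*
 * Usage sketch (illustrative only; the concrete values are assumptions, not
 * taken from this file): a kupdate-style, non-blocking pass over a single
 * backing device would fill in a writeback_control using the fields the loop
 * above consults, then hand it to generic_sync_bdi_inodes():
 *
 *	unsigned long oldest_jif = jiffies - 30 * HZ;
 *	struct writeback_control wbc = {
 *		.bdi			= bdi,
 *		.sync_mode		= WB_SYNC_NONE,
 *		.older_than_this	= &oldest_jif,
 *		.nr_to_write		= 1024,
 *		.nonblocking		= 1,
 *		.for_kupdate		= 1,
 *	};
 *	generic_sync_bdi_inodes(bdi, &wbc, NULL);
 *
 * Passing a NULL superblock writes back whatever is queued on the bdi;
 * passing a specific sb restricts the pass to that filesystem's inodes.
 */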

/*
 * Write out a superblock's list of dirty inodes.  A wait will be performed
 * upon no inodes, all inodes or the final one, depending upon sync_mode.
 *
 * If older_than_this is non-NULL, then only write out inodes which
 * had their first dirtying at a time earlier than *older_than_this.
 *
 * If we're a pdflush thread, then implement pdflush collision avoidance
 * against the entire list.
 *
 * If `bdi' is non-zero then we're being asked to writeback a specific queue.
 * This function assumes that the blockdev superblock's inodes are backed by
 * a variety of queues, so all inodes are searched.  For other superblocks,
 * assume that all inodes are backed by the same queue.
 *
 * FIXME: this linear search could get expensive with many filesystems.  But
 * how to fix?  We need to go from an address_space to all inodes which share
 * a queue with that address_space.  (Easy: have a global "dirty superblocks"
 * list).
 *
 * The inodes to be written are parked on bdi->b_io.  They are moved back onto
 * bdi->b_dirty as they are selected for writing.  This way, none can be missed
 * on the writer throttling path, and we get decent balancing between many
 * throttled threads: we don't want them all piling up on inode_sync_wait.
 */
static void generic_sync_sb_inodes(struct super_block *sb,
				   struct writeback_control *wbc)
{
	struct backing_dev_info *bdi;

	if (!wbc->bdi) {
		mutex_lock(&bdi_lock);
		list_for_each_entry(bdi, &bdi_list, bdi_list)
			generic_sync_bdi_inodes(bdi, wbc, sb);
		mutex_unlock(&bdi_lock);
	} else
		generic_sync_bdi_inodes(wbc->bdi, wbc, sb);

	if (wbc->sync_mode == WB_SYNC_ALL) {
		struct inode *inode, *old_inode = NULL;

		spin_lock(&inode_lock);

		/*
		 * Data integrity sync. Must wait for all pages under writeback,
		 * because there may have been pages dirtied before our sync
		 * call, but which had writeout started before we write it out.
		 * In which case, the inode may not be on the dirty list, but
		 * we still have to wait for that writeout.
		 */
		list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
			struct address_space *mapping;

			if (inode->i_state &
					(I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW))
				continue;
			mapping = inode->i_mapping;
			if (mapping->nrpages == 0)
				continue;
			__iget(inode);
			spin_unlock(&inode_lock);
			/*
			 * We hold a reference to 'inode' so it couldn't have
			 * been removed from s_inodes list while we dropped the
			 * inode_lock.  We cannot iput the inode now as we can
			 * be holding the last reference and we cannot iput it
			 * under inode_lock. So we keep the reference and iput
			 * it later.
			 */
			iput(old_inode);
			old_inode = inode;

			filemap_fdatawait(mapping);

			cond_resched();

			spin_lock(&inode_lock);
		}
		spin_unlock(&inode_lock);
		iput(old_inode);
	}
}

/*
 * Start writeback of dirty pagecache data against all unlocked inodes.
 *
 * Note:
 * We don't need to grab a reference to superblock here. If it has non-empty
 * ->b_dirty it hasn't been killed yet and kill_super() won't proceed
 * past sync_inodes_sb() until the ->b_dirty/b_io/b_more_io lists are all
 * empty.  Since __sync_single_inode() regains inode_lock before it finally moves
 * inode from superblock lists we are OK.
 *
 * If `older_than_this' is non-zero then only flush inodes which have a
 * flushtime older than *older_than_this.
 *
 * If `bdi' is non-zero then we will scan the first inode against each
 * superblock until we find the matching ones.  One group will be the dirty
 * inodes against a filesystem.  Then when we hit the dummy blockdev superblock,
 * sync_sb_inodes will seek out the blockdev which matches `bdi'.  Maybe not
 * super-efficient but we're about to do a ton of I/O...
 */
void
writeback_inodes(struct writeback_control *wbc)
{
	struct super_block *sb;

	might_sleep();
	spin_lock(&sb_lock);
restart:
	list_for_each_entry_reverse(sb, &super_blocks, s_list) {
		if (sb_has_dirty_inodes(sb)) {
			/* we're making our own get_super here */
			sb->s_count++;
			spin_unlock(&sb_lock);
			/*
			 * If we can't get the readlock, there's no sense in
			 * waiting around, most of the time the FS is going to
			 * be unmounted by the time it is released.
			 */
			if (down_read_trylock(&sb->s_umount)) {
				if (sb->s_root)
					generic_sync_sb_inodes(sb, wbc);
				up_read(&sb->s_umount);
			}
			spin_lock(&sb_lock);
			if (__put_super_and_need_restart(sb))
				goto restart;
		}
		if (wbc->nr_to_write <= 0)
			break;
	}
	spin_unlock(&sb_lock);
}

/**
 * writeback_inodes_sb - writeback dirty inodes from given super_block
 * @sb: the superblock
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO. The number of pages submitted is
 * returned.
 */
long writeback_inodes_sb(struct super_block *sb)
{
	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_NONE,
		.range_start	= 0,
		.range_end	= LLONG_MAX,
	};
	unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
	unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
	long nr_to_write;

	nr_to_write = nr_dirty + nr_unstable +
			(inodes_stat.nr_inodes - inodes_stat.nr_unused);

	wbc.nr_to_write = nr_to_write;
	generic_sync_sb_inodes(sb, &wbc);
	return nr_to_write - wbc.nr_to_write;
}
EXPORT_SYMBOL(writeback_inodes_sb);
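
/*
 * Usage sketch (illustrative, not from this file): a caller that wants to
 * opportunistically push a superblock's dirty data without waiting for
 * completion can simply do:
 *
 *	long nr_submitted = writeback_inodes_sb(sb);
 *
 * The return value only says how many pages were submitted; pair it with
 * sync_inodes_sb() below when completion must be waited for.
 */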

/**
 * sync_inodes_sb - sync sb inode pages
 * @sb: the superblock
 *
 * This function writes and waits on any dirty inode belonging to this
 * super_block. The number of pages synced is returned.
 */
long sync_inodes_sb(struct super_block *sb)
{
	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_ALL,
		.range_start	= 0,
		.range_end	= LLONG_MAX,
	};
	long nr_to_write = LONG_MAX; /* doesn't actually matter */

	wbc.nr_to_write = nr_to_write;
	generic_sync_sb_inodes(sb, &wbc);
	return nr_to_write - wbc.nr_to_write;
}
EXPORT_SYMBOL(sync_inodes_sb);

/**
 * write_inode_now - write an inode to disk
 * @inode: inode to write to disk
 * @sync: whether the write should be synchronous or not
 *
 * This function commits an inode to disk immediately if it is dirty. This is
 * primarily needed by knfsd.
 *
 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
 */
int write_inode_now(struct inode *inode, int sync)
{
	int ret;
	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	if (!mapping_cap_writeback_dirty(inode->i_mapping))
		wbc.nr_to_write = 0;

	might_sleep();
	spin_lock(&inode_lock);
	ret = writeback_single_inode(inode, &wbc);
	spin_unlock(&inode_lock);
	if (sync)
		inode_sync_wait(inode);
	return ret;
}
EXPORT_SYMBOL(write_inode_now);
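
/*
 * Usage sketch (illustrative, not from this file): a caller such as knfsd
 * that needs an inode's data and metadata on disk before replying would do:
 *
 *	err = write_inode_now(inode, 1);	(1 = synchronous: wait for I/O)
 *
 * With sync == 0 the writeout is merely started, not waited for.
 */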

/**
 * sync_inode - write an inode and its pages to disk.
 * @inode: the inode to sync
 * @wbc: controls the writeback mode
 *
 * sync_inode() will write an inode and its pages to disk.  It will also
 * correctly update the inode on its superblock's dirty inode lists and will
 * update inode->i_state.
 *
 * The caller must have a ref on the inode.
 */
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
	int ret;

	spin_lock(&inode_lock);
	ret = writeback_single_inode(inode, wbc);
	spin_unlock(&inode_lock);
	return ret;
}
EXPORT_SYMBOL(sync_inode);
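
/*
 * Usage sketch (illustrative, not from this file): unlike write_inode_now(),
 * the caller supplies the writeback_control, so it can bound or tune the
 * writeout, e.g. a data-integrity sync over the whole file:
 *
 *	struct writeback_control wbc = {
 *		.sync_mode	= WB_SYNC_ALL,
 *		.nr_to_write	= LONG_MAX,
 *		.range_start	= 0,
 *		.range_end	= LLONG_MAX,
 *	};
 *	err = sync_inode(inode, &wbc);
 */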

/**
 * generic_osync_inode - flush all dirty data for a given inode to disk
 * @inode: inode to write
 * @mapping: the address_space that should be flushed
 * @what: what to write and wait upon
 *
 * This can be called by file_write functions for files which have the
 * O_SYNC flag set, to flush dirty writes to disk.
 *
 * @what is a bitmask, specifying which part of the inode's data should be
 * written and waited upon.
 *
 * OSYNC_DATA:     i_mapping's dirty data
 * OSYNC_METADATA: the buffers at i_mapping->private_list
 * OSYNC_INODE:    the inode itself
 */

int generic_osync_inode(struct inode *inode, struct address_space *mapping, int what)
{
	int err = 0;
	int need_write_inode_now = 0;
	int err2;

	if (what & OSYNC_DATA)
		err = filemap_fdatawrite(mapping);
	if (what & (OSYNC_METADATA|OSYNC_DATA)) {
		err2 = sync_mapping_buffers(mapping);
		if (!err)
			err = err2;
	}
	if (what & OSYNC_DATA) {
		err2 = filemap_fdatawait(mapping);
		if (!err)
			err = err2;
	}

	spin_lock(&inode_lock);
	if ((inode->i_state & I_DIRTY) &&
	    ((what & OSYNC_INODE) || (inode->i_state & I_DIRTY_DATASYNC)))
		need_write_inode_now = 1;
	spin_unlock(&inode_lock);

	if (need_write_inode_now) {
		err2 = write_inode_now(inode, 1);
		if (!err)
			err = err2;
	}
	else
		inode_sync_wait(inode);

	return err;
}
EXPORT_SYMBOL(generic_osync_inode);
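
/*
 * Usage sketch (illustrative, not from this file): O_SYNC write paths of
 * this era typically ask for data plus ordered metadata after copying the
 * user's bytes into the page cache:
 *
 *	err = generic_osync_inode(inode, mapping,
 *				  OSYNC_METADATA | OSYNC_DATA);
 *
 * Adding OSYNC_INODE additionally forces the inode itself out via
 * write_inode_now(), as the function above shows.
 */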