/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes. ie: data writeback. Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002    Andrew Morton
 *              Split out of fs/inode.c
 *              Additions for address_space-based writeback
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include "internal.h"

#define inode_to_bdi(inode)     ((inode)->i_mapping->backing_dev_info)

/*
 * We don't actually have pdflush, but this one is exported through /proc...
 */
int nr_pdflush_threads;

/*
 * Passed into wb_writeback(), essentially a subset of writeback_control
 */
struct wb_writeback_args {
        long nr_pages;
        struct super_block *sb;
        enum writeback_sync_modes sync_mode;
        int for_kupdate;
        int range_cyclic;
};

/*
 * Work items for the bdi_writeback threads
 */
struct bdi_work {
        struct list_head list;
        struct rcu_head rcu_head;

        unsigned long seen;
        atomic_t pending;

        struct wb_writeback_args args;

        unsigned long state;
};

enum {
        WS_USED_B = 0,
        WS_ONSTACK_B,
};

#define WS_USED (1 << WS_USED_B)
#define WS_ONSTACK (1 << WS_ONSTACK_B)

static inline bool bdi_work_on_stack(struct bdi_work *work)
{
        return test_bit(WS_ONSTACK_B, &work->state);
}

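/*
 * Fill in a work item from the writeback_control that describes this
 * writeback request, and mark it in use (WS_USED).
 */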
static inline void bdi_work_init(struct bdi_work *work,
                                 struct writeback_control *wbc)
{
        INIT_RCU_HEAD(&work->rcu_head);
        work->args.sb = wbc->sb;
        work->args.nr_pages = wbc->nr_to_write;
        work->args.sync_mode = wbc->sync_mode;
        work->args.range_cyclic = wbc->range_cyclic;
        work->args.for_kupdate = 0;
        work->state = WS_USED;
}

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback waiting to be handled against a
 * backing device.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
        return !list_empty(&bdi->work_list);
}

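/*
 * Mark a work item as no longer in use and wake up anyone waiting for it
 * in bdi_wait_on_work_clear().
 */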
static void bdi_work_clear(struct bdi_work *work)
{
        clear_bit(WS_USED_B, &work->state);
        smp_mb__after_clear_bit();
        wake_up_bit(&work->state, WS_USED_B);
}

static void bdi_work_free(struct rcu_head *head)
{
        struct bdi_work *work = container_of(head, struct bdi_work, rcu_head);

        if (!bdi_work_on_stack(work))
                kfree(work);
        else
                bdi_work_clear(work);
}

static void wb_work_complete(struct bdi_work *work)
{
        const enum writeback_sync_modes sync_mode = work->args.sync_mode;

        /*
         * For allocated work, we can clear the done/seen bit right here.
         * For on-stack work, we need to postpone both the clear and free
         * to after the RCU grace period, since the stack could be invalidated
         * as soon as bdi_work_clear() has done the wakeup.
         */
        if (!bdi_work_on_stack(work))
                bdi_work_clear(work);
        if (sync_mode == WB_SYNC_NONE || bdi_work_on_stack(work))
                call_rcu(&work->rcu_head, bdi_work_free);
}

static void wb_clear_pending(struct bdi_writeback *wb, struct bdi_work *work)
{
        /*
         * The caller has retrieved the work arguments from this work,
         * drop our reference. If this is the last ref, delete and free it
         */
        if (atomic_dec_and_test(&work->pending)) {
                struct backing_dev_info *bdi = wb->bdi;

                spin_lock(&bdi->wb_lock);
                list_del_rcu(&work->list);
                spin_unlock(&bdi->wb_lock);

                wb_work_complete(work);
        }
}

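/*
 * Add a work item to the bdi's work list (if @work is non-NULL) and wake
 * up a writeback thread to process it.
 */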
static void bdi_queue_work(struct backing_dev_info *bdi, struct bdi_work *work)
{
        if (work) {
                work->seen = bdi->wb_mask;
                BUG_ON(!work->seen);
                atomic_set(&work->pending, bdi->wb_cnt);
                BUG_ON(!bdi->wb_cnt);

                /*
                 * Make sure stores are seen before it appears on the list
                 */
                smp_mb();

                spin_lock(&bdi->wb_lock);
                list_add_tail_rcu(&work->list, &bdi->work_list);
                spin_unlock(&bdi->wb_lock);
        }

        /*
         * If the default thread isn't there, make sure we add it. When
         * it gets created and wakes up, we'll run this work.
         */
        if (unlikely(list_empty_careful(&bdi->wb_list)))
                wake_up_process(default_backing_dev_info.wb.task);
        else {
                struct bdi_writeback *wb = &bdi->wb;

                /*
                 * If we failed allocating the bdi work item, wake up the wb
                 * thread always. As a safety precaution, it'll flush out
                 * everything
                 */
                if (!wb_has_dirty_io(wb)) {
                        if (work)
                                wb_clear_pending(wb, work);
                } else if (wb->task)
                        wake_up_process(wb->task);
        }
}

/*
 * Used for on-stack allocated work items. The caller needs to wait until
 * the wb threads have acked the work before it's safe to continue.
 */
static void bdi_wait_on_work_clear(struct bdi_work *work)
{
        wait_on_bit(&work->state, WS_USED_B, bdi_sched_wait,
                    TASK_UNINTERRUPTIBLE);
}

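/*
 * Allocate a work item for this request and queue it. The allocation can
 * fail; bdi_queue_work() handles a NULL work item by simply waking the
 * writeback thread, which will then flush out old data.
 */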
static void bdi_alloc_queue_work(struct backing_dev_info *bdi,
                                 struct writeback_control *wbc)
{
        struct bdi_work *work;

        work = kmalloc(sizeof(*work), GFP_ATOMIC);
        if (work)
                bdi_work_init(work, wbc);

        bdi_queue_work(bdi, work);
}

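/*
 * Start writeback against the bdi described by @wbc. WB_SYNC_NONE requests
 * are queued asynchronously, while WB_SYNC_ALL requests use an on-stack work
 * item and wait for the writeback threads to acknowledge it.
 */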
void bdi_start_writeback(struct writeback_control *wbc)
{
        /*
         * WB_SYNC_NONE is opportunistic writeback. If this allocation fails,
         * bdi_queue_work() will wake up the thread and flush old data. This
         * should ensure some amount of progress in freeing memory.
         */
        if (wbc->sync_mode != WB_SYNC_ALL)
                bdi_alloc_queue_work(wbc->bdi, wbc);
        else {
                struct bdi_work work;

                bdi_work_init(&work, wbc);
                work.state |= WS_ONSTACK;

                bdi_queue_work(wbc->bdi, &work);
                bdi_wait_on_work_clear(&work);
        }
}

/*
 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the b_dirty list. If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode)
{
        struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;

        if (!list_empty(&wb->b_dirty)) {
                struct inode *tail;

                tail = list_entry(wb->b_dirty.next, struct inode, i_list);
                if (time_before(inode->dirtied_when, tail->dirtied_when))
                        inode->dirtied_when = jiffies;
        }
        list_move(&inode->i_list, &wb->b_dirty);
}

/*
 * requeue inode for re-scanning after bdi->b_io list is exhausted.
 */
static void requeue_io(struct inode *inode)
{
        struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;

        list_move(&inode->i_list, &wb->b_more_io);
}

static void inode_sync_complete(struct inode *inode)
{
        /*
         * Prevent speculative execution through spin_unlock(&inode_lock);
         */
        smp_mb();
        wake_up_bit(&inode->i_state, __I_SYNC);
}

static bool inode_dirtied_after(struct inode *inode, unsigned long t)
{
        bool ret = time_after(inode->dirtied_when, t);
#ifndef CONFIG_64BIT
        /*
         * For inodes being constantly redirtied, dirtied_when can get stuck.
         * It _appears_ to be in the future, but is actually in distant past.
         * This test is necessary to prevent such wrapped-around relative times
         * from permanently stopping the whole pdflush writeback.
         */
        ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
        return ret;
}

/*
 * Move expired dirty inodes from @delaying_queue to @dispatch_queue.
 */
static void move_expired_inodes(struct list_head *delaying_queue,
                                struct list_head *dispatch_queue,
                                unsigned long *older_than_this)
{
        while (!list_empty(delaying_queue)) {
                struct inode *inode = list_entry(delaying_queue->prev,
                                                 struct inode, i_list);
                if (older_than_this &&
                    inode_dirtied_after(inode, *older_than_this))
                        break;
                list_move(&inode->i_list, dispatch_queue);
        }
}

/*
 * Queue all expired dirty inodes for io, eldest first.
 */
static void queue_io(struct bdi_writeback *wb, unsigned long *older_than_this)
{
        list_splice_init(&wb->b_more_io, wb->b_io.prev);
        move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this);
}

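/*
 * Write the inode itself out through the filesystem's ->write_inode(),
 * if one is provided and the inode is not bad.
 */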
static int write_inode(struct inode *inode, int sync)
{
        if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
                return inode->i_sb->s_op->write_inode(inode, sync);
        return 0;
}

/*
 * Wait for writeback on an inode to complete.
 */
static void inode_wait_for_writeback(struct inode *inode)
{
        DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
        wait_queue_head_t *wqh;

        wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
        do {
                spin_unlock(&inode_lock);
                __wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
                spin_lock(&inode_lock);
        } while (inode->i_state & I_SYNC);
}

/*
 * Write out an inode's dirty pages. Called under inode_lock. Either the
 * caller has ref on the inode (either via __iget or via syscall against an fd)
 * or the inode has I_WILL_FREE set (via generic_forget_inode)
 *
 * If `wait' is set, wait on the writeout.
 *
 * The whole writeout design is quite complex and fragile. We want to avoid
 * starvation of particular inodes when others are being redirtied, prevent
 * livelocks, etc.
 *
 * Called under inode_lock.
 */
static int
writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
        struct address_space *mapping = inode->i_mapping;
        int wait = wbc->sync_mode == WB_SYNC_ALL;
        unsigned dirty;
        int ret;

        if (!atomic_read(&inode->i_count))
                WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
        else
                WARN_ON(inode->i_state & I_WILL_FREE);

        if (inode->i_state & I_SYNC) {
                /*
                 * If this inode is locked for writeback and we are not doing
                 * writeback-for-data-integrity, move it to b_more_io so that
                 * writeback can proceed with the other inodes on b_io.
                 *
                 * We'll have another go at writing back this inode when we
                 * have completed a full scan of b_io.
                 */
                if (!wait) {
                        requeue_io(inode);
                        return 0;
                }

                /*
                 * It's a data-integrity sync. We must wait.
                 */
                inode_wait_for_writeback(inode);
        }

        BUG_ON(inode->i_state & I_SYNC);

        /* Set I_SYNC, reset I_DIRTY */
        dirty = inode->i_state & I_DIRTY;
        inode->i_state |= I_SYNC;
        inode->i_state &= ~I_DIRTY;

        spin_unlock(&inode_lock);

        ret = do_writepages(mapping, wbc);

        /* Don't write the inode if only I_DIRTY_PAGES was set */
        if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
                int err = write_inode(inode, wait);
                if (ret == 0)
                        ret = err;
        }

        if (wait) {
                int err = filemap_fdatawait(mapping);
                if (ret == 0)
                        ret = err;
        }

        spin_lock(&inode_lock);
        inode->i_state &= ~I_SYNC;
        if (!(inode->i_state & (I_FREEING | I_CLEAR))) {
                if (!(inode->i_state & I_DIRTY) &&
                    mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
                        /*
                         * We didn't write back all the pages. nfs_writepages()
                         * sometimes bails out without doing anything. Redirty
                         * the inode: move it from b_io onto b_more_io/b_dirty.
                         */
                        /*
                         * akpm: if the caller was the kupdate function we put
                         * this inode at the head of b_dirty so it gets first
                         * consideration. Otherwise, move it to the tail, for
                         * the reasons described there. I'm not really sure
                         * how much sense this makes. Presumably I had good
                         * reasons for doing it this way, and I'd rather not
                         * muck with it at present.
                         */
                        if (wbc->for_kupdate) {
                                /*
                                 * For the kupdate function we move the inode
                                 * to b_more_io so it will get more writeout as
                                 * soon as the queue becomes uncongested.
                                 */
                                inode->i_state |= I_DIRTY_PAGES;
                                if (wbc->nr_to_write <= 0) {
                                        /*
                                         * slice used up: queue for next turn
                                         */
                                        requeue_io(inode);
                                } else {
                                        /*
                                         * somehow blocked: retry later
                                         */
                                        redirty_tail(inode);
                                }
                        } else {
                                /*
                                 * Otherwise fully redirty the inode so that
                                 * other inodes on this superblock will get some
                                 * writeout. Otherwise heavy writing to one
                                 * file would indefinitely suspend writeout of
                                 * all the other files.
                                 */
                                inode->i_state |= I_DIRTY_PAGES;
                                redirty_tail(inode);
                        }
                } else if (inode->i_state & I_DIRTY) {
                        /*
                         * Someone redirtied the inode while we were writing back
                         * the pages.
                         */
                        redirty_tail(inode);
                } else if (atomic_read(&inode->i_count)) {
                        /*
                         * The inode is clean, inuse
                         */
                        list_move(&inode->i_list, &inode_in_use);
                } else {
                        /*
                         * The inode is clean, unused
                         */
                        list_move(&inode->i_list, &inode_unused);
                }
        }
        inode_sync_complete(inode);
        return ret;
}

/*
 * For WB_SYNC_NONE writeback, the caller does not have the sb pinned
 * before calling writeback. So make sure that we do pin it, so it doesn't
 * go away while we are writing inodes from it.
 *
 * Returns 0 if the super was successfully pinned (or pinning wasn't needed),
 * 1 if we failed.
 */
static int pin_sb_for_writeback(struct writeback_control *wbc,
                                struct inode *inode)
{
        struct super_block *sb = inode->i_sb;

        /*
         * Caller must already hold the ref for this
         */
        if (wbc->sync_mode == WB_SYNC_ALL) {
                WARN_ON(!rwsem_is_locked(&sb->s_umount));
                return 0;
        }

        spin_lock(&sb_lock);
        sb->s_count++;
        if (down_read_trylock(&sb->s_umount)) {
                if (sb->s_root) {
                        spin_unlock(&sb_lock);
                        return 0;
                }
                /*
                 * umounted, drop rwsem again and fall through to failure
                 */
                up_read(&sb->s_umount);
        }

        sb->s_count--;
        spin_unlock(&sb_lock);
        return 1;
}

static void unpin_sb_for_writeback(struct writeback_control *wbc,
                                   struct inode *inode)
{
        struct super_block *sb = inode->i_sb;

        if (wbc->sync_mode == WB_SYNC_ALL)
                return;

        up_read(&sb->s_umount);
        put_super(sb);
}

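/*
 * Walk wb->b_io and write back the dirty inodes queued there, honouring
 * the constraints in @wbc (specific superblock, congestion, nr_to_write
 * budget and livelock avoidance).
 */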
static void writeback_inodes_wb(struct bdi_writeback *wb,
                                struct writeback_control *wbc)
{
        struct super_block *sb = wbc->sb;
        const int is_blkdev_sb = sb_is_blkdev_sb(sb);
        const unsigned long start = jiffies;    /* livelock avoidance */

        spin_lock(&inode_lock);

        if (!wbc->for_kupdate || list_empty(&wb->b_io))
                queue_io(wb, wbc->older_than_this);

        while (!list_empty(&wb->b_io)) {
                struct inode *inode = list_entry(wb->b_io.prev,
                                                 struct inode, i_list);
                long pages_skipped;

                /*
                 * super block given and doesn't match, skip this inode
                 */
                if (sb && sb != inode->i_sb) {
                        redirty_tail(inode);
                        continue;
                }

                if (!bdi_cap_writeback_dirty(wb->bdi)) {
                        redirty_tail(inode);
                        if (is_blkdev_sb) {
                                /*
                                 * Dirty memory-backed blockdev: the ramdisk
                                 * driver does this. Skip just this inode
                                 */
                                continue;
                        }
                        /*
                         * Dirty memory-backed inode against a filesystem other
                         * than the kernel-internal bdev filesystem. Skip the
                         * entire superblock.
                         */
                        break;
                }

                if (inode->i_state & (I_NEW | I_WILL_FREE)) {
                        requeue_io(inode);
                        continue;
                }

                if (wbc->nonblocking && bdi_write_congested(wb->bdi)) {
                        wbc->encountered_congestion = 1;
                        if (!is_blkdev_sb)
                                break;          /* Skip a congested fs */
                        requeue_io(inode);
                        continue;               /* Skip a congested blockdev */
                }

                /*
                 * Was this inode dirtied after sync_sb_inodes was called?
                 * This keeps sync from extra jobs and livelock.
                 */
                if (inode_dirtied_after(inode, start))
                        break;

                if (pin_sb_for_writeback(wbc, inode)) {
                        requeue_io(inode);
                        continue;
                }

                BUG_ON(inode->i_state & (I_FREEING | I_CLEAR));
                __iget(inode);
                pages_skipped = wbc->pages_skipped;
                writeback_single_inode(inode, wbc);
                unpin_sb_for_writeback(wbc, inode);
                if (wbc->pages_skipped != pages_skipped) {
                        /*
                         * writeback is not making progress due to locked
                         * buffers. Skip this inode for now.
                         */
                        redirty_tail(inode);
                }
                spin_unlock(&inode_lock);
                iput(inode);
                cond_resched();
                spin_lock(&inode_lock);
                if (wbc->nr_to_write <= 0) {
                        wbc->more_io = 1;
                        break;
                }
                if (!list_empty(&wb->b_more_io))
                        wbc->more_io = 1;
        }

        spin_unlock(&inode_lock);
        /* Leave any unwritten inodes on b_io */
}

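/*
 * Write back dirty inodes for the bdi named in @wbc.
 */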
void writeback_inodes_wbc(struct writeback_control *wbc)
{
        struct backing_dev_info *bdi = wbc->bdi;

        writeback_inodes_wb(&bdi->wb, wbc);
}

/*
 * The maximum number of pages to writeout in a single bdi flush/kupdate
 * operation. We do this so we don't hold I_SYNC against an inode for
 * enormous amounts of time, which would block a userspace task which has
 * been forced to throttle against that inode. Also, the code reevaluates
 * the dirty limits each time it has written this many pages.
 */
#define MAX_WRITEBACK_PAGES     1024

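/*
 * Return true if the global count of dirty pages exceeds the background
 * writeback threshold.
 */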
static inline bool over_bground_thresh(void)
{
        unsigned long background_thresh, dirty_thresh;

        get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);

        return (global_page_state(NR_FILE_DIRTY) +
                global_page_state(NR_UNSTABLE_NFS) >= background_thresh);
}

/*
 * Explicit flushing or periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space. So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval. But if a writeback event
 * takes longer than a dirty_writeback_interval period, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write. So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static long wb_writeback(struct bdi_writeback *wb,
                         struct wb_writeback_args *args)
{
        struct writeback_control wbc = {
                .bdi                    = wb->bdi,
                .sb                     = args->sb,
                .sync_mode              = args->sync_mode,
                .older_than_this        = NULL,
                .for_kupdate            = args->for_kupdate,
                .range_cyclic           = args->range_cyclic,
        };
        unsigned long oldest_jif;
        long wrote = 0;

        if (wbc.for_kupdate) {
                wbc.older_than_this = &oldest_jif;
                oldest_jif = jiffies -
                                msecs_to_jiffies(dirty_expire_interval * 10);
        }
        if (!wbc.range_cyclic) {
                wbc.range_start = 0;
                wbc.range_end = LLONG_MAX;
        }

        for (;;) {
                /*
                 * Don't flush anything for non-integrity writeback where
                 * no nr_pages was given
                 */
                if (!args->for_kupdate && args->nr_pages <= 0 &&
                    args->sync_mode == WB_SYNC_NONE)
                        break;

                /*
                 * If no specific pages were given and this is just a
                 * periodic background writeout and we are below the
                 * background dirty threshold, don't do anything
                 */
                if (args->for_kupdate && args->nr_pages <= 0 &&
                    !over_bground_thresh())
                        break;

                wbc.more_io = 0;
                wbc.encountered_congestion = 0;
                wbc.nr_to_write = MAX_WRITEBACK_PAGES;
                wbc.pages_skipped = 0;
                writeback_inodes_wb(wb, &wbc);
                args->nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
                wrote += MAX_WRITEBACK_PAGES - wbc.nr_to_write;

                /*
                 * If we ran out of stuff to write, bail unless more_io got set
                 */
                if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) {
                        if (wbc.more_io && !wbc.for_kupdate)
                                continue;
                        break;
                }
        }

        return wrote;
}

/*
 * Return the next bdi_work struct that hasn't been processed by this
 * wb thread yet
 */
static struct bdi_work *get_next_work_item(struct backing_dev_info *bdi,
                                           struct bdi_writeback *wb)
{
        struct bdi_work *work, *ret = NULL;

        rcu_read_lock();

        list_for_each_entry_rcu(work, &bdi->work_list, list) {
                if (!test_and_clear_bit(wb->nr, &work->seen))
                        continue;

                ret = work;
                break;
        }

        rcu_read_unlock();
        return ret;
}

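/*
 * kupdated-style periodic writeback: if dirty_writeback_interval has elapsed
 * since the last periodic flush, write back old dirty data.
 */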
static long wb_check_old_data_flush(struct bdi_writeback *wb)
{
        unsigned long expired;
        long nr_pages;

        expired = wb->last_old_flush +
                        msecs_to_jiffies(dirty_writeback_interval * 10);
        if (time_before(jiffies, expired))
                return 0;

        wb->last_old_flush = jiffies;
        nr_pages = global_page_state(NR_FILE_DIRTY) +
                        global_page_state(NR_UNSTABLE_NFS) +
                        (inodes_stat.nr_inodes - inodes_stat.nr_unused);

        if (nr_pages) {
                struct wb_writeback_args args = {
                        .nr_pages       = nr_pages,
                        .sync_mode      = WB_SYNC_NONE,
                        .for_kupdate    = 1,
                        .range_cyclic   = 1,
                };

                return wb_writeback(wb, &args);
        }

        return 0;
}

/*
 * Retrieve work items and do the writeback they describe
 */
long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
{
        struct backing_dev_info *bdi = wb->bdi;
        struct bdi_work *work;
        long wrote = 0;

        while ((work = get_next_work_item(bdi, wb)) != NULL) {
                struct wb_writeback_args args = work->args;

                /*
                 * Override sync mode, in case we must wait for completion
                 */
                if (force_wait)
                        work->args.sync_mode = args.sync_mode = WB_SYNC_ALL;

                /*
                 * If this isn't a data integrity operation, just notify
                 * that we have seen this work and we are now starting it.
                 */
                if (args.sync_mode == WB_SYNC_NONE)
                        wb_clear_pending(wb, work);

                wrote += wb_writeback(wb, &args);

                /*
                 * This is a data integrity writeback, so only do the
                 * notification when we have completed the work.
                 */
                if (args.sync_mode == WB_SYNC_ALL)
                        wb_clear_pending(wb, work);
        }

        /*
         * Check for periodic writeback, kupdated() style
         */
        wrote += wb_check_old_data_flush(wb);

        return wrote;
}

/*
 * Handle writeback of dirty data for the device backed by this bdi. Also
 * wakes up periodically and does kupdated style flushing.
 */
int bdi_writeback_task(struct bdi_writeback *wb)
{
        unsigned long last_active = jiffies;
        unsigned long wait_jiffies = -1UL;
        long pages_written;

        while (!kthread_should_stop()) {
                pages_written = wb_do_writeback(wb, 0);

                if (pages_written)
                        last_active = jiffies;
                else if (wait_jiffies != -1UL) {
                        unsigned long max_idle;

                        /*
                         * Longest period of inactivity that we tolerate. If we
                         * see dirty data again later, the task will get
                         * recreated automatically.
                         */
                        max_idle = max(5UL * 60 * HZ, wait_jiffies);
                        if (time_after(jiffies, max_idle + last_active))
                                break;
                }

                wait_jiffies = msecs_to_jiffies(dirty_writeback_interval * 10);
                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(wait_jiffies);
                try_to_freeze();
        }

        return 0;
}

/*
 * Schedule writeback for all backing devices. Can only be used for
 * WB_SYNC_NONE writeback, WB_SYNC_ALL should use bdi_start_writeback()
 * and pass in the superblock.
 */
static void bdi_writeback_all(struct writeback_control *wbc)
{
        struct backing_dev_info *bdi;

        WARN_ON(wbc->sync_mode == WB_SYNC_ALL);

        spin_lock(&bdi_lock);

        list_for_each_entry(bdi, &bdi_list, bdi_list) {
                if (!bdi_has_dirty_io(bdi))
                        continue;

                bdi_alloc_queue_work(bdi, wbc);
        }

        spin_unlock(&bdi_lock);
}

/*
 * Start writeback of `nr_pages' pages. If `nr_pages' is zero, write back
 * the whole world.
 */
void wakeup_flusher_threads(long nr_pages)
{
        struct writeback_control wbc = {
                .sync_mode      = WB_SYNC_NONE,
                .older_than_this = NULL,
                .range_cyclic   = 1,
        };

        if (nr_pages == 0)
                nr_pages = global_page_state(NR_FILE_DIRTY) +
                                global_page_state(NR_UNSTABLE_NFS);
        wbc.nr_to_write = nr_pages;
        bdi_writeback_all(&wbc);
}

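/*
 * With block_dump enabled, log which task dirtied which inode.
 */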
static noinline void block_dump___mark_inode_dirty(struct inode *inode)
{
        if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
                struct dentry *dentry;
                const char *name = "?";

                dentry = d_find_alias(inode);
                if (dentry) {
                        spin_lock(&dentry->d_lock);
                        name = (const char *) dentry->d_name.name;
                }
                printk(KERN_DEBUG
                       "%s(%d): dirtied inode %lu (%s) on %s\n",
                       current->comm, task_pid_nr(current), inode->i_ino,
                       name, inode->i_sb->s_id);
                if (dentry) {
                        spin_unlock(&dentry->d_lock);
                        dput(dentry);
                }
        }
}

/**
 * __mark_inode_dirty - internal function
 * @inode: inode to mark
 * @flags: what kind of dirty (i.e. I_DIRTY_SYNC)
 * Mark an inode as dirty. Callers should use mark_inode_dirty or
 * mark_inode_dirty_sync.
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * This function *must* be atomic for the I_DIRTY_PAGES case -
 * set_page_dirty() is called under spinlock in several places.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
 * the block-special inode (/dev/hda1) itself. And the ->dirtied_when field of
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages. This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
        struct super_block *sb = inode->i_sb;

        /*
         * Don't do this for I_DIRTY_PAGES - that doesn't actually
         * dirty the inode itself
         */
        if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
                if (sb->s_op->dirty_inode)
                        sb->s_op->dirty_inode(inode);
        }

        /*
         * make sure that changes are seen by all cpus before we test i_state
         * -- mikulas
         */
        smp_mb();

        /* avoid the locking if we can */
        if ((inode->i_state & flags) == flags)
                return;

        if (unlikely(block_dump))
                block_dump___mark_inode_dirty(inode);

        spin_lock(&inode_lock);
        if ((inode->i_state & flags) != flags) {
                const int was_dirty = inode->i_state & I_DIRTY;

                inode->i_state |= flags;

                /*
                 * If the inode is being synced, just update its dirty state.
                 * The unlocker will place the inode on the appropriate
                 * superblock list, based upon its state.
                 */
                if (inode->i_state & I_SYNC)
                        goto out;

                /*
                 * Only add valid (hashed) inodes to the superblock's
                 * dirty list. Add blockdev inodes as well.
                 */
                if (!S_ISBLK(inode->i_mode)) {
                        if (hlist_unhashed(&inode->i_hash))
                                goto out;
                }
                if (inode->i_state & (I_FREEING|I_CLEAR))
                        goto out;

                /*
                 * If the inode was already on b_dirty/b_io/b_more_io, don't
                 * reposition it (that would break b_dirty time-ordering).
                 */
                if (!was_dirty) {
                        struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
                        struct backing_dev_info *bdi = wb->bdi;

                        if (bdi_cap_writeback_dirty(bdi) &&
                            !test_bit(BDI_registered, &bdi->state)) {
                                WARN_ON(1);
                                printk(KERN_ERR "bdi-%s not registered\n",
                                       bdi->name);
                        }

                        inode->dirtied_when = jiffies;
                        list_move(&inode->i_list, &wb->b_dirty);
                }
        }
out:
        spin_unlock(&inode_lock);
}
EXPORT_SYMBOL(__mark_inode_dirty);

/*
 * Write out a superblock's list of dirty inodes. A wait will be performed
 * upon no inodes, all inodes or the final one, depending upon sync_mode.
 *
 * If older_than_this is non-NULL, then only write out inodes which
 * had their first dirtying at a time earlier than *older_than_this.
 *
 * If we're a pdflush thread, then implement pdflush collision avoidance
 * against the entire list.
 *
 * If `bdi' is non-zero then we're being asked to writeback a specific queue.
 * This function assumes that the blockdev superblock's inodes are backed by
 * a variety of queues, so all inodes are searched. For other superblocks,
 * assume that all inodes are backed by the same queue.
 *
 * The inodes to be written are parked on bdi->b_io. They are moved back onto
 * bdi->b_dirty as they are selected for writing. This way, none can be missed
 * on the writer throttling path, and we get decent balancing between many
 * throttled threads: we don't want them all piling up on inode_sync_wait.
 */
static void wait_sb_inodes(struct writeback_control *wbc)
{
        struct inode *inode, *old_inode = NULL;

        /*
         * We need to be protected against the filesystem going from
         * r/o to r/w or vice versa.
         */
        WARN_ON(!rwsem_is_locked(&wbc->sb->s_umount));

        spin_lock(&inode_lock);

        /*
         * Data integrity sync. Must wait for all pages under writeback,
         * because there may have been pages dirtied before our sync
         * call, but which had writeout started before we write it out.
         * In which case, the inode may not be on the dirty list, but
         * we still have to wait for that writeout.
         */
        list_for_each_entry(inode, &wbc->sb->s_inodes, i_sb_list) {
                struct address_space *mapping;

                if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW))
                        continue;
                mapping = inode->i_mapping;
                if (mapping->nrpages == 0)
                        continue;
                __iget(inode);
                spin_unlock(&inode_lock);
                /*
                 * We hold a reference to 'inode' so it couldn't have
                 * been removed from s_inodes list while we dropped the
                 * inode_lock. We cannot iput the inode now as we can
                 * be holding the last reference and we cannot iput it
                 * under inode_lock. So we keep the reference and iput
                 * it later.
                 */
                iput(old_inode);
                old_inode = inode;

                filemap_fdatawait(mapping);

                cond_resched();

                spin_lock(&inode_lock);
        }
        spin_unlock(&inode_lock);
        iput(old_inode);
}

/**
 * writeback_inodes_sb - writeback dirty inodes from given super_block
 * @sb: the superblock
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO. The number of pages submitted is
 * returned.
 */
long writeback_inodes_sb(struct super_block *sb)
{
        struct writeback_control wbc = {
                .sb             = sb,
                .sync_mode      = WB_SYNC_NONE,
                .range_start    = 0,
                .range_end      = LLONG_MAX,
        };
        unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
        unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
        long nr_to_write;

        nr_to_write = nr_dirty + nr_unstable +
                        (inodes_stat.nr_inodes - inodes_stat.nr_unused);

        wbc.nr_to_write = nr_to_write;
        bdi_writeback_all(&wbc);
        return nr_to_write - wbc.nr_to_write;
}
EXPORT_SYMBOL(writeback_inodes_sb);

/**
 * sync_inodes_sb - sync sb inode pages
 * @sb: the superblock
 *
 * This function writes and waits on any dirty inode belonging to this
 * super_block. The number of pages synced is returned.
 */
long sync_inodes_sb(struct super_block *sb)
{
        struct writeback_control wbc = {
                .sb             = sb,
                .bdi            = sb->s_bdi,
                .sync_mode      = WB_SYNC_ALL,
                .range_start    = 0,
                .range_end      = LLONG_MAX,
        };
        long nr_to_write = LONG_MAX; /* doesn't actually matter */

        wbc.nr_to_write = nr_to_write;
        bdi_start_writeback(&wbc);
        wait_sb_inodes(&wbc);
        return nr_to_write - wbc.nr_to_write;
}
EXPORT_SYMBOL(sync_inodes_sb);

/**
 * write_inode_now - write an inode to disk
 * @inode: inode to write to disk
 * @sync: whether the write should be synchronous or not
 *
 * This function commits an inode to disk immediately if it is dirty. This is
 * primarily needed by knfsd.
 *
 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
 */
int write_inode_now(struct inode *inode, int sync)
{
        int ret;
        struct writeback_control wbc = {
                .nr_to_write = LONG_MAX,
                .sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
                .range_start = 0,
                .range_end = LLONG_MAX,
        };

        if (!mapping_cap_writeback_dirty(inode->i_mapping))
                wbc.nr_to_write = 0;

        might_sleep();
        spin_lock(&inode_lock);
        ret = writeback_single_inode(inode, &wbc);
        spin_unlock(&inode_lock);
        if (sync)
                inode_sync_wait(inode);
        return ret;
}
EXPORT_SYMBOL(write_inode_now);

/**
 * sync_inode - write an inode and its pages to disk.
 * @inode: the inode to sync
 * @wbc: controls the writeback mode
 *
 * sync_inode() will write an inode and its pages to disk. It will also
 * correctly update the inode on its superblock's dirty inode lists and will
 * update inode->i_state.
 *
 * The caller must have a ref on the inode.
 */
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
        int ret;

        spin_lock(&inode_lock);
        ret = writeback_single_inode(inode, wbc);
        spin_unlock(&inode_lock);
        return ret;
}
EXPORT_SYMBOL(sync_inode);