/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes. ie: data writeback. Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	Andrew Morton
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include "internal.h"

#define inode_to_bdi(inode)	((inode)->i_mapping->backing_dev_info)

/*
 * We don't actually have pdflush, but this one is exported through /proc...
 */
int nr_pdflush_threads;

/*
 * Passed into wb_writeback(), essentially a subset of writeback_control
 */
struct wb_writeback_args {
	long nr_pages;
	struct super_block *sb;
	enum writeback_sync_modes sync_mode;
	int for_kupdate;
	int range_cyclic;
};

/*
 * Work items for the bdi_writeback threads
 */
struct bdi_work {
	struct list_head list;		/* pending work list */
	struct rcu_head rcu_head;	/* for RCU free/clear of work */

	unsigned long seen;		/* threads that have seen this work */
	atomic_t pending;		/* number of threads still to do work */

	struct wb_writeback_args args;	/* writeback arguments */

	unsigned long state;		/* flag bits, see WS_* */
};

enum {
	WS_USED_B = 0,
	WS_ONSTACK_B,
};

#define WS_USED (1 << WS_USED_B)
#define WS_ONSTACK (1 << WS_ONSTACK_B)

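/*
 * Rough lifecycle of a bdi_work item, summarized from the code below:
 * bdi_queue_work() links it on bdi->work_list with ->seen set to the mask
 * of all writeback threads and ->pending set to the thread count. Each
 * thread clears its bit in ->seen when it picks the work up, and drops
 * ->pending when it no longer needs the work (on receipt for WB_SYNC_NONE,
 * on completion for WB_SYNC_ALL). The last thread unlinks the item and
 * completes it: dynamically allocated work is freed via RCU, on-stack work
 * has WS_USED cleared so the waiter can return.
 */
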
static inline bool bdi_work_on_stack(struct bdi_work *work)
{
	return test_bit(WS_ONSTACK_B, &work->state);
}

static inline void bdi_work_init(struct bdi_work *work,
				 struct wb_writeback_args *args)
{
	INIT_RCU_HEAD(&work->rcu_head);
	work->args = *args;
	work->state = WS_USED;
}

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback waiting to be handled against a
 * backing device.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
	return !list_empty(&bdi->work_list);
}

static void bdi_work_clear(struct bdi_work *work)
{
	clear_bit(WS_USED_B, &work->state);
	smp_mb__after_clear_bit();
	/*
	 * work can have disappeared at this point. bit waitq functions
	 * should be able to tolerate this, provided bdi_sched_wait does
	 * not dereference its pointer argument.
	 */
	wake_up_bit(&work->state, WS_USED_B);
}

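/*
 * RCU callback: dynamically allocated work can now be freed; on-stack work
 * instead has WS_USED cleared, so the waiter in bdi_wait_on_work_clear()
 * knows the item is no longer referenced.
 */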
static void bdi_work_free(struct rcu_head *head)
{
	struct bdi_work *work = container_of(head, struct bdi_work, rcu_head);

	if (!bdi_work_on_stack(work))
		kfree(work);
	else
		bdi_work_clear(work);
}

static void wb_work_complete(struct bdi_work *work)
{
	const enum writeback_sync_modes sync_mode = work->args.sync_mode;
	int onstack = bdi_work_on_stack(work);

	/*
	 * For allocated work, we can clear the done/seen bit right here.
	 * For on-stack work, we need to postpone both the clear and free
	 * to after the RCU grace period, since the stack could be invalidated
	 * as soon as bdi_work_clear() has done the wakeup.
	 */
	if (!onstack)
		bdi_work_clear(work);
	if (sync_mode == WB_SYNC_NONE || onstack)
		call_rcu(&work->rcu_head, bdi_work_free);
}

static void wb_clear_pending(struct bdi_writeback *wb, struct bdi_work *work)
{
	/*
	 * The caller has retrieved the work arguments from this work;
	 * drop our reference. If this is the last ref, delete and free it
	 */
	if (atomic_dec_and_test(&work->pending)) {
		struct backing_dev_info *bdi = wb->bdi;

		spin_lock(&bdi->wb_lock);
		list_del_rcu(&work->list);
		spin_unlock(&bdi->wb_lock);

		wb_work_complete(work);
	}
}

static void bdi_queue_work(struct backing_dev_info *bdi, struct bdi_work *work)
{
	work->seen = bdi->wb_mask;
	BUG_ON(!work->seen);
	atomic_set(&work->pending, bdi->wb_cnt);
	BUG_ON(!bdi->wb_cnt);

	/*
	 * list_add_tail_rcu() contains the necessary barriers to
	 * make sure the above stores are seen before the item is
	 * noticed on the list
	 */
	spin_lock(&bdi->wb_lock);
	list_add_tail_rcu(&work->list, &bdi->work_list);
	spin_unlock(&bdi->wb_lock);

	/*
	 * If the default thread isn't there, make sure we add it. When
	 * it gets created and wakes up, we'll run this work.
	 */
	if (unlikely(list_empty_careful(&bdi->wb_list)))
		wake_up_process(default_backing_dev_info.wb.task);
	else {
		struct bdi_writeback *wb = &bdi->wb;

		if (wb->task)
			wake_up_process(wb->task);
	}
}

/*
 * Used for on-stack allocated work items. The caller needs to wait until
 * the wb threads have acked the work before it's safe to continue.
 */
static void bdi_wait_on_work_clear(struct bdi_work *work)
{
	wait_on_bit(&work->state, WS_USED_B, bdi_sched_wait,
		    TASK_UNINTERRUPTIBLE);
}

static void bdi_alloc_queue_work(struct backing_dev_info *bdi,
				 struct wb_writeback_args *args)
{
	struct bdi_work *work;

	/*
	 * This is WB_SYNC_NONE writeback, so if allocation fails just
	 * wake up the thread for old dirty data writeback
	 */
	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		bdi_work_init(work, args);
		bdi_queue_work(bdi, work);
	} else {
		struct bdi_writeback *wb = &bdi->wb;

		if (wb->task)
			wake_up_process(wb->task);
	}
}

/**
 * bdi_sync_writeback - start and wait for writeback
 * @bdi: the backing device to write from
 * @sb: write inodes from this super_block
 *
 * Description:
 * This does WB_SYNC_ALL data integrity writeback and waits for the
 * IO to complete. Callers must hold the sb s_umount semaphore for
 * reading, to avoid having the super disappear before we are done.
 */
static void bdi_sync_writeback(struct backing_dev_info *bdi,
			       struct super_block *sb)
{
	struct wb_writeback_args args = {
		.sb		= sb,
		.sync_mode	= WB_SYNC_ALL,
		.nr_pages	= LONG_MAX,
		.range_cyclic	= 0,
	};
	struct bdi_work work;

	bdi_work_init(&work, &args);
	work.state |= WS_ONSTACK;

	bdi_queue_work(bdi, &work);
	bdi_wait_on_work_clear(&work);
}

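/*
 * Note that bdi_sync_writeback() is what sync_inodes_sb() at the bottom of
 * this file uses for its data integrity pass; the on-stack work item is
 * safe because we wait for the writeback threads to ack it before
 * returning.
 */
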
/**
 * bdi_start_writeback - start writeback
 * @bdi: the backing device to write from
 * @nr_pages: the number of pages to write
 *
 * Description:
 * This does WB_SYNC_NONE opportunistic writeback. The IO is only
 * started when this function returns; we make no guarantees on
 * completion. Caller need not hold the sb s_umount semaphore.
 *
 */
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages)
{
	struct wb_writeback_args args = {
		.sync_mode	= WB_SYNC_NONE,
		.nr_pages	= nr_pages,
		.range_cyclic	= 1,
	};

	bdi_alloc_queue_work(bdi, &args);
}

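/*
 * Usage sketch (hypothetical caller, for illustration only):
 *
 *	bdi_start_writeback(inode_to_bdi(inode), 1024);
 *
 * queues an asynchronous WB_SYNC_NONE request for roughly 1024 pages and
 * returns immediately, without ever waiting for the IO.
 */
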
/*
 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the b_dirty list. If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;

	if (!list_empty(&wb->b_dirty)) {
		struct inode *tail;

		tail = list_entry(wb->b_dirty.next, struct inode, i_list);
		if (time_before(inode->dirtied_when, tail->dirtied_when))
			inode->dirtied_when = jiffies;
	}
	list_move(&inode->i_list, &wb->b_dirty);
}

/*
 * requeue inode for re-scanning after bdi->b_io list is exhausted.
 */
static void requeue_io(struct inode *inode)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;

	list_move(&inode->i_list, &wb->b_more_io);
}

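/*
 * Called once I_SYNC has been cleared: wake up anyone sleeping on the
 * __I_SYNC bit in inode_wait_for_writeback().
 */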
static void inode_sync_complete(struct inode *inode)
{
	/*
	 * Prevent speculative execution through spin_unlock(&inode_lock);
	 */
	smp_mb();
	wake_up_bit(&inode->i_state, __I_SYNC);
}

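/*
 * Return true if @inode was dirtied after time @t, guarding against
 * jiffies wraparound on 32-bit systems (see below).
 */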
static bool inode_dirtied_after(struct inode *inode, unsigned long t)
{
	bool ret = time_after(inode->dirtied_when, t);
#ifndef CONFIG_64BIT
	/*
	 * For inodes being constantly redirtied, dirtied_when can get stuck.
	 * It _appears_ to be in the future, but is actually in distant past.
	 * This test is necessary to prevent such wrapped-around relative times
	 * from permanently stopping the whole pdflush writeback.
	 */
	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
	return ret;
}

/*
 * Move expired dirty inodes from @delaying_queue to @dispatch_queue.
 */
static void move_expired_inodes(struct list_head *delaying_queue,
				struct list_head *dispatch_queue,
				unsigned long *older_than_this)
{
	while (!list_empty(delaying_queue)) {
		struct inode *inode = list_entry(delaying_queue->prev,
						struct inode, i_list);
		if (older_than_this &&
		    inode_dirtied_after(inode, *older_than_this))
			break;
		list_move(&inode->i_list, dispatch_queue);
	}
}

/*
 * Queue all expired dirty inodes for io, eldest first.
 */
static void queue_io(struct bdi_writeback *wb, unsigned long *older_than_this)
{
	list_splice_init(&wb->b_more_io, wb->b_io.prev);
	move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this);
}

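/*
 * Write the inode itself via the filesystem's ->write_inode() method, if
 * one is provided and the inode is not bad.
 */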
static int write_inode(struct inode *inode, int sync)
{
	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
		return inode->i_sb->s_op->write_inode(inode, sync);
	return 0;
}

/*
 * Wait for writeback on an inode to complete.
 */
static void inode_wait_for_writeback(struct inode *inode)
{
	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
	wait_queue_head_t *wqh;

	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	do {
		spin_unlock(&inode_lock);
		__wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
		spin_lock(&inode_lock);
	} while (inode->i_state & I_SYNC);
}

/*
 * Write out an inode's dirty pages. Called under inode_lock. Either the
 * caller has ref on the inode (either via __iget or via syscall against an fd)
 * or the inode has I_WILL_FREE set (via generic_forget_inode)
 *
 * If `wait' is set, wait on the writeout.
 *
 * The whole writeout design is quite complex and fragile. We want to avoid
 * starvation of particular inodes when others are being redirtied, prevent
 * livelocks, etc.
 *
 * Called under inode_lock.
 */
static int
writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct address_space *mapping = inode->i_mapping;
	int wait = wbc->sync_mode == WB_SYNC_ALL;
	unsigned dirty;
	int ret;

	if (!atomic_read(&inode->i_count))
		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
	else
		WARN_ON(inode->i_state & I_WILL_FREE);

	if (inode->i_state & I_SYNC) {
		/*
		 * If this inode is locked for writeback and we are not doing
		 * writeback-for-data-integrity, move it to b_more_io so that
		 * writeback can proceed with the other inodes on s_io.
		 *
		 * We'll have another go at writing back this inode when we
		 * completed a full scan of b_io.
		 */
		if (!wait) {
			requeue_io(inode);
			return 0;
		}

		/*
		 * It's a data-integrity sync. We must wait.
		 */
		inode_wait_for_writeback(inode);
	}

	BUG_ON(inode->i_state & I_SYNC);

	/* Set I_SYNC, reset I_DIRTY */
	dirty = inode->i_state & I_DIRTY;
	inode->i_state |= I_SYNC;
	inode->i_state &= ~I_DIRTY;

	spin_unlock(&inode_lock);

	ret = do_writepages(mapping, wbc);

	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		int err = write_inode(inode, wait);
		if (ret == 0)
			ret = err;
	}

	if (wait) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	spin_lock(&inode_lock);
	inode->i_state &= ~I_SYNC;
	if (!(inode->i_state & (I_FREEING | I_CLEAR))) {
		if (!(inode->i_state & I_DIRTY) &&
		    mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
			/*
			 * We didn't write back all the pages. nfs_writepages()
			 * sometimes bails out without doing anything. Redirty
			 * the inode; move it from b_io onto b_more_io/b_dirty.
			 */
			/*
			 * akpm: if the caller was the kupdate function we put
			 * this inode at the head of b_dirty so it gets first
			 * consideration. Otherwise, move it to the tail, for
			 * the reasons described there. I'm not really sure
			 * how much sense this makes. Presumably I had good
			 * reasons for doing it this way, and I'd rather not
			 * muck with it at present.
			 */
			if (wbc->for_kupdate) {
				/*
				 * For the kupdate function we move the inode
				 * to b_more_io so it will get more writeout as
				 * soon as the queue becomes uncongested.
				 */
				inode->i_state |= I_DIRTY_PAGES;
				if (wbc->nr_to_write <= 0) {
					/*
					 * slice used up: queue for next turn
					 */
					requeue_io(inode);
				} else {
					/*
					 * somehow blocked: retry later
					 */
					redirty_tail(inode);
				}
			} else {
				/*
				 * Otherwise fully redirty the inode so that
				 * other inodes on this superblock will get some
				 * writeout. Otherwise heavy writing to one
				 * file would indefinitely suspend writeout of
				 * all the other files.
				 */
				inode->i_state |= I_DIRTY_PAGES;
				redirty_tail(inode);
			}
		} else if (inode->i_state & I_DIRTY) {
			/*
			 * Someone redirtied the inode while we were writing
			 * back the pages.
			 */
			redirty_tail(inode);
		} else if (atomic_read(&inode->i_count)) {
			/*
			 * The inode is clean, in use
			 */
			list_move(&inode->i_list, &inode_in_use);
		} else {
			/*
			 * The inode is clean, unused
			 */
			list_move(&inode->i_list, &inode_unused);
		}
	}
	inode_sync_complete(inode);
	return ret;
}

/*
 * For WB_SYNC_NONE writeback, the caller does not have the sb pinned
 * before calling writeback. So make sure that we do pin it, so it doesn't
 * go away while we are writing inodes from it.
 *
 * Returns 0 if the super was successfully pinned (or pinning wasn't needed),
 * 1 if we failed.
 */
static int pin_sb_for_writeback(struct writeback_control *wbc,
				struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	/*
	 * Caller must already hold the ref for this
	 */
	if (wbc->sync_mode == WB_SYNC_ALL) {
		WARN_ON(!rwsem_is_locked(&sb->s_umount));
		return 0;
	}

	spin_lock(&sb_lock);
	sb->s_count++;
	if (down_read_trylock(&sb->s_umount)) {
		if (sb->s_root) {
			spin_unlock(&sb_lock);
			return 0;
		}
		/*
		 * umounted, drop rwsem again and fall through to failure
		 */
		up_read(&sb->s_umount);
	}

	sb->s_count--;
	spin_unlock(&sb_lock);
	return 1;
}

static void unpin_sb_for_writeback(struct writeback_control *wbc,
				   struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	if (wbc->sync_mode == WB_SYNC_ALL)
		return;

	up_read(&sb->s_umount);
	put_super(sb);
}

static void writeback_inodes_wb(struct bdi_writeback *wb,
				struct writeback_control *wbc)
{
	struct super_block *sb = wbc->sb;
	const int is_blkdev_sb = sb_is_blkdev_sb(sb);
	const unsigned long start = jiffies;	/* livelock avoidance */

	spin_lock(&inode_lock);

	if (!wbc->for_kupdate || list_empty(&wb->b_io))
		queue_io(wb, wbc->older_than_this);

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = list_entry(wb->b_io.prev,
						struct inode, i_list);
		long pages_skipped;

		/*
		 * super block given and doesn't match, skip this inode
		 */
		if (sb && sb != inode->i_sb) {
			redirty_tail(inode);
			continue;
		}

		if (!bdi_cap_writeback_dirty(wb->bdi)) {
			redirty_tail(inode);
			if (is_blkdev_sb) {
				/*
				 * Dirty memory-backed blockdev: the ramdisk
				 * driver does this. Skip just this inode
				 */
				continue;
			}
			/*
			 * Dirty memory-backed inode against a filesystem other
			 * than the kernel-internal bdev filesystem. Skip the
			 * entire superblock.
			 */
			break;
		}

		if (inode->i_state & (I_NEW | I_WILL_FREE)) {
			requeue_io(inode);
			continue;
		}

		if (wbc->nonblocking && bdi_write_congested(wb->bdi)) {
			wbc->encountered_congestion = 1;
			if (!is_blkdev_sb)
				break;		/* Skip a congested fs */
			requeue_io(inode);
			continue;		/* Skip a congested blockdev */
		}

		/*
		 * Was this inode dirtied after this writeback pass started?
		 * This keeps a data integrity sync from doing extra work and
		 * livelocking.
		 */
		if (inode_dirtied_after(inode, start))
			break;

		if (pin_sb_for_writeback(wbc, inode)) {
			requeue_io(inode);
			continue;
		}

		BUG_ON(inode->i_state & (I_FREEING | I_CLEAR));
		__iget(inode);
		pages_skipped = wbc->pages_skipped;
		writeback_single_inode(inode, wbc);
		unpin_sb_for_writeback(wbc, inode);
		if (wbc->pages_skipped != pages_skipped) {
			/*
			 * writeback is not making progress due to locked
			 * buffers. Skip this inode for now.
			 */
			redirty_tail(inode);
		}
		spin_unlock(&inode_lock);
		iput(inode);
		cond_resched();
		spin_lock(&inode_lock);
		if (wbc->nr_to_write <= 0) {
			wbc->more_io = 1;
			break;
		}
		if (!list_empty(&wb->b_more_io))
			wbc->more_io = 1;
	}

	spin_unlock(&inode_lock);
	/* Leave any unwritten inodes on b_io */
}

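/*
 * Write back dirty inodes on the default writeback context of the bdi
 * named in @wbc.
 */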
void writeback_inodes_wbc(struct writeback_control *wbc)
{
	struct backing_dev_info *bdi = wbc->bdi;

	writeback_inodes_wb(&bdi->wb, wbc);
}

/*
 * The maximum number of pages to writeout in a single bdi flush/kupdate
 * operation. We do this so we don't hold I_SYNC against an inode for
 * enormous amounts of time, which would block a userspace task which has
 * been forced to throttle against that inode. Also, the code reevaluates
 * the dirty limits each time it has written this many pages.
 */
#define MAX_WRITEBACK_PAGES	1024

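/*
 * Return true if the global count of dirty and unstable-NFS pages is over
 * the background writeback threshold.
 */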
static inline bool over_bground_thresh(void)
{
	unsigned long background_thresh, dirty_thresh;

	get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);

	return (global_page_state(NR_FILE_DIRTY) +
		global_page_state(NR_UNSTABLE_NFS) >= background_thresh);
}

/*
 * Explicit flushing or periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space. So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval. But if a writeback event
 * takes longer than one dirty_writeback_interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write. So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static long wb_writeback(struct bdi_writeback *wb,
			 struct wb_writeback_args *args)
{
	struct writeback_control wbc = {
		.bdi			= wb->bdi,
		.sb			= args->sb,
		.sync_mode		= args->sync_mode,
		.older_than_this	= NULL,
		.for_kupdate		= args->for_kupdate,
		.range_cyclic		= args->range_cyclic,
	};
	unsigned long oldest_jif;
	long wrote = 0;
	struct inode *inode;

	if (wbc.for_kupdate) {
		wbc.older_than_this = &oldest_jif;
		oldest_jif = jiffies -
				msecs_to_jiffies(dirty_expire_interval * 10);
	}
	if (!wbc.range_cyclic) {
		wbc.range_start = 0;
		wbc.range_end = LLONG_MAX;
	}

	for (;;) {
		/*
		 * Don't flush anything for non-integrity writeback where
		 * no nr_pages was given
		 */
		if (!args->for_kupdate && args->nr_pages <= 0 &&
		    args->sync_mode == WB_SYNC_NONE)
			break;

		/*
		 * If no specific pages were given and this is just a
		 * periodic background writeout and we are below the
		 * background dirty threshold, don't do anything
		 */
		if (args->for_kupdate && args->nr_pages <= 0 &&
		    !over_bground_thresh())
			break;

		wbc.more_io = 0;
		wbc.encountered_congestion = 0;
		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
		wbc.pages_skipped = 0;
		writeback_inodes_wb(wb, &wbc);
		args->nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
		wrote += MAX_WRITEBACK_PAGES - wbc.nr_to_write;

		/*
		 * If we ran out of stuff to write, bail unless more_io got set
		 */
		if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) {
			if (wbc.more_io && !wbc.for_kupdate) {
				if (wbc.nr_to_write < MAX_WRITEBACK_PAGES)
					continue;
				/*
				 * Nothing written. Wait for some inode to
				 * become available for writeback. Otherwise
				 * we'll just busyloop.
				 */
				spin_lock(&inode_lock);
				if (!list_empty(&wb->b_more_io)) {
					inode = list_entry(
							wb->b_more_io.prev,
							struct inode, i_list);
					inode_wait_for_writeback(inode);
				}
				spin_unlock(&inode_lock);
				continue;
			}
			break;
		}
	}

	return wrote;
}

/*
 * Return the next bdi_work struct that hasn't been processed by this
 * wb thread yet. ->seen is initially set for each thread that exists
 * for this device; when a thread first notices a piece of work it
 * clears its bit. Depending on writeback type, the thread will notify
 * completion either on receiving the work (WB_SYNC_NONE) or after
 * it is done (WB_SYNC_ALL).
 */
static struct bdi_work *get_next_work_item(struct backing_dev_info *bdi,
					   struct bdi_writeback *wb)
{
	struct bdi_work *work, *ret = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(work, &bdi->work_list, list) {
		if (!test_bit(wb->nr, &work->seen))
			continue;
		clear_bit(wb->nr, &work->seen);

		ret = work;
		break;
	}

	rcu_read_unlock();
	return ret;
}

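/*
 * kupdated()-style periodic writeback: if dirty_writeback_interval has
 * passed since the last flush of old data, queue a WB_SYNC_NONE,
 * for_kupdate writeback pass for the expired dirty pages.
 */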
static long wb_check_old_data_flush(struct bdi_writeback *wb)
{
	unsigned long expired;
	long nr_pages;

	expired = wb->last_old_flush +
			msecs_to_jiffies(dirty_writeback_interval * 10);
	if (time_before(jiffies, expired))
		return 0;

	wb->last_old_flush = jiffies;
	nr_pages = global_page_state(NR_FILE_DIRTY) +
			global_page_state(NR_UNSTABLE_NFS) +
			(inodes_stat.nr_inodes - inodes_stat.nr_unused);

	if (nr_pages) {
		struct wb_writeback_args args = {
			.nr_pages	= nr_pages,
			.sync_mode	= WB_SYNC_NONE,
			.for_kupdate	= 1,
			.range_cyclic	= 1,
		};

		return wb_writeback(wb, &args);
	}

	return 0;
}

/*
 * Retrieve work items and do the writeback they describe
 */
long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
{
	struct backing_dev_info *bdi = wb->bdi;
	struct bdi_work *work;
	long wrote = 0;

	while ((work = get_next_work_item(bdi, wb)) != NULL) {
		struct wb_writeback_args args = work->args;

		/*
		 * Override sync mode, in case we must wait for completion
		 */
		if (force_wait)
			work->args.sync_mode = args.sync_mode = WB_SYNC_ALL;

		/*
		 * If this isn't a data integrity operation, just notify
		 * that we have seen this work and we are now starting it.
		 */
		if (args.sync_mode == WB_SYNC_NONE)
			wb_clear_pending(wb, work);

		wrote += wb_writeback(wb, &args);

		/*
		 * This is a data integrity writeback, so only do the
		 * notification when we have completed the work.
		 */
		if (args.sync_mode == WB_SYNC_ALL)
			wb_clear_pending(wb, work);
	}

	/*
	 * Check for periodic writeback, kupdated() style
	 */
	wrote += wb_check_old_data_flush(wb);

	return wrote;
}

/*
 * Handle writeback of dirty data for the device backed by this bdi. Also
 * wakes up periodically and does kupdated style flushing.
 */
int bdi_writeback_task(struct bdi_writeback *wb)
{
	unsigned long last_active = jiffies;
	unsigned long wait_jiffies = -1UL;
	long pages_written;

	while (!kthread_should_stop()) {
		pages_written = wb_do_writeback(wb, 0);

		if (pages_written)
			last_active = jiffies;
		else if (wait_jiffies != -1UL) {
			unsigned long max_idle;

			/*
			 * Longest period of inactivity that we tolerate. If we
			 * see dirty data again later, the task will get
			 * recreated automatically.
			 */
			max_idle = max(5UL * 60 * HZ, wait_jiffies);
			if (time_after(jiffies, max_idle + last_active))
				break;
		}

		wait_jiffies = msecs_to_jiffies(dirty_writeback_interval * 10);
		schedule_timeout_interruptible(wait_jiffies);
		try_to_freeze();
	}

	return 0;
}

/*
 * Schedule writeback for all backing devices. This does WB_SYNC_NONE
 * writeback; for integrity writeback see bdi_sync_writeback().
 */
static void bdi_writeback_all(struct super_block *sb, long nr_pages)
{
	struct wb_writeback_args args = {
		.sb		= sb,
		.nr_pages	= nr_pages,
		.sync_mode	= WB_SYNC_NONE,
	};
	struct backing_dev_info *bdi;

	rcu_read_lock();

	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
		if (!bdi_has_dirty_io(bdi))
			continue;

		bdi_alloc_queue_work(bdi, &args);
	}

	rcu_read_unlock();
}

/*
 * Start writeback of `nr_pages' pages. If `nr_pages' is zero, write back
 * the whole world.
 */
void wakeup_flusher_threads(long nr_pages)
{
	if (nr_pages == 0)
		nr_pages = global_page_state(NR_FILE_DIRTY) +
				global_page_state(NR_UNSTABLE_NFS);
	bdi_writeback_all(NULL, nr_pages);
}

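/*
 * With the block_dump sysctl set, log which task dirtied which inode;
 * traditionally used to track down disk spin-ups in laptop mode.
 */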
static noinline void block_dump___mark_inode_dirty(struct inode *inode)
{
	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
		struct dentry *dentry;
		const char *name = "?";

		dentry = d_find_alias(inode);
		if (dentry) {
			spin_lock(&dentry->d_lock);
			name = (const char *) dentry->d_name.name;
		}
		printk(KERN_DEBUG
		       "%s(%d): dirtied inode %lu (%s) on %s\n",
		       current->comm, task_pid_nr(current), inode->i_ino,
		       name, inode->i_sb->s_id);
		if (dentry) {
			spin_unlock(&dentry->d_lock);
			dput(dentry);
		}
	}
}

/**
 * __mark_inode_dirty - internal function
 * @inode: inode to mark
 * @flags: what kind of dirty (e.g. I_DIRTY_SYNC)
 * Mark an inode as dirty. Callers should use mark_inode_dirty or
 * mark_inode_dirty_sync.
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * This function *must* be atomic for the I_DIRTY_PAGES case -
 * set_page_dirty() is called under spinlock in several places.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
 * the block-special inode (/dev/hda1) itself. And the ->dirtied_when field of
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages. This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
	struct super_block *sb = inode->i_sb;

	/*
	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
	 * dirty the inode itself
	 */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		if (sb->s_op->dirty_inode)
			sb->s_op->dirty_inode(inode);
	}

	/*
	 * make sure that changes are seen by all cpus before we test i_state
	 * -- mikulas
	 */
	smp_mb();

	/* avoid the locking if we can */
	if ((inode->i_state & flags) == flags)
		return;

	if (unlikely(block_dump))
		block_dump___mark_inode_dirty(inode);

	spin_lock(&inode_lock);
	if ((inode->i_state & flags) != flags) {
		const int was_dirty = inode->i_state & I_DIRTY;

		inode->i_state |= flags;

		/*
		 * If the inode is being synced, just update its dirty state.
		 * The unlocker will place the inode on the appropriate
		 * superblock list, based upon its state.
		 */
		if (inode->i_state & I_SYNC)
			goto out;

		/*
		 * Only add valid (hashed) inodes to the superblock's
		 * dirty list. Add blockdev inodes as well.
		 */
		if (!S_ISBLK(inode->i_mode)) {
			if (hlist_unhashed(&inode->i_hash))
				goto out;
		}
		if (inode->i_state & (I_FREEING|I_CLEAR))
			goto out;

		/*
		 * If the inode was already on b_dirty/b_io/b_more_io, don't
		 * reposition it (that would break b_dirty time-ordering).
		 */
		if (!was_dirty) {
			struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
			struct backing_dev_info *bdi = wb->bdi;

			if (bdi_cap_writeback_dirty(bdi) &&
			    !test_bit(BDI_registered, &bdi->state)) {
				WARN_ON(1);
				printk(KERN_ERR "bdi-%s not registered\n",
					bdi->name);
			}

			inode->dirtied_when = jiffies;
			list_move(&inode->i_list, &wb->b_dirty);
		}
	}
out:
	spin_unlock(&inode_lock);
}
EXPORT_SYMBOL(__mark_inode_dirty);

/*
 * Write out a superblock's list of dirty inodes. A wait will be performed
 * upon no inodes, all inodes or the final one, depending upon sync_mode.
 *
 * If older_than_this is non-NULL, then only write out inodes which
 * had their first dirtying at a time earlier than *older_than_this.
 *
 * If we're a pdflush thread, then implement pdflush collision avoidance
 * against the entire list.
 *
 * If `bdi' is non-zero then we're being asked to writeback a specific queue.
 * This function assumes that the blockdev superblock's inodes are backed by
 * a variety of queues, so all inodes are searched. For other superblocks,
 * assume that all inodes are backed by the same queue.
 *
 * The inodes to be written are parked on bdi->b_io. They are moved back onto
 * bdi->b_dirty as they are selected for writing. This way, none can be missed
 * on the writer throttling path, and we get decent balancing between many
 * throttled threads: we don't want them all piling up on inode_sync_wait.
 */
static void wait_sb_inodes(struct super_block *sb)
{
	struct inode *inode, *old_inode = NULL;

	/*
	 * We need to be protected against the filesystem going from
	 * r/o to r/w or vice versa.
	 */
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	spin_lock(&inode_lock);

	/*
	 * Data integrity sync. Must wait for all pages under writeback,
	 * because there may have been pages dirtied before our sync
	 * call, but which had writeout started before we write it out.
	 * In which case, the inode may not be on the dirty list, but
	 * we still have to wait for that writeout.
	 */
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		struct address_space *mapping;

		if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW))
			continue;
		mapping = inode->i_mapping;
		if (mapping->nrpages == 0)
			continue;
		__iget(inode);
		spin_unlock(&inode_lock);
		/*
		 * We hold a reference to 'inode' so it couldn't have
		 * been removed from s_inodes list while we dropped the
		 * inode_lock. We cannot iput the inode now as we can
		 * be holding the last reference and we cannot iput it
		 * under inode_lock. So we keep the reference and iput
		 * it later.
		 */
		iput(old_inode);
		old_inode = inode;

		filemap_fdatawait(mapping);

		cond_resched();

		spin_lock(&inode_lock);
	}
	spin_unlock(&inode_lock);
	iput(old_inode);
}

/**
 * writeback_inodes_sb - writeback dirty inodes from given super_block
 * @sb: the superblock
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb(struct super_block *sb)
{
	unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
	unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
	long nr_to_write;

	nr_to_write = nr_dirty + nr_unstable +
			(inodes_stat.nr_inodes - inodes_stat.nr_unused);

	bdi_writeback_all(sb, nr_to_write);
}
EXPORT_SYMBOL(writeback_inodes_sb);

/**
 * sync_inodes_sb - sync sb inode pages
 * @sb: the superblock
 *
 * This function writes and waits on any dirty inode belonging to this
 * super_block.
 */
void sync_inodes_sb(struct super_block *sb)
{
	bdi_sync_writeback(sb->s_bdi, sb);
	wait_sb_inodes(sb);
}
EXPORT_SYMBOL(sync_inodes_sb);

/**
 * write_inode_now - write an inode to disk
 * @inode: inode to write to disk
 * @sync: whether the write should be synchronous or not
 *
 * This function commits an inode to disk immediately if it is dirty. This is
 * primarily needed by knfsd.
 *
 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
 */
int write_inode_now(struct inode *inode, int sync)
{
	int ret;
	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	if (!mapping_cap_writeback_dirty(inode->i_mapping))
		wbc.nr_to_write = 0;

	might_sleep();
	spin_lock(&inode_lock);
	ret = writeback_single_inode(inode, &wbc);
	spin_unlock(&inode_lock);
	if (sync)
		inode_sync_wait(inode);
	return ret;
}
EXPORT_SYMBOL(write_inode_now);

/**
 * sync_inode - write an inode and its pages to disk.
 * @inode: the inode to sync
 * @wbc: controls the writeback mode
 *
 * sync_inode() will write an inode and its pages to disk. It will also
 * correctly update the inode on its superblock's dirty inode lists and will
 * update inode->i_state.
 *
 * The caller must have a ref on the inode.
 */
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
	int ret;

	spin_lock(&inode_lock);
	ret = writeback_single_inode(inode, wbc);
	spin_unlock(&inode_lock);
	return ret;
}
EXPORT_SYMBOL(sync_inode);