/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes, i.e. data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	akpm@zip.com.au
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include "internal.h"
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 27 | |
| 28 | /** |
| 29 | * __mark_inode_dirty - internal function |
| 30 | * @inode: inode to mark |
| 31 | * @flags: what kind of dirty (i.e. I_DIRTY_SYNC) |
| 32 | * Mark an inode as dirty. Callers should use mark_inode_dirty or |
| 33 | * mark_inode_dirty_sync. |
| 34 | * |
| 35 | * Put the inode on the super block's dirty list. |
| 36 | * |
| 37 | * CAREFUL! We mark it dirty unconditionally, but move it onto the |
| 38 | * dirty list only if it is hashed or if it refers to a blockdev. |
| 39 | * If it was not hashed, it will never be added to the dirty list |
| 40 | * even if it is later hashed, as it will have been marked dirty already. |
| 41 | * |
| 42 | * In short, make sure you hash any inodes _before_ you start marking |
| 43 | * them dirty. |
| 44 | * |
| 45 | * This function *must* be atomic for the I_DIRTY_PAGES case - |
| 46 | * set_page_dirty() is called under spinlock in several places. |
| 47 | * |
| 48 | * Note that for blockdevs, inode->dirtied_when represents the dirtying time of |
| 49 | * the block-special inode (/dev/hda1) itself. And the ->dirtied_when field of |
| 50 | * the kernel-internal blockdev inode represents the dirtying time of the |
| 51 | * blockdev's pages. This is why for I_DIRTY_PAGES we always use |
| 52 | * page->mapping->host, so the page-dirtying time is recorded in the internal |
| 53 | * blockdev inode. |
| 54 | */ |
void __mark_inode_dirty(struct inode *inode, int flags)
{
	struct super_block *sb = inode->i_sb;

	/*
	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
	 * dirty the inode itself
	 */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		if (sb->s_op->dirty_inode)
			sb->s_op->dirty_inode(inode);
	}

	/*
	 * make sure that changes are seen by all cpus before we test i_state
	 * -- mikulas
	 */
	smp_mb();

	/* avoid the locking if we can */
	if ((inode->i_state & flags) == flags)
		return;

	if (unlikely(block_dump)) {
		struct dentry *dentry = NULL;
		const char *name = "?";

		if (!list_empty(&inode->i_dentry)) {
			dentry = list_entry(inode->i_dentry.next,
					    struct dentry, d_alias);
			if (dentry && dentry->d_name.name)
				name = (const char *) dentry->d_name.name;
		}

		if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev"))
			printk(KERN_DEBUG
			       "%s(%d): dirtied inode %lu (%s) on %s\n",
			       current->comm, current->pid, inode->i_ino,
			       name, inode->i_sb->s_id);
	}

	spin_lock(&inode_lock);
	if ((inode->i_state & flags) != flags) {
		const int was_dirty = inode->i_state & I_DIRTY;

		inode->i_state |= flags;

		/*
		 * If the inode is locked, just update its dirty state.
		 * The unlocker will place the inode on the appropriate
		 * superblock list, based upon its state.
		 */
		if (inode->i_state & I_LOCK)
			goto out;

		/*
		 * Only add valid (hashed) inodes to the superblock's
		 * dirty list.  Add blockdev inodes as well.
		 */
		if (!S_ISBLK(inode->i_mode)) {
			if (hlist_unhashed(&inode->i_hash))
				goto out;
		}
		if (inode->i_state & (I_FREEING|I_CLEAR))
			goto out;

		/*
		 * If the inode was already on s_dirty/s_io/s_more_io, don't
		 * reposition it (that would break s_dirty time-ordering).
		 */
		if (!was_dirty) {
			inode->dirtied_when = jiffies;
			list_move(&inode->i_list, &sb->s_dirty);
		}
	}
out:
	spin_unlock(&inode_lock);
}

EXPORT_SYMBOL(__mark_inode_dirty);

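/*
 * Illustrative only: filesystems normally reach __mark_inode_dirty()
 * through the wrappers in <linux/fs.h> rather than calling it directly,
 * for example after updating timestamps on a write:
 *
 *	inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
 *	mark_inode_dirty(inode);	-- __mark_inode_dirty(inode, I_DIRTY)
 *
 * mark_inode_dirty_sync() likewise passes just I_DIRTY_SYNC.
 */
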
static int write_inode(struct inode *inode, int sync)
{
	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
		return inode->i_sb->s_op->write_inode(inode, sync);
	return 0;
}

/*
 * Redirty an inode: set its when-it-was-dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the s_dirty list.  If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	if (!list_empty(&sb->s_dirty)) {
		struct inode *tail_inode;

		tail_inode = list_entry(sb->s_dirty.next, struct inode, i_list);
		if (!time_after_eq(inode->dirtied_when,
				   tail_inode->dirtied_when))
			inode->dirtied_when = jiffies;
	}
	list_move(&inode->i_list, &sb->s_dirty);
}

/*
 * Requeue an inode for re-scanning after the sb->s_io list has been exhausted.
 */
static void requeue_io(struct inode *inode)
{
	list_move(&inode->i_list, &inode->i_sb->s_more_io);
}

/*
 * Move expired dirty inodes from @delaying_queue to @dispatch_queue.
 */
static void move_expired_inodes(struct list_head *delaying_queue,
				struct list_head *dispatch_queue,
				unsigned long *older_than_this)
{
	while (!list_empty(delaying_queue)) {
		struct inode *inode = list_entry(delaying_queue->prev,
						 struct inode, i_list);
		if (older_than_this &&
		    time_after(inode->dirtied_when, *older_than_this))
			break;
		list_move(&inode->i_list, dispatch_queue);
	}
}

/*
 * Queue all expired dirty inodes for io, eldest first.
 */
static void queue_io(struct super_block *sb,
		     unsigned long *older_than_this)
{
	list_splice_init(&sb->s_more_io, sb->s_io.prev);
	move_expired_inodes(&sb->s_dirty, &sb->s_io, older_than_this);
}
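
/*
 * Illustrative only, modelled on wb_kupdate() in mm/page-writeback.c:
 * a periodic-writeback caller computes the expiry cutoff up front, so
 * that queue_io() moves only inodes that have been dirty for longer
 * than the expiry interval from s_dirty onto s_io:
 *
 *	unsigned long oldest_jif = jiffies - dirty_expire_interval;
 *	struct writeback_control wbc = {
 *		.sync_mode	 = WB_SYNC_NONE,
 *		.older_than_this = &oldest_jif,
 *		.nr_to_write	 = MAX_WRITEBACK_PAGES,
 *		.nonblocking	 = 1,
 *		.for_kupdate	 = 1,
 *	};
 */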

int sb_has_dirty_inodes(struct super_block *sb)
{
	return !list_empty(&sb->s_dirty) ||
	       !list_empty(&sb->s_io) ||
	       !list_empty(&sb->s_more_io);
}
EXPORT_SYMBOL(sb_has_dirty_inodes);

/*
 * Write a single inode's dirty pages and inode data out to disk.
 * If `wait' is set, wait on the writeout.
 *
 * The whole writeout design is quite complex and fragile.  We want to avoid
 * starvation of particular inodes when others are being redirtied, prevent
 * livelocks, etc.
 *
 * Called under inode_lock.
 */
static int
__sync_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	unsigned dirty;
	struct address_space *mapping = inode->i_mapping;
	int wait = wbc->sync_mode == WB_SYNC_ALL;
	int ret;

	BUG_ON(inode->i_state & I_LOCK);

	/* Set I_LOCK, reset I_DIRTY */
	dirty = inode->i_state & I_DIRTY;
	inode->i_state |= I_LOCK;
	inode->i_state &= ~I_DIRTY;

	spin_unlock(&inode_lock);

	ret = do_writepages(mapping, wbc);

	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		int err = write_inode(inode, wait);
		if (ret == 0)
			ret = err;
	}

	if (wait) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	spin_lock(&inode_lock);
	inode->i_state &= ~I_LOCK;
	if (!(inode->i_state & I_FREEING)) {
		if (!(inode->i_state & I_DIRTY) &&
		    mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
			/*
			 * We didn't write back all the pages.  nfs_writepages()
			 * sometimes bales out without doing anything.  Redirty
			 * the inode; move it from s_io onto s_more_io/s_dirty.
			 */
			/*
			 * akpm: if the caller was the kupdate function we put
			 * this inode at the head of s_dirty so it gets first
			 * consideration.  Otherwise, move it to the tail, for
			 * the reasons described there.  I'm not really sure
			 * how much sense this makes.  Presumably I had good
			 * reasons for doing it this way, and I'd rather not
			 * muck with it at present.
			 */
			if (wbc->for_kupdate) {
				/*
				 * For the kupdate function we move the inode
				 * to s_more_io so it will get more writeout as
				 * soon as the queue becomes uncongested.
				 */
				inode->i_state |= I_DIRTY_PAGES;
				requeue_io(inode);
			} else {
				/*
				 * Otherwise fully redirty the inode so that
				 * other inodes on this superblock will get some
				 * writeout.  Otherwise heavy writing to one
				 * file would indefinitely suspend writeout of
				 * all the other files.
				 */
				inode->i_state |= I_DIRTY_PAGES;
				redirty_tail(inode);
			}
		} else if (inode->i_state & I_DIRTY) {
			/*
			 * Someone redirtied the inode while we were writing
			 * back the pages.
			 */
			redirty_tail(inode);
		} else if (atomic_read(&inode->i_count)) {
			/*
			 * The inode is clean, in use
			 */
			list_move(&inode->i_list, &inode_in_use);
		} else {
			/*
			 * The inode is clean, unused
			 */
			list_move(&inode->i_list, &inode_unused);
		}
	}
	wake_up_inode(inode);
	return ret;
}

/*
 * Write out an inode's dirty pages.  Called under inode_lock.  Either the
 * caller has a ref on the inode (either via __iget or via a syscall against
 * an fd) or the inode has I_WILL_FREE set (via generic_forget_inode).
 */
static int
__writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	wait_queue_head_t *wqh;

	if (!atomic_read(&inode->i_count))
		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
	else
		WARN_ON(inode->i_state & I_WILL_FREE);

	if ((wbc->sync_mode != WB_SYNC_ALL) && (inode->i_state & I_LOCK)) {
		struct address_space *mapping = inode->i_mapping;
		int ret;

		/*
		 * We're skipping this inode because it's locked, and we're not
		 * doing writeback-for-data-integrity.  Move it to s_more_io so
		 * that writeback can proceed with the other inodes on s_io.
		 * We'll have another go at writing back this inode once we
		 * have completed a full scan of s_io.
		 */
		requeue_io(inode);

		/*
		 * Even if we don't actually write the inode itself here,
		 * we can at least start some of the data writeout..
		 */
		spin_unlock(&inode_lock);
		ret = do_writepages(mapping, wbc);
		spin_lock(&inode_lock);
		return ret;
	}

	/*
	 * It's a data-integrity sync.  We must wait.
	 */
	if (inode->i_state & I_LOCK) {
		DEFINE_WAIT_BIT(wq, &inode->i_state, __I_LOCK);

		wqh = bit_waitqueue(&inode->i_state, __I_LOCK);
		do {
			spin_unlock(&inode_lock);
			__wait_on_bit(wqh, &wq, inode_wait,
				      TASK_UNINTERRUPTIBLE);
			spin_lock(&inode_lock);
		} while (inode->i_state & I_LOCK);
	}
	return __sync_single_inode(inode, wbc);
}

/*
 * Write out a superblock's list of dirty inodes.  A wait will be performed
 * upon no inodes, all inodes or the final one, depending upon sync_mode.
 *
 * If older_than_this is non-NULL, then only write out inodes which
 * had their first dirtying at a time earlier than *older_than_this.
 *
 * If we're a pdflush thread, then implement pdflush collision avoidance
 * against the entire list.
 *
 * WB_SYNC_HOLD is a hack for sys_sync(): reattach the inode to sb->s_dirty so
 * that it can be located for waiting on in __writeback_single_inode().
 *
 * Called under inode_lock.
 *
 * If `bdi' is non-zero then we're being asked to writeback a specific queue.
 * This function assumes that the blockdev superblock's inodes are backed by
 * a variety of queues, so all inodes are searched.  For other superblocks,
 * assume that all inodes are backed by the same queue.
 *
 * FIXME: this linear search could get expensive with many filesystems.  But
 * how to fix?  We need to go from an address_space to all inodes which share
 * a queue with that address_space.  (Easy: have a global "dirty superblocks"
 * list).
 *
 * The inodes to be written are parked on sb->s_io.  They are moved back onto
 * sb->s_dirty as they are selected for writing.  This way, none can be missed
 * on the writer throttling path, and we get decent balancing between many
 * throttled threads: we don't want them all piling up on __wait_on_inode.
 */
static void
sync_sb_inodes(struct super_block *sb, struct writeback_control *wbc)
{
	const unsigned long start = jiffies;	/* livelock avoidance */

	if (!wbc->for_kupdate || list_empty(&sb->s_io))
		queue_io(sb, wbc->older_than_this);

	while (!list_empty(&sb->s_io)) {
		struct inode *inode = list_entry(sb->s_io.prev,
						 struct inode, i_list);
		struct address_space *mapping = inode->i_mapping;
		struct backing_dev_info *bdi = mapping->backing_dev_info;
		long pages_skipped;

		if (!bdi_cap_writeback_dirty(bdi)) {
			redirty_tail(inode);
			if (sb_is_blkdev_sb(sb)) {
				/*
				 * Dirty memory-backed blockdev: the ramdisk
				 * driver does this.  Skip just this inode
				 */
				continue;
			}
			/*
			 * Dirty memory-backed inode against a filesystem other
			 * than the kernel-internal bdev filesystem.  Skip the
			 * entire superblock.
			 */
			break;
		}

		if (wbc->nonblocking && bdi_write_congested(bdi)) {
			wbc->encountered_congestion = 1;
			if (!sb_is_blkdev_sb(sb))
				break;		/* Skip a congested fs */
			requeue_io(inode);
			continue;		/* Skip a congested blockdev */
		}

		if (wbc->bdi && bdi != wbc->bdi) {
			if (!sb_is_blkdev_sb(sb))
				break;		/* fs has the wrong queue */
			requeue_io(inode);
			continue;		/* blockdev has wrong queue */
		}

		/* Was this inode dirtied after sync_sb_inodes was called? */
		if (time_after(inode->dirtied_when, start))
			break;

		/* Is another pdflush already flushing this queue? */
		if (current_is_pdflush() && !writeback_acquire(bdi))
			break;

		BUG_ON(inode->i_state & I_FREEING);
		__iget(inode);
		pages_skipped = wbc->pages_skipped;
		__writeback_single_inode(inode, wbc);
		if (wbc->sync_mode == WB_SYNC_HOLD) {
			inode->dirtied_when = jiffies;
			list_move(&inode->i_list, &sb->s_dirty);
		}
		if (current_is_pdflush())
			writeback_release(bdi);
		if (wbc->pages_skipped != pages_skipped) {
			/*
			 * writeback is not making progress due to locked
			 * buffers.  Skip this inode for now.
			 */
			redirty_tail(inode);
		}
		spin_unlock(&inode_lock);
		iput(inode);
		cond_resched();
		spin_lock(&inode_lock);
		if (wbc->nr_to_write <= 0)
			break;
	}
	return;		/* Leave any unwritten inodes on s_io */
}

/*
 * Start writeback of dirty pagecache data against all unlocked inodes.
 *
 * Note:
 * We don't need to grab a reference to the superblock here.  If it has a
 * non-empty ->s_dirty it hasn't been killed yet and kill_super() won't
 * proceed past sync_inodes_sb() until the ->s_dirty/s_io/s_more_io lists
 * are all empty.  Since __sync_single_inode() regains inode_lock before it
 * finally moves the inode from the superblock lists we are OK.
 *
 * If `older_than_this' is non-zero then only flush inodes which have a
 * flushtime older than *older_than_this.
 *
 * If `bdi' is non-zero then we will scan the first inode against each
 * superblock until we find the matching ones.  One group will be the dirty
 * inodes against a filesystem.  Then when we hit the dummy blockdev
 * superblock, sync_sb_inodes will seek out the blockdev which matches `bdi'.
 * Maybe not super-efficient but we're about to do a ton of I/O...
 */
void
writeback_inodes(struct writeback_control *wbc)
{
	struct super_block *sb;

	might_sleep();
	spin_lock(&sb_lock);
restart:
	sb = sb_entry(super_blocks.prev);
	for (; sb != sb_entry(&super_blocks); sb = sb_entry(sb->s_list.prev)) {
		if (sb_has_dirty_inodes(sb)) {
			/* we're making our own get_super here */
			sb->s_count++;
			spin_unlock(&sb_lock);
			/*
			 * If we can't get the readlock, there's no sense in
			 * waiting around, most of the time the FS is going to
			 * be unmounted by the time it is released.
			 */
			if (down_read_trylock(&sb->s_umount)) {
				if (sb->s_root) {
					spin_lock(&inode_lock);
					sync_sb_inodes(sb, wbc);
					spin_unlock(&inode_lock);
				}
				up_read(&sb->s_umount);
			}
			spin_lock(&sb_lock);
			if (__put_super_and_need_restart(sb))
				goto restart;
		}
		if (wbc->nr_to_write <= 0)
			break;
	}
	spin_unlock(&sb_lock);
}
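
/*
 * Illustrative only, modelled on background_writeout() in
 * mm/page-writeback.c: a pdflush worker drives writeback_inodes() in
 * bounded batches until enough pages have been written or congestion
 * is encountered:
 *
 *	struct writeback_control wbc = {
 *		.bdi		= NULL,
 *		.sync_mode	= WB_SYNC_NONE,
 *		.older_than_this = NULL,
 *		.nr_to_write	= 0,
 *		.nonblocking	= 1,
 *	};
 *
 *	for (;;) {
 *		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
 *		wbc.encountered_congestion = 0;
 *		writeback_inodes(&wbc);
 *		-- stop, throttle or loop again based on wbc.nr_to_write
 *		-- and wbc.encountered_congestion
 *	}
 */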

/*
 * writeback and wait upon the filesystem's dirty inodes.  The caller will
 * do this in two passes - one to write, and one to wait.  WB_SYNC_HOLD is
 * used to park the written inodes on sb->s_dirty for the wait pass.
 *
 * A finite limit is set on the number of pages which will be written, to
 * prevent infinite livelock of sys_sync().
 *
 * We add in the number of potentially dirty inodes, because each inode write
 * can dirty pagecache in the underlying blockdev.
 */
void sync_inodes_sb(struct super_block *sb, int wait)
{
	struct writeback_control wbc = {
		.sync_mode	= wait ? WB_SYNC_ALL : WB_SYNC_HOLD,
		.range_start	= 0,
		.range_end	= LLONG_MAX,
	};
	unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
	unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);

	wbc.nr_to_write = nr_dirty + nr_unstable +
			(inodes_stat.nr_inodes - inodes_stat.nr_unused) +
			nr_dirty + nr_unstable;
	wbc.nr_to_write += wbc.nr_to_write / 2;		/* Bit more for luck */
	spin_lock(&inode_lock);
	sync_sb_inodes(sb, &wbc);
	spin_unlock(&inode_lock);
}
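
/*
 * Illustrative only: sys_sync()-style callers make two passes, as the
 * comment above describes - first WB_SYNC_HOLD to start the writes and
 * park the inodes, then WB_SYNC_ALL to wait on them.  fsync_super() in
 * fs/super.c follows this pattern:
 *
 *	sync_inodes_sb(sb, 0);	-- start writeout, reattach to s_dirty
 *	sync_inodes_sb(sb, 1);	-- second pass, this time waiting
 */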

/*
 * Rather lame livelock avoidance.
 */
static void set_sb_syncing(int val)
{
	struct super_block *sb;
	spin_lock(&sb_lock);
	sb = sb_entry(super_blocks.prev);
	for (; sb != sb_entry(&super_blocks); sb = sb_entry(sb->s_list.prev)) {
		sb->s_syncing = val;
	}
	spin_unlock(&sb_lock);
}

/**
 * sync_inodes - writes all inodes to disk
 * @wait: wait for completion
 *
 * sync_inodes() goes through each super block's dirty inode list, writes the
 * inodes out, waits on the writeout and puts the inodes back on the normal
 * list.
 *
 * This is for sys_sync().  fsync_dev() uses the same algorithm.  The subtle
 * part of the sync functions is that the blockdev "superblock" is processed
 * last.  This is because the write_inode() function of a typical fs will
 * perform no I/O, but will mark buffers in the blockdev mapping as dirty.
 * What we want to do is to perform all that dirtying first, and then write
 * back all those inode blocks via the blockdev mapping in one sweep.  So the
 * additional (somewhat redundant) sync_blockdev() calls here are to make
 * sure that really happens.  Because if we call sync_inodes_sb(wait=1) with
 * outstanding dirty inodes, the writeback goes block-at-a-time within the
 * filesystem's write_inode().  This is extremely slow.
 */
static void __sync_inodes(int wait)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
restart:
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (sb->s_syncing)
			continue;
		sb->s_syncing = 1;
		sb->s_count++;
		spin_unlock(&sb_lock);
		down_read(&sb->s_umount);
		if (sb->s_root) {
			sync_inodes_sb(sb, wait);
			sync_blockdev(sb->s_bdev);
		}
		up_read(&sb->s_umount);
		spin_lock(&sb_lock);
		if (__put_super_and_need_restart(sb))
			goto restart;
	}
	spin_unlock(&sb_lock);
}

void sync_inodes(int wait)
{
	set_sb_syncing(0);
	__sync_inodes(0);

	if (wait) {
		set_sb_syncing(0);
		__sync_inodes(1);
	}
}

/**
 * write_inode_now - write an inode to disk
 * @inode: inode to write to disk
 * @sync: whether the write should be synchronous or not
 *
 * This function commits an inode to disk immediately if it is dirty.  This is
 * primarily needed by knfsd.
 *
 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
 */
int write_inode_now(struct inode *inode, int sync)
{
	int ret;
	struct writeback_control wbc = {
		.nr_to_write	= LONG_MAX,
		.sync_mode	= WB_SYNC_ALL,
		.range_start	= 0,
		.range_end	= LLONG_MAX,
	};

	if (!mapping_cap_writeback_dirty(inode->i_mapping))
		wbc.nr_to_write = 0;

	might_sleep();
	spin_lock(&inode_lock);
	ret = __writeback_single_inode(inode, &wbc);
	spin_unlock(&inode_lock);
	if (sync)
		wait_on_inode(inode);
	return ret;
}
EXPORT_SYMBOL(write_inode_now);
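
/*
 * Illustrative only: a caller holding a reference which needs the inode
 * on stable storage before proceeding (knfsd is the classic case, per
 * the comment above) simply does:
 *
 *	err = write_inode_now(inode, 1);	-- write and wait
 */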

/**
 * sync_inode - write an inode and its pages to disk.
 * @inode: the inode to sync
 * @wbc: controls the writeback mode
 *
 * sync_inode() will write an inode and its pages to disk.  It will also
 * correctly update the inode on its superblock's dirty inode lists and will
 * update inode->i_state.
 *
 * The caller must have a ref on the inode.
 */
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
	int ret;

	spin_lock(&inode_lock);
	ret = __writeback_single_inode(inode, wbc);
	spin_unlock(&inode_lock);
	return ret;
}
EXPORT_SYMBOL(sync_inode);
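
/*
 * Illustrative only: unlike write_inode_now(), the caller supplies the
 * writeback_control, so the pass can be tailored - for example a
 * bounded, non-waiting flush of at most 16 pages:
 *
 *	struct writeback_control wbc = {
 *		.sync_mode	= WB_SYNC_NONE,
 *		.nr_to_write	= 16,
 *		.range_start	= 0,
 *		.range_end	= LLONG_MAX,
 *	};
 *	err = sync_inode(inode, &wbc);
 */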

/**
 * generic_osync_inode - flush all dirty data for a given inode to disk
 * @inode: inode to write
 * @mapping: the address_space that should be flushed
 * @what: what to write and wait upon
 *
 * This can be called by file_write functions for files which have the
 * O_SYNC flag set, to flush dirty writes to disk.
 *
 * @what is a bitmask, specifying which part of the inode's data should be
 * written and waited upon.
 *
 * OSYNC_DATA:     i_mapping's dirty data
 * OSYNC_METADATA: the buffers at i_mapping->private_list
 * OSYNC_INODE:    the inode itself
 */

int generic_osync_inode(struct inode *inode, struct address_space *mapping, int what)
{
	int err = 0;
	int need_write_inode_now = 0;
	int err2;

	if (what & OSYNC_DATA)
		err = filemap_fdatawrite(mapping);
	if (what & (OSYNC_METADATA|OSYNC_DATA)) {
		err2 = sync_mapping_buffers(mapping);
		if (!err)
			err = err2;
	}
	if (what & OSYNC_DATA) {
		err2 = filemap_fdatawait(mapping);
		if (!err)
			err = err2;
	}

	spin_lock(&inode_lock);
	if ((inode->i_state & I_DIRTY) &&
	    ((what & OSYNC_INODE) || (inode->i_state & I_DIRTY_DATASYNC)))
		need_write_inode_now = 1;
	spin_unlock(&inode_lock);

	if (need_write_inode_now) {
		err2 = write_inode_now(inode, 1);
		if (!err)
			err = err2;
	} else
		wait_on_inode(inode);

	return err;
}

EXPORT_SYMBOL(generic_osync_inode);
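
/*
 * Illustrative only, following the O_SYNC usage the comment above
 * describes: a file_write path would flush data and metadata after a
 * successful write with something like:
 *
 *	if ((file->f_flags & O_SYNC) || IS_SYNC(inode))
 *		err = generic_osync_inode(inode, mapping,
 *					  OSYNC_METADATA|OSYNC_DATA);
 */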

/**
 * writeback_acquire - attempt to get exclusive writeback access to a device
 * @bdi: the device's backing_dev_info structure
 *
 * It is a waste of resources to have more than one pdflush thread blocked on
 * a single request queue.  Exclusion at the request_queue level is obtained
 * via a flag in the request_queue's backing_dev_info.state.
 *
 * Non-request_queue-backed address_spaces will share default_backing_dev_info,
 * unless they implement their own, which is somewhat inefficient, as this may
 * prevent concurrent writeback against multiple devices.
 */
int writeback_acquire(struct backing_dev_info *bdi)
{
	return !test_and_set_bit(BDI_pdflush, &bdi->state);
}

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback in progress against a backing device.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
	return test_bit(BDI_pdflush, &bdi->state);
}

/**
 * writeback_release - relinquish exclusive writeback access against a device.
 * @bdi: the device's backing_dev_info structure
 */
void writeback_release(struct backing_dev_info *bdi)
{
	BUG_ON(!writeback_in_progress(bdi));
	clear_bit(BDI_pdflush, &bdi->state);
}
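
/*
 * Illustrative only - the same pattern sync_sb_inodes() above uses: a
 * pdflush thread takes the per-queue exclusion before writing and drops
 * it afterwards, skipping queues another thread is already flushing:
 *
 *	if (current_is_pdflush() && !writeback_acquire(bdi))
 *		return;			-- queue already being flushed
 *	-- ...write back against bdi...
 *	if (current_is_pdflush())
 *		writeback_release(bdi);
 */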