/*
 *  linux/fs/ext4/fsync.c
 *
 *  Copyright (C) 1993  Stephen Tweedie (sct@redhat.com)
 *  from
 *  Copyright (C) 1992  Remy Card (card@masi.ibp.fr)
 *                      Laboratoire MASI - Institut Blaise Pascal
 *                      Universite Pierre et Marie Curie (Paris VI)
 *  from
 *  linux/fs/minix/truncate.c   Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  ext4fs fsync primitive
 *
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 *
 *  Removed unnecessary code duplication for little-endian machines
 *  and excessive __inline__s.
 *        Andi Kleen, 1997
 *
 * Major simplifications and cleanup - we only need to do the metadata, because
 * we can depend on generic_block_fdatasync() to sync the data blocks.
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/jbd2.h>
#include <linux/blkdev.h>

#include "ext4.h"
#include "ext4_jbd2.h"

#include <trace/events/ext4.h>

static void dump_completed_IO(struct inode *inode)
{
#ifdef	EXT4_DEBUG
	struct list_head *cur, *before, *after;
	ext4_io_end_t *io, *io0, *io1;
	unsigned long flags;

	if (list_empty(&EXT4_I(inode)->i_completed_io_list)) {
		ext4_debug("inode %lu completed_io list is empty\n", inode->i_ino);
		return;
	}

	ext4_debug("Dump inode %lu completed_io list\n", inode->i_ino);
	spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
	list_for_each_entry(io, &EXT4_I(inode)->i_completed_io_list, list) {
		cur = &io->list;
		before = cur->prev;
		io0 = container_of(before, ext4_io_end_t, list);
		after = cur->next;
		io1 = container_of(after, ext4_io_end_t, list);

		ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n",
			   io, inode->i_ino, io0, io1);
	}
	spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags);
#endif
}

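/*
 * Illustrative sketch, not part of fs/ext4/fsync.c: dump_completed_IO()
 * above relies on the list_head/container_of() idiom -- the list node is
 * embedded inside ext4_io_end_t, and container_of() recovers the containing
 * structure from a pointer to the embedded node.  The minimal, self-contained
 * userspace restatement below uses made-up names (struct item, node_to_value,
 * container_of_node) purely to show the pattern.
 */
#include <stddef.h>

struct list_node {
	struct list_node *prev, *next;
};

/* Same arithmetic as the kernel's container_of(): subtract the member offset. */
#define container_of_node(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct item {
	int value;
	struct list_node link;	/* embedded node, like ext4_io_end_t.list */
};

static int node_to_value(struct list_node *node)
{
	struct item *it = container_of_node(node, struct item, link);

	return it->value;	/* walked back from the node to its container */
}
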
/*
 * This function is called from ext4_sync_file().
 *
 * When IO is completed, the work to convert unwritten extents to
 * written is queued on a workqueue but may not get scheduled
 * immediately. When fsync is called, we need to ensure the
 * conversion is complete before fsync returns.
 * The inode keeps track of a list of pending/completed IO that
 * might need the conversion. This function walks through the list
 * and converts the unwritten extents to written for each completed IO.
 * It returns 0 on success, or a negative error code if a conversion failed.
 */
extern int ext4_flush_completed_IO(struct inode *inode)
{
	ext4_io_end_t *io;
	struct ext4_inode_info *ei = EXT4_I(inode);
	unsigned long flags;
	int ret = 0;
	int ret2 = 0;

	if (list_empty(&ei->i_completed_io_list))
		return ret;

	dump_completed_IO(inode);
	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
	while (!list_empty(&ei->i_completed_io_list)) {
		io = list_entry(ei->i_completed_io_list.next,
				ext4_io_end_t, list);
		/*
		 * Call ext4_end_io_nolock() to convert the completed IO
		 * to written.
		 *
		 * When ext4_sync_file() is called, run_queue() may already
		 * be about to flush the work corresponding to this io
		 * structure, and it will be upset if it finds that the io
		 * structure for the work it is about to run has been freed.
		 *
		 * Thus we need to keep the io structure valid here even
		 * after the conversion has finished.  The io structure has
		 * a flag to avoid converting it twice, from both fsync and
		 * the background workqueue.
		 */
		spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
		ret = ext4_end_io_nolock(io);
		spin_lock_irqsave(&ei->i_completed_io_lock, flags);
		if (ret < 0)
			ret2 = ret;
		else
			list_del_init(&io->list);
	}
	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
	return (ret2 < 0) ? ret2 : 0;
}

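/*
 * Illustrative userspace sketch, not part of fs/ext4/fsync.c: unwritten
 * extents are typically created by fallocate()-style preallocation, and the
 * flush above is what lets fsync() return only after the extents covering
 * freshly written data have been converted to written.  The helper name
 * preallocate_and_write() and the 1 MiB reservation are made up for the
 * example.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

static int preallocate_and_write(int fd, const void *rec, size_t len)
{
	/* Reserve space up front; ext4 records it as unwritten extents. */
	if (fallocate(fd, 0, 0, 1 << 20) != 0)
		return -1;
	/* Fill part of the preallocated range with real data. */
	if (pwrite(fd, rec, len, 0) != (ssize_t)len)
		return -1;
	/* Durable only once the unwritten->written conversion is on disk. */
	return fsync(fd);
}
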
/*
 * If we're not journaling and this is a just-created file, we have to
 * sync our parent directory (if it was freshly created) since
 * otherwise it will only be written by writeback, leaving a huge
 * window during which a crash may lose the file.  This may apply to
 * the parent directory's parent as well, and so on recursively, if
 * they are also freshly created.
 */
static int ext4_sync_parent(struct inode *inode)
{
	struct writeback_control wbc;
	struct dentry *dentry = NULL;
	int ret = 0;

	while (inode && ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY)) {
		ext4_clear_inode_state(inode, EXT4_STATE_NEWENTRY);
		dentry = list_entry(inode->i_dentry.next,
				    struct dentry, d_alias);
		if (!dentry || !dentry->d_parent || !dentry->d_parent->d_inode)
			break;
		inode = dentry->d_parent->d_inode;
		ret = sync_mapping_buffers(inode->i_mapping);
		if (ret)
			break;
		memset(&wbc, 0, sizeof(wbc));
		wbc.sync_mode = WB_SYNC_ALL;
		wbc.nr_to_write = 0;         /* only write out the inode */
		ret = sync_inode(inode, &wbc);
		if (ret)
			break;
	}
	return ret;
}

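/*
 * Illustrative userspace sketch, not part of fs/ext4/fsync.c: the crash
 * window described above is the same one that makes careful applications
 * fsync() the parent directory after creating a file, so the new directory
 * entry is durable and not just the file's contents.  ext4_sync_parent()
 * does the equivalent work in the no-journal case.  The helper name
 * create_file_durably() and its arguments are made up for the example.
 */
#include <fcntl.h>
#include <unistd.h>

static int create_file_durably(const char *dirpath, const char *path,
			       const void *buf, size_t len)
{
	int fd, dfd, ret;

	fd = open(path, O_WRONLY | O_CREAT | O_TRUNC, 0644);
	if (fd < 0)
		return -1;
	if (write(fd, buf, len) != (ssize_t)len || fsync(fd) != 0) {
		close(fd);
		return -1;
	}
	close(fd);

	/* Persist the directory entry that names the new file. */
	dfd = open(dirpath, O_RDONLY | O_DIRECTORY);
	if (dfd < 0)
		return -1;
	ret = fsync(dfd);
	close(dfd);
	return ret;
}
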
/*
 * akpm: A new design for ext4_sync_file().
 *
 * This is only called from sys_fsync(), sys_fdatasync() and sys_msync().
 * There cannot be a transaction open by this task.
 * Another task could have dirtied this inode.  Its data can be in any
 * state in the journalling system.
 *
 * What we do is just kick off a commit and wait on it.  This will snapshot the
 * inode to disk.
 *
 * i_mutex lock is held when entering and exiting this function
 */

int ext4_sync_file(struct file *file, int datasync)
{
	struct inode *inode = file->f_mapping->host;
	struct ext4_inode_info *ei = EXT4_I(inode);
	journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
	int ret;
	tid_t commit_tid;

	J_ASSERT(ext4_journal_current_handle() == NULL);

	trace_ext4_sync_file_enter(file, datasync);

	if (inode->i_sb->s_flags & MS_RDONLY)
		return 0;

	ret = ext4_flush_completed_IO(inode);
	if (ret < 0)
		goto out;

	if (!journal) {
		ret = generic_file_fsync(file, datasync);
		if (!ret && !list_empty(&inode->i_dentry))
			ret = ext4_sync_parent(inode);
		goto out;
	}

	/*
	 * data=writeback,ordered:
	 *  The caller's filemap_fdatawrite()/wait will sync the data.
	 *  Metadata is in the journal; we wait for the proper transaction to
	 *  commit here.
	 *
	 * data=journal:
	 *  filemap_fdatawrite won't do anything (the buffers are clean).
	 *  ext4_force_commit will write the file data into the journal and
	 *  will wait on that.
	 *  filemap_fdatawait() will encounter a ton of newly-dirtied pages
	 *  (they were dirtied by commit).  But that's OK - the blocks are
	 *  safe in-journal, which is all fsync() needs to ensure.
	 */
	if (ext4_should_journal_data(inode)) {
		ret = ext4_force_commit(inode->i_sb);
		goto out;
	}

	commit_tid = datasync ? ei->i_datasync_tid : ei->i_sync_tid;
	if (jbd2_log_start_commit(journal, commit_tid)) {
		/*
		 * When the journal is on a different device than the
		 * fs data disk, we need to issue the barrier in
		 * writeback mode.  (In ordered mode, the jbd2 layer
		 * will take care of issuing the barrier.  In
		 * data=journal, all of the data blocks are written to
		 * the journal device.)
		 */
		if (ext4_should_writeback_data(inode) &&
		    (journal->j_fs_dev != journal->j_dev) &&
		    (journal->j_flags & JBD2_BARRIER))
			blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL,
					NULL);
		ret = jbd2_log_wait_commit(journal, commit_tid);
	} else if (journal->j_flags & JBD2_BARRIER)
		blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
 out:
	trace_ext4_sync_file_exit(inode, ret);
	return ret;
}
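
/*
 * Illustrative userspace sketch, not part of fs/ext4/fsync.c: the datasync
 * argument above is set when the caller used fdatasync() rather than fsync().
 * fdatasync() may skip pure timestamp updates, which is why the code picks
 * ei->i_datasync_tid instead of ei->i_sync_tid and can often wait on an
 * older, already-committed transaction.  The helper name append_record() is
 * made up for the example.
 */
#include <unistd.h>

/* Append a record and wait only for what is needed to read it back later. */
static int append_record(int fd, const void *buf, size_t len)
{
	if (write(fd, buf, len) != (ssize_t)len)
		return -1;
	return fdatasync(fd);	/* data and size, but not necessarily mtime */
}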