/*
 *  linux/fs/ext4/fsync.c
 *
 *  Copyright (C) 1993  Stephen Tweedie (sct@redhat.com)
 *  from
 *  Copyright (C) 1992  Remy Card (card@masi.ibp.fr)
 *                      Laboratoire MASI - Institut Blaise Pascal
 *                      Universite Pierre et Marie Curie (Paris VI)
 *  from
 *  linux/fs/minix/truncate.c   Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  ext4fs fsync primitive
 *
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 *
 *  Removed unnecessary code duplication for little endian machines
 *  and excessive __inline__s.
 *        Andi Kleen, 1997
 *
 * Major simplifications and cleanup - we only need to do the metadata, because
 * we can depend on generic_block_fdatasync() to sync the data blocks.
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/jbd2.h>
#include <linux/blkdev.h>

#include "ext4.h"
#include "ext4_jbd2.h"

#include <trace/events/ext4.h>

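/*
 * Dump the inode's list of completed-but-unconverted IO (debug builds only).
 */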
static void dump_completed_IO(struct inode * inode)
{
#ifdef EXT4_DEBUG
        struct list_head *cur, *before, *after;
        ext4_io_end_t *io, *io0, *io1;
        unsigned long flags;

        if (list_empty(&EXT4_I(inode)->i_completed_io_list)){
                ext4_debug("inode %lu completed_io list is empty\n", inode->i_ino);
                return;
        }

        ext4_debug("Dump inode %lu completed_io list\n", inode->i_ino);
        spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
        list_for_each_entry(io, &EXT4_I(inode)->i_completed_io_list, list){
                cur = &io->list;
                before = cur->prev;
                io0 = container_of(before, ext4_io_end_t, list);
                after = cur->next;
                io1 = container_of(after, ext4_io_end_t, list);

                ext4_debug("io 0x%p from inode %lu, prev 0x%p, next 0x%p\n",
                           io, inode->i_ino, io0, io1);
        }
        spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags);
#endif
}

/*
 * This function is called from ext4_sync_file().
 *
 * When IO is completed, the work to convert unwritten extents to
 * written is queued on a workqueue but may not get scheduled
 * immediately. When fsync is called, we need to ensure the
 * conversion is complete before fsync returns.
 * The inode keeps a list of pending/completed IO that may still
 * need the conversion. This function walks the list and converts
 * the unwritten extents of the completed IO to written.
 * It returns 0 on success, or a negative error code on failure.
 */
static int flush_completed_IO(struct inode *inode)
{
        ext4_io_end_t *io;
        struct ext4_inode_info *ei = EXT4_I(inode);
        unsigned long flags;
        int ret = 0;
        int ret2 = 0;

        if (list_empty(&ei->i_completed_io_list))
                return ret;

        dump_completed_IO(inode);
        spin_lock_irqsave(&ei->i_completed_io_lock, flags);
        while (!list_empty(&ei->i_completed_io_list)){
                io = list_entry(ei->i_completed_io_list.next,
                                ext4_io_end_t, list);
                /*
                 * Call ext4_end_io_nolock() to convert the completed
                 * IO to written.
                 *
                 * When ext4_sync_file() is called, run_queue() may already
                 * be about to flush the work corresponding to this io
                 * structure, and it will be upset if it finds that the io
                 * structure for the work it is about to schedule has been
                 * freed.
                 *
                 * Thus we need to keep the io structure valid here even
                 * after the conversion has finished. The io structure has
                 * a flag to avoid converting twice, from both fsync and
                 * the background workqueue.
                 */
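                /* Drop the spinlock across the conversion; ext4_end_io_nolock() may block. */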
                spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
                ret = ext4_end_io_nolock(io);
                spin_lock_irqsave(&ei->i_completed_io_lock, flags);
                if (ret < 0)
                        ret2 = ret;
                else
                        list_del_init(&io->list);
        }
        spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
        return (ret2 < 0) ? ret2 : 0;
}

/*
 * If we're not journaling and this is a just-created file, we have to
 * sync our parent directory (if it was freshly created) since
 * otherwise it will only be written by writeback, leaving a huge
 * window during which a crash may lose the file. This may apply to
 * the parent directory's parent as well, and so on recursively, if
 * they are also freshly created.
 */
static void ext4_sync_parent(struct inode *inode)
{
        struct dentry *dentry = NULL;

        while (inode && ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY)) {
                ext4_clear_inode_state(inode, EXT4_STATE_NEWENTRY);
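                /* Pick the inode's first dentry alias so we can walk up to its parent. */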
                dentry = list_entry(inode->i_dentry.next,
                                    struct dentry, d_alias);
                if (!dentry || !dentry->d_parent || !dentry->d_parent->d_inode)
                        break;
                inode = dentry->d_parent->d_inode;
                sync_mapping_buffers(inode->i_mapping);
        }
}

/*
 * akpm: A new design for ext4_sync_file().
 *
 * This is only called from sys_fsync(), sys_fdatasync() and sys_msync().
 * There cannot be a transaction open by this task.
 * Another task could have dirtied this inode. Its data can be in any
 * state in the journalling system.
 *
 * What we do is just kick off a commit and wait on it. This will snapshot the
 * inode to disk.
 *
 * i_mutex lock is held when entering and exiting this function
 */

int ext4_sync_file(struct file *file, int datasync)
{
        struct inode *inode = file->f_mapping->host;
        struct ext4_inode_info *ei = EXT4_I(inode);
        journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
        int ret;
        tid_t commit_tid;

        J_ASSERT(ext4_journal_current_handle() == NULL);

        trace_ext4_sync_file(file, datasync);

        if (inode->i_sb->s_flags & MS_RDONLY)
                return 0;

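        /* Finish any pending unwritten-extent conversions before syncing. */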
        ret = flush_completed_IO(inode);
        if (ret < 0)
                return ret;

        if (!journal) {
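                /* No journal: fall back to the generic VFS fsync path. */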
                ret = generic_file_fsync(file, datasync);
                if (!ret && !list_empty(&inode->i_dentry))
                        ext4_sync_parent(inode);
                return ret;
        }

        /*
         * data=writeback,ordered:
         *  The caller's filemap_fdatawrite()/wait will sync the data.
         *  Metadata is in the journal; we wait for the proper transaction
         *  to commit here.
         *
         * data=journal:
         *  filemap_fdatawrite won't do anything (the buffers are clean).
         *  ext4_force_commit will write the file data into the journal and
         *  will wait on that.
         *  filemap_fdatawait() will encounter a ton of newly-dirtied pages
         *  (they were dirtied by commit). But that's OK - the blocks are
         *  safe in-journal, which is all fsync() needs to ensure.
         */
        if (ext4_should_journal_data(inode))
                return ext4_force_commit(inode->i_sb);

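        /*
         * For fdatasync() it is enough to wait for the transaction that
         * last touched the data-relevant metadata; a full fsync() waits
         * for the inode's most recent transaction.
         */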
        commit_tid = datasync ? ei->i_datasync_tid : ei->i_sync_tid;
        if (jbd2_log_start_commit(journal, commit_tid)) {
                /*
                 * When the journal is on a different device than the
                 * fs data disk, we need to issue the barrier in
                 * writeback mode.  (In ordered mode, the jbd2 layer
                 * will take care of issuing the barrier.  In
                 * data=journal, all of the data blocks are written to
                 * the journal device.)
                 */
                if (ext4_should_writeback_data(inode) &&
                    (journal->j_fs_dev != journal->j_dev) &&
                    (journal->j_flags & JBD2_BARRIER))
                        blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL,
                                           NULL);
                ret = jbd2_log_wait_commit(journal, commit_tid);
        } else if (journal->j_flags & JBD2_BARRIER)
                blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
        return ret;
}