/*
 * linux/fs/ext4/page-io.c
 *
 * This contains the new page_io functions for ext4
 *
 * Written by Theodore Ts'o, 2010.
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/backing-dev.h>
#include <linux/fscrypto.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

static struct kmem_cache *io_end_cachep;

int __init ext4_init_pageio(void)
{
	io_end_cachep = KMEM_CACHE(ext4_io_end, SLAB_RECLAIM_ACCOUNT);
	if (io_end_cachep == NULL)
		return -ENOMEM;
	return 0;
}

void ext4_exit_pageio(void)
{
	kmem_cache_destroy(io_end_cachep);
}

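/*
 * The two functions above are the setup and teardown for the io_end slab
 * cache; they are called from ext4's module init/exit path (see
 * ext4_init_fs()/ext4_exit_fs() in super.c). Everything else in this file
 * allocates io_ends from, and frees them back to, io_end_cachep.
 */
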
/*
 * Print a buffer I/O error message compatible with the one in
 * fs/buffer.c. This provides compatibility with dmesg scrapers that look
 * for a specific buffer I/O error message. We really need a unified
 * error reporting structure to userspace ala Digital Unix's uerf system,
 * but it's probably not going to happen in my lifetime, due to LKML
 * politics...
 */
static void buffer_io_error(struct buffer_head *bh)
{
	printk_ratelimited(KERN_ERR "Buffer I/O error on device %pg, logical block %llu\n",
			   bh->b_bdev,
			   (unsigned long long)bh->b_blocknr);
}

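/*
 * Finish one bio after writeback: clear the async_write flag on the
 * buffers this bio covered and, once no other bio is still writing some
 * buffer of a page, end writeback on that page. For encrypted inodes the
 * bio carries bounce pages, so the pagecache page is looked up first and
 * the bounce page is released once the page is done.
 */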
static void ext4_finish_bio(struct bio *bio)
{
	int i;
	struct bio_vec *bvec;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;
#ifdef CONFIG_EXT4_FS_ENCRYPTION
		struct page *data_page = NULL;
#endif
		struct buffer_head *bh, *head;
		unsigned bio_start = bvec->bv_offset;
		unsigned bio_end = bio_start + bvec->bv_len;
		unsigned under_io = 0;
		unsigned long flags;

		if (!page)
			continue;

#ifdef CONFIG_EXT4_FS_ENCRYPTION
		if (!page->mapping) {
			/* The bounce data pages are unmapped. */
			data_page = page;
			fscrypt_pullback_bio_page(&page, false);
		}
#endif

		if (bio->bi_error) {
			SetPageError(page);
			set_bit(AS_EIO, &page->mapping->flags);
		}
		bh = head = page_buffers(page);
		/*
		 * We check all buffers in the page under BH_Uptodate_Lock
		 * to avoid races with other end io clearing async_write flags
		 */
		local_irq_save(flags);
		bit_spin_lock(BH_Uptodate_Lock, &head->b_state);
		do {
			if (bh_offset(bh) < bio_start ||
			    bh_offset(bh) + bh->b_size > bio_end) {
				if (buffer_async_write(bh))
					under_io++;
				continue;
			}
			clear_buffer_async_write(bh);
			if (bio->bi_error)
				buffer_io_error(bh);
		} while ((bh = bh->b_this_page) != head);
		bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
		local_irq_restore(flags);
		if (!under_io) {
#ifdef CONFIG_EXT4_FS_ENCRYPTION
			if (data_page)
				fscrypt_restore_control_page(data_page);
#endif
			end_page_writeback(page);
		}
	}
}

static void ext4_release_io_end(ext4_io_end_t *io_end)
{
	struct bio *bio, *next_bio;

	BUG_ON(!list_empty(&io_end->list));
	BUG_ON(io_end->flag & EXT4_IO_END_UNWRITTEN);
	WARN_ON(io_end->handle);

	for (bio = io_end->bio; bio; bio = next_bio) {
		next_bio = bio->bi_private;
		ext4_finish_bio(bio);
		bio_put(bio);
	}
	kmem_cache_free(io_end_cachep, io_end);
}

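/*
 * Reference-count lifecycle (as implemented in this file): an io_end is
 * born with count == 1 in ext4_init_io_end(); io_submit_init_bio() takes
 * one extra reference per bio via ext4_get_io_end(); every completing bio
 * drops its reference in ext4_end_bio() via ext4_put_io_end_defer(); the
 * submitter drops the initial reference with ext4_put_io_end(). Whoever
 * drops the last reference either frees the io_end via
 * ext4_release_io_end() or, if an unwritten-extent conversion is still
 * pending, queues it on the per-inode completed list.
 */
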
/*
 * Check a range of space and convert unwritten extents to written. Note that
 * we are protected from truncate touching the same part of the extent tree
 * by the fact that truncate code waits for all DIO to finish (thus exclusion
 * from direct IO is achieved) and also waits for PageWriteback bits. Thus we
 * cannot get to ext4_ext_truncate() before all IOs overlapping that range are
 * completed (happens from ext4_end_io()).
 */
static int ext4_end_io(ext4_io_end_t *io)
{
	struct inode *inode = io->inode;
	loff_t offset = io->offset;
	ssize_t size = io->size;
	handle_t *handle = io->handle;
	int ret = 0;

	ext4_debug("ext4_end_io: io 0x%p from inode %lu,list->next 0x%p,"
		   "list->prev 0x%p\n",
		   io, inode->i_ino, io->list.next, io->list.prev);

	io->handle = NULL;	/* Following call will use up the handle */
	ret = ext4_convert_unwritten_extents(handle, inode, offset, size);
	if (ret < 0) {
		ext4_msg(inode->i_sb, KERN_EMERG,
			 "failed to convert unwritten extents to written "
			 "extents -- potential data loss! "
			 "(inode %lu, offset %llu, size %zd, error %d)",
			 inode->i_ino, offset, size, ret);
	}
	ext4_clear_io_unwritten_flag(io);
	ext4_release_io_end(io);
	return ret;
}

static void dump_completed_IO(struct inode *inode, struct list_head *head)
{
#ifdef	EXT4FS_DEBUG
	struct list_head *cur, *before, *after;
	ext4_io_end_t *io, *io0, *io1;

	if (list_empty(head))
		return;

	ext4_debug("Dump inode %lu completed io list\n", inode->i_ino);
	list_for_each_entry(io, head, list) {
		cur = &io->list;
		before = cur->prev;
		io0 = container_of(before, ext4_io_end_t, list);
		after = cur->next;
		io1 = container_of(after, ext4_io_end_t, list);

		ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n",
			   io, inode->i_ino, io0, io1);
	}
#endif
}

/* Add the io_end to per-inode completed end_io list. */
static void ext4_add_complete_io(ext4_io_end_t *io_end)
{
	struct ext4_inode_info *ei = EXT4_I(io_end->inode);
	struct ext4_sb_info *sbi = EXT4_SB(io_end->inode->i_sb);
	struct workqueue_struct *wq;
	unsigned long flags;

	/* Only reserved conversions from writeback should enter here */
	WARN_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
	WARN_ON(!io_end->handle && sbi->s_journal);
	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
	wq = sbi->rsv_conversion_wq;
	if (list_empty(&ei->i_rsv_conversion_list))
		queue_work(wq, &ei->i_rsv_conversion_work);
	list_add_tail(&io_end->list, &ei->i_rsv_conversion_list);
	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
}

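/*
 * Drain an inode's list of completed io_ends: splice the list onto a
 * private head under i_completed_io_lock, so that new completions can
 * keep arriving, then run the unwritten-extent conversion for each entry.
 * The first error seen is returned, but all entries are processed.
 */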
static int ext4_do_flush_completed_IO(struct inode *inode,
				      struct list_head *head)
{
	ext4_io_end_t *io;
	struct list_head unwritten;
	unsigned long flags;
	struct ext4_inode_info *ei = EXT4_I(inode);
	int err, ret = 0;

	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
	dump_completed_IO(inode, head);
	list_replace_init(head, &unwritten);
	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);

	while (!list_empty(&unwritten)) {
		io = list_entry(unwritten.next, ext4_io_end_t, list);
		BUG_ON(!(io->flag & EXT4_IO_END_UNWRITTEN));
		list_del_init(&io->list);

		err = ext4_end_io(io);
		if (unlikely(!ret && err))
			ret = err;
	}
	return ret;
}

/*
 * Work on completed IO, to convert unwritten extents to written extents.
 */
void ext4_end_io_rsv_work(struct work_struct *work)
{
	struct ext4_inode_info *ei = container_of(work, struct ext4_inode_info,
						  i_rsv_conversion_work);
	ext4_do_flush_completed_IO(&ei->vfs_inode, &ei->i_rsv_conversion_list);
}

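/*
 * Allocate a zeroed, refcounted io_end (count == 1). A rough sketch of
 * the intended calling pattern (simplified; gfp flags and error handling
 * are up to the real caller in the writeback path):
 *
 *	io_end = ext4_init_io_end(inode, GFP_NOFS);
 *	if (!io_end)
 *		return -ENOMEM;
 *	... submit bios; each takes a reference via ext4_get_io_end() ...
 *	err = ext4_put_io_end(io_end);
 */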
ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
{
	ext4_io_end_t *io = kmem_cache_zalloc(io_end_cachep, flags);
	if (io) {
		io->inode = inode;
		INIT_LIST_HEAD(&io->list);
		atomic_set(&io->count, 1);
	}
	return io;
}

void ext4_put_io_end_defer(ext4_io_end_t *io_end)
{
	if (atomic_dec_and_test(&io_end->count)) {
		if (!(io_end->flag & EXT4_IO_END_UNWRITTEN) || !io_end->size) {
			ext4_release_io_end(io_end);
			return;
		}
		ext4_add_complete_io(io_end);
	}
}

int ext4_put_io_end(ext4_io_end_t *io_end)
{
	int err = 0;

	if (atomic_dec_and_test(&io_end->count)) {
		if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
			err = ext4_convert_unwritten_extents(io_end->handle,
						io_end->inode, io_end->offset,
						io_end->size);
			io_end->handle = NULL;
			ext4_clear_io_unwritten_flag(io_end);
		}
		ext4_release_io_end(io_end);
	}
	return err;
}

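/*
 * Two "put" flavors exist above because bios complete in interrupt
 * context: ext4_put_io_end_defer() punts any pending unwritten-extent
 * conversion to the rsv_conversion workqueue, while ext4_put_io_end()
 * performs the conversion synchronously and so is only suitable for
 * process context.
 */
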
ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end)
{
	atomic_inc(&io_end->count);
	return io_end;
}

/* BIO completion function for page writeback */
static void ext4_end_bio(struct bio *bio)
{
	ext4_io_end_t *io_end = bio->bi_private;
	sector_t bi_sector = bio->bi_iter.bi_sector;

	BUG_ON(!io_end);
	bio->bi_end_io = NULL;

	if (bio->bi_error) {
		struct inode *inode = io_end->inode;

		ext4_warning(inode->i_sb, "I/O error %d writing to inode %lu "
			     "(offset %llu size %ld starting block %llu)",
			     bio->bi_error, inode->i_ino,
			     (unsigned long long) io_end->offset,
			     (long) io_end->size,
			     (unsigned long long)
			     bi_sector >> (inode->i_blkbits - 9));
		mapping_set_error(inode->i_mapping, bio->bi_error);
	}

	if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
		/*
		 * Link bio into list hanging from io_end. We have to do it
		 * atomically as bio completions can be racing against each
		 * other.
		 */
		bio->bi_private = xchg(&io_end->bio, bio);
		ext4_put_io_end_defer(io_end);
	} else {
		/*
		 * Drop io_end reference early. Inode can get freed once
		 * we finish the bio.
		 */
		ext4_put_io_end_defer(io_end);
		ext4_finish_bio(bio);
		bio_put(bio);
	}
}

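/*
 * Submit the bio accumulated in the ext4_io_submit, if any. Integrity
 * writeback (WB_SYNC_ALL) is tagged WRITE_SYNC so the block layer treats
 * the request as synchronous.
 */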
void ext4_io_submit(struct ext4_io_submit *io)
{
	struct bio *bio = io->io_bio;

	if (bio) {
		int io_op_flags = io->io_wbc->sync_mode == WB_SYNC_ALL ?
				  WRITE_SYNC : 0;
		bio_set_op_attrs(io->io_bio, REQ_OP_WRITE, io_op_flags);
		submit_bio(io->io_bio);
	}
	io->io_bio = NULL;
}

void ext4_io_submit_init(struct ext4_io_submit *io,
			 struct writeback_control *wbc)
{
	io->io_wbc = wbc;
	io->io_bio = NULL;
	io->io_end = NULL;
}

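/*
 * Illustrative driver for the two helpers above (simplified from the
 * writeback path in inode.c; not a verbatim caller, and keep_towrite is
 * shown as false only for illustration):
 *
 *	struct ext4_io_submit io;
 *
 *	ext4_io_submit_init(&io, wbc);
 *	io.io_end = ext4_init_io_end(inode, GFP_NOFS);
 *	ret = ext4_bio_write_page(&io, page, len, wbc, false);
 *	ext4_io_submit(&io);
 *	ret2 = ext4_put_io_end(io.io_end);
 */
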
static int io_submit_init_bio(struct ext4_io_submit *io,
			      struct buffer_head *bh)
{
	struct bio *bio;

	bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
	if (!bio)
		return -ENOMEM;
	wbc_init_bio(io->io_wbc, bio);
	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	bio->bi_end_io = ext4_end_bio;
	bio->bi_private = ext4_get_io_end(io->io_end);
	io->io_bio = bio;
	io->io_next_block = bh->b_blocknr;
	return 0;
}

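/*
 * Append one buffer's data to the in-flight bio. A fresh bio is started
 * when there is no current bio, when the buffer is not physically
 * contiguous with the previous one (io_next_block mismatch), or when
 * bio_add_page() cannot take the whole buffer because the bio is full.
 */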
static int io_submit_add_bh(struct ext4_io_submit *io,
			    struct inode *inode,
			    struct page *page,
			    struct buffer_head *bh)
{
	int ret;

	if (io->io_bio && bh->b_blocknr != io->io_next_block) {
submit_and_retry:
		ext4_io_submit(io);
	}
	if (io->io_bio == NULL) {
		ret = io_submit_init_bio(io, bh);
		if (ret)
			return ret;
	}
	ret = bio_add_page(io->io_bio, page, bh->b_size, bh_offset(bh));
	if (ret != bh->b_size)
		goto submit_and_retry;
	wbc_account_io(io->io_wbc, page, bh->b_size);
	io->io_next_block++;
	return 0;
}

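/*
 * Write out the dirty, mapped buffers of a single locked page. For
 * encrypted regular files the data is first copied and encrypted into a
 * bounce page by fscrypt_encrypt_page(), and it is the bounce page, not
 * the pagecache page, that gets attached to the bio.
 */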
int ext4_bio_write_page(struct ext4_io_submit *io,
			struct page *page,
			int len,
			struct writeback_control *wbc,
			bool keep_towrite)
{
	struct page *data_page = NULL;
	struct inode *inode = page->mapping->host;
	unsigned block_start, blocksize;
	struct buffer_head *bh, *head;
	int ret = 0;
	int nr_submitted = 0;
	int nr_to_submit = 0;

	blocksize = 1 << inode->i_blkbits;

	BUG_ON(!PageLocked(page));
	BUG_ON(PageWriteback(page));

	if (keep_towrite)
		set_page_writeback_keepwrite(page);
	else
		set_page_writeback(page);
	ClearPageError(page);

	/*
	 * Comments copied from block_write_full_page:
	 *
	 * The page straddles i_size. It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped. "A file is mapped
	 * in multiples of the page size. For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	if (len < PAGE_SIZE)
		zero_user_segment(page, len, PAGE_SIZE);
	/*
	 * In the first loop we prepare and mark buffers to submit. We have to
	 * mark all buffers in the page before submitting so that
	 * end_page_writeback() cannot be called from ext4_bio_end_io() when IO
	 * on the first buffer finishes and we are still working on submitting
	 * the second buffer.
	 */
	bh = head = page_buffers(page);
	do {
		block_start = bh_offset(bh);
		if (block_start >= len) {
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
			continue;
		}
		if (!buffer_dirty(bh) || buffer_delay(bh) ||
		    !buffer_mapped(bh) || buffer_unwritten(bh)) {
			/* A hole? We can safely clear the dirty bit */
			if (!buffer_mapped(bh))
				clear_buffer_dirty(bh);
			if (io->io_bio)
				ext4_io_submit(io);
			continue;
		}
		if (buffer_new(bh)) {
			clear_buffer_new(bh);
			unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
		}
		set_buffer_async_write(bh);
		nr_to_submit++;
	} while ((bh = bh->b_this_page) != head);

	bh = head = page_buffers(page);

	if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode) &&
	    nr_to_submit) {
		gfp_t gfp_flags = GFP_NOFS;

	retry_encrypt:
		data_page = fscrypt_encrypt_page(inode, page, gfp_flags);
		if (IS_ERR(data_page)) {
			ret = PTR_ERR(data_page);
			if (ret == -ENOMEM && wbc->sync_mode == WB_SYNC_ALL) {
				if (io->io_bio) {
					ext4_io_submit(io);
					congestion_wait(BLK_RW_ASYNC, HZ/50);
				}
				gfp_flags |= __GFP_NOFAIL;
				goto retry_encrypt;
			}
			data_page = NULL;
			goto out;
		}
	}

	/* Now submit buffers to write */
	do {
		if (!buffer_async_write(bh))
			continue;
		ret = io_submit_add_bh(io, inode,
				       data_page ? data_page : page, bh);
		if (ret) {
			/*
			 * We only get here on ENOMEM. Not much else
			 * we can do but mark the page as dirty, and
			 * better luck next time.
			 */
			break;
		}
		nr_submitted++;
		clear_buffer_dirty(bh);
	} while ((bh = bh->b_this_page) != head);

	/* Error stopped previous loop? Clean up buffers... */
	if (ret) {
	out:
		if (data_page)
			fscrypt_restore_control_page(data_page);
		printk_ratelimited(KERN_ERR "%s: ret = %d\n", __func__, ret);
		redirty_page_for_writepage(wbc, page);
		do {
			clear_buffer_async_write(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	unlock_page(page);
	/* Nothing submitted - we have to end page writeback */
	if (!nr_submitted)
		end_page_writeback(page);
	return ret;
}