/*
 * linux/fs/ext4/page-io.c
 *
 * This contains the new page_io functions for ext4
 *
 * Written by Theodore Ts'o, 2010.
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/slab.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "ext4_extents.h"

static struct kmem_cache *io_page_cachep, *io_end_cachep;

int __init ext4_init_pageio(void)
{
        io_page_cachep = KMEM_CACHE(ext4_io_page, SLAB_RECLAIM_ACCOUNT);
        if (io_page_cachep == NULL)
                return -ENOMEM;
        io_end_cachep = KMEM_CACHE(ext4_io_end, SLAB_RECLAIM_ACCOUNT);
        if (io_end_cachep == NULL) {
                kmem_cache_destroy(io_page_cachep);
                return -ENOMEM;
        }
        return 0;
}

void ext4_exit_pageio(void)
{
        kmem_cache_destroy(io_end_cachep);
        kmem_cache_destroy(io_page_cachep);
}

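/*
 * Wait for all pending io_end structures for this inode to be freed;
 * the per-inode i_ioend_count drops to zero once the last one is gone.
 */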
void ext4_ioend_wait(struct inode *inode)
{
        wait_queue_head_t *wq = ext4_ioend_wq(inode);

        wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_ioend_count) == 0));
}

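/*
 * Drop a reference to an io_page; the final put ends writeback on the
 * underlying page and frees the io_page structure.
 */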
static void put_io_page(struct ext4_io_page *io_page)
{
        if (atomic_dec_and_test(&io_page->p_count)) {
                end_page_writeback(io_page->p_page);
                put_page(io_page->p_page);
                kmem_cache_free(io_page_cachep, io_page);
        }
}

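/*
 * Free an io_end structure, dropping the references it holds on its
 * pages and waking any waiters in ext4_ioend_wait() once the inode's
 * last outstanding io_end is gone.
 */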
void ext4_free_io_end(ext4_io_end_t *io)
{
        int i;

        BUG_ON(!io);
        if (io->page)
                put_page(io->page);
        for (i = 0; i < io->num_io_pages; i++)
                put_io_page(io->pages[i]);
        io->num_io_pages = 0;
        if (atomic_dec_and_test(&EXT4_I(io->inode)->i_ioend_count))
                wake_up_all(ext4_ioend_wq(io->inode));
        kmem_cache_free(io_end_cachep, io);
}

/*
 * check a range of space and convert unwritten extents to written.
 *
 * Called with inode->i_mutex; we depend on this when we manipulate
 * io->flag, since we could otherwise race with ext4_flush_completed_IO()
 */
int ext4_end_io_nolock(ext4_io_end_t *io)
{
        struct inode *inode = io->inode;
        loff_t offset = io->offset;
        ssize_t size = io->size;
        int ret = 0;

        ext4_debug("ext4_end_io_nolock: io 0x%p from inode %lu,list->next 0x%p,"
                   "list->prev 0x%p\n",
                   io, inode->i_ino, io->list.next, io->list.prev);

        ret = ext4_convert_unwritten_extents(inode, offset, size);
        if (ret < 0) {
                ext4_msg(inode->i_sb, KERN_EMERG,
                         "failed to convert unwritten extents to written "
                         "extents -- potential data loss! "
                         "(inode %lu, offset %llu, size %zd, error %d)",
                         inode->i_ino, offset, size, ret);
        }

        if (io->iocb)
                aio_complete(io->iocb, io->result, 0);

        /* Wake up anyone waiting on unwritten extent conversion */
        if (atomic_dec_and_test(&EXT4_I(inode)->i_aiodio_unwritten))
                wake_up_all(ext4_ioend_wq(io->inode));
        return ret;
}

/*
 * Work on completed aio dio IO, to convert unwritten extents to
 * written extents.
 */
static void ext4_end_io_work(struct work_struct *work)
{
        ext4_io_end_t *io = container_of(work, ext4_io_end_t, work);
        struct inode *inode = io->inode;
        struct ext4_inode_info *ei = EXT4_I(inode);
        unsigned long flags;

        spin_lock_irqsave(&ei->i_completed_io_lock, flags);
        if (list_empty(&io->list)) {
                spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
                goto free;
        }

        if (!mutex_trylock(&inode->i_mutex)) {
                spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
                /*
                 * Requeue the work instead of waiting so that the work
                 * items queued after this can be processed.
                 */
                queue_work(EXT4_SB(inode->i_sb)->dio_unwritten_wq, &io->work);
                /*
                 * To prevent the ext4-dio-unwritten thread from keeping
                 * requeueing end_io requests and occupying cpu for too long,
                 * yield the cpu if it sees an end_io request that has already
                 * been requeued.
                 */
                if (io->flag & EXT4_IO_END_QUEUED)
                        yield();
                io->flag |= EXT4_IO_END_QUEUED;
                return;
        }
        list_del_init(&io->list);
        spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
        (void) ext4_end_io_nolock(io);
        mutex_unlock(&inode->i_mutex);
free:
        ext4_free_io_end(io);
}

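/*
 * Allocate and initialize an io_end structure for @inode, bumping the
 * inode's count of outstanding io_ends.
 */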
ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
{
        ext4_io_end_t *io = kmem_cache_zalloc(io_end_cachep, flags);
        if (io) {
                atomic_inc(&EXT4_I(inode)->i_ioend_count);
                io->inode = inode;
                INIT_WORK(&io->work, ext4_end_io_work);
                INIT_LIST_HEAD(&io->list);
        }
        return io;
}

/*
 * Print a buffer I/O error compatible with fs/buffer.c.  This provides
 * compatibility with dmesg scrapers that look for a specific buffer
 * I/O error message.  We really need a unified error reporting
 * structure to userspace ala Digital Unix's uerf system, but it's
 * probably not going to happen in my lifetime, due to LKML politics...
 */
static void buffer_io_error(struct buffer_head *bh)
{
        char b[BDEVNAME_SIZE];

        printk(KERN_ERR "Buffer I/O error on device %s, logical block %llu\n",
               bdevname(bh->b_bdev, b),
               (unsigned long long)bh->b_blocknr);
}

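/*
 * Completion callback for the bios submitted below: report any I/O
 * errors on the affected buffers, drop the page references, and either
 * free the io_end or queue it for unwritten extent conversion.
 */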
static void ext4_end_bio(struct bio *bio, int error)
{
        ext4_io_end_t *io_end = bio->bi_private;
        struct workqueue_struct *wq;
        struct inode *inode;
        unsigned long flags;
        int i;
        sector_t bi_sector = bio->bi_sector;

        BUG_ON(!io_end);
        bio->bi_private = NULL;
        bio->bi_end_io = NULL;
        if (test_bit(BIO_UPTODATE, &bio->bi_flags))
                error = 0;
        bio_put(bio);

        for (i = 0; i < io_end->num_io_pages; i++) {
                struct page *page = io_end->pages[i]->p_page;
                struct buffer_head *bh, *head;
                loff_t offset;
                loff_t io_end_offset;

                if (error) {
                        SetPageError(page);
                        set_bit(AS_EIO, &page->mapping->flags);
                        head = page_buffers(page);
                        BUG_ON(!head);

                        io_end_offset = io_end->offset + io_end->size;

                        offset = (sector_t) page->index << PAGE_CACHE_SHIFT;
                        bh = head;
                        do {
                                if ((offset >= io_end->offset) &&
                                    (offset + bh->b_size <= io_end_offset))
                                        buffer_io_error(bh);

                                offset += bh->b_size;
                                bh = bh->b_this_page;
                        } while (bh != head);
                }

                put_io_page(io_end->pages[i]);
        }
        io_end->num_io_pages = 0;
        inode = io_end->inode;

        if (error) {
                io_end->flag |= EXT4_IO_END_ERROR;
                ext4_warning(inode->i_sb, "I/O error writing to inode %lu "
                             "(offset %llu size %ld starting block %llu)",
                             inode->i_ino,
                             (unsigned long long) io_end->offset,
                             (long) io_end->size,
                             (unsigned long long)
                             bi_sector >> (inode->i_blkbits - 9));
        }

        if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
                ext4_free_io_end(io_end);
                return;
        }

        /* Add the io_end to the per-inode completed io list */
        spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
        list_add_tail(&io_end->list, &EXT4_I(inode)->i_completed_io_list);
        spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags);

        wq = EXT4_SB(inode->i_sb)->dio_unwritten_wq;
        /* queue the work to convert unwritten extents to written */
        queue_work(wq, &io_end->work);
}

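/*
 * Submit the bio currently attached to @io (if any) and reset the
 * submission state for the next caller.
 */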
void ext4_io_submit(struct ext4_io_submit *io)
{
        struct bio *bio = io->io_bio;

        if (bio) {
                bio_get(io->io_bio);
                submit_bio(io->io_op, io->io_bio);
                BUG_ON(bio_flagged(io->io_bio, BIO_EOPNOTSUPP));
                bio_put(io->io_bio);
        }
        io->io_bio = NULL;
        io->io_op = 0;
        io->io_end = NULL;
}

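/*
 * Set up a fresh bio and io_end for writing out @bh; the bio's
 * completion handler is ext4_end_bio().
 */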
static int io_submit_init(struct ext4_io_submit *io,
                          struct inode *inode,
                          struct writeback_control *wbc,
                          struct buffer_head *bh)
{
        ext4_io_end_t *io_end;
        struct page *page = bh->b_page;
        int nvecs = bio_get_nr_vecs(bh->b_bdev);
        struct bio *bio;

        io_end = ext4_init_io_end(inode, GFP_NOFS);
        if (!io_end)
                return -ENOMEM;
        bio = bio_alloc(GFP_NOIO, min(nvecs, BIO_MAX_PAGES));
        bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
        bio->bi_bdev = bh->b_bdev;
        bio->bi_private = io->io_end = io_end;
        bio->bi_end_io = ext4_end_bio;

        io_end->offset = (page->index << PAGE_CACHE_SHIFT) + bh_offset(bh);

        io->io_bio = bio;
        io->io_op = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
        io->io_next_block = bh->b_blocknr;
        return 0;
}

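/*
 * Add @bh to the bio being built up in @io, submitting the current bio
 * and starting a new one when the buffer is not contiguous with the
 * previous block or the bio is full.
 */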
static int io_submit_add_bh(struct ext4_io_submit *io,
                            struct ext4_io_page *io_page,
                            struct inode *inode,
                            struct writeback_control *wbc,
                            struct buffer_head *bh)
{
        ext4_io_end_t *io_end;
        int ret;

        if (buffer_new(bh)) {
                clear_buffer_new(bh);
                unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
        }

        if (!buffer_mapped(bh) || buffer_delay(bh)) {
                if (!buffer_mapped(bh))
                        clear_buffer_dirty(bh);
                if (io->io_bio)
                        ext4_io_submit(io);
                return 0;
        }

        if (io->io_bio && bh->b_blocknr != io->io_next_block) {
submit_and_retry:
                ext4_io_submit(io);
        }
        if (io->io_bio == NULL) {
                ret = io_submit_init(io, inode, wbc, bh);
                if (ret)
                        return ret;
        }
        io_end = io->io_end;
        if ((io_end->num_io_pages >= MAX_IO_PAGES) &&
            (io_end->pages[io_end->num_io_pages - 1] != io_page))
                goto submit_and_retry;
        if (buffer_uninit(bh))
                ext4_set_io_unwritten_flag(inode, io_end);
        io->io_end->size += bh->b_size;
        io->io_next_block++;
        ret = bio_add_page(io->io_bio, bh->b_page, bh->b_size, bh_offset(bh));
        if (ret != bh->b_size)
                goto submit_and_retry;
        if ((io_end->num_io_pages == 0) ||
            (io_end->pages[io_end->num_io_pages - 1] != io_page)) {
                io_end->pages[io_end->num_io_pages++] = io_page;
                atomic_inc(&io_page->p_count);
        }
        return 0;
}

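/*
 * Write out the dirty buffers of a locked @page: mark the page as under
 * writeback, zero the blocks of the page at or beyond @len, and feed
 * each remaining dirty buffer to io_submit_add_bh().
 */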
int ext4_bio_write_page(struct ext4_io_submit *io,
                        struct page *page,
                        int len,
                        struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        unsigned block_start, block_end, blocksize;
        struct ext4_io_page *io_page;
        struct buffer_head *bh, *head;
        int ret = 0;

        blocksize = 1 << inode->i_blkbits;

        BUG_ON(!PageLocked(page));
        BUG_ON(PageWriteback(page));

        io_page = kmem_cache_alloc(io_page_cachep, GFP_NOFS);
        if (!io_page) {
                set_page_dirty(page);
                unlock_page(page);
                return -ENOMEM;
        }
        io_page->p_page = page;
        atomic_set(&io_page->p_count, 1);
        get_page(page);
        set_page_writeback(page);
        ClearPageError(page);

        for (bh = head = page_buffers(page), block_start = 0;
             bh != head || !block_start;
             block_start = block_end, bh = bh->b_this_page) {

                block_end = block_start + blocksize;
                if (block_start >= len) {
                        /*
                         * Comments copied from block_write_full_page_endio:
                         *
                         * The page straddles i_size.  It must be zeroed out on
                         * each and every writepage invocation because it may
                         * be mmapped.  "A file is mapped in multiples of the
                         * page size.  For a file that is not a multiple of
                         * the page size, the remaining memory is zeroed when
                         * mapped, and writes to that region are not written
                         * out to the file."
                         */
                        zero_user_segment(page, block_start, block_end);
                        clear_buffer_dirty(bh);
                        set_buffer_uptodate(bh);
                        continue;
                }
                clear_buffer_dirty(bh);
                ret = io_submit_add_bh(io, io_page, inode, wbc, bh);
                if (ret) {
                        /*
                         * We only get here on ENOMEM.  Not much else
                         * we can do but mark the page as dirty, and
                         * better luck next time.
                         */
                        set_page_dirty(page);
                        break;
                }
        }
        unlock_page(page);
        /*
         * If the page was truncated before we could do the writeback,
         * or we had a memory allocation error while trying to write
         * the first buffer head, we won't have submitted any pages for
         * I/O.  In that case we need to make sure we've cleared the
         * PageWriteback bit from the page to prevent the system from
         * wedging later on.
         */
        put_io_page(io_page);
        return ret;
}