/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_trans.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_error.h"
#include "xfs_rw.h"
#include "xfs_iomap.h"
#include "xfs_vnodeops.h"
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>

/*
 * Prime number of hash buckets since address is used as the key.
 */
#define NVSYNC		37
#define to_ioend_wq(v)	(&xfs_ioend_wq[((unsigned long)v) % NVSYNC])
static wait_queue_head_t xfs_ioend_wq[NVSYNC];

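/*
 * Per-bucket wait queues used to wait for all in-flight ioends against an
 * inode (tracked by i_iocount) to drain.
 */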
void __init
xfs_ioend_init(void)
{
	int i;

	for (i = 0; i < NVSYNC; i++)
		init_waitqueue_head(&xfs_ioend_wq[i]);
}

void
xfs_ioend_wait(
	xfs_inode_t	*ip)
{
	wait_queue_head_t *wq = to_ioend_wq(ip);

	wait_event(*wq, (atomic_read(&ip->i_iocount) == 0));
}

STATIC void
xfs_ioend_wake(
	xfs_inode_t	*ip)
{
	if (atomic_dec_and_test(&ip->i_iocount))
		wake_up(to_ioend_wq(ip));
}

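/*
 * Walk the buffers on a page and report whether any of them are delalloc,
 * unmapped-but-uptodate, or unwritten.
 */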
STATIC void
xfs_count_page_state(
	struct page		*page,
	int			*delalloc,
	int			*unmapped,
	int			*unwritten)
{
	struct buffer_head	*bh, *head;

	*delalloc = *unmapped = *unwritten = 0;

	bh = head = page_buffers(page);
	do {
		if (buffer_uptodate(bh) && !buffer_mapped(bh))
			(*unmapped) = 1;
		else if (buffer_unwritten(bh))
			(*unwritten) = 1;
		else if (buffer_delay(bh))
			(*delalloc) = 1;
	} while ((bh = bh->b_this_page) != head);
}

#if defined(XFS_RW_TRACE)
void
xfs_page_trace(
	int		tag,
	struct inode	*inode,
	struct page	*page,
	unsigned long	pgoff)
{
	xfs_inode_t	*ip;
	loff_t		isize = i_size_read(inode);
	loff_t		offset = page_offset(page);
	int		delalloc = -1, unmapped = -1, unwritten = -1;

	if (page_has_buffers(page))
		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);

	ip = XFS_I(inode);
	if (!ip->i_rwtrace)
		return;

	ktrace_enter(ip->i_rwtrace,
		(void *)((unsigned long)tag),
		(void *)ip,
		(void *)inode,
		(void *)page,
		(void *)pgoff,
		(void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
		(void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
		(void *)((unsigned long)((isize >> 32) & 0xffffffff)),
		(void *)((unsigned long)(isize & 0xffffffff)),
		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
		(void *)((unsigned long)(offset & 0xffffffff)),
		(void *)((unsigned long)delalloc),
		(void *)((unsigned long)unmapped),
		(void *)((unsigned long)unwritten),
		(void *)((unsigned long)current_pid()),
		(void *)NULL);
}
#else
#define xfs_page_trace(tag, inode, page, pgoff)
#endif

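/*
 * Return the block device backing this inode's data: the realtime device
 * for realtime files, the data device otherwise.
 */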
STATIC struct block_device *
xfs_find_bdev_for_inode(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_bdev;
	else
		return mp->m_ddev_targp->bt_bdev;
}

/*
 * We're now finished for good with this ioend structure.
 * Update the page state via the associated buffer_heads,
 * release holds on the inode and bio, and finally free
 * up memory.  Do not use the ioend after this.
 */
STATIC void
xfs_destroy_ioend(
	xfs_ioend_t		*ioend)
{
	struct buffer_head	*bh, *next;
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);

	for (bh = ioend->io_buffer_head; bh; bh = next) {
		next = bh->b_private;
		bh->b_end_io(bh, !ioend->io_error);
	}

	/*
	 * Volume managers supporting multiple paths can send back ENODEV
	 * when the final path disappears.  In this case continuing to fill
	 * the page cache with dirty data which cannot be written out is
	 * evil, so prevent that.
	 */
	if (unlikely(ioend->io_error == -ENODEV)) {
		xfs_do_force_shutdown(ip->i_mount, SHUTDOWN_DEVICE_REQ,
				      __FILE__, __LINE__);
	}

	xfs_ioend_wake(ip);
	mempool_free(ioend, xfs_ioend_pool);
}

/*
 * If the end of the current ioend is beyond the current EOF,
 * return the new EOF value, otherwise zero.
 */
STATIC xfs_fsize_t
xfs_ioend_new_eof(
	xfs_ioend_t		*ioend)
{
	xfs_inode_t		*ip = XFS_I(ioend->io_inode);
	xfs_fsize_t		isize;
	xfs_fsize_t		bsize;

	bsize = ioend->io_offset + ioend->io_size;
	isize = MAX(ip->i_size, ip->i_new_size);
	isize = MIN(isize, bsize);
	return isize > ip->i_d.di_size ? isize : 0;
}

/*
 * Update on-disk file size now that data has been written to disk.
 * The current in-memory file size is i_size.  If a write is beyond
 * eof i_new_size will be the intended file size until i_size is
 * updated.  If this write does not extend all the way to the valid
 * file size then restrict this update to the end of the write.
 */
STATIC void
xfs_setfilesize(
	xfs_ioend_t		*ioend)
{
	xfs_inode_t		*ip = XFS_I(ioend->io_inode);
	xfs_fsize_t		isize;

	ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
	ASSERT(ioend->io_type != IOMAP_READ);

	if (unlikely(ioend->io_error))
		return;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	isize = xfs_ioend_new_eof(ioend);
	if (isize) {
		ip->i_d.di_size = isize;
		xfs_mark_inode_dirty_sync(ip);
	}

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
}

/*
 * IO write completion.
 */
STATIC void
xfs_end_io(
	struct work_struct	*work)
{
	xfs_ioend_t		*ioend =
		container_of(work, xfs_ioend_t, io_work);
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);

	/*
	 * For unwritten extents we need to issue transactions to convert a
	 * range to normal written extents after the data I/O has finished.
	 */
	if (ioend->io_type == IOMAP_UNWRITTEN &&
	    likely(!ioend->io_error && !XFS_FORCED_SHUTDOWN(ip->i_mount))) {
		int error;

		error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
						  ioend->io_size);
		if (error)
			ioend->io_error = error;
	}

	/*
	 * We might have to update the on-disk file size after extending
	 * writes.
	 */
	if (ioend->io_type != IOMAP_READ)
		xfs_setfilesize(ioend);
	xfs_destroy_ioend(ioend);
}

/*
 * Schedule IO completion handling on an xfsdatad if this was
 * the final hold on this ioend.  If we are asked to wait,
 * flush the workqueue.
 */
STATIC void
xfs_finish_ioend(
	xfs_ioend_t	*ioend,
	int		wait)
{
	if (atomic_dec_and_test(&ioend->io_remaining)) {
		struct workqueue_struct *wq;

		wq = (ioend->io_type == IOMAP_UNWRITTEN) ?
			xfsconvertd_workqueue : xfsdatad_workqueue;
		queue_work(wq, &ioend->io_work);
		if (wait)
			flush_workqueue(wq);
	}
}

/*
 * Allocate and initialise an IO completion structure.
 * We need to track unwritten extent write completion here initially.
 * We'll need to extend this for updating the ondisk inode size later
 * (vs. incore size).
 */
STATIC xfs_ioend_t *
xfs_alloc_ioend(
	struct inode		*inode,
	unsigned int		type)
{
	xfs_ioend_t		*ioend;

	ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);

	/*
	 * Set the count to 1 initially; this prevents the I/O completion
	 * callback from calling the completion routine too early, i.e.
	 * before we have started all the I/O.
	 */
	atomic_set(&ioend->io_remaining, 1);
	ioend->io_error = 0;
	ioend->io_list = NULL;
	ioend->io_type = type;
	ioend->io_inode = inode;
	ioend->io_buffer_head = NULL;
	ioend->io_buffer_tail = NULL;
	atomic_inc(&XFS_I(ioend->io_inode)->i_iocount);
	ioend->io_offset = 0;
	ioend->io_size = 0;

	INIT_WORK(&ioend->io_work, xfs_end_io);
	return ioend;
}

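/*
 * Map a file range to disk blocks via xfs_iomap(); the result is returned
 * in *mapp, and a negative errno is returned on failure.
 */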
STATIC int
xfs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	ssize_t			count,
	xfs_iomap_t		*mapp,
	int			flags)
{
	int			nmaps = 1;

	return -xfs_iomap(XFS_I(inode), offset, count, flags, mapp, &nmaps);
}

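/*
 * Return non-zero if the given file offset falls inside the mapping.
 */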
STATIC int
xfs_iomap_valid(
	xfs_iomap_t		*iomapp,
	loff_t			offset)
{
	return offset >= iomapp->iomap_offset &&
		offset < iomapp->iomap_offset + iomapp->iomap_bsize;
}

/*
 * BIO completion handler for buffered IO.
 */
STATIC void
xfs_end_bio(
	struct bio		*bio,
	int			error)
{
	xfs_ioend_t		*ioend = bio->bi_private;

	ASSERT(atomic_read(&bio->bi_cnt) >= 1);
	ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;

	/* Toss bio and pass work off to an xfsdatad thread */
	bio->bi_private = NULL;
	bio->bi_end_io = NULL;
	bio_put(bio);

	xfs_finish_ioend(ioend, 0);
}

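/*
 * Hand a bio off to the block layer, taking an extra hold on the ioend
 * for it; the hold is dropped by xfs_end_bio() at I/O completion.
 */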
STATIC void
xfs_submit_ioend_bio(
	struct writeback_control *wbc,
	xfs_ioend_t		*ioend,
	struct bio		*bio)
{
	atomic_inc(&ioend->io_remaining);
	bio->bi_private = ioend;
	bio->bi_end_io = xfs_end_bio;

	/*
	 * If the I/O is beyond EOF we mark the inode dirty immediately
	 * but don't update the inode size until I/O completion.
	 */
	if (xfs_ioend_new_eof(ioend))
		xfs_mark_inode_dirty_sync(XFS_I(ioend->io_inode));

	submit_bio(wbc->sync_mode == WB_SYNC_ALL ?
		   WRITE_SYNC_PLUG : WRITE, bio);
	ASSERT(!bio_flagged(bio, BIO_EOPNOTSUPP));
	bio_put(bio);
}

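/*
 * Allocate a bio for writeback of this buffer, initially sized to the
 * device's maximum vector count.
 */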
STATIC struct bio *
xfs_alloc_ioend_bio(
	struct buffer_head	*bh)
{
	struct bio		*bio;
	int			nvecs = bio_get_nr_vecs(bh->b_bdev);

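	/*
	 * bio_alloc() can fail for large vector counts, so keep halving
	 * the request until it succeeds; small allocations are backed by
	 * the bio mempool and should eventually succeed.
	 */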
	do {
		bio = bio_alloc(GFP_NOIO, nvecs);
		nvecs >>= 1;
	} while (!bio);

	ASSERT(bio->bi_private == NULL);
	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	bio_get(bio);
	return bio;
}

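/*
 * Mark a buffer ready for async writeback: uptodate, clean, and flagged
 * async write so end_buffer_async_write() runs at I/O completion.
 */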
STATIC void
xfs_start_buffer_writeback(
	struct buffer_head	*bh)
{
	ASSERT(buffer_mapped(bh));
	ASSERT(buffer_locked(bh));
	ASSERT(!buffer_delay(bh));
	ASSERT(!buffer_unwritten(bh));

	mark_buffer_async_write(bh);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
}

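/*
 * Move a page into writeback state and unlock it, clearing the dirty
 * flag first if requested.
 */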
STATIC void
xfs_start_page_writeback(
	struct page		*page,
	int			clear_dirty,
	int			buffers)
{
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));
	if (clear_dirty)
		clear_page_dirty_for_io(page);
	set_page_writeback(page);
	unlock_page(page);
	/* If no buffers on the page are to be written, finish it here */
	if (!buffers)
		end_page_writeback(page);
}

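/*
 * Add a whole buffer to a bio; returns the number of bytes added, which
 * is less than bh->b_size when the bio is full.
 */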
static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}

/*
 * Submit all of the bios for all of the ioends we have saved up, covering the
 * initial writepage page and also any probed pages.
 *
 * Because we may have multiple ioends spanning a page, we need to start
 * writeback on all the buffers before we submit them for I/O.  If we mark the
 * buffers as we go, then we can end up with a page that only has buffers
 * marked async write, and I/O completion can occur before we mark the other
 * buffers async write.
 *
 * The end result of this is that we trip a bug in end_page_writeback() because
 * we call it twice for the one page as the code in end_buffer_async_write()
 * assumes that all buffers on the page are started at the same time.
 *
 * The fix is two passes across the ioend list - one to start writeback on the
 * buffer_heads, and then submit them for I/O on the second pass.
 */
STATIC void
xfs_submit_ioend(
	struct writeback_control *wbc,
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*head = ioend;
	xfs_ioend_t		*next;
	struct buffer_head	*bh;
	struct bio		*bio;
	sector_t		lastblock = 0;

	/* Pass 1 - start writeback */
	do {
		next = ioend->io_list;
		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
			xfs_start_buffer_writeback(bh);
		}
	} while ((ioend = next) != NULL);

	/* Pass 2 - submit I/O */
	ioend = head;
	do {
		next = ioend->io_list;
		bio = NULL;

		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {

			if (!bio) {
 retry:
				bio = xfs_alloc_ioend_bio(bh);
			} else if (bh->b_blocknr != lastblock + 1) {
				xfs_submit_ioend_bio(wbc, ioend, bio);
				goto retry;
			}

			if (bio_add_buffer(bio, bh) != bh->b_size) {
				xfs_submit_ioend_bio(wbc, ioend, bio);
				goto retry;
			}

			lastblock = bh->b_blocknr;
		}
		if (bio)
			xfs_submit_ioend_bio(wbc, ioend, bio);
		xfs_finish_ioend(ioend, 0);
	} while ((ioend = next) != NULL);
}

/*
 * Cancel submission of all buffer_heads so far in this endio.
 * Toss the endio too.  Only ever called for the initial page
 * in a writepage request, so only ever one page.
 */
STATIC void
xfs_cancel_ioend(
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*next;
	struct buffer_head	*bh, *next_bh;

	do {
		next = ioend->io_list;
		bh = ioend->io_buffer_head;
		do {
			next_bh = bh->b_private;
			clear_buffer_async_write(bh);
			unlock_buffer(bh);
		} while ((bh = next_bh) != NULL);

		xfs_ioend_wake(XFS_I(ioend->io_inode));
		mempool_free(ioend, xfs_ioend_pool);
	} while ((ioend = next) != NULL);
}

/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 * The new or extended ioend is returned through *result.
 */
STATIC void
xfs_add_to_ioend(
	struct inode		*inode,
	struct buffer_head	*bh,
	xfs_off_t		offset,
	unsigned int		type,
	xfs_ioend_t		**result,
	int			need_ioend)
{
	xfs_ioend_t		*ioend = *result;

	if (!ioend || need_ioend || type != ioend->io_type) {
		xfs_ioend_t	*previous = *result;

		ioend = xfs_alloc_ioend(inode, type);
		ioend->io_offset = offset;
		ioend->io_buffer_head = bh;
		ioend->io_buffer_tail = bh;
		if (previous)
			previous->io_list = ioend;
		*result = ioend;
	} else {
		ioend->io_buffer_tail->b_private = bh;
		ioend->io_buffer_tail = bh;
	}

	bh->b_private = NULL;
	ioend->io_size += bh->b_size;
}

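/*
 * Compute the buffer's disk address from the iomap and mark it mapped.
 */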
STATIC void
xfs_map_buffer(
	struct buffer_head	*bh,
	xfs_iomap_t		*mp,
	xfs_off_t		offset,
	uint			block_bits)
{
	sector_t		bn;

	ASSERT(mp->iomap_bn != IOMAP_DADDR_NULL);

	bn = (mp->iomap_bn >> (block_bits - BBSHIFT)) +
	      ((offset - mp->iomap_offset) >> block_bits);

	ASSERT(bn || (mp->iomap_flags & IOMAP_REALTIME));

	bh->b_blocknr = bn;
	set_buffer_mapped(bh);
}

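/*
 * Lock the buffer and map it to the block given by the iomap at this
 * offset, clearing any delalloc/unwritten state in the process.
 */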
STATIC void
xfs_map_at_offset(
	struct buffer_head	*bh,
	loff_t			offset,
	int			block_bits,
	xfs_iomap_t		*iomapp)
{
	ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE));
	ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY));

	lock_buffer(bh);
	xfs_map_buffer(bh, iomapp, offset, block_bits);
	bh->b_bdev = iomapp->iomap_target->bt_bdev;
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
}

/*
 * Look for a page at index that is suitable for clustering.
 */
STATIC unsigned int
xfs_probe_page(
	struct page		*page,
	unsigned int		pg_offset,
	int			mapped)
{
	int			ret = 0;

	if (PageWriteback(page))
		return 0;

	if (page->mapping && PageDirty(page)) {
		if (page_has_buffers(page)) {
			struct buffer_head	*bh, *head;

			bh = head = page_buffers(page);
			do {
				if (!buffer_uptodate(bh))
					break;
				if (mapped != buffer_mapped(bh))
					break;
				ret += bh->b_size;
				if (ret >= pg_offset)
					break;
			} while ((bh = bh->b_this_page) != head);
		} else
			ret = mapped ? 0 : PAGE_CACHE_SIZE;
	}

	return ret;
}

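/*
 * Probe forward from a buffer to work out how much contiguous, similarly
 * mapped data can be written in one go: first within this page, then
 * across the following pages.
 */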
STATIC size_t
xfs_probe_cluster(
	struct inode		*inode,
	struct page		*startpage,
	struct buffer_head	*bh,
	struct buffer_head	*head,
	int			mapped)
{
	struct pagevec		pvec;
	pgoff_t			tindex, tlast, tloff;
	size_t			total = 0;
	int			done = 0, i;

	/* First sum forwards in this page */
	do {
		if (!buffer_uptodate(bh) || (mapped != buffer_mapped(bh)))
			return total;
		total += bh->b_size;
	} while ((bh = bh->b_this_page) != head);

	/* if we reached the end of the page, sum forwards in following pages */
	tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
	tindex = startpage->index + 1;

	/* Prune this back to avoid pathological behavior */
	tloff = min(tlast, startpage->index + 64);

	pagevec_init(&pvec, 0);
	while (!done && tindex <= tloff) {
		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			size_t pg_offset, pg_len = 0;

			if (tindex == tlast) {
				pg_offset =
				    i_size_read(inode) & (PAGE_CACHE_SIZE - 1);
				if (!pg_offset) {
					done = 1;
					break;
				}
			} else
				pg_offset = PAGE_CACHE_SIZE;

			if (page->index == tindex && trylock_page(page)) {
				pg_len = xfs_probe_page(page, pg_offset, mapped);
				unlock_page(page);
			}

			if (!pg_len) {
				done = 1;
				break;
			}

			total += pg_len;
			tindex++;
		}

		pagevec_release(&pvec);
		cond_resched();
	}

	return total;
}

/*
 * Test if a given page is suitable for writing as part of an unwritten
 * or delayed allocate extent.
 */
STATIC int
xfs_is_delayed_page(
	struct page		*page,
	unsigned int		type)
{
	if (PageWriteback(page))
		return 0;

	if (page->mapping && page_has_buffers(page)) {
		struct buffer_head	*bh, *head;
		int			acceptable = 0;

		bh = head = page_buffers(page);
		do {
			if (buffer_unwritten(bh))
				acceptable = (type == IOMAP_UNWRITTEN);
			else if (buffer_delay(bh))
				acceptable = (type == IOMAP_DELAY);
			else if (buffer_dirty(bh) && buffer_mapped(bh))
				acceptable = (type == IOMAP_NEW);
			else
				break;
		} while ((bh = bh->b_this_page) != head);

		if (acceptable)
			return 1;
	}

	return 0;
}

/*
 * Allocate & map buffers for page given the extent map.  Write it out.
 * Except for the original page of a writepage, this is called on
 * delalloc/unwritten pages only; for the original page it is possible
 * that the page has no mapping at all.
 */
STATIC int
xfs_convert_page(
	struct inode		*inode,
	struct page		*page,
	loff_t			tindex,
	xfs_iomap_t		*mp,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc,
	int			startio,
	int			all_bh)
{
	struct buffer_head	*bh, *head;
	xfs_off_t		end_offset;
	unsigned long		p_offset;
	unsigned int		type;
	int			bbits = inode->i_blkbits;
	int			len, page_dirty;
	int			count = 0, done = 0, uptodate = 1;
	xfs_off_t		offset = page_offset(page);

	if (page->index != tindex)
		goto fail;
	if (!trylock_page(page))
		goto fail;
	if (PageWriteback(page))
		goto fail_unlock_page;
	if (page->mapping != inode->i_mapping)
		goto fail_unlock_page;
	if (!xfs_is_delayed_page(page, (*ioendp)->io_type))
		goto fail_unlock_page;

	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 *
	 * Derivation:
	 *
	 * End offset is the highest offset that this page should represent.
	 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
	 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
	 * hence give us the correct page_dirty count. On any other page,
	 * it will be zero and in that case we need page_dirty to be the
	 * count of buffers on the page.
	 */
	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
			i_size_read(inode));

	len = 1 << inode->i_blkbits;
	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
					PAGE_CACHE_SIZE);
	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
	page_dirty = p_offset / len;

	bh = head = page_buffers(page);
	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh))) {
			done = 1;
			continue;
		}

		if (buffer_unwritten(bh) || buffer_delay(bh)) {
			if (buffer_unwritten(bh))
				type = IOMAP_UNWRITTEN;
			else
				type = IOMAP_DELAY;

			if (!xfs_iomap_valid(mp, offset)) {
				done = 1;
				continue;
			}

			ASSERT(!(mp->iomap_flags & IOMAP_HOLE));
			ASSERT(!(mp->iomap_flags & IOMAP_DELAY));

			xfs_map_at_offset(bh, offset, bbits, mp);
			if (startio) {
				xfs_add_to_ioend(inode, bh, offset,
						type, ioendp, done);
			} else {
				set_buffer_dirty(bh);
				unlock_buffer(bh);
				mark_buffer_dirty(bh);
			}
			page_dirty--;
			count++;
		} else {
			type = IOMAP_NEW;
			if (buffer_mapped(bh) && all_bh && startio) {
				lock_buffer(bh);
				xfs_add_to_ioend(inode, bh, offset,
						type, ioendp, done);
				count++;
				page_dirty--;
			} else {
				done = 1;
			}
		}
	} while (offset += len, (bh = bh->b_this_page) != head);

	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (startio) {
		if (count) {
			wbc->nr_to_write--;
			if (wbc->nr_to_write <= 0)
				done = 1;
		}
		xfs_start_page_writeback(page, !page_dirty, count);
	}

	return done;
 fail_unlock_page:
	unlock_page(page);
 fail:
	return 1;
}

/*
 * Convert & write out a cluster of pages in the same extent as defined
 * by mp and following the start page.
 */
STATIC void
xfs_cluster_write(
	struct inode		*inode,
	pgoff_t			tindex,
	xfs_iomap_t		*iomapp,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc,
	int			startio,
	int			all_bh,
	pgoff_t			tlast)
{
	struct pagevec		pvec;
	int			done = 0, i;

	pagevec_init(&pvec, 0);
	while (!done && tindex <= tlast) {
		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			done = xfs_convert_page(inode, pvec.pages[i], tindex++,
					iomapp, ioendp, wbc, startio, all_bh);
			if (done)
				break;
		}

		pagevec_release(&pvec);
		cond_resched();
	}
}

/*
 * Calling this without startio set means we are being asked to make a dirty
 * page ready for freeing its buffers.  When called with startio set then
 * we are coming from writepage.
 *
 * When called with startio set it is important that we write the WHOLE
 * page if possible.
 * The bh->b_state's cannot know whether any of the blocks (or which block,
 * for that matter) are dirty due to mmap writes, and therefore bh uptodate
 * is only valid if the page itself isn't completely uptodate.  Some layers
 * may clear the page dirty flag prior to calling write page, under the
 * assumption the entire page will be written out; by not writing out the
 * whole page the page can be reused before all valid dirty data is
 * written out.  Note: in the case of a page that has been dirtied by
 * mmap write but only partially set up by block_prepare_write, the
 * bh->b_state's will not agree and only the ones set up by BPW/BCW will
 * have valid state; thus the whole page must be written out.
 */
STATIC int
xfs_page_state_convert(
	struct inode	*inode,
	struct page	*page,
	struct writeback_control *wbc,
	int		startio,
	int		unmapped) /* also implies page uptodate */
{
	struct buffer_head	*bh, *head;
	xfs_iomap_t		iomap;
	xfs_ioend_t		*ioend = NULL, *iohead = NULL;
	loff_t			offset;
	unsigned long		p_offset = 0;
	unsigned int		type;
	__uint64_t		end_offset;
	pgoff_t			end_index, last_index, tlast;
	ssize_t			size, len;
	int			flags, err, iomap_valid = 0, uptodate = 1;
	int			page_dirty, count = 0;
	int			trylock = 0;
	int			all_bh = unmapped;

	if (startio) {
		if (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking)
			trylock |= BMAPI_TRYLOCK;
	}

	/* Is this page beyond the end of the file? */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_CACHE_SHIFT;
	last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
	if (page->index >= end_index) {
		if ((page->index >= end_index + 1) ||
		    !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
			if (startio)
				unlock_page(page);
			return 0;
		}
	}

	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 *
	 * Derivation:
	 *
	 * End offset is the highest offset that this page should represent.
	 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
	 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
	 * hence give us the correct page_dirty count. On any other page,
	 * it will be zero and in that case we need page_dirty to be the
	 * count of buffers on the page.
	 */
	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset);
	len = 1 << inode->i_blkbits;
	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
					PAGE_CACHE_SIZE);
	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
	page_dirty = p_offset / len;

	bh = head = page_buffers(page);
	offset = page_offset(page);
	flags = BMAPI_READ;
	type = IOMAP_NEW;

	/* TODO: cleanup count and page_dirty */

	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio) {
			/*
			 * the iomap is actually still valid, but the ioend
			 * isn't.  shouldn't happen too often.
			 */
			iomap_valid = 0;
			continue;
		}

		if (iomap_valid)
			iomap_valid = xfs_iomap_valid(&iomap, offset);

		/*
		 * First case, map an unwritten extent and prepare for
		 * extent state conversion transaction on completion.
		 *
		 * Second case, allocate space for a delalloc buffer.
		 * We can return EAGAIN here in the release page case.
		 *
		 * Third case, an unmapped buffer was found, and we are
		 * in a path where we need to write the whole page out.
		 */
		if (buffer_unwritten(bh) || buffer_delay(bh) ||
		    ((buffer_uptodate(bh) || PageUptodate(page)) &&
		     !buffer_mapped(bh) && (unmapped || startio))) {
			int new_ioend = 0;

			/*
			 * Make sure we don't use a read-only iomap
			 */
			if (flags == BMAPI_READ)
				iomap_valid = 0;

			if (buffer_unwritten(bh)) {
				type = IOMAP_UNWRITTEN;
				flags = BMAPI_WRITE | BMAPI_IGNSTATE;
			} else if (buffer_delay(bh)) {
				type = IOMAP_DELAY;
				flags = BMAPI_ALLOCATE | trylock;
			} else {
				type = IOMAP_NEW;
				flags = BMAPI_WRITE | BMAPI_MMAP;
			}

			if (!iomap_valid) {
				/*
				 * if we didn't have a valid mapping then we
				 * need to ensure that we put the new mapping
				 * in a new ioend structure. This needs to be
				 * done to ensure that the ioends correctly
				 * reflect the block mappings at io completion
				 * for unwritten extent conversion.
				 */
				new_ioend = 1;
				if (type == IOMAP_NEW) {
					size = xfs_probe_cluster(inode,
							page, bh, head, 0);
				} else {
					size = len;
				}

				err = xfs_map_blocks(inode, offset, size,
						&iomap, flags);
				if (err)
					goto error;
				iomap_valid = xfs_iomap_valid(&iomap, offset);
			}
			if (iomap_valid) {
				xfs_map_at_offset(bh, offset,
						inode->i_blkbits, &iomap);
				if (startio) {
					xfs_add_to_ioend(inode, bh, offset,
							type, &ioend,
							new_ioend);
				} else {
					set_buffer_dirty(bh);
					unlock_buffer(bh);
					mark_buffer_dirty(bh);
				}
				page_dirty--;
				count++;
			}
		} else if (buffer_uptodate(bh) && startio) {
			/*
			 * we got here because the buffer is already mapped.
			 * That means it must already have extents allocated
			 * underneath it. Map the extent by reading it.
			 */
			if (!iomap_valid || flags != BMAPI_READ) {
				flags = BMAPI_READ;
				size = xfs_probe_cluster(inode, page, bh,
								head, 1);
				err = xfs_map_blocks(inode, offset, size,
						&iomap, flags);
				if (err)
					goto error;
				iomap_valid = xfs_iomap_valid(&iomap, offset);
			}

			/*
			 * We set the type to IOMAP_NEW in case we are doing a
			 * small write at EOF that is extending the file but
			 * without needing an allocation. We need to update the
			 * file size on I/O completion in this case so it is
			 * the same case as having just allocated a new extent
			 * that we are writing into for the first time.
			 */
			type = IOMAP_NEW;
			if (trylock_buffer(bh)) {
				ASSERT(buffer_mapped(bh));
				if (iomap_valid)
					all_bh = 1;
				xfs_add_to_ioend(inode, bh, offset, type,
						&ioend, !iomap_valid);
				page_dirty--;
				count++;
			} else {
				iomap_valid = 0;
			}
		} else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
			   (unmapped || startio)) {
			iomap_valid = 0;
		}

		if (!iohead)
			iohead = ioend;

	} while (offset += len, ((bh = bh->b_this_page) != head));

	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (startio)
		xfs_start_page_writeback(page, 1, count);

	if (ioend && iomap_valid) {
		offset = (iomap.iomap_offset + iomap.iomap_bsize - 1) >>
					PAGE_CACHE_SHIFT;
		tlast = min_t(pgoff_t, offset, last_index);
		xfs_cluster_write(inode, page->index + 1, &iomap, &ioend,
					wbc, startio, all_bh, tlast);
	}

	if (iohead)
		xfs_submit_ioend(wbc, iohead);

	return page_dirty;

error:
	if (iohead)
		xfs_cancel_ioend(iohead);

	/*
	 * If it's delalloc and we have nowhere to put it,
	 * throw it away, unless the lower layers told
	 * us to try again.
	 */
	if (err != -EAGAIN) {
		if (!unmapped)
			block_invalidatepage(page, 0);
		ClearPageUptodate(page);
	}
	return err;
}

/*
 * writepage: Called from one of two places:
 *
 * 1. we are flushing a delalloc buffer head.
 *
 * 2. we are writing out a dirty page. Typically the page dirty
 *    state is cleared before we get here. In this case it is
 *    conceivable we have no buffer heads.
 *
 * For delalloc space on the page we need to allocate space and
 * flush it. For unmapped buffer heads on the page we should
 * allocate space if the page is uptodate. For any other dirty
 * buffer heads on the page we should flush them.
 *
 * If we detect that a transaction would be required to flush
 * the page, we have to check the process flags first: if we
 * are already in a transaction or disk I/O during allocations
 * is off, we need to fail the writepage and redirty the page.
 */
STATIC int
xfs_vm_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	int			error;
	int			need_trans;
	int			delalloc, unmapped, unwritten;
	struct inode		*inode = page->mapping->host;

	xfs_page_trace(XFS_WRITEPAGE_ENTER, inode, page, 0);

	/*
	 * We need a transaction if:
	 *  1. There are delalloc buffers on the page
	 *  2. The page is uptodate and we have unmapped buffers
	 *  3. The page is uptodate and we have no buffers
	 *  4. There are unwritten buffers on the page
	 */

	if (!page_has_buffers(page)) {
		unmapped = 1;
		need_trans = 1;
	} else {
		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
		if (!PageUptodate(page))
			unmapped = 0;
		need_trans = delalloc + unmapped + unwritten;
	}

	/*
	 * If we need a transaction and the process flags say
	 * we are already in a transaction, or no IO is allowed
	 * then mark the page dirty again and leave the page
	 * as is.
	 */
	if (current_test_flags(PF_FSTRANS) && need_trans)
		goto out_fail;

	/*
	 * Delay hooking up buffer heads until we have
	 * made our go/no-go decision.
	 */
	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << inode->i_blkbits, 0);

	/*
	 * VM calculation for nr_to_write seems off.  Bump it way
	 * up, this gets simple streaming writes zippy again.
	 * To be reviewed again after Jens' writeback changes.
	 */
	wbc->nr_to_write *= 4;

	/*
	 * Convert delayed allocate, unwritten or unmapped space
	 * to real space and flush out to disk.
	 */
	error = xfs_page_state_convert(inode, page, wbc, 1, unmapped);
	if (error == -EAGAIN)
		goto out_fail;
	if (unlikely(error < 0))
		goto out_unlock;

	return 0;

out_fail:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
out_unlock:
	unlock_page(page);
	return error;
}

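/*
 * Clear the inode's "truncated" flag (set by the truncate path) and fall
 * back to the generic writepages implementation.
 */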
STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	return generic_writepages(mapping, wbc);
}

/*
 * Called to move a page into cleanable state - and from there
 * to be released. Possibly the page is already clean. We always
 * have buffer heads in this call.
 *
 * Returns non-zero if the page was released, 0 otherwise.
 *
 * Possible scenarios are:
 *
 * 1. We are being called to release a page which has been written
 *    to via regular I/O. buffer heads will be dirty and possibly
 *    delalloc. If no delalloc buffer heads in this case then we
 *    can just release the page.
 *
 * 2. We are called to release a page which has been written via
 *    mmap, all we need to do is ensure there is no delalloc
 *    state in the buffer heads; if there is none we can let the
 *    caller free them and we should come back later via writepage.
 */
STATIC int
xfs_vm_releasepage(
	struct page		*page,
	gfp_t			gfp_mask)
{
	struct inode		*inode = page->mapping->host;
	int			dirty, delalloc, unmapped, unwritten;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 1,
	};

	xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, 0);

	if (!page_has_buffers(page))
		return 0;

	xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
	if (!delalloc && !unwritten)
		goto free_buffers;

	if (!(gfp_mask & __GFP_FS))
		return 0;

	/*
	 * If we are already inside a transaction or the thread cannot
	 * do I/O, we cannot release this page.
	 */
	if (current_test_flags(PF_FSTRANS))
		return 0;

	/*
	 * Convert delalloc space to real space, do not flush the
	 * data out to disk, that will be done by the caller.
	 * Never need to allocate space here - we will always
	 * come back to writepage in that case.
	 */
	dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0);
	if (dirty == 0 && !unwritten)
		goto free_buffers;
	return 0;

free_buffers:
	return try_to_free_buffers(page);
}

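/*
 * Common implementation behind the get_blocks callbacks: map the file
 * range starting at iblock with xfs_iomap() and translate the result
 * into buffer_head state - a disk mapping plus the unwritten, new and
 * delay bits the generic I/O paths key off.  Shared by the buffered
 * and direct I/O cases.
 */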
STATIC int
__xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create,
	int			direct,
	bmapi_flags_t		flags)
{
	xfs_iomap_t		iomap;
	xfs_off_t		offset;
	ssize_t			size;
	int			niomap = 1;
	int			error;

	offset = (xfs_off_t)iblock << inode->i_blkbits;
	ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
	size = bh_result->b_size;

	if (!create && direct && offset >= i_size_read(inode))
		return 0;

	error = xfs_iomap(XFS_I(inode), offset, size,
			  create ? flags : BMAPI_READ, &iomap, &niomap);
	if (error)
		return -error;
	if (niomap == 0)
		return 0;

	if (iomap.iomap_bn != IOMAP_DADDR_NULL) {
		/*
		 * For unwritten extents do not report a disk address on
		 * the read case (treat as if we're reading into a hole).
		 */
		if (create || !(iomap.iomap_flags & IOMAP_UNWRITTEN)) {
			xfs_map_buffer(bh_result, &iomap, offset,
				       inode->i_blkbits);
		}
		if (create && (iomap.iomap_flags & IOMAP_UNWRITTEN)) {
			if (direct)
				bh_result->b_private = inode;
			set_buffer_unwritten(bh_result);
		}
	}

	/*
	 * If this is a realtime file, data may be on a different device
	 * to that pointed to from the buffer_head b_bdev currently.
	 */
	bh_result->b_bdev = iomap.iomap_target->bt_bdev;

	/*
	 * If we previously allocated a block out beyond eof and we are now
	 * coming back to use it then we will need to flag it as new even if it
	 * has a disk address.
	 *
	 * With sub-block writes into unwritten extents we also need to mark
	 * the buffer as new so that the unwritten parts of the buffer get
	 * correctly zeroed.
	 */
	if (create &&
	    ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
	     (offset >= i_size_read(inode)) ||
	     (iomap.iomap_flags & (IOMAP_NEW|IOMAP_UNWRITTEN))))
		set_buffer_new(bh_result);

	if (iomap.iomap_flags & IOMAP_DELAY) {
		BUG_ON(direct);
		if (create) {
			set_buffer_uptodate(bh_result);
			set_buffer_mapped(bh_result);
			set_buffer_delay(bh_result);
		}
	}

	if (direct || size > (1 << inode->i_blkbits)) {
		ASSERT(iomap.iomap_bsize - iomap.iomap_delta > 0);
		offset = min_t(xfs_off_t,
			       iomap.iomap_bsize - iomap.iomap_delta, size);
		bh_result->b_size = (ssize_t)min_t(xfs_off_t, LONG_MAX, offset);
	}

	return 0;
}

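/*
 * Block mapping callback for the buffered I/O paths (readpage(s),
 * write_begin and bmap below).
 */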
int
xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock,
				bh_result, create, 0, BMAPI_WRITE);
}

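/*
 * Block mapping callback for the direct I/O path.  In addition to
 * BMAPI_WRITE we pass BMAPI_DIRECT so xfs_iomap() can distinguish
 * direct writes from buffered ones.
 */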
STATIC int
xfs_get_blocks_direct(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock,
				bh_result, create, 1, BMAPI_WRITE|BMAPI_DIRECT);
}

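/*
 * Completion handler for direct I/O, called once the request has hit
 * the disk.  May run from interrupt context - see the comment below
 * on why unwritten extent conversion has to be deferred.
 */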
STATIC void
xfs_end_io_direct(
	struct kiocb	*iocb,
	loff_t		offset,
	ssize_t		size,
	void		*private)
{
	xfs_ioend_t	*ioend = iocb->private;

	/*
	 * Non-NULL private data means we need to issue a transaction to
	 * convert a range from unwritten to written extents.  This needs
	 * to happen from process context but aio+dio I/O completion
	 * happens from irq context so we need to defer it to a workqueue.
	 * This is not necessary for synchronous direct I/O, but we do
	 * it anyway to keep the code uniform and simpler.
	 *
	 * Well, if only it were that simple. Because synchronous direct I/O
	 * requires extent conversion to occur *before* we return to
	 * userspace, we have to wait for extent conversion to complete.
	 * Look at the iocb that has been passed to us to determine if this
	 * is AIO or not. If it is synchronous, tell xfs_finish_ioend() to
	 * kick the workqueue and wait for it to complete.
	 *
	 * The core direct I/O code might be changed to always call the
	 * completion handler in the future, in which case all this can
	 * go away.
	 */
	ioend->io_offset = offset;
	ioend->io_size = size;
	if (ioend->io_type == IOMAP_READ) {
		xfs_finish_ioend(ioend, 0);
	} else if (private && size > 0) {
		xfs_finish_ioend(ioend, is_sync_kiocb(iocb));
	} else {
		/*
		 * A direct I/O write ioend starts its life in unwritten
		 * state in case it maps an unwritten extent.  This write
		 * didn't map an unwritten extent so switch its completion
		 * handler.
		 */
		ioend->io_type = IOMAP_NEW;
		xfs_finish_ioend(ioend, 0);
	}

	/*
	 * blockdev_direct_IO can return an error even after the I/O
	 * completion handler was called.  Thus we need to protect
	 * against double-freeing.
	 */
	iocb->private = NULL;
}

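/*
 * ->direct_IO entry point: allocate an ioend to track the request,
 * then hand off to the appropriate generic helper - the "own locking"
 * variant for writes (XFS takes its own inode locks) and the unlocked
 * variant for reads.
 */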
STATIC ssize_t
xfs_vm_direct_IO(
	int			rw,
	struct kiocb		*iocb,
	const struct iovec	*iov,
	loff_t			offset,
	unsigned long		nr_segs)
{
	struct file	*file = iocb->ki_filp;
	struct inode	*inode = file->f_mapping->host;
	struct block_device *bdev;
	ssize_t		ret;

	bdev = xfs_find_bdev_for_inode(XFS_I(inode));

	if (rw == WRITE) {
		iocb->private = xfs_alloc_ioend(inode, IOMAP_UNWRITTEN);
		ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
			bdev, iov, offset, nr_segs,
			xfs_get_blocks_direct,
			xfs_end_io_direct);
	} else {
		iocb->private = xfs_alloc_ioend(inode, IOMAP_READ);
		ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
			bdev, iov, offset, nr_segs,
			xfs_get_blocks_direct,
			xfs_end_io_direct);
	}

	if (unlikely(ret != -EIOCBQUEUED && iocb->private))
		xfs_destroy_ioend(iocb->private);
	return ret;
}

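/* Defer to the generic helper; blocks are mapped via xfs_get_blocks(). */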
STATIC int
xfs_vm_write_begin(
	struct file		*file,
	struct address_space	*mapping,
	loff_t			pos,
	unsigned		len,
	unsigned		flags,
	struct page		**pagep,
	void			**fsdata)
{
	*pagep = NULL;
	return block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				 xfs_get_blocks);
}

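/*
 * ->bmap callers (e.g. FIBMAP) want on-disk block numbers, so flush
 * delayed allocations out first to make sure the mapping reported by
 * generic_block_bmap() refers to real, allocated blocks.
 */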
STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct inode		*inode = (struct inode *)mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);

	xfs_itrace_entry(XFS_I(inode));
	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	xfs_flush_pages(ip, (xfs_off_t)0, -1, 0, FI_REMAPF);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return generic_block_bmap(mapping, block, xfs_get_blocks);
}

STATIC int
xfs_vm_readpage(
	struct file		*unused,
	struct page		*page)
{
	return mpage_readpage(page, xfs_get_blocks);
}

STATIC int
xfs_vm_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
}

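/* Trace the invalidation, then let the generic buffer code do the work. */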
STATIC void
xfs_vm_invalidatepage(
	struct page		*page,
	unsigned long		offset)
{
	xfs_page_trace(XFS_INVALIDPAGE_ENTER,
			page->mapping->host, page, offset);
	block_invalidatepage(page, offset);
}

const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readpages		= xfs_vm_readpages,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.sync_page		= block_sync_page,
	.releasepage		= xfs_vm_releasepage,
	.invalidatepage		= xfs_vm_invalidatepage,
	.write_begin		= xfs_vm_write_begin,
	.write_end		= generic_write_end,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= xfs_vm_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};