/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_trans.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_error.h"
#include "xfs_rw.h"
#include "xfs_iomap.h"
#include "xfs_vnodeops.h"
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>

/*
 * Prime number of hash buckets since address is used as the key.
 */
#define NVSYNC		37
#define to_ioend_wq(v)	(&xfs_ioend_wq[((unsigned long)v) % NVSYNC])
static wait_queue_head_t xfs_ioend_wq[NVSYNC];

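/*
 * Wait queue mechanics: i_iocount on each inode counts in-flight ioends;
 * xfs_ioend_wait() sleeps until it drops to zero, and xfs_ioend_wake()
 * drops a reference and wakes any sleepers on the hashed queue.
 */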
void __init
xfs_ioend_init(void)
{
	int i;

	for (i = 0; i < NVSYNC; i++)
		init_waitqueue_head(&xfs_ioend_wq[i]);
}

void
xfs_ioend_wait(
	xfs_inode_t	*ip)
{
	wait_queue_head_t *wq = to_ioend_wq(ip);

	wait_event(*wq, (atomic_read(&ip->i_iocount) == 0));
}

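/*
 * Drop one i_iocount reference; the last one out wakes anyone waiting
 * in xfs_ioend_wait() above.
 */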
STATIC void
xfs_ioend_wake(
	xfs_inode_t	*ip)
{
	if (atomic_dec_and_test(&ip->i_iocount))
		wake_up(to_ioend_wq(ip));
}

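/*
 * Classify the buffer_heads on a page: report whether any of them are
 * delalloc, unmapped-but-uptodate, or unwritten.
 */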
STATIC void
xfs_count_page_state(
	struct page		*page,
	int			*delalloc,
	int			*unmapped,
	int			*unwritten)
{
	struct buffer_head	*bh, *head;

	*delalloc = *unmapped = *unwritten = 0;

	bh = head = page_buffers(page);
	do {
		if (buffer_uptodate(bh) && !buffer_mapped(bh))
			(*unmapped) = 1;
		else if (buffer_unwritten(bh))
			(*unwritten) = 1;
		else if (buffer_delay(bh))
			(*delalloc) = 1;
	} while ((bh = bh->b_this_page) != head);
}

#if defined(XFS_RW_TRACE)
void
xfs_page_trace(
	int		tag,
	struct inode	*inode,
	struct page	*page,
	unsigned long	pgoff)
{
	xfs_inode_t	*ip;
	loff_t		isize = i_size_read(inode);
	loff_t		offset = page_offset(page);
	int		delalloc = -1, unmapped = -1, unwritten = -1;

	if (page_has_buffers(page))
		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);

	ip = XFS_I(inode);
	if (!ip->i_rwtrace)
		return;

	ktrace_enter(ip->i_rwtrace,
		(void *)((unsigned long)tag),
		(void *)ip,
		(void *)inode,
		(void *)page,
		(void *)pgoff,
		(void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
		(void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
		(void *)((unsigned long)((isize >> 32) & 0xffffffff)),
		(void *)((unsigned long)(isize & 0xffffffff)),
		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
		(void *)((unsigned long)(offset & 0xffffffff)),
		(void *)((unsigned long)delalloc),
		(void *)((unsigned long)unmapped),
		(void *)((unsigned long)unwritten),
		(void *)((unsigned long)current_pid()),
		(void *)NULL);
}
#else
#define xfs_page_trace(tag, inode, page, pgoff)
#endif

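/*
 * Realtime files carry their data on the realtime device; everything
 * else lives on the data device.
 */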
STATIC struct block_device *
xfs_find_bdev_for_inode(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_bdev;
	else
		return mp->m_ddev_targp->bt_bdev;
}

/*
 * We're now finished for good with this ioend structure.
 * Update the page state via the associated buffer_heads,
 * release holds on the inode and bio, and finally free
 * up memory.  Do not use the ioend after this.
 */
STATIC void
xfs_destroy_ioend(
	xfs_ioend_t		*ioend)
{
	struct buffer_head	*bh, *next;
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);

	for (bh = ioend->io_buffer_head; bh; bh = next) {
		next = bh->b_private;
		bh->b_end_io(bh, !ioend->io_error);
	}

	/*
	 * Volume managers supporting multiple paths can send back ENODEV
	 * when the final path disappears.  In this case continuing to fill
	 * the page cache with dirty data which cannot be written out is
	 * evil, so prevent that.
	 */
	if (unlikely(ioend->io_error == -ENODEV)) {
		xfs_do_force_shutdown(ip->i_mount, SHUTDOWN_DEVICE_REQ,
				      __FILE__, __LINE__);
	}

	xfs_ioend_wake(ip);
	mempool_free(ioend, xfs_ioend_pool);
}

/*
 * Update on-disk file size now that data has been written to disk.
 * The current in-memory file size is i_size.  If a write is beyond
 * eof i_new_size will be the intended file size until i_size is
 * updated.  If this write does not extend all the way to the valid
 * file size then restrict this update to the end of the write.
 */
STATIC void
xfs_setfilesize(
	xfs_ioend_t		*ioend)
{
	xfs_inode_t		*ip = XFS_I(ioend->io_inode);
	xfs_fsize_t		isize;
	xfs_fsize_t		bsize;

	ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
	ASSERT(ioend->io_type != IOMAP_READ);

	if (unlikely(ioend->io_error))
		return;

	bsize = ioend->io_offset + ioend->io_size;

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	isize = MAX(ip->i_size, ip->i_new_size);
	isize = MIN(isize, bsize);

	if (ip->i_d.di_size < isize) {
		ip->i_d.di_size = isize;
		ip->i_update_core = 1;
		ip->i_update_size = 1;
		xfs_mark_inode_dirty_sync(ip);
	}

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
}

/*
 * Buffered IO write completion for delayed allocate extents.
 */
STATIC void
xfs_end_bio_delalloc(
	struct work_struct	*work)
{
	xfs_ioend_t		*ioend =
		container_of(work, xfs_ioend_t, io_work);

	xfs_setfilesize(ioend);
	xfs_destroy_ioend(ioend);
}

/*
 * Buffered IO write completion for regular, written extents.
 */
STATIC void
xfs_end_bio_written(
	struct work_struct	*work)
{
	xfs_ioend_t		*ioend =
		container_of(work, xfs_ioend_t, io_work);

	xfs_setfilesize(ioend);
	xfs_destroy_ioend(ioend);
}

/*
 * IO write completion for unwritten extents.
 *
 * Issue transactions to convert a buffer range from unwritten
 * to written extents.
 */
STATIC void
xfs_end_bio_unwritten(
	struct work_struct	*work)
{
	xfs_ioend_t		*ioend =
		container_of(work, xfs_ioend_t, io_work);
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	xfs_off_t		offset = ioend->io_offset;
	size_t			size = ioend->io_size;

	if (likely(!ioend->io_error)) {
		if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
			int error;
			error = xfs_iomap_write_unwritten(ip, offset, size);
			if (error)
				ioend->io_error = error;
		}
		xfs_setfilesize(ioend);
	}
	xfs_destroy_ioend(ioend);
}

/*
 * IO read completion for regular, written extents.
 */
STATIC void
xfs_end_bio_read(
	struct work_struct	*work)
{
	xfs_ioend_t		*ioend =
		container_of(work, xfs_ioend_t, io_work);

	xfs_destroy_ioend(ioend);
}

/*
 * Schedule IO completion handling on an xfsdatad if this was
 * the final hold on this ioend.  If we are asked to wait,
 * flush the workqueue.
 */
STATIC void
xfs_finish_ioend(
	xfs_ioend_t	*ioend,
	int		wait)
{
	if (atomic_dec_and_test(&ioend->io_remaining)) {
		struct workqueue_struct *wq = xfsdatad_workqueue;
		if (ioend->io_work.func == xfs_end_bio_unwritten)
			wq = xfsconvertd_workqueue;

		queue_work(wq, &ioend->io_work);
		if (wait)
			flush_workqueue(wq);
	}
}

/*
 * Allocate and initialise an IO completion structure.
 * We need to track unwritten extent write completion here initially.
 * We'll need to extend this for updating the ondisk inode size later
 * (vs. incore size).
 */
STATIC xfs_ioend_t *
xfs_alloc_ioend(
	struct inode		*inode,
	unsigned int		type)
{
	xfs_ioend_t		*ioend;

	ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);

	/*
	 * Set the count to 1 initially, which will prevent an I/O
	 * completion callback from being run before we have started
	 * all the I/O, i.e. from calling the completion routine too
	 * early.
	 */
	atomic_set(&ioend->io_remaining, 1);
	ioend->io_error = 0;
	ioend->io_list = NULL;
	ioend->io_type = type;
	ioend->io_inode = inode;
	ioend->io_buffer_head = NULL;
	ioend->io_buffer_tail = NULL;
	atomic_inc(&XFS_I(ioend->io_inode)->i_iocount);
	ioend->io_offset = 0;
	ioend->io_size = 0;

	if (type == IOMAP_UNWRITTEN)
		INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten);
	else if (type == IOMAP_DELAY)
		INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc);
	else if (type == IOMAP_READ)
		INIT_WORK(&ioend->io_work, xfs_end_bio_read);
	else
		INIT_WORK(&ioend->io_work, xfs_end_bio_written);

	return ioend;
}

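/*
 * Thin wrapper around xfs_iomap() asking for a single mapping; note the
 * negation so callers see a negative errno, as the error paths in this
 * file expect.
 */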
STATIC int
xfs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	ssize_t			count,
	xfs_iomap_t		*mapp,
	int			flags)
{
	int			nmaps = 1;

	return -xfs_iomap(XFS_I(inode), offset, count, flags, mapp, &nmaps);
}

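/*
 * Does the cached mapping cover the given file offset?
 */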
STATIC_INLINE int
xfs_iomap_valid(
	xfs_iomap_t		*iomapp,
	loff_t			offset)
{
	return offset >= iomapp->iomap_offset &&
		offset < iomapp->iomap_offset + iomapp->iomap_bsize;
}

/*
 * BIO completion handler for buffered IO.
 */
STATIC void
xfs_end_bio(
	struct bio		*bio,
	int			error)
{
	xfs_ioend_t		*ioend = bio->bi_private;

	ASSERT(atomic_read(&bio->bi_cnt) >= 1);
	ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;

	/* Toss bio and pass work off to an xfsdatad thread */
	bio->bi_private = NULL;
	bio->bi_end_io = NULL;
	bio_put(bio);

	xfs_finish_ioend(ioend, 0);
}

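/*
 * Take an extra reference on the ioend for the bio we are about to
 * issue, then submit it.  The reference taken by bio_get() in
 * xfs_alloc_ioend_bio() keeps the bio alive across submit_bio() so the
 * BIO_EOPNOTSUPP flag can still be checked before the final bio_put().
 */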
STATIC void
xfs_submit_ioend_bio(
	xfs_ioend_t	*ioend,
	struct bio	*bio)
{
	atomic_inc(&ioend->io_remaining);

	bio->bi_private = ioend;
	bio->bi_end_io = xfs_end_bio;

	submit_bio(WRITE, bio);
	ASSERT(!bio_flagged(bio, BIO_EOPNOTSUPP));
	bio_put(bio);
}

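/*
 * Allocate a bio for the buffer's device, halving the requested vector
 * count until the GFP_NOIO allocation succeeds.
 */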
STATIC struct bio *
xfs_alloc_ioend_bio(
	struct buffer_head	*bh)
{
	struct bio		*bio;
	int			nvecs = bio_get_nr_vecs(bh->b_bdev);

	do {
		bio = bio_alloc(GFP_NOIO, nvecs);
		nvecs >>= 1;
	} while (!bio);

	ASSERT(bio->bi_private == NULL);
	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	bio_get(bio);
	return bio;
}

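/*
 * Move a mapped, locked buffer into async-write state ready to be
 * packed into a bio.
 */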
STATIC void
xfs_start_buffer_writeback(
	struct buffer_head	*bh)
{
	ASSERT(buffer_mapped(bh));
	ASSERT(buffer_locked(bh));
	ASSERT(!buffer_delay(bh));
	ASSERT(!buffer_unwritten(bh));

	mark_buffer_async_write(bh);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
}

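/*
 * Flip the page into writeback state and unlock it, optionally clearing
 * the dirty bit first.
 */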
STATIC void
xfs_start_page_writeback(
	struct page		*page,
	int			clear_dirty,
	int			buffers)
{
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));
	if (clear_dirty)
		clear_page_dirty_for_io(page);
	set_page_writeback(page);
	unlock_page(page);
	/* If no buffers on the page are to be written, finish it here */
	if (!buffers)
		end_page_writeback(page);
}

static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}

/*
 * Submit all of the bios for all of the ioends we have saved up, covering the
 * initial writepage page and also any probed pages.
 *
 * Because we may have multiple ioends spanning a page, we need to start
 * writeback on all the buffers before we submit them for I/O. If we mark the
 * buffers as we go, then we can end up with a page that only has some buffers
 * marked async write, and I/O completion on those can occur before we mark
 * the other buffers async write.
 *
 * The end result of this is that we trip a bug in end_page_writeback() because
 * we call it twice for the one page as the code in end_buffer_async_write()
 * assumes that all buffers on the page are started at the same time.
 *
 * The fix is two passes across the ioend list - one to start writeback on the
 * buffer_heads, and then submit them for I/O on the second pass.
 */
STATIC void
xfs_submit_ioend(
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*head = ioend;
	xfs_ioend_t		*next;
	struct buffer_head	*bh;
	struct bio		*bio;
	sector_t		lastblock = 0;

	/* Pass 1 - start writeback */
	do {
		next = ioend->io_list;
		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
			xfs_start_buffer_writeback(bh);
		}
	} while ((ioend = next) != NULL);

	/* Pass 2 - submit I/O */
	ioend = head;
	do {
		next = ioend->io_list;
		bio = NULL;

		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {

			if (!bio) {
 retry:
				bio = xfs_alloc_ioend_bio(bh);
			} else if (bh->b_blocknr != lastblock + 1) {
				xfs_submit_ioend_bio(ioend, bio);
				goto retry;
			}

			if (bio_add_buffer(bio, bh) != bh->b_size) {
				xfs_submit_ioend_bio(ioend, bio);
				goto retry;
			}

			lastblock = bh->b_blocknr;
		}
		if (bio)
			xfs_submit_ioend_bio(ioend, bio);
		xfs_finish_ioend(ioend, 0);
	} while ((ioend = next) != NULL);
}

/*
 * Cancel submission of all buffer_heads so far in this endio.
 * Toss the endio too.  Only ever called for the initial page
 * in a writepage request, so only ever one page.
 */
STATIC void
xfs_cancel_ioend(
	xfs_ioend_t	*ioend)
{
	xfs_ioend_t	*next;
	struct buffer_head *bh, *next_bh;

	do {
		next = ioend->io_list;
		bh = ioend->io_buffer_head;
		do {
			next_bh = bh->b_private;
			clear_buffer_async_write(bh);
			unlock_buffer(bh);
		} while ((bh = next_bh) != NULL);

		xfs_ioend_wake(XFS_I(ioend->io_inode));
		mempool_free(ioend, xfs_ioend_pool);
	} while ((ioend = next) != NULL);
}

/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 * *result is updated to point at the ioend the buffer was added to.
 */
STATIC void
xfs_add_to_ioend(
	struct inode		*inode,
	struct buffer_head	*bh,
	xfs_off_t		offset,
	unsigned int		type,
	xfs_ioend_t		**result,
	int			need_ioend)
{
	xfs_ioend_t		*ioend = *result;

	if (!ioend || need_ioend || type != ioend->io_type) {
		xfs_ioend_t	*previous = *result;

		ioend = xfs_alloc_ioend(inode, type);
		ioend->io_offset = offset;
		ioend->io_buffer_head = bh;
		ioend->io_buffer_tail = bh;
		if (previous)
			previous->io_list = ioend;
		*result = ioend;
	} else {
		ioend->io_buffer_tail->b_private = bh;
		ioend->io_buffer_tail = bh;
	}

	bh->b_private = NULL;
	ioend->io_size += bh->b_size;
}

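/*
 * Attach the disk mapping to a buffer: iomap_bn is in 512-byte basic
 * blocks, hence the shift by (block_bits - BBSHIFT) to convert it to
 * filesystem blocks before adding the block offset within the mapping.
 */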
STATIC void
xfs_map_buffer(
	struct buffer_head	*bh,
	xfs_iomap_t		*mp,
	xfs_off_t		offset,
	uint			block_bits)
{
	sector_t		bn;

	ASSERT(mp->iomap_bn != IOMAP_DADDR_NULL);

	bn = (mp->iomap_bn >> (block_bits - BBSHIFT)) +
	      ((offset - mp->iomap_offset) >> block_bits);

	ASSERT(bn || (mp->iomap_flags & IOMAP_REALTIME));

	bh->b_blocknr = bn;
	set_buffer_mapped(bh);
}

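/*
 * Wire an iomap into a buffer_head: set the block number and target
 * device, and clear the delay/unwritten bits now that real blocks back
 * the buffer.
 */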
STATIC void
xfs_map_at_offset(
	struct buffer_head	*bh,
	loff_t			offset,
	int			block_bits,
	xfs_iomap_t		*iomapp)
{
	ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE));
	ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY));

	lock_buffer(bh);
	xfs_map_buffer(bh, iomapp, offset, block_bits);
	bh->b_bdev = iomapp->iomap_target->bt_bdev;
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
}

/*
 * Look for a page at index that is suitable for clustering.
 */
STATIC unsigned int
xfs_probe_page(
	struct page		*page,
	unsigned int		pg_offset,
	int			mapped)
{
	int			ret = 0;

	if (PageWriteback(page))
		return 0;

	if (page->mapping && PageDirty(page)) {
		if (page_has_buffers(page)) {
			struct buffer_head	*bh, *head;

			bh = head = page_buffers(page);
			do {
				if (!buffer_uptodate(bh))
					break;
				if (mapped != buffer_mapped(bh))
					break;
				ret += bh->b_size;
				if (ret >= pg_offset)
					break;
			} while ((bh = bh->b_this_page) != head);
		} else
			ret = mapped ? 0 : PAGE_CACHE_SIZE;
	}

	return ret;
}

STATIC size_t
xfs_probe_cluster(
	struct inode		*inode,
	struct page		*startpage,
	struct buffer_head	*bh,
	struct buffer_head	*head,
	int			mapped)
{
	struct pagevec		pvec;
	pgoff_t			tindex, tlast, tloff;
	size_t			total = 0;
	int			done = 0, i;

	/* First sum forwards in this page */
	do {
		if (!buffer_uptodate(bh) || (mapped != buffer_mapped(bh)))
			return total;
		total += bh->b_size;
	} while ((bh = bh->b_this_page) != head);

	/* if we reached the end of the page, sum forwards in following pages */
	tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
	tindex = startpage->index + 1;

	/* Prune this back to avoid pathological behavior */
	tloff = min(tlast, startpage->index + 64);

	pagevec_init(&pvec, 0);
	while (!done && tindex <= tloff) {
		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			size_t pg_offset, pg_len = 0;

			if (tindex == tlast) {
				pg_offset =
				    i_size_read(inode) & (PAGE_CACHE_SIZE - 1);
				if (!pg_offset) {
					done = 1;
					break;
				}
			} else
				pg_offset = PAGE_CACHE_SIZE;

			if (page->index == tindex && trylock_page(page)) {
				pg_len = xfs_probe_page(page, pg_offset, mapped);
				unlock_page(page);
			}

			if (!pg_len) {
				done = 1;
				break;
			}

			total += pg_len;
			tindex++;
		}

		pagevec_release(&pvec);
		cond_resched();
	}

	return total;
}

/*
 * Test if a given page is suitable for writing as part of an unwritten
 * or delayed allocate extent.
 */
STATIC int
xfs_is_delayed_page(
	struct page		*page,
	unsigned int		type)
{
	if (PageWriteback(page))
		return 0;

	if (page->mapping && page_has_buffers(page)) {
		struct buffer_head	*bh, *head;
		int			acceptable = 0;

		bh = head = page_buffers(page);
		do {
			if (buffer_unwritten(bh))
				acceptable = (type == IOMAP_UNWRITTEN);
			else if (buffer_delay(bh))
				acceptable = (type == IOMAP_DELAY);
			else if (buffer_dirty(bh) && buffer_mapped(bh))
				acceptable = (type == IOMAP_NEW);
			else
				break;
		} while ((bh = bh->b_this_page) != head);

		if (acceptable)
			return 1;
	}

	return 0;
}

/*
 * Allocate & map buffers for page given the extent map. Write it out.
 * Except for the original page of a writepage, this is called on
 * delalloc/unwritten pages only; for the original page it is possible
 * that the page has no mapping at all.
 */
STATIC int
xfs_convert_page(
	struct inode		*inode,
	struct page		*page,
	loff_t			tindex,
	xfs_iomap_t		*mp,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc,
	int			startio,
	int			all_bh)
{
	struct buffer_head	*bh, *head;
	xfs_off_t		end_offset;
	unsigned long		p_offset;
	unsigned int		type;
	int			bbits = inode->i_blkbits;
	int			len, page_dirty;
	int			count = 0, done = 0, uptodate = 1;
	xfs_off_t		offset = page_offset(page);

	if (page->index != tindex)
		goto fail;
	if (!trylock_page(page))
		goto fail;
	if (PageWriteback(page))
		goto fail_unlock_page;
	if (page->mapping != inode->i_mapping)
		goto fail_unlock_page;
	if (!xfs_is_delayed_page(page, (*ioendp)->io_type))
		goto fail_unlock_page;

	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 *
	 * Derivation:
	 *
	 * End offset is the highest offset that this page should represent.
	 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
	 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
	 * hence give us the correct page_dirty count. On any other page,
	 * it will be zero and in that case we need page_dirty to be the
	 * count of buffers on the page.
	 */
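	/*
	 * Worked example, assuming 4k pages and 512 byte blocks: on the
	 * last page with i_size = 0x1600, end_offset & (PAGE_CACHE_SIZE - 1)
	 * is 0x600; rounding up to the block size leaves p_offset = 0x600,
	 * so page_dirty = 0x600 / 0x200 = 3 buffers to clean.  On an
	 * interior page p_offset is PAGE_CACHE_SIZE and page_dirty is the
	 * full 8 buffers.
	 */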
	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
			i_size_read(inode));

	len = 1 << inode->i_blkbits;
	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
					PAGE_CACHE_SIZE);
	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
	page_dirty = p_offset / len;

	bh = head = page_buffers(page);
	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh))) {
			done = 1;
			continue;
		}

		if (buffer_unwritten(bh) || buffer_delay(bh)) {
			if (buffer_unwritten(bh))
				type = IOMAP_UNWRITTEN;
			else
				type = IOMAP_DELAY;

			if (!xfs_iomap_valid(mp, offset)) {
				done = 1;
				continue;
			}

			ASSERT(!(mp->iomap_flags & IOMAP_HOLE));
			ASSERT(!(mp->iomap_flags & IOMAP_DELAY));

			xfs_map_at_offset(bh, offset, bbits, mp);
			if (startio) {
				xfs_add_to_ioend(inode, bh, offset,
						type, ioendp, done);
			} else {
				set_buffer_dirty(bh);
				unlock_buffer(bh);
				mark_buffer_dirty(bh);
			}
			page_dirty--;
			count++;
		} else {
			type = IOMAP_NEW;
			if (buffer_mapped(bh) && all_bh && startio) {
				lock_buffer(bh);
				xfs_add_to_ioend(inode, bh, offset,
						type, ioendp, done);
				count++;
				page_dirty--;
			} else {
				done = 1;
			}
		}
	} while (offset += len, (bh = bh->b_this_page) != head);

	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (startio) {
		if (count) {
			struct backing_dev_info *bdi;

			bdi = inode->i_mapping->backing_dev_info;
			wbc->nr_to_write--;
			if (bdi_write_congested(bdi)) {
				wbc->encountered_congestion = 1;
				done = 1;
			} else if (wbc->nr_to_write <= 0) {
				done = 1;
			}
		}
		xfs_start_page_writeback(page, !page_dirty, count);
	}

	return done;
 fail_unlock_page:
	unlock_page(page);
 fail:
	return 1;
}

/*
 * Convert & write out a cluster of pages in the same extent as defined
 * by mp and following the start page.
 */
STATIC void
xfs_cluster_write(
	struct inode		*inode,
	pgoff_t			tindex,
	xfs_iomap_t		*iomapp,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc,
	int			startio,
	int			all_bh,
	pgoff_t			tlast)
{
	struct pagevec		pvec;
	int			done = 0, i;

	pagevec_init(&pvec, 0);
	while (!done && tindex <= tlast) {
		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			done = xfs_convert_page(inode, pvec.pages[i], tindex++,
					iomapp, ioendp, wbc, startio, all_bh);
			if (done)
				break;
		}

		pagevec_release(&pvec);
		cond_resched();
	}
}

/*
 * Calling this without startio set means we are being asked to make a dirty
 * page ready for freeing its buffers.  When called with startio set then
 * we are coming from writepage.
 *
 * When called with startio set it is important that we write the WHOLE
 * page if possible.
 * The bh->b_state's cannot know if any of the blocks or which block for
 * that matter are dirty due to mmap writes, and therefore bh uptodate is
 * only valid if the page itself isn't completely uptodate.  Some layers
 * may clear the page dirty flag prior to calling write page, under the
 * assumption the entire page will be written out; by not writing out the
 * whole page the page can be reused before all valid dirty data is
 * written out.  Note: in the case of a page that has been dirtied by
 * mapwrite but only partially set up by block_prepare_write, the
 * bh->b_state's will not agree and only the ones set up by BPW/BCW will
 * have valid state; thus the whole page must be written out.
 */

STATIC int
xfs_page_state_convert(
	struct inode	*inode,
	struct page	*page,
	struct writeback_control *wbc,
	int		startio,
	int		unmapped) /* also implies page uptodate */
{
	struct buffer_head	*bh, *head;
	xfs_iomap_t		iomap;
	xfs_ioend_t		*ioend = NULL, *iohead = NULL;
	loff_t			offset;
	unsigned long		p_offset = 0;
	unsigned int		type;
	__uint64_t		end_offset;
	pgoff_t			end_index, last_index, tlast;
	ssize_t			size, len;
	int			flags, err, iomap_valid = 0, uptodate = 1;
	int			page_dirty, count = 0;
	int			trylock = 0;
	int			all_bh = unmapped;

	if (startio) {
		if (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking)
			trylock |= BMAPI_TRYLOCK;
	}

	/* Is this page beyond the end of the file? */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_CACHE_SHIFT;
	last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
	if (page->index >= end_index) {
		if ((page->index >= end_index + 1) ||
		    !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
			if (startio)
				unlock_page(page);
			return 0;
		}
	}

	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 *
	 * Derivation:
	 *
	 * End offset is the highest offset that this page should represent.
	 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
	 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
	 * hence give us the correct page_dirty count. On any other page,
	 * it will be zero and in that case we need page_dirty to be the
	 * count of buffers on the page.
	 */
	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset);
	len = 1 << inode->i_blkbits;
	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
					PAGE_CACHE_SIZE);
	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
	page_dirty = p_offset / len;

	bh = head = page_buffers(page);
	offset = page_offset(page);
	flags = BMAPI_READ;
	type = IOMAP_NEW;

	/* TODO: cleanup count and page_dirty */

	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio) {
			/*
			 * the iomap is actually still valid, but the ioend
			 * isn't.  shouldn't happen too often.
			 */
			iomap_valid = 0;
			continue;
		}

		if (iomap_valid)
			iomap_valid = xfs_iomap_valid(&iomap, offset);

		/*
		 * First case, map an unwritten extent and prepare for
		 * extent state conversion transaction on completion.
		 *
		 * Second case, allocate space for a delalloc buffer.
		 * We can return EAGAIN here in the release page case.
		 *
		 * Third case, an unmapped buffer was found, and we are
		 * in a path where we need to write the whole page out.
		 */
		if (buffer_unwritten(bh) || buffer_delay(bh) ||
		    ((buffer_uptodate(bh) || PageUptodate(page)) &&
		     !buffer_mapped(bh) && (unmapped || startio))) {
			int new_ioend = 0;

			/*
			 * Make sure we don't use a read-only iomap
			 */
			if (flags == BMAPI_READ)
				iomap_valid = 0;

			if (buffer_unwritten(bh)) {
				type = IOMAP_UNWRITTEN;
				flags = BMAPI_WRITE | BMAPI_IGNSTATE;
			} else if (buffer_delay(bh)) {
				type = IOMAP_DELAY;
				flags = BMAPI_ALLOCATE | trylock;
			} else {
				type = IOMAP_NEW;
				flags = BMAPI_WRITE | BMAPI_MMAP;
			}

			if (!iomap_valid) {
				/*
				 * if we didn't have a valid mapping then we
				 * need to ensure that we put the new mapping
				 * in a new ioend structure. This needs to be
				 * done to ensure that the ioends correctly
				 * reflect the block mappings at io completion
				 * for unwritten extent conversion.
				 */
				new_ioend = 1;
				if (type == IOMAP_NEW) {
					size = xfs_probe_cluster(inode,
							page, bh, head, 0);
				} else {
					size = len;
				}

				err = xfs_map_blocks(inode, offset, size,
						&iomap, flags);
				if (err)
					goto error;
				iomap_valid = xfs_iomap_valid(&iomap, offset);
			}
			if (iomap_valid) {
				xfs_map_at_offset(bh, offset,
						inode->i_blkbits, &iomap);
				if (startio) {
					xfs_add_to_ioend(inode, bh, offset,
							type, &ioend,
							new_ioend);
				} else {
					set_buffer_dirty(bh);
					unlock_buffer(bh);
					mark_buffer_dirty(bh);
				}
				page_dirty--;
				count++;
			}
		} else if (buffer_uptodate(bh) && startio) {
			/*
			 * we got here because the buffer is already mapped.
			 * That means it must already have extents allocated
			 * underneath it. Map the extent by reading it.
			 */
			if (!iomap_valid || flags != BMAPI_READ) {
				flags = BMAPI_READ;
				size = xfs_probe_cluster(inode, page, bh,
								head, 1);
				err = xfs_map_blocks(inode, offset, size,
						&iomap, flags);
				if (err)
					goto error;
				iomap_valid = xfs_iomap_valid(&iomap, offset);
			}

			/*
			 * We set the type to IOMAP_NEW in case we are doing a
			 * small write at EOF that is extending the file but
			 * without needing an allocation. We need to update the
			 * file size on I/O completion in this case so it is
			 * the same case as having just allocated a new extent
			 * that we are writing into for the first time.
			 */
			type = IOMAP_NEW;
			if (trylock_buffer(bh)) {
				ASSERT(buffer_mapped(bh));
				if (iomap_valid)
					all_bh = 1;
				xfs_add_to_ioend(inode, bh, offset, type,
						&ioend, !iomap_valid);
				page_dirty--;
				count++;
			} else {
				iomap_valid = 0;
			}
		} else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
			   (unmapped || startio)) {
			iomap_valid = 0;
		}

		if (!iohead)
			iohead = ioend;

	} while (offset += len, ((bh = bh->b_this_page) != head));

	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (startio)
		xfs_start_page_writeback(page, 1, count);

	if (ioend && iomap_valid) {
		offset = (iomap.iomap_offset + iomap.iomap_bsize - 1) >>
					PAGE_CACHE_SHIFT;
		tlast = min_t(pgoff_t, offset, last_index);
		xfs_cluster_write(inode, page->index + 1, &iomap, &ioend,
					wbc, startio, all_bh, tlast);
	}

	if (iohead)
		xfs_submit_ioend(iohead);

	return page_dirty;

error:
	if (iohead)
		xfs_cancel_ioend(iohead);

	/*
	 * If it's delalloc and we have nowhere to put it,
	 * throw it away, unless the lower layers told
	 * us to try again.
	 */
	if (err != -EAGAIN) {
		if (!unmapped)
			block_invalidatepage(page, 0);
		ClearPageUptodate(page);
	}
	return err;
}

/*
 * writepage: Called from one of two places:
 *
 * 1. we are flushing a delalloc buffer head.
 *
 * 2. we are writing out a dirty page. Typically the page dirty
 *    state is cleared before we get here. In this case it is
 *    conceivable we have no buffer heads.
 *
 * For delalloc space on the page we need to allocate space and
 * flush it. For unmapped buffer heads on the page we should
 * allocate space if the page is uptodate. For any other dirty
 * buffer heads on the page we should flush them.
 *
 * If we detect that a transaction would be required to flush
 * the page, we have to check the process flags first, if we
 * are already in a transaction or disk I/O during allocations
 * is off, we need to fail the writepage and redirty the page.
 */

STATIC int
xfs_vm_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	int			error;
	int			need_trans;
	int			delalloc, unmapped, unwritten;
	struct inode		*inode = page->mapping->host;

	xfs_page_trace(XFS_WRITEPAGE_ENTER, inode, page, 0);

	/*
	 * We need a transaction if:
	 *  1. There are delalloc buffers on the page
	 *  2. The page is uptodate and we have unmapped buffers
	 *  3. The page is uptodate and we have no buffers
	 *  4. There are unwritten buffers on the page
	 */

	if (!page_has_buffers(page)) {
		unmapped = 1;
		need_trans = 1;
	} else {
		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
		if (!PageUptodate(page))
			unmapped = 0;
		need_trans = delalloc + unmapped + unwritten;
	}

	/*
	 * If we need a transaction and the process flags say
	 * we are already in a transaction, or no IO is allowed
	 * then mark the page dirty again and leave the page
	 * as is.
	 */
	if (current_test_flags(PF_FSTRANS) && need_trans)
		goto out_fail;

	/*
	 * Delay hooking up buffer heads until we have
	 * made our go/no-go decision.
	 */
	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << inode->i_blkbits, 0);

	/*
	 * VM calculation for nr_to_write seems off.  Bump it way
	 * up, this gets simple streaming writes zippy again.
	 * To be reviewed again after Jens' writeback changes.
	 */
	wbc->nr_to_write *= 4;

	/*
	 * Convert delayed allocate, unwritten or unmapped space
	 * to real space and flush out to disk.
	 */
	error = xfs_page_state_convert(inode, page, wbc, 1, unmapped);
	if (error == -EAGAIN)
		goto out_fail;
	if (unlikely(error < 0))
		goto out_unlock;

	return 0;

out_fail:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
out_unlock:
	unlock_page(page);
	return error;
}

STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	return generic_writepages(mapping, wbc);
}

/*
 * Called to move a page into cleanable state - and from there
 * to be released. Possibly the page is already clean. We always
 * have buffer heads in this call.
 *
 * Returns 0 if the page is ok to release, 1 otherwise.
 *
 * Possible scenarios are:
 *
 * 1. We are being called to release a page which has been written
 *    to via regular I/O. buffer heads will be dirty and possibly
 *    delalloc. If no delalloc buffer heads in this case then we
 *    can just return zero.
 *
 * 2. We are called to release a page which has been written via
 *    mmap, all we need to do is ensure there is no delalloc
 *    state in the buffer heads, if not we can let the caller
 *    free them and we should come back later via writepage.
 */
STATIC int
xfs_vm_releasepage(
	struct page		*page,
	gfp_t			gfp_mask)
{
	struct inode		*inode = page->mapping->host;
	int			dirty, delalloc, unmapped, unwritten;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 1,
	};

	xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, 0);

	if (!page_has_buffers(page))
		return 0;

	xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
	if (!delalloc && !unwritten)
		goto free_buffers;

	if (!(gfp_mask & __GFP_FS))
		return 0;

	/*
	 * If we are already inside a transaction or the thread cannot
	 * do I/O, we cannot release this page.
	 */
	if (current_test_flags(PF_FSTRANS))
		return 0;

	/*
	 * Convert delalloc space to real space; do not flush the data
	 * out to disk, as that will be done by the caller.  We never
	 * need to allocate space here - we will always come back to
	 * writepage in that case.
	 */
	dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0);
	if (dirty == 0 && !unwritten)
		goto free_buffers;
	return 0;

free_buffers:
	return try_to_free_buffers(page);
}

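/*
 * Map a range of an inode's blocks into bh_result.  This is the common
 * implementation behind xfs_get_blocks() (buffered I/O) and
 * xfs_get_blocks_direct() (direct I/O): @create selects a read versus
 * a write mapping, @direct marks direct I/O so unwritten extent
 * conversion can be wired up for the completion path, and @flags is
 * passed through to xfs_iomap() in the write case.
 */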
STATIC int
__xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create,
	int			direct,
	bmapi_flags_t		flags)
{
	xfs_iomap_t		iomap;
	xfs_off_t		offset;
	ssize_t			size;
	int			niomap = 1;
	int			error;

	offset = (xfs_off_t)iblock << inode->i_blkbits;
	ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
	size = bh_result->b_size;

	if (!create && direct && offset >= i_size_read(inode))
		return 0;

	error = xfs_iomap(XFS_I(inode), offset, size,
			  create ? flags : BMAPI_READ, &iomap, &niomap);
	if (error)
		return -error;
	if (niomap == 0)
		return 0;

	if (iomap.iomap_bn != IOMAP_DADDR_NULL) {
		/*
		 * For unwritten extents do not report a disk address in
		 * the read case (treat as if we're reading into a hole).
		 */
		if (create || !(iomap.iomap_flags & IOMAP_UNWRITTEN)) {
			xfs_map_buffer(bh_result, &iomap, offset,
				       inode->i_blkbits);
		}
		if (create && (iomap.iomap_flags & IOMAP_UNWRITTEN)) {
			if (direct)
				bh_result->b_private = inode;
			set_buffer_unwritten(bh_result);
		}
	}

	/*
	 * If this is a realtime file, data may be on a different device
	 * to the one currently pointed to by the buffer_head's b_bdev.
	 */
	bh_result->b_bdev = iomap.iomap_target->bt_bdev;

	/*
	 * If we previously allocated a block out beyond eof and we are now
	 * coming back to use it then we will need to flag it as new even if it
	 * has a disk address.
	 *
	 * With sub-block writes into unwritten extents we also need to mark
	 * the buffer as new so that the unwritten parts of the buffer get
	 * correctly zeroed.
	 */
	if (create &&
	    ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
	     (offset >= i_size_read(inode)) ||
	     (iomap.iomap_flags & (IOMAP_NEW|IOMAP_UNWRITTEN))))
		set_buffer_new(bh_result);

	if (iomap.iomap_flags & IOMAP_DELAY) {
		BUG_ON(direct);
		if (create) {
			set_buffer_uptodate(bh_result);
			set_buffer_mapped(bh_result);
			set_buffer_delay(bh_result);
		}
	}

	if (direct || size > (1 << inode->i_blkbits)) {
		ASSERT(iomap.iomap_bsize - iomap.iomap_delta > 0);
		offset = min_t(xfs_off_t,
				iomap.iomap_bsize - iomap.iomap_delta, size);
		bh_result->b_size = (ssize_t)min_t(xfs_off_t, LONG_MAX, offset);
	}

	return 0;
}

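/*
 * get_block callback for the buffered I/O paths: block_write_begin(),
 * the mpage read helpers and generic_block_bmap() all map blocks
 * through here.
 */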
int
xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock,
				bh_result, create, 0, BMAPI_WRITE);
}

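/*
 * get_block callback for the direct I/O path.  BMAPI_DIRECT tells
 * xfs_iomap() to allocate real extents up front instead of delayed
 * allocations, since delalloc cannot be used for direct I/O (note
 * the BUG_ON(direct) under IOMAP_DELAY above).
 */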
STATIC int
xfs_get_blocks_direct(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock,
			bh_result, create, 1, BMAPI_WRITE|BMAPI_DIRECT);
}

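/*
 * Completion callback for direct I/O, invoked by the generic direct
 * I/O code once all bios for the request have completed.
 */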
STATIC void
xfs_end_io_direct(
	struct kiocb	*iocb,
	loff_t		offset,
	ssize_t		size,
	void		*private)
{
	xfs_ioend_t	*ioend = iocb->private;

	/*
	 * Non-NULL private data means we need to issue a transaction to
	 * convert a range from unwritten to written extents.  This needs
	 * to happen from process context but aio+dio I/O completion
	 * happens from irq context so we need to defer it to a workqueue.
	 * This is not necessary for synchronous direct I/O, but we do
	 * it anyway to keep the code uniform and simpler.
	 *
	 * Well, if only it were that simple.  Because synchronous direct I/O
	 * requires extent conversion to occur *before* we return to userspace,
	 * we have to wait for extent conversion to complete.  Look at the
	 * iocb that has been passed to us to determine if this is AIO or
	 * not.  If it is synchronous, tell xfs_finish_ioend() to kick the
	 * workqueue and wait for it to complete.
	 *
	 * The core direct I/O code might be changed to always call the
	 * completion handler in the future, in which case all this can
	 * go away.
	 */
	ioend->io_offset = offset;
	ioend->io_size = size;
	if (ioend->io_type == IOMAP_READ) {
		xfs_finish_ioend(ioend, 0);
	} else if (private && size > 0) {
		xfs_finish_ioend(ioend, is_sync_kiocb(iocb));
	} else {
		/*
		 * A direct I/O write ioend starts its life in unwritten
		 * state in case it maps an unwritten extent.  This write
		 * didn't map an unwritten extent, so switch its completion
		 * handler.
		 */
		INIT_WORK(&ioend->io_work, xfs_end_bio_written);
		xfs_finish_ioend(ioend, 0);
	}

	/*
	 * blockdev_direct_IO can return an error even after the I/O
	 * completion handler was called.  Thus we need to protect
	 * against double-freeing.
	 */
	iocb->private = NULL;
}

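/*
 * ->direct_IO: allocate an ioend up front and stash it in
 * iocb->private for xfs_end_io_direct() to find.  Writes start life
 * as IOMAP_UNWRITTEN in case they hit an unwritten extent, reads as
 * IOMAP_READ.  Writes take the "own locking" blockdev variant and
 * reads the no-locking one, presumably because XFS handles the
 * required inode locking itself, as the helpers' names suggest.
 */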
STATIC ssize_t
xfs_vm_direct_IO(
	int			rw,
	struct kiocb		*iocb,
	const struct iovec	*iov,
	loff_t			offset,
	unsigned long		nr_segs)
{
	struct file	*file = iocb->ki_filp;
	struct inode	*inode = file->f_mapping->host;
	struct block_device *bdev;
	ssize_t		ret;

	bdev = xfs_find_bdev_for_inode(XFS_I(inode));

	if (rw == WRITE) {
		iocb->private = xfs_alloc_ioend(inode, IOMAP_UNWRITTEN);
		ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
			bdev, iov, offset, nr_segs,
			xfs_get_blocks_direct,
			xfs_end_io_direct);
	} else {
		iocb->private = xfs_alloc_ioend(inode, IOMAP_READ);
		ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
			bdev, iov, offset, nr_segs,
			xfs_get_blocks_direct,
			xfs_end_io_direct);
	}

	if (unlikely(ret != -EIOCBQUEUED && iocb->private))
		xfs_destroy_ioend(iocb->private);
	return ret;
}

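/*
 * ->write_begin: defer to block_write_begin() with xfs_get_blocks()
 * as the block mapper.  *pagep is cleared first so that
 * block_write_begin() grabs (and, on error, releases) the page
 * itself.
 */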
STATIC int
xfs_vm_write_begin(
	struct file		*file,
	struct address_space	*mapping,
	loff_t			pos,
	unsigned		len,
	unsigned		flags,
	struct page		**pagep,
	void			**fsdata)
{
	*pagep = NULL;
	return block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				 xfs_get_blocks);
}

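/*
 * ->bmap backs the FIBMAP ioctl and other callers that want raw block
 * numbers.  Flush dirty pages first, under the shared iolock, so that
 * delayed allocations are converted to real extents before
 * generic_block_bmap() queries the mapping.
 */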
STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct inode		*inode = (struct inode *)mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);

	xfs_itrace_entry(XFS_I(inode));
	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	xfs_flush_pages(ip, (xfs_off_t)0, -1, 0, FI_REMAPF);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return generic_block_bmap(mapping, block, xfs_get_blocks);
}

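/*
 * Read paths: both the single-page and the readahead variant defer to
 * the generic mpage code, with xfs_get_blocks() supplying the block
 * mapping.
 */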
STATIC int
xfs_vm_readpage(
	struct file		*unused,
	struct page		*page)
{
	return mpage_readpage(page, xfs_get_blocks);
}

STATIC int
xfs_vm_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
}

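/*
 * ->invalidatepage: trace the call, then let block_invalidatepage()
 * clean up buffer head state for the invalidated range.
 */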
STATIC void
xfs_vm_invalidatepage(
	struct page		*page,
	unsigned long		offset)
{
	xfs_page_trace(XFS_INVALIDPAGE_ENTER,
			page->mapping->host, page, offset);
	block_invalidatepage(page, offset);
}

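/*
 * The address_space operations installed on XFS regular-file
 * mappings; generic helpers are used wherever no XFS-specific hook
 * is needed.
 */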
const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readpages		= xfs_vm_readpages,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.sync_page		= block_sync_page,
	.releasepage		= xfs_vm_releasepage,
	.invalidatepage		= xfs_vm_invalidatepage,
	.write_begin		= xfs_vm_write_begin,
	.write_end		= generic_write_end,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= xfs_vm_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
};