/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_trans.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_error.h"
#include "xfs_rw.h"
#include "xfs_iomap.h"
#include "xfs_vnodeops.h"
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>

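/*
 * Walk the buffer_heads attached to a page and report whether any are
 * delayed-allocate, unmapped-but-uptodate, or unwritten.  Callers use
 * this to decide what conversion or allocation work a page needs
 * before it can be written back.
 */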
STATIC void
xfs_count_page_state(
	struct page		*page,
	int			*delalloc,
	int			*unmapped,
	int			*unwritten)
{
	struct buffer_head	*bh, *head;

	*delalloc = *unmapped = *unwritten = 0;

	bh = head = page_buffers(page);
	do {
		if (buffer_uptodate(bh) && !buffer_mapped(bh))
			(*unmapped) = 1;
		else if (buffer_unwritten(bh))
			(*unwritten) = 1;
		else if (buffer_delay(bh))
			(*delalloc) = 1;
	} while ((bh = bh->b_this_page) != head);
}

#if defined(XFS_RW_TRACE)
void
xfs_page_trace(
	int		tag,
	struct inode	*inode,
	struct page	*page,
	unsigned long	pgoff)
{
	xfs_inode_t	*ip;
	bhv_vnode_t	*vp = vn_from_inode(inode);
	loff_t		isize = i_size_read(inode);
	loff_t		offset = page_offset(page);
	int		delalloc = -1, unmapped = -1, unwritten = -1;

	if (page_has_buffers(page))
		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);

	ip = xfs_vtoi(vp);
	if (!ip->i_rwtrace)
		return;

	ktrace_enter(ip->i_rwtrace,
		(void *)((unsigned long)tag),
		(void *)ip,
		(void *)inode,
		(void *)page,
		(void *)pgoff,
		(void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
		(void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
		(void *)((unsigned long)((isize >> 32) & 0xffffffff)),
		(void *)((unsigned long)(isize & 0xffffffff)),
		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
		(void *)((unsigned long)(offset & 0xffffffff)),
		(void *)((unsigned long)delalloc),
		(void *)((unsigned long)unmapped),
		(void *)((unsigned long)unwritten),
		(void *)((unsigned long)current_pid()),
		(void *)NULL);
}
#else
#define xfs_page_trace(tag, inode, page, pgoff)
#endif

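/*
 * Return the block device backing this inode's data: the realtime
 * device for realtime inodes, otherwise the main data device.
 */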
STATIC struct block_device *
xfs_find_bdev_for_inode(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_bdev;
	else
		return mp->m_ddev_targp->bt_bdev;
}

/*
 * Schedule IO completion handling on an xfsdatad if this was
 * the final hold on this ioend.  If we are asked to wait,
 * flush the workqueue.
 */
STATIC void
xfs_finish_ioend(
	xfs_ioend_t	*ioend,
	int		wait)
{
	if (atomic_dec_and_test(&ioend->io_remaining)) {
		queue_work(xfsdatad_workqueue, &ioend->io_work);
		if (wait)
			flush_workqueue(xfsdatad_workqueue);
	}
}

/*
 * We're now finished for good with this ioend structure.
 * Update the page state via the associated buffer_heads,
 * release holds on the inode and bio, and finally free
 * up memory.  Do not use the ioend after this.
 */
STATIC void
xfs_destroy_ioend(
	xfs_ioend_t		*ioend)
{
	struct buffer_head	*bh, *next;

	for (bh = ioend->io_buffer_head; bh; bh = next) {
		next = bh->b_private;
		bh->b_end_io(bh, !ioend->io_error);
	}
	if (unlikely(ioend->io_error)) {
		vn_ioerror(XFS_I(ioend->io_inode), ioend->io_error,
				__FILE__,__LINE__);
	}
	vn_iowake(XFS_I(ioend->io_inode));
	mempool_free(ioend, xfs_ioend_pool);
}

/*
 * Update on-disk file size now that data has been written to disk.
 * The current in-memory file size is i_size.  If a write is beyond
 * eof, i_new_size will be the intended file size until i_size is
 * updated.  If this write does not extend all the way to the valid
 * file size then restrict this update to the end of the write.
 */
STATIC void
xfs_setfilesize(
	xfs_ioend_t		*ioend)
{
	xfs_inode_t		*ip = XFS_I(ioend->io_inode);
	xfs_fsize_t		isize;
	xfs_fsize_t		bsize;

	ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
	ASSERT(ioend->io_type != IOMAP_READ);

	if (unlikely(ioend->io_error))
		return;

	bsize = ioend->io_offset + ioend->io_size;

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	isize = MAX(ip->i_size, ip->i_new_size);
	isize = MIN(isize, bsize);

	if (ip->i_d.di_size < isize) {
		ip->i_d.di_size = isize;
		ip->i_update_core = 1;
		ip->i_update_size = 1;
		mark_inode_dirty_sync(ioend->io_inode);
	}

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
}

/*
 * Buffered IO write completion for delayed allocate extents.
 */
STATIC void
xfs_end_bio_delalloc(
	struct work_struct	*work)
{
	xfs_ioend_t		*ioend =
		container_of(work, xfs_ioend_t, io_work);

	xfs_setfilesize(ioend);
	xfs_destroy_ioend(ioend);
}

/*
 * Buffered IO write completion for regular, written extents.
 */
STATIC void
xfs_end_bio_written(
	struct work_struct	*work)
{
	xfs_ioend_t		*ioend =
		container_of(work, xfs_ioend_t, io_work);

	xfs_setfilesize(ioend);
	xfs_destroy_ioend(ioend);
}

/*
 * IO write completion for unwritten extents.
 *
 * Issue transactions to convert a buffer range from unwritten
 * to written extents.
 */
STATIC void
xfs_end_bio_unwritten(
	struct work_struct	*work)
{
	xfs_ioend_t		*ioend =
		container_of(work, xfs_ioend_t, io_work);
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	xfs_off_t		offset = ioend->io_offset;
	size_t			size = ioend->io_size;

	if (likely(!ioend->io_error)) {
		if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
			int error;
			error = xfs_iomap_write_unwritten(ip, offset, size);
			if (error)
				ioend->io_error = error;
		}
		xfs_setfilesize(ioend);
	}
	xfs_destroy_ioend(ioend);
}

/*
 * IO read completion for regular, written extents.
 */
STATIC void
xfs_end_bio_read(
	struct work_struct	*work)
{
	xfs_ioend_t		*ioend =
		container_of(work, xfs_ioend_t, io_work);

	xfs_destroy_ioend(ioend);
}

/*
 * Allocate and initialise an IO completion structure.
 * We need to track unwritten extent write completion here initially.
 * We'll need to extend this for updating the ondisk inode size later
 * (vs. incore size).
 */
STATIC xfs_ioend_t *
xfs_alloc_ioend(
	struct inode		*inode,
	unsigned int		type)
{
	xfs_ioend_t		*ioend;

	ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);

	/*
	 * Set the count to 1 initially, which will prevent an I/O
	 * completion callback from happening before we have started
	 * all the I/O from calling the completion routine too early.
	 */
	atomic_set(&ioend->io_remaining, 1);
	ioend->io_error = 0;
	ioend->io_list = NULL;
	ioend->io_type = type;
	ioend->io_inode = inode;
	ioend->io_buffer_head = NULL;
	ioend->io_buffer_tail = NULL;
	atomic_inc(&XFS_I(ioend->io_inode)->i_iocount);
	ioend->io_offset = 0;
	ioend->io_size = 0;

	if (type == IOMAP_UNWRITTEN)
		INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten);
	else if (type == IOMAP_DELAY)
		INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc);
	else if (type == IOMAP_READ)
		INIT_WORK(&ioend->io_work, xfs_end_bio_read);
	else
		INIT_WORK(&ioend->io_work, xfs_end_bio_written);

	return ioend;
}

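/*
 * Map the given range of the file to an extent via xfs_iomap().
 * Returns a negative errno on failure, as the Linux address_space
 * callers expect.
 */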
STATIC int
xfs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	ssize_t			count,
	xfs_iomap_t		*mapp,
	int			flags)
{
	xfs_inode_t		*ip = XFS_I(inode);
	int			error, nmaps = 1;

	error = xfs_iomap(ip, offset, count,
				flags, mapp, &nmaps);
	if (!error && (flags & (BMAPI_WRITE|BMAPI_ALLOCATE)))
		xfs_iflags_set(ip, XFS_IMODIFIED);
	return -error;
}

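/*
 * Check whether a file offset still falls inside a previously
 * obtained iomap mapping.
 */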
STATIC_INLINE int
xfs_iomap_valid(
	xfs_iomap_t		*iomapp,
	loff_t			offset)
{
	return offset >= iomapp->iomap_offset &&
		offset < iomapp->iomap_offset + iomapp->iomap_bsize;
}

/*
 * BIO completion handler for buffered IO.
 */
STATIC void
xfs_end_bio(
	struct bio		*bio,
	int			error)
{
	xfs_ioend_t		*ioend = bio->bi_private;

	ASSERT(atomic_read(&bio->bi_cnt) >= 1);
	ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;

	/* Toss bio and pass work off to an xfsdatad thread */
	bio->bi_private = NULL;
	bio->bi_end_io = NULL;
	bio_put(bio);

	xfs_finish_ioend(ioend, 0);
}

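/*
 * Take an extra hold on the ioend for the duration of the I/O and
 * submit the bio; xfs_end_bio() drops that hold on completion.
 */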
STATIC void
xfs_submit_ioend_bio(
	xfs_ioend_t	*ioend,
	struct bio	*bio)
{
	atomic_inc(&ioend->io_remaining);

	bio->bi_private = ioend;
	bio->bi_end_io = xfs_end_bio;

	submit_bio(WRITE, bio);
	ASSERT(!bio_flagged(bio, BIO_EOPNOTSUPP));
	bio_put(bio);
}

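/*
 * Allocate a bio sized for this buffer's device, halving the
 * requested vector count on each failed attempt until the
 * allocation succeeds.
 */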
STATIC struct bio *
xfs_alloc_ioend_bio(
	struct buffer_head	*bh)
{
	struct bio		*bio;
	int			nvecs = bio_get_nr_vecs(bh->b_bdev);

	do {
		bio = bio_alloc(GFP_NOIO, nvecs);
		nvecs >>= 1;
	} while (!bio);

	ASSERT(bio->bi_private == NULL);
	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	bio_get(bio);
	return bio;
}

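/*
 * Mark a mapped, locked buffer as under async writeback and move it
 * to a clean state.
 */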
STATIC void
xfs_start_buffer_writeback(
	struct buffer_head	*bh)
{
	ASSERT(buffer_mapped(bh));
	ASSERT(buffer_locked(bh));
	ASSERT(!buffer_delay(bh));
	ASSERT(!buffer_unwritten(bh));

	mark_buffer_async_write(bh);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
}

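/*
 * Move a locked page into writeback state and unlock it.  If no
 * buffers on the page will actually be written, the page writeback
 * is completed immediately.
 */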
STATIC void
xfs_start_page_writeback(
	struct page		*page,
	int			clear_dirty,
	int			buffers)
{
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));
	if (clear_dirty)
		clear_page_dirty_for_io(page);
	set_page_writeback(page);
	unlock_page(page);
	/* If no buffers on the page are to be written, finish it here */
	if (!buffers)
		end_page_writeback(page);
}

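/*
 * Convenience wrapper: add a single buffer_head's range of its page
 * to a bio.
 */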
static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}

/*
 * Submit all of the bios for all of the ioends we have saved up, covering the
 * initial writepage page and also any probed pages.
 *
 * Because we may have multiple ioends spanning a page, we need to start
 * writeback on all the buffers before we submit them for I/O. If we mark the
 * buffers as we go, then we can end up with a page that only has some buffers
 * marked async write, and I/O completion on those can occur before we mark
 * the other buffers async write.
 *
 * The end result of this is that we trip a bug in end_page_writeback() because
 * we call it twice for the one page as the code in end_buffer_async_write()
 * assumes that all buffers on the page are started at the same time.
 *
 * The fix is two passes across the ioend list - one to start writeback on the
 * buffer_heads, and then submit them for I/O on the second pass.
 */
STATIC void
xfs_submit_ioend(
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*head = ioend;
	xfs_ioend_t		*next;
	struct buffer_head	*bh;
	struct bio		*bio;
	sector_t		lastblock = 0;

	/* Pass 1 - start writeback */
	do {
		next = ioend->io_list;
		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
			xfs_start_buffer_writeback(bh);
		}
	} while ((ioend = next) != NULL);

	/* Pass 2 - submit I/O */
	ioend = head;
	do {
		next = ioend->io_list;
		bio = NULL;

		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {

			if (!bio) {
 retry:
				bio = xfs_alloc_ioend_bio(bh);
			} else if (bh->b_blocknr != lastblock + 1) {
				xfs_submit_ioend_bio(ioend, bio);
				goto retry;
			}

			if (bio_add_buffer(bio, bh) != bh->b_size) {
				xfs_submit_ioend_bio(ioend, bio);
				goto retry;
			}

			lastblock = bh->b_blocknr;
		}
		if (bio)
			xfs_submit_ioend_bio(ioend, bio);
		xfs_finish_ioend(ioend, 0);
	} while ((ioend = next) != NULL);
}

/*
 * Cancel submission of all buffer_heads so far in this ioend.
 * Toss the ioend too.  Only ever called for the initial page
 * in a writepage request, so only ever one page.
 */
STATIC void
xfs_cancel_ioend(
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*next;
	struct buffer_head	*bh, *next_bh;

	do {
		next = ioend->io_list;
		bh = ioend->io_buffer_head;
		do {
			next_bh = bh->b_private;
			clear_buffer_async_write(bh);
			unlock_buffer(bh);
		} while ((bh = next_bh) != NULL);

		vn_iowake(XFS_I(ioend->io_inode));
		mempool_free(ioend, xfs_ioend_pool);
	} while ((ioend = next) != NULL);
}

/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to the current ioend if
 * we can, otherwise we finish off any current ioend and start another.
 */
STATIC void
xfs_add_to_ioend(
	struct inode		*inode,
	struct buffer_head	*bh,
	xfs_off_t		offset,
	unsigned int		type,
	xfs_ioend_t		**result,
	int			need_ioend)
{
	xfs_ioend_t		*ioend = *result;

	if (!ioend || need_ioend || type != ioend->io_type) {
		xfs_ioend_t	*previous = *result;

		ioend = xfs_alloc_ioend(inode, type);
		ioend->io_offset = offset;
		ioend->io_buffer_head = bh;
		ioend->io_buffer_tail = bh;
		if (previous)
			previous->io_list = ioend;
		*result = ioend;
	} else {
		ioend->io_buffer_tail->b_private = bh;
		ioend->io_buffer_tail = bh;
	}

	bh->b_private = NULL;
	ioend->io_size += bh->b_size;
}

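/*
 * Translate an iomap mapping into a buffer_head block number and
 * mark the buffer mapped.
 */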
STATIC void
xfs_map_buffer(
	struct buffer_head	*bh,
	xfs_iomap_t		*mp,
	xfs_off_t		offset,
	uint			block_bits)
{
	sector_t		bn;

	ASSERT(mp->iomap_bn != IOMAP_DADDR_NULL);

	bn = (mp->iomap_bn >> (block_bits - BBSHIFT)) +
	      ((offset - mp->iomap_offset) >> block_bits);

	ASSERT(bn || (mp->iomap_flags & IOMAP_REALTIME));

	bh->b_blocknr = bn;
	set_buffer_mapped(bh);
}

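/*
 * Lock a buffer and attach the device and block mapping for the given
 * offset, clearing any stale delalloc/unwritten state.
 */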
STATIC void
xfs_map_at_offset(
	struct buffer_head	*bh,
	loff_t			offset,
	int			block_bits,
	xfs_iomap_t		*iomapp)
{
	ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE));
	ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY));

	lock_buffer(bh);
	xfs_map_buffer(bh, iomapp, offset, block_bits);
	bh->b_bdev = iomapp->iomap_target->bt_bdev;
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
}

/*
 * Look for a page at index that is suitable for clustering.
 */
STATIC unsigned int
xfs_probe_page(
	struct page		*page,
	unsigned int		pg_offset,
	int			mapped)
{
	int			ret = 0;

	if (PageWriteback(page))
		return 0;

	if (page->mapping && PageDirty(page)) {
		if (page_has_buffers(page)) {
			struct buffer_head	*bh, *head;

			bh = head = page_buffers(page);
			do {
				if (!buffer_uptodate(bh))
					break;
				if (mapped != buffer_mapped(bh))
					break;
				ret += bh->b_size;
				if (ret >= pg_offset)
					break;
			} while ((bh = bh->b_this_page) != head);
		} else
			ret = mapped ? 0 : PAGE_CACHE_SIZE;
	}

	return ret;
}

STATIC size_t
xfs_probe_cluster(
	struct inode		*inode,
	struct page		*startpage,
	struct buffer_head	*bh,
	struct buffer_head	*head,
	int			mapped)
{
	struct pagevec		pvec;
	pgoff_t			tindex, tlast, tloff;
	size_t			total = 0;
	int			done = 0, i;

	/* First sum forwards in this page */
	do {
		if (!buffer_uptodate(bh) || (mapped != buffer_mapped(bh)))
			return total;
		total += bh->b_size;
	} while ((bh = bh->b_this_page) != head);

	/* if we reached the end of the page, sum forwards in following pages */
	tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
	tindex = startpage->index + 1;

	/* Prune this back to avoid pathological behavior */
	tloff = min(tlast, startpage->index + 64);

	pagevec_init(&pvec, 0);
	while (!done && tindex <= tloff) {
		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			size_t pg_offset, pg_len = 0;

			if (tindex == tlast) {
				pg_offset =
				    i_size_read(inode) & (PAGE_CACHE_SIZE - 1);
				if (!pg_offset) {
					done = 1;
					break;
				}
			} else
				pg_offset = PAGE_CACHE_SIZE;

			if (page->index == tindex && trylock_page(page)) {
				pg_len = xfs_probe_page(page, pg_offset, mapped);
				unlock_page(page);
			}

			if (!pg_len) {
				done = 1;
				break;
			}

			total += pg_len;
			tindex++;
		}

		pagevec_release(&pvec);
		cond_resched();
	}

	return total;
}

/*
 * Test if a given page is suitable for writing as part of an unwritten
 * or delayed allocate extent.
 */
STATIC int
xfs_is_delayed_page(
	struct page		*page,
	unsigned int		type)
{
	if (PageWriteback(page))
		return 0;

	if (page->mapping && page_has_buffers(page)) {
		struct buffer_head	*bh, *head;
		int			acceptable = 0;

		bh = head = page_buffers(page);
		do {
			if (buffer_unwritten(bh))
				acceptable = (type == IOMAP_UNWRITTEN);
			else if (buffer_delay(bh))
				acceptable = (type == IOMAP_DELAY);
			else if (buffer_dirty(bh) && buffer_mapped(bh))
				acceptable = (type == IOMAP_NEW);
			else
				break;
		} while ((bh = bh->b_this_page) != head);

		if (acceptable)
			return 1;
	}

	return 0;
}

/*
 * Allocate & map buffers for page given the extent map, then write it
 * out.  Except for the original page of a writepage, this is called on
 * delalloc/unwritten pages only; for the original page it is possible
 * that the page has no mapping at all.
 */
STATIC int
xfs_convert_page(
	struct inode		*inode,
	struct page		*page,
	loff_t			tindex,
	xfs_iomap_t		*mp,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc,
	int			startio,
	int			all_bh)
{
	struct buffer_head	*bh, *head;
	xfs_off_t		end_offset;
	unsigned long		p_offset;
	unsigned int		type;
	int			bbits = inode->i_blkbits;
	int			len, page_dirty;
	int			count = 0, done = 0, uptodate = 1;
	xfs_off_t		offset = page_offset(page);

	if (page->index != tindex)
		goto fail;
	if (!trylock_page(page))
		goto fail;
	if (PageWriteback(page))
		goto fail_unlock_page;
	if (page->mapping != inode->i_mapping)
		goto fail_unlock_page;
	if (!xfs_is_delayed_page(page, (*ioendp)->io_type))
		goto fail_unlock_page;

	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 *
	 * Derivation:
	 *
	 * End offset is the highest offset that this page should represent.
	 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
	 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
	 * hence give us the correct page_dirty count. On any other page,
	 * it will be zero and in that case we need page_dirty to be the
	 * count of buffers on the page.
	 */
	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
			i_size_read(inode));

	len = 1 << inode->i_blkbits;
	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
					PAGE_CACHE_SIZE);
	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
	page_dirty = p_offset / len;

	bh = head = page_buffers(page);
	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh))) {
			done = 1;
			continue;
		}

		if (buffer_unwritten(bh) || buffer_delay(bh)) {
			if (buffer_unwritten(bh))
				type = IOMAP_UNWRITTEN;
			else
				type = IOMAP_DELAY;

			if (!xfs_iomap_valid(mp, offset)) {
				done = 1;
				continue;
			}

			ASSERT(!(mp->iomap_flags & IOMAP_HOLE));
			ASSERT(!(mp->iomap_flags & IOMAP_DELAY));

			xfs_map_at_offset(bh, offset, bbits, mp);
			if (startio) {
				xfs_add_to_ioend(inode, bh, offset,
						type, ioendp, done);
			} else {
				set_buffer_dirty(bh);
				unlock_buffer(bh);
				mark_buffer_dirty(bh);
			}
			page_dirty--;
			count++;
		} else {
			type = IOMAP_NEW;
			if (buffer_mapped(bh) && all_bh && startio) {
				lock_buffer(bh);
				xfs_add_to_ioend(inode, bh, offset,
						type, ioendp, done);
				count++;
				page_dirty--;
			} else {
				done = 1;
			}
		}
	} while (offset += len, (bh = bh->b_this_page) != head);

	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (startio) {
		if (count) {
			struct backing_dev_info *bdi;

			bdi = inode->i_mapping->backing_dev_info;
			wbc->nr_to_write--;
			if (bdi_write_congested(bdi)) {
				wbc->encountered_congestion = 1;
				done = 1;
			} else if (wbc->nr_to_write <= 0) {
				done = 1;
			}
		}
		xfs_start_page_writeback(page, !page_dirty, count);
	}

	return done;
 fail_unlock_page:
	unlock_page(page);
 fail:
	return 1;
}

/*
 * Convert & write out a cluster of pages in the same extent as defined
 * by mp and following the start page.
 */
STATIC void
xfs_cluster_write(
	struct inode		*inode,
	pgoff_t			tindex,
	xfs_iomap_t		*iomapp,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc,
	int			startio,
	int			all_bh,
	pgoff_t			tlast)
{
	struct pagevec		pvec;
	int			done = 0, i;

	pagevec_init(&pvec, 0);
	while (!done && tindex <= tlast) {
		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			done = xfs_convert_page(inode, pvec.pages[i], tindex++,
					iomapp, ioendp, wbc, startio, all_bh);
			if (done)
				break;
		}

		pagevec_release(&pvec);
		cond_resched();
	}
}

/*
 * Calling this without startio set means we are being asked to make a dirty
 * page ready for freeing its buffers.  When called with startio set then
 * we are coming from writepage.
 *
 * When called with startio set it is important that we write the WHOLE
 * page if possible.
 * The bh->b_state's cannot know if any of the blocks or which block for
 * that matter are dirty due to mmap writes, and therefore bh uptodate is
 * only valid if the page itself isn't completely uptodate.  Some layers
 * may clear the page dirty flag prior to calling write page, under the
 * assumption the entire page will be written out; by not writing out the
 * whole page the page can be reused before all valid dirty data is
 * written out.  Note: in the case of a page that has been dirtied by
 * mapwrite but only partially set up by block_prepare_write, the
 * bh->b_state's will not agree and only the ones set up by BPW/BCW will
 * have valid state; thus the whole page must be written out.
 */

STATIC int
xfs_page_state_convert(
	struct inode	*inode,
	struct page	*page,
	struct writeback_control *wbc,
	int		startio,
	int		unmapped) /* also implies page uptodate */
{
	struct buffer_head	*bh, *head;
	xfs_iomap_t		iomap;
	xfs_ioend_t		*ioend = NULL, *iohead = NULL;
	loff_t			offset;
	unsigned long		p_offset = 0;
	unsigned int		type;
	__uint64_t		end_offset;
	pgoff_t			end_index, last_index, tlast;
	ssize_t			size, len;
	int			flags, err, iomap_valid = 0, uptodate = 1;
	int			page_dirty, count = 0;
	int			trylock = 0;
	int			all_bh = unmapped;

	if (startio) {
		if (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking)
			trylock |= BMAPI_TRYLOCK;
	}

	/* Is this page beyond the end of the file? */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_CACHE_SHIFT;
	last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
	if (page->index >= end_index) {
		if ((page->index >= end_index + 1) ||
		    !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
			if (startio)
				unlock_page(page);
			return 0;
		}
	}

	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 *
	 * Derivation:
	 *
	 * End offset is the highest offset that this page should represent.
	 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
	 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
	 * hence give us the correct page_dirty count. On any other page,
	 * it will be zero and in that case we need page_dirty to be the
	 * count of buffers on the page.
	 */
	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset);
	len = 1 << inode->i_blkbits;
	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
					PAGE_CACHE_SIZE);
	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
	page_dirty = p_offset / len;

	bh = head = page_buffers(page);
	offset = page_offset(page);
	flags = BMAPI_READ;
	type = IOMAP_NEW;

	/* TODO: cleanup count and page_dirty */

	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio) {
			/*
			 * the iomap is actually still valid, but the ioend
			 * isn't.  shouldn't happen too often.
			 */
			iomap_valid = 0;
			continue;
		}

		if (iomap_valid)
			iomap_valid = xfs_iomap_valid(&iomap, offset);

		/*
		 * First case, map an unwritten extent and prepare for
		 * extent state conversion transaction on completion.
		 *
		 * Second case, allocate space for a delalloc buffer.
		 * We can return EAGAIN here in the release page case.
		 *
		 * Third case, an unmapped buffer was found, and we are
		 * in a path where we need to write the whole page out.
		 */
		if (buffer_unwritten(bh) || buffer_delay(bh) ||
		    ((buffer_uptodate(bh) || PageUptodate(page)) &&
		     !buffer_mapped(bh) && (unmapped || startio))) {
			int new_ioend = 0;

			/*
			 * Make sure we don't use a read-only iomap
			 */
			if (flags == BMAPI_READ)
				iomap_valid = 0;

			if (buffer_unwritten(bh)) {
				type = IOMAP_UNWRITTEN;
				flags = BMAPI_WRITE | BMAPI_IGNSTATE;
			} else if (buffer_delay(bh)) {
				type = IOMAP_DELAY;
				flags = BMAPI_ALLOCATE | trylock;
			} else {
				type = IOMAP_NEW;
				flags = BMAPI_WRITE | BMAPI_MMAP;
			}

			if (!iomap_valid) {
				/*
				 * if we didn't have a valid mapping then we
				 * need to ensure that we put the new mapping
				 * in a new ioend structure. This needs to be
				 * done to ensure that the ioends correctly
				 * reflect the block mappings at io completion
				 * for unwritten extent conversion.
				 */
				new_ioend = 1;
				if (type == IOMAP_NEW) {
					size = xfs_probe_cluster(inode,
							page, bh, head, 0);
				} else {
					size = len;
				}

				err = xfs_map_blocks(inode, offset, size,
						&iomap, flags);
				if (err)
					goto error;
				iomap_valid = xfs_iomap_valid(&iomap, offset);
			}
			if (iomap_valid) {
				xfs_map_at_offset(bh, offset,
						inode->i_blkbits, &iomap);
				if (startio) {
					xfs_add_to_ioend(inode, bh, offset,
							type, &ioend,
							new_ioend);
				} else {
					set_buffer_dirty(bh);
					unlock_buffer(bh);
					mark_buffer_dirty(bh);
				}
				page_dirty--;
				count++;
			}
		} else if (buffer_uptodate(bh) && startio) {
			/*
			 * we got here because the buffer is already mapped.
			 * That means it must already have extents allocated
			 * underneath it. Map the extent by reading it.
			 */
			if (!iomap_valid || flags != BMAPI_READ) {
				flags = BMAPI_READ;
				size = xfs_probe_cluster(inode, page, bh,
								head, 1);
				err = xfs_map_blocks(inode, offset, size,
						&iomap, flags);
				if (err)
					goto error;
				iomap_valid = xfs_iomap_valid(&iomap, offset);
			}

			/*
			 * We set the type to IOMAP_NEW in case we are doing a
			 * small write at EOF that is extending the file but
			 * without needing an allocation. We need to update the
			 * file size on I/O completion in this case so it is
			 * the same case as having just allocated a new extent
			 * that we are writing into for the first time.
			 */
			type = IOMAP_NEW;
			if (trylock_buffer(bh)) {
				ASSERT(buffer_mapped(bh));
				if (iomap_valid)
					all_bh = 1;
				xfs_add_to_ioend(inode, bh, offset, type,
						&ioend, !iomap_valid);
				page_dirty--;
				count++;
			} else {
				iomap_valid = 0;
			}
		} else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
			   (unmapped || startio)) {
			iomap_valid = 0;
		}

		if (!iohead)
			iohead = ioend;

	} while (offset += len, ((bh = bh->b_this_page) != head));

	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (startio)
		xfs_start_page_writeback(page, 1, count);

	if (ioend && iomap_valid) {
		offset = (iomap.iomap_offset + iomap.iomap_bsize - 1) >>
					PAGE_CACHE_SHIFT;
		tlast = min_t(pgoff_t, offset, last_index);
		xfs_cluster_write(inode, page->index + 1, &iomap, &ioend,
					wbc, startio, all_bh, tlast);
	}

	if (iohead)
		xfs_submit_ioend(iohead);

	return page_dirty;

error:
	if (iohead)
		xfs_cancel_ioend(iohead);

	/*
	 * If it's delalloc and we have nowhere to put it,
	 * throw it away, unless the lower layers told
	 * us to try again.
	 */
	if (err != -EAGAIN) {
		if (!unmapped)
			block_invalidatepage(page, 0);
		ClearPageUptodate(page);
	}
	return err;
}

/*
 * writepage: Called from one of two places:
 *
 * 1. we are flushing a delalloc buffer head.
 *
 * 2. we are writing out a dirty page. Typically the page dirty
 *    state is cleared before we get here. In this case it is
 *    conceivable we have no buffer heads.
 *
 * For delalloc space on the page we need to allocate space and
 * flush it. For unmapped buffer heads on the page we should
 * allocate space if the page is uptodate. For any other dirty
 * buffer heads on the page we should flush them.
 *
 * If we detect that a transaction would be required to flush
 * the page, we have to check the process flags first, if we
 * are already in a transaction or disk I/O during allocations
 * is off, we need to fail the writepage and redirty the page.
 */

STATIC int
xfs_vm_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	int			error;
	int			need_trans;
	int			delalloc, unmapped, unwritten;
	struct inode		*inode = page->mapping->host;

	xfs_page_trace(XFS_WRITEPAGE_ENTER, inode, page, 0);

	/*
	 * We need a transaction if:
	 * 1. There are delalloc buffers on the page
	 * 2. The page is uptodate and we have unmapped buffers
	 * 3. The page is uptodate and we have no buffers
	 * 4. There are unwritten buffers on the page
	 */

	if (!page_has_buffers(page)) {
		unmapped = 1;
		need_trans = 1;
	} else {
		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
		if (!PageUptodate(page))
			unmapped = 0;
		need_trans = delalloc + unmapped + unwritten;
	}

	/*
	 * If we need a transaction and the process flags say
	 * we are already in a transaction, or no IO is allowed
	 * then mark the page dirty again and leave the page
	 * as is.
	 */
	if (current_test_flags(PF_FSTRANS) && need_trans)
		goto out_fail;

	/*
	 * Delay hooking up buffer heads until we have
	 * made our go/no-go decision.
	 */
	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << inode->i_blkbits, 0);

	/*
	 * Convert delayed allocate, unwritten or unmapped space
	 * to real space and flush out to disk.
	 */
	error = xfs_page_state_convert(inode, page, wbc, 1, unmapped);
	if (error == -EAGAIN)
		goto out_fail;
	if (unlikely(error < 0))
		goto out_unlock;

	return 0;

out_fail:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
out_unlock:
	unlock_page(page);
	return error;
}

STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	return generic_writepages(mapping, wbc);
}

/*
 * Called to move a page into cleanable state - and from there
 * to be released. Possibly the page is already clean. We always
 * have buffer heads in this call.
 *
 * Returns 0 if the page is ok to release, 1 otherwise.
 *
 * Possible scenarios are:
 *
 * 1. We are being called to release a page which has been written
 *    to via regular I/O. buffer heads will be dirty and possibly
 *    delalloc. If no delalloc buffer heads in this case then we
 *    can just return zero.
 *
 * 2. We are called to release a page which has been written via
 *    mmap, all we need to do is ensure there is no delalloc
 *    state in the buffer heads, if not we can let the caller
 *    free them and we should come back later via writepage.
 */
STATIC int
xfs_vm_releasepage(
	struct page		*page,
	gfp_t			gfp_mask)
{
	struct inode		*inode = page->mapping->host;
	int			dirty, delalloc, unmapped, unwritten;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 1,
	};

	xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, 0);

	if (!page_has_buffers(page))
		return 0;

	xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
	if (!delalloc && !unwritten)
		goto free_buffers;

	if (!(gfp_mask & __GFP_FS))
		return 0;

	/* If we are already inside a transaction or the thread cannot
	 * do I/O, we cannot release this page.
	 */
	if (current_test_flags(PF_FSTRANS))
		return 0;

	/*
	 * Convert delalloc space to real space, do not flush the
	 * data out to disk, that will be done by the caller.
	 * Never need to allocate space here - we will always
	 * come back to writepage in that case.
	 */
	dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0);
	if (dirty == 0 && !unwritten)
		goto free_buffers;
	return 0;

free_buffers:
	return try_to_free_buffers(page);
}

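/*
 * Shared implementation behind the get_blocks callbacks: map a
 * logical file block to a disk mapping in *bh_result, for both the
 * buffered and the direct (see the "direct" argument) I/O paths.
 */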
STATIC int
__xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create,
	int			direct,
	bmapi_flags_t		flags)
{
	xfs_iomap_t		iomap;
	xfs_off_t		offset;
	ssize_t			size;
	int			niomap = 1;
	int			error;

	offset = (xfs_off_t)iblock << inode->i_blkbits;
	ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
	size = bh_result->b_size;
	error = xfs_iomap(XFS_I(inode), offset, size,
			  create ? flags : BMAPI_READ, &iomap, &niomap);
	if (error)
		return -error;
	if (niomap == 0)
		return 0;

	if (iomap.iomap_bn != IOMAP_DADDR_NULL) {
		/*
		 * For unwritten extents do not report a disk address on
		 * the read case (treat as if we're reading into a hole).
		 */
		if (create || !(iomap.iomap_flags & IOMAP_UNWRITTEN)) {
			xfs_map_buffer(bh_result, &iomap, offset,
				       inode->i_blkbits);
		}
		if (create && (iomap.iomap_flags & IOMAP_UNWRITTEN)) {
			if (direct)
				bh_result->b_private = inode;
			set_buffer_unwritten(bh_result);
		}
	}

	/*
	 * If this is a realtime file, data may be on a different device
	 * to that pointed to from the buffer_head b_bdev currently.
	 */
	bh_result->b_bdev = iomap.iomap_target->bt_bdev;

	/*
	 * If we previously allocated a block out beyond eof and we are now
	 * coming back to use it then we will need to flag it as new even if it
	 * has a disk address.
	 *
	 * With sub-block writes into unwritten extents we also need to mark
	 * the buffer as new so that the unwritten parts of the buffer get
	 * correctly zeroed.
	 */
	if (create &&
	    ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
	     (offset >= i_size_read(inode)) ||
	     (iomap.iomap_flags & (IOMAP_NEW|IOMAP_UNWRITTEN))))
		set_buffer_new(bh_result);

	if (iomap.iomap_flags & IOMAP_DELAY) {
		BUG_ON(direct);
		if (create) {
			set_buffer_uptodate(bh_result);
			set_buffer_mapped(bh_result);
			set_buffer_delay(bh_result);
		}
	}

	/*
	 * Direct I/O, and buffered requests spanning more than one
	 * block, want to know how much of the mapping they can use:
	 * trim b_size to the part of the request this mapping covers.
	 */
	if (direct || size > (1 << inode->i_blkbits)) {
		ASSERT(iomap.iomap_bsize - iomap.iomap_delta > 0);
		offset = min_t(xfs_off_t,
			       iomap.iomap_bsize - iomap.iomap_delta, size);
		bh_result->b_size = (ssize_t)min_t(xfs_off_t, LONG_MAX, offset);
	}

	return 0;
}

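/*
 * get_blocks callback for buffered I/O and the bmap path.  BMAPI_WRITE
 * is only passed down to xfs_iomap() when @create is set; plain reads
 * are mapped with BMAPI_READ.
 */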
int
xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock,
				bh_result, create, 0, BMAPI_WRITE);
}

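/*
 * Variant of the above for direct I/O.  BMAPI_DIRECT asks xfs_iomap()
 * for real allocations up front (delayed allocation is of no use to
 * direct I/O), and direct == 1 makes __xfs_get_blocks stash the inode
 * in b_private on unwritten extents so the completion handler knows
 * to convert them.
 */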
STATIC int
xfs_get_blocks_direct(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock,
				bh_result, create, 1, BMAPI_WRITE|BMAPI_DIRECT);
}

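/*
 * Completion handler for direct I/O, called by the generic direct I/O
 * code once all bios for the request have finished.  See the long
 * comment below for why unwritten extent conversion is deferred to a
 * workqueue (or waited on) here.
 */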
STATIC void
xfs_end_io_direct(
	struct kiocb		*iocb,
	loff_t			offset,
	ssize_t			size,
	void			*private)
{
	xfs_ioend_t		*ioend = iocb->private;

	/*
	 * Non-NULL private data means we need to issue a transaction to
	 * convert a range from unwritten to written extents.  This needs
	 * to happen from process context but aio+dio I/O completion
	 * happens from irq context so we need to defer it to a workqueue.
	 * This is not necessary for synchronous direct I/O, but we do
	 * it anyway to keep the code uniform and simpler.
	 *
	 * Well, if only it were that simple.  Because synchronous direct I/O
	 * requires extent conversion to occur *before* we return to userspace,
	 * we have to wait for extent conversion to complete.  Look at the
	 * iocb that has been passed to us to determine if this is AIO or
	 * not.  If it is synchronous, tell xfs_finish_ioend() to kick the
	 * workqueue and wait for it to complete.
	 *
	 * The core direct I/O code might be changed to always call the
	 * completion handler in the future, in which case all this can
	 * go away.
	 */
	ioend->io_offset = offset;
	ioend->io_size = size;
	if (ioend->io_type == IOMAP_READ) {
		xfs_finish_ioend(ioend, 0);
	} else if (private && size > 0) {
		xfs_finish_ioend(ioend, is_sync_kiocb(iocb));
	} else {
		/*
		 * A direct I/O write ioend starts its life in unwritten
		 * state in case it maps an unwritten extent.  This write
		 * didn't map an unwritten extent, so switch its completion
		 * handler.
		 */
		INIT_WORK(&ioend->io_work, xfs_end_bio_written);
		xfs_finish_ioend(ioend, 0);
	}

	/*
	 * blockdev_direct_IO can return an error even after the I/O
	 * completion handler was called.  Thus we need to protect
	 * against double-freeing.
	 */
	iocb->private = NULL;
}

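/*
 * Direct I/O entry point.  Writes start out with an IOMAP_UNWRITTEN
 * ioend (xfs_end_io_direct switches the completion handler later if
 * no unwritten extent was actually involved) and use the own-locking
 * blockdev_direct_IO variant, since XFS does its own locking around
 * direct writes; reads get an IOMAP_READ ioend and the no-locking
 * variant.
 */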
STATIC ssize_t
xfs_vm_direct_IO(
	int			rw,
	struct kiocb		*iocb,
	const struct iovec	*iov,
	loff_t			offset,
	unsigned long		nr_segs)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	struct block_device	*bdev;
	ssize_t			ret;

	bdev = xfs_find_bdev_for_inode(XFS_I(inode));

	if (rw == WRITE) {
		iocb->private = xfs_alloc_ioend(inode, IOMAP_UNWRITTEN);
		ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
			bdev, iov, offset, nr_segs,
			xfs_get_blocks_direct,
			xfs_end_io_direct);
	} else {
		iocb->private = xfs_alloc_ioend(inode, IOMAP_READ);
		ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
			bdev, iov, offset, nr_segs,
			xfs_get_blocks_direct,
			xfs_end_io_direct);
	}

	if (unlikely(ret != -EIOCBQUEUED && iocb->private))
		xfs_destroy_ioend(iocb->private);
	return ret;
}

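/*
 * Prepare for a buffered write: let the generic code find or create
 * the page and instantiate any buffers it needs, with xfs_get_blocks
 * supplying the block mappings.
 */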
STATIC int
xfs_vm_write_begin(
	struct file		*file,
	struct address_space	*mapping,
	loff_t			pos,
	unsigned		len,
	unsigned		flags,
	struct page		**pagep,
	void			**fsdata)
{
	*pagep = NULL;
	return block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				 xfs_get_blocks);
}

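/*
 * Return the on-disk block backing @block (the FIBMAP ioctl).  Flush
 * dirty pages first, under the iolock, so that delayed allocations
 * are converted to real extents and generic_block_bmap() sees the
 * final block mapping rather than unallocated delalloc state.
 */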
STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);

	xfs_itrace_entry(ip);
	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	xfs_flush_pages(ip, (xfs_off_t)0, -1, 0, FI_REMAPF);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return generic_block_bmap(mapping, block, xfs_get_blocks);
}

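/*
 * Read a single page, or a batch of pages for readahead, through the
 * generic mpage code with xfs_get_blocks supplying the mappings.
 */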
STATIC int
xfs_vm_readpage(
	struct file		*unused,
	struct page		*page)
{
	return mpage_readpage(page, xfs_get_blocks);
}

STATIC int
xfs_vm_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
}

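/*
 * Called when (part of) a page is being removed from the page cache:
 * trace the call, then let block_invalidatepage() clean up the buffer
 * heads for the invalidated range.
 */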
STATIC void
xfs_vm_invalidatepage(
	struct page		*page,
	unsigned long		offset)
{
	xfs_page_trace(XFS_INVALIDPAGE_ENTER,
		       page->mapping->host, page, offset);
	block_invalidatepage(page, offset);
}

const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readpages		= xfs_vm_readpages,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.sync_page		= block_sync_page,
	.releasepage		= xfs_vm_releasepage,
	.invalidatepage		= xfs_vm_invalidatepage,
	.write_begin		= xfs_vm_write_begin,
	.write_end		= generic_write_end,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= xfs_vm_direct_IO,
	.migratepage		= buffer_migrate_page,
};