/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_trans.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_error.h"
#include "xfs_rw.h"
#include "xfs_iomap.h"
#include "xfs_vnodeops.h"
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>

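/*
 * Walk the buffer_heads attached to a page and report whether any of
 * them are delayed allocate, unmapped (uptodate but not yet mapped to
 * disk) or unwritten.  The writepage and releasepage paths below use
 * this to decide whether flushing the page would require a transaction.
 */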
STATIC void
xfs_count_page_state(
	struct page		*page,
	int			*delalloc,
	int			*unmapped,
	int			*unwritten)
{
	struct buffer_head	*bh, *head;

	*delalloc = *unmapped = *unwritten = 0;

	bh = head = page_buffers(page);
	do {
		if (buffer_uptodate(bh) && !buffer_mapped(bh))
			(*unmapped) = 1;
		else if (buffer_unwritten(bh))
			(*unwritten) = 1;
		else if (buffer_delay(bh))
			(*delalloc) = 1;
	} while ((bh = bh->b_this_page) != head);
}

#if defined(XFS_RW_TRACE)
void
xfs_page_trace(
	int		tag,
	struct inode	*inode,
	struct page	*page,
	unsigned long	pgoff)
{
	xfs_inode_t	*ip;
	bhv_vnode_t	*vp = vn_from_inode(inode);
	loff_t		isize = i_size_read(inode);
	loff_t		offset = page_offset(page);
	int		delalloc = -1, unmapped = -1, unwritten = -1;

	if (page_has_buffers(page))
		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);

	ip = xfs_vtoi(vp);
	if (!ip->i_rwtrace)
		return;

	ktrace_enter(ip->i_rwtrace,
		(void *)((unsigned long)tag),
		(void *)ip,
		(void *)inode,
		(void *)page,
		(void *)pgoff,
		(void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
		(void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
		(void *)((unsigned long)((isize >> 32) & 0xffffffff)),
		(void *)((unsigned long)(isize & 0xffffffff)),
		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
		(void *)((unsigned long)(offset & 0xffffffff)),
		(void *)((unsigned long)delalloc),
		(void *)((unsigned long)unmapped),
		(void *)((unsigned long)unwritten),
		(void *)((unsigned long)current_pid()),
		(void *)NULL);
}
#else
#define xfs_page_trace(tag, inode, page, pgoff)
#endif

/*
 * Schedule IO completion handling on an xfsdatad if this was
 * the final hold on this ioend.  If we are asked to wait,
 * flush the workqueue.
 */
STATIC void
xfs_finish_ioend(
	xfs_ioend_t	*ioend,
	int		wait)
{
	if (atomic_dec_and_test(&ioend->io_remaining)) {
		queue_work(xfsdatad_workqueue, &ioend->io_work);
		if (wait)
			flush_workqueue(xfsdatad_workqueue);
	}
}

/*
 * We're now finished for good with this ioend structure.
 * Update the page state via the associated buffer_heads,
 * release holds on the inode and bio, and finally free
 * up memory.  Do not use the ioend after this.
 */
STATIC void
xfs_destroy_ioend(
	xfs_ioend_t		*ioend)
{
	struct buffer_head	*bh, *next;

	for (bh = ioend->io_buffer_head; bh; bh = next) {
		next = bh->b_private;
		bh->b_end_io(bh, !ioend->io_error);
	}
	if (unlikely(ioend->io_error)) {
		vn_ioerror(XFS_I(ioend->io_inode), ioend->io_error,
				__FILE__, __LINE__);
	}
	vn_iowake(XFS_I(ioend->io_inode));
	mempool_free(ioend, xfs_ioend_pool);
}

/*
 * Update on-disk file size now that data has been written to disk.
 * The current in-memory file size is i_size.  If a write is beyond
 * eof, io_new_size will be the intended file size until i_size is
 * updated.  If this write does not extend all the way to the valid
 * file size then restrict this update to the end of the write.
 */
STATIC void
xfs_setfilesize(
	xfs_ioend_t		*ioend)
{
	xfs_inode_t		*ip = XFS_I(ioend->io_inode);
	xfs_fsize_t		isize;
	xfs_fsize_t		bsize;

	ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
	ASSERT(ioend->io_type != IOMAP_READ);

	if (unlikely(ioend->io_error))
		return;

	bsize = ioend->io_offset + ioend->io_size;

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	isize = MAX(ip->i_size, ip->i_iocore.io_new_size);
	isize = MIN(isize, bsize);

	if (ip->i_d.di_size < isize) {
		ip->i_d.di_size = isize;
		ip->i_update_core = 1;
		ip->i_update_size = 1;
		mark_inode_dirty_sync(ioend->io_inode);
	}

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
}

/*
 * Buffered IO write completion for delayed allocate extents.
 */
STATIC void
xfs_end_bio_delalloc(
	struct work_struct	*work)
{
	xfs_ioend_t		*ioend =
		container_of(work, xfs_ioend_t, io_work);

	xfs_setfilesize(ioend);
	xfs_destroy_ioend(ioend);
}

/*
 * Buffered IO write completion for regular, written extents.
 */
STATIC void
xfs_end_bio_written(
	struct work_struct	*work)
{
	xfs_ioend_t		*ioend =
		container_of(work, xfs_ioend_t, io_work);

	xfs_setfilesize(ioend);
	xfs_destroy_ioend(ioend);
}

/*
 * IO write completion for unwritten extents.
 *
 * Issue transactions to convert a buffer range from unwritten
 * to written extents.
 */
STATIC void
xfs_end_bio_unwritten(
	struct work_struct	*work)
{
	xfs_ioend_t		*ioend =
		container_of(work, xfs_ioend_t, io_work);
	xfs_off_t		offset = ioend->io_offset;
	size_t			size = ioend->io_size;

	if (likely(!ioend->io_error)) {
		xfs_bmap(XFS_I(ioend->io_inode), offset, size,
				BMAPI_UNWRITTEN, NULL, NULL);
		xfs_setfilesize(ioend);
	}
	xfs_destroy_ioend(ioend);
}

/*
 * IO read completion for regular, written extents.
 */
STATIC void
xfs_end_bio_read(
	struct work_struct	*work)
{
	xfs_ioend_t		*ioend =
		container_of(work, xfs_ioend_t, io_work);

	xfs_destroy_ioend(ioend);
}

/*
 * Allocate and initialise an IO completion structure.
 * We need to track unwritten extent write completion here initially.
 * We'll need to extend this for updating the ondisk inode size later
 * (vs. incore size).
 */
STATIC xfs_ioend_t *
xfs_alloc_ioend(
	struct inode		*inode,
	unsigned int		type)
{
	xfs_ioend_t		*ioend;

	ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);

	/*
	 * Set the count to 1 initially: this prevents an I/O completion
	 * callback from firing, and hence the completion routine from
	 * running too early, before we have started all the I/O.
	 */
	atomic_set(&ioend->io_remaining, 1);
	ioend->io_error = 0;
	ioend->io_list = NULL;
	ioend->io_type = type;
	ioend->io_inode = inode;
	ioend->io_buffer_head = NULL;
	ioend->io_buffer_tail = NULL;
	atomic_inc(&XFS_I(ioend->io_inode)->i_iocount);
	ioend->io_offset = 0;
	ioend->io_size = 0;

	if (type == IOMAP_UNWRITTEN)
		INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten);
	else if (type == IOMAP_DELAY)
		INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc);
	else if (type == IOMAP_READ)
		INIT_WORK(&ioend->io_work, xfs_end_bio_read);
	else
		INIT_WORK(&ioend->io_work, xfs_end_bio_written);

	return ioend;
}

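/*
 * Map a range of the file to disk blocks via xfs_bmap(), requesting a
 * single mapping.  Mappings made for a write or allocation also mark
 * the inode XFS_IMODIFIED.  Note the negated return value: xfs_bmap()
 * hands back a positive errno, which is flipped for the generic code.
 */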
STATIC int
xfs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	ssize_t			count,
	xfs_iomap_t		*mapp,
	int			flags)
{
	xfs_inode_t		*ip = XFS_I(inode);
	int			error, nmaps = 1;

	error = xfs_bmap(ip, offset, count,
			flags, mapp, &nmaps);
	if (!error && (flags & (BMAPI_WRITE|BMAPI_ALLOCATE)))
		xfs_iflags_set(ip, XFS_IMODIFIED);
	return -error;
}

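/*
 * Return true if offset lies within the range covered by this iomap.
 */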
STATIC_INLINE int
xfs_iomap_valid(
	xfs_iomap_t		*iomapp,
	loff_t			offset)
{
	return offset >= iomapp->iomap_offset &&
		offset < iomapp->iomap_offset + iomapp->iomap_bsize;
}

/*
 * BIO completion handler for buffered IO.
 */
STATIC int
xfs_end_bio(
	struct bio		*bio,
	unsigned int		bytes_done,
	int			error)
{
	xfs_ioend_t		*ioend = bio->bi_private;

	if (bio->bi_size)
		return 1;

	ASSERT(atomic_read(&bio->bi_cnt) >= 1);
	ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;

	/* Toss bio and pass work off to an xfsdatad thread */
	bio->bi_private = NULL;
	bio->bi_end_io = NULL;
	bio_put(bio);

	xfs_finish_ioend(ioend, 0);
	return 0;
}

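/*
 * Hook a bio up to its ioend and send it to disk.  The extra hold taken
 * on io_remaining keeps the ioend alive until this bio completes; it is
 * dropped again by xfs_end_bio() via xfs_finish_ioend().
 */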
STATIC void
xfs_submit_ioend_bio(
	xfs_ioend_t	*ioend,
	struct bio	*bio)
{
	atomic_inc(&ioend->io_remaining);

	bio->bi_private = ioend;
	bio->bi_end_io = xfs_end_bio;

	submit_bio(WRITE, bio);
	ASSERT(!bio_flagged(bio, BIO_EOPNOTSUPP));
	bio_put(bio);
}

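/*
 * Allocate a bio sized for the device's maximum vector count.  Under
 * memory pressure a large GFP_NOIO allocation may fail, so halve the
 * vector count and retry until an allocation succeeds.
 */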
STATIC struct bio *
xfs_alloc_ioend_bio(
	struct buffer_head	*bh)
{
	struct bio		*bio;
	int			nvecs = bio_get_nr_vecs(bh->b_bdev);

	do {
		bio = bio_alloc(GFP_NOIO, nvecs);
		nvecs >>= 1;
	} while (!bio);

	ASSERT(bio->bi_private == NULL);
	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	bio_get(bio);
	return bio;
}

STATIC void
xfs_start_buffer_writeback(
	struct buffer_head	*bh)
{
	ASSERT(buffer_mapped(bh));
	ASSERT(buffer_locked(bh));
	ASSERT(!buffer_delay(bh));
	ASSERT(!buffer_unwritten(bh));

	mark_buffer_async_write(bh);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
}

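/*
 * Move a locked page into writeback state and unlock it.  If no buffers
 * were actually submitted, end writeback immediately and bump
 * wbc->pages_skipped since we did not write the page after all.
 */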
STATIC void
xfs_start_page_writeback(
	struct page		*page,
	struct writeback_control *wbc,
	int			clear_dirty,
	int			buffers)
{
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));
	if (clear_dirty)
		clear_page_dirty_for_io(page);
	set_page_writeback(page);
	unlock_page(page);
	if (!buffers) {
		end_page_writeback(page);
		wbc->pages_skipped++;	/* We didn't write this page */
	}
}

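/*
 * Thin wrapper around bio_add_page() for a single buffer_head; returns
 * the number of bytes added, which is bh->b_size when the buffer fits.
 */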
static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}

/*
 * Submit all of the bios for all of the ioends we have saved up, covering the
 * initial writepage page and also any probed pages.
 *
 * Because we may have multiple ioends spanning a page, we need to start
 * writeback on all the buffers before we submit them for I/O.  If we mark the
 * buffers as we go, we can end up with a page that only has some of its
 * buffers marked async write, and I/O completion on those can occur before we
 * mark the other buffers async write.
 *
 * The end result of this is that we trip a bug in end_page_writeback() because
 * we call it twice for the one page as the code in end_buffer_async_write()
 * assumes that all buffers on the page are started at the same time.
 *
 * The fix is two passes across the ioend list - one to start writeback on the
 * buffer_heads, and then submit them for I/O on the second pass.
 */
STATIC void
xfs_submit_ioend(
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*head = ioend;
	xfs_ioend_t		*next;
	struct buffer_head	*bh;
	struct bio		*bio;
	sector_t		lastblock = 0;

	/* Pass 1 - start writeback */
	do {
		next = ioend->io_list;
		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
			xfs_start_buffer_writeback(bh);
		}
	} while ((ioend = next) != NULL);

	/* Pass 2 - submit I/O */
	ioend = head;
	do {
		next = ioend->io_list;
		bio = NULL;

		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {

			if (!bio) {
 retry:
				bio = xfs_alloc_ioend_bio(bh);
			} else if (bh->b_blocknr != lastblock + 1) {
				xfs_submit_ioend_bio(ioend, bio);
				goto retry;
			}

			if (bio_add_buffer(bio, bh) != bh->b_size) {
				xfs_submit_ioend_bio(ioend, bio);
				goto retry;
			}

			lastblock = bh->b_blocknr;
		}
		if (bio)
			xfs_submit_ioend_bio(ioend, bio);
		xfs_finish_ioend(ioend, 0);
	} while ((ioend = next) != NULL);
}

/*
 * Cancel submission of all buffer_heads so far in this endio.
 * Toss the endio too.  Only ever called for the initial page
 * in a writepage request, so only ever one page.
 */
STATIC void
xfs_cancel_ioend(
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*next;
	struct buffer_head	*bh, *next_bh;

	do {
		next = ioend->io_list;
		bh = ioend->io_buffer_head;
		do {
			next_bh = bh->b_private;
			clear_buffer_async_write(bh);
			unlock_buffer(bh);
		} while ((bh = next_bh) != NULL);

		vn_iowake(XFS_I(ioend->io_inode));
		mempool_free(ioend, xfs_ioend_pool);
	} while ((ioend = next) != NULL);
}

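/*
 * The ioends built up during writeout form two levels of singly linked
 * lists: ioends chain through io_list, and each ioend carries its
 * buffer_heads chained through b_private, roughly:
 *
 *	iohead --> ioend --> ioend --> NULL		(io_list)
 *		     |
 *		     +--> bh --> bh --> NULL		(io_buffer_head,
 *							 chained via b_private)
 */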
/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 * The ioend the buffer was added to is returned via *result.
 */
STATIC void
xfs_add_to_ioend(
	struct inode		*inode,
	struct buffer_head	*bh,
	xfs_off_t		offset,
	unsigned int		type,
	xfs_ioend_t		**result,
	int			need_ioend)
{
	xfs_ioend_t		*ioend = *result;

	if (!ioend || need_ioend || type != ioend->io_type) {
		xfs_ioend_t	*previous = *result;

		ioend = xfs_alloc_ioend(inode, type);
		ioend->io_offset = offset;
		ioend->io_buffer_head = bh;
		ioend->io_buffer_tail = bh;
		if (previous)
			previous->io_list = ioend;
		*result = ioend;
	} else {
		ioend->io_buffer_tail->b_private = bh;
		ioend->io_buffer_tail = bh;
	}

	bh->b_private = NULL;
	ioend->io_size += bh->b_size;
}

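/*
 * Translate a file offset into a block number for bh.  iomap_bn is held
 * in 512-byte basic blocks (BBSHIFT == 9), so shift it down to
 * filesystem-block units before adding the offset of this buffer within
 * the mapping.
 */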
STATIC void
xfs_map_buffer(
	struct buffer_head	*bh,
	xfs_iomap_t		*mp,
	xfs_off_t		offset,
	uint			block_bits)
{
	sector_t		bn;

	ASSERT(mp->iomap_bn != IOMAP_DADDR_NULL);

	bn = (mp->iomap_bn >> (block_bits - BBSHIFT)) +
	      ((offset - mp->iomap_offset) >> block_bits);

	ASSERT(bn || (mp->iomap_flags & IOMAP_REALTIME));

	bh->b_blocknr = bn;
	set_buffer_mapped(bh);
}

STATIC void
xfs_map_at_offset(
	struct buffer_head	*bh,
	loff_t			offset,
	int			block_bits,
	xfs_iomap_t		*iomapp)
{
	ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE));
	ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY));

	lock_buffer(bh);
	xfs_map_buffer(bh, iomapp, offset, block_bits);
	bh->b_bdev = iomapp->iomap_target->bt_bdev;
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
}

/*
 * Look for a page at index that is suitable for clustering.
 */
STATIC unsigned int
xfs_probe_page(
	struct page		*page,
	unsigned int		pg_offset,
	int			mapped)
{
	int			ret = 0;

	if (PageWriteback(page))
		return 0;

	if (page->mapping && PageDirty(page)) {
		if (page_has_buffers(page)) {
			struct buffer_head	*bh, *head;

			bh = head = page_buffers(page);
			do {
				if (!buffer_uptodate(bh))
					break;
				if (mapped != buffer_mapped(bh))
					break;
				ret += bh->b_size;
				if (ret >= pg_offset)
					break;
			} while ((bh = bh->b_this_page) != head);
		} else
			ret = mapped ? 0 : PAGE_CACHE_SIZE;
	}

	return ret;
}

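/*
 * Measure how much contiguous data, starting with the buffers of the
 * current page and scanning up to 64 pages beyond it, shares the given
 * mapped/unmapped state and so is eligible for clustered writeout with
 * the current mapping.
 */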
STATIC size_t
xfs_probe_cluster(
	struct inode		*inode,
	struct page		*startpage,
	struct buffer_head	*bh,
	struct buffer_head	*head,
	int			mapped)
{
	struct pagevec		pvec;
	pgoff_t			tindex, tlast, tloff;
	size_t			total = 0;
	int			done = 0, i;

	/* First sum forwards in this page */
	do {
		if (!buffer_uptodate(bh) || (mapped != buffer_mapped(bh)))
			return total;
		total += bh->b_size;
	} while ((bh = bh->b_this_page) != head);

	/* if we reached the end of the page, sum forwards in following pages */
	tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
	tindex = startpage->index + 1;

	/* Prune this back to avoid pathological behavior */
	tloff = min(tlast, startpage->index + 64);

	pagevec_init(&pvec, 0);
	while (!done && tindex <= tloff) {
		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			size_t pg_offset, pg_len = 0;

			if (tindex == tlast) {
				pg_offset =
				    i_size_read(inode) & (PAGE_CACHE_SIZE - 1);
				if (!pg_offset) {
					done = 1;
					break;
				}
			} else
				pg_offset = PAGE_CACHE_SIZE;

			if (page->index == tindex && !TestSetPageLocked(page)) {
				pg_len = xfs_probe_page(page, pg_offset, mapped);
				unlock_page(page);
			}

			if (!pg_len) {
				done = 1;
				break;
			}

			total += pg_len;
			tindex++;
		}

		pagevec_release(&pvec);
		cond_resched();
	}

	return total;
}

/*
 * Test if a given page is suitable for writing as part of an unwritten
 * or delayed allocate extent.
 */
STATIC int
xfs_is_delayed_page(
	struct page		*page,
	unsigned int		type)
{
	if (PageWriteback(page))
		return 0;

	if (page->mapping && page_has_buffers(page)) {
		struct buffer_head	*bh, *head;
		int			acceptable = 0;

		bh = head = page_buffers(page);
		do {
			if (buffer_unwritten(bh))
				acceptable = (type == IOMAP_UNWRITTEN);
			else if (buffer_delay(bh))
				acceptable = (type == IOMAP_DELAY);
			else if (buffer_dirty(bh) && buffer_mapped(bh))
				acceptable = (type == IOMAP_NEW);
			else
				break;
		} while ((bh = bh->b_this_page) != head);

		if (acceptable)
			return 1;
	}

	return 0;
}

/*
 * Allocate & map buffers for page given the extent map.  Write it out.
 * Except for the original page of a writepage, this is called on
 * delalloc/unwritten pages only; for the original page it is possible
 * that the page has no mapping at all.
 */
STATIC int
xfs_convert_page(
	struct inode		*inode,
	struct page		*page,
	loff_t			tindex,
	xfs_iomap_t		*mp,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc,
	int			startio,
	int			all_bh)
{
	struct buffer_head	*bh, *head;
	xfs_off_t		end_offset;
	unsigned long		p_offset;
	unsigned int		type;
	int			bbits = inode->i_blkbits;
	int			len, page_dirty;
	int			count = 0, done = 0, uptodate = 1;
	xfs_off_t		offset = page_offset(page);

	if (page->index != tindex)
		goto fail;
	if (TestSetPageLocked(page))
		goto fail;
	if (PageWriteback(page))
		goto fail_unlock_page;
	if (page->mapping != inode->i_mapping)
		goto fail_unlock_page;
	if (!xfs_is_delayed_page(page, (*ioendp)->io_type))
		goto fail_unlock_page;

	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 *
	 * Derivation:
	 *
	 * End offset is the highest offset that this page should represent.
	 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
	 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
	 * hence give us the correct page_dirty count. On any other page,
	 * it will be zero and in that case we need page_dirty to be the
	 * count of buffers on the page.
	 */
	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
			i_size_read(inode));

	len = 1 << inode->i_blkbits;
	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
					PAGE_CACHE_SIZE);
	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
	page_dirty = p_offset / len;

	bh = head = page_buffers(page);
	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh))) {
			done = 1;
			continue;
		}

		if (buffer_unwritten(bh) || buffer_delay(bh)) {
			if (buffer_unwritten(bh))
				type = IOMAP_UNWRITTEN;
			else
				type = IOMAP_DELAY;

			if (!xfs_iomap_valid(mp, offset)) {
				done = 1;
				continue;
			}

			ASSERT(!(mp->iomap_flags & IOMAP_HOLE));
			ASSERT(!(mp->iomap_flags & IOMAP_DELAY));

			xfs_map_at_offset(bh, offset, bbits, mp);
			if (startio) {
				xfs_add_to_ioend(inode, bh, offset,
						type, ioendp, done);
			} else {
				set_buffer_dirty(bh);
				unlock_buffer(bh);
				mark_buffer_dirty(bh);
			}
			page_dirty--;
			count++;
		} else {
			type = IOMAP_NEW;
			if (buffer_mapped(bh) && all_bh && startio) {
				lock_buffer(bh);
				xfs_add_to_ioend(inode, bh, offset,
						type, ioendp, done);
				count++;
				page_dirty--;
			} else {
				done = 1;
			}
		}
	} while (offset += len, (bh = bh->b_this_page) != head);

	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (startio) {
		if (count) {
			struct backing_dev_info *bdi;

			bdi = inode->i_mapping->backing_dev_info;
			wbc->nr_to_write--;
			if (bdi_write_congested(bdi)) {
				wbc->encountered_congestion = 1;
				done = 1;
			} else if (wbc->nr_to_write <= 0) {
				done = 1;
			}
		}
		xfs_start_page_writeback(page, wbc, !page_dirty, count);
	}

	return done;
 fail_unlock_page:
	unlock_page(page);
 fail:
	return 1;
}

/*
 * Convert & write out a cluster of pages in the same extent as defined
 * by mp and following the start page.
 */
STATIC void
xfs_cluster_write(
	struct inode		*inode,
	pgoff_t			tindex,
	xfs_iomap_t		*iomapp,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc,
	int			startio,
	int			all_bh,
	pgoff_t			tlast)
{
	struct pagevec		pvec;
	int			done = 0, i;

	pagevec_init(&pvec, 0);
	while (!done && tindex <= tlast) {
		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			done = xfs_convert_page(inode, pvec.pages[i], tindex++,
					iomapp, ioendp, wbc, startio, all_bh);
			if (done)
				break;
		}

		pagevec_release(&pvec);
		cond_resched();
	}
}

/*
 * Calling this without startio set means we are being asked to make a dirty
 * page ready for freeing its buffers.  When called with startio set then
 * we are coming from writepage.
 *
 * When called with startio set it is important that we write the WHOLE
 * page if possible.
 * The bh->b_state's cannot know which blocks, if any, are dirty due to
 * mmap writes, and therefore bh uptodate is only valid if the page
 * itself isn't completely uptodate.  Some layers may clear the page
 * dirty flag prior to calling writepage, under the assumption the
 * entire page will be written out; by not writing out the whole page
 * the page can be reused before all valid dirty data is written out.
 * Note: in the case of a page that has been dirtied by mapwrite but
 * only partially set up by block_prepare_write, the bh states will not
 * agree and only the ones set up by BPW/BCW will have valid state; thus
 * the whole page must be written out.
 */

STATIC int
xfs_page_state_convert(
	struct inode	*inode,
	struct page	*page,
	struct writeback_control *wbc,
	int		startio,
	int		unmapped) /* also implies page uptodate */
{
	struct buffer_head	*bh, *head;
	xfs_iomap_t		iomap;
	xfs_ioend_t		*ioend = NULL, *iohead = NULL;
	loff_t			offset;
	unsigned long		p_offset = 0;
	unsigned int		type;
	__uint64_t		end_offset;
	pgoff_t			end_index, last_index, tlast;
	ssize_t			size, len;
	int			flags, err, iomap_valid = 0, uptodate = 1;
	int			page_dirty, count = 0;
	int			trylock = 0;
	int			all_bh = unmapped;

	if (startio) {
		if (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking)
			trylock |= BMAPI_TRYLOCK;
	}

	/* Is this page beyond the end of the file? */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_CACHE_SHIFT;
	last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
	if (page->index >= end_index) {
		if ((page->index >= end_index + 1) ||
		    !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
			if (startio)
				unlock_page(page);
			return 0;
		}
	}

	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 *
	 * Derivation:
	 *
	 * End offset is the highest offset that this page should represent.
	 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
	 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
	 * hence give us the correct page_dirty count. On any other page,
	 * it will be zero and in that case we need page_dirty to be the
	 * count of buffers on the page.
	 */
	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset);
	len = 1 << inode->i_blkbits;
	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
					PAGE_CACHE_SIZE);
	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
	page_dirty = p_offset / len;

	bh = head = page_buffers(page);
	offset = page_offset(page);
	flags = BMAPI_READ;
	type = IOMAP_NEW;

	/* TODO: cleanup count and page_dirty */

	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio) {
			/*
			 * the iomap is actually still valid, but the ioend
			 * isn't.  shouldn't happen too often.
			 */
			iomap_valid = 0;
			continue;
		}

		if (iomap_valid)
			iomap_valid = xfs_iomap_valid(&iomap, offset);

		/*
		 * First case, map an unwritten extent and prepare for
		 * extent state conversion transaction on completion.
		 *
		 * Second case, allocate space for a delalloc buffer.
		 * We can return EAGAIN here in the release page case.
		 *
		 * Third case, an unmapped buffer was found, and we are
		 * in a path where we need to write the whole page out.
		 */
		if (buffer_unwritten(bh) || buffer_delay(bh) ||
		    ((buffer_uptodate(bh) || PageUptodate(page)) &&
		     !buffer_mapped(bh) && (unmapped || startio))) {
			int new_ioend = 0;

			/*
			 * Make sure we don't use a read-only iomap
			 */
			if (flags == BMAPI_READ)
				iomap_valid = 0;

			if (buffer_unwritten(bh)) {
				type = IOMAP_UNWRITTEN;
				flags = BMAPI_WRITE | BMAPI_IGNSTATE;
			} else if (buffer_delay(bh)) {
				type = IOMAP_DELAY;
				flags = BMAPI_ALLOCATE | trylock;
			} else {
				type = IOMAP_NEW;
				flags = BMAPI_WRITE | BMAPI_MMAP;
			}

			if (!iomap_valid) {
				/*
				 * if we didn't have a valid mapping then we
				 * need to ensure that we put the new mapping
				 * in a new ioend structure. This needs to be
				 * done to ensure that the ioends correctly
				 * reflect the block mappings at io completion
				 * for unwritten extent conversion.
				 */
				new_ioend = 1;
				if (type == IOMAP_NEW) {
					size = xfs_probe_cluster(inode,
							page, bh, head, 0);
				} else {
					size = len;
				}

				err = xfs_map_blocks(inode, offset, size,
						&iomap, flags);
				if (err)
					goto error;
				iomap_valid = xfs_iomap_valid(&iomap, offset);
			}
			if (iomap_valid) {
				xfs_map_at_offset(bh, offset,
						inode->i_blkbits, &iomap);
				if (startio) {
					xfs_add_to_ioend(inode, bh, offset,
							type, &ioend,
							new_ioend);
				} else {
					set_buffer_dirty(bh);
					unlock_buffer(bh);
					mark_buffer_dirty(bh);
				}
				page_dirty--;
				count++;
			}
		} else if (buffer_uptodate(bh) && startio) {
			/*
			 * we got here because the buffer is already mapped.
			 * That means it must already have extents allocated
			 * underneath it. Map the extent by reading it.
			 */
			if (!iomap_valid || flags != BMAPI_READ) {
				flags = BMAPI_READ;
				size = xfs_probe_cluster(inode, page, bh,
								head, 1);
				err = xfs_map_blocks(inode, offset, size,
						&iomap, flags);
				if (err)
					goto error;
				iomap_valid = xfs_iomap_valid(&iomap, offset);
			}

			/*
			 * We set the type to IOMAP_NEW in case we are doing a
			 * small write at EOF that is extending the file but
			 * without needing an allocation. We need to update the
			 * file size on I/O completion in this case so it is
			 * the same case as having just allocated a new extent
			 * that we are writing into for the first time.
			 */
			type = IOMAP_NEW;
			if (!test_and_set_bit(BH_Lock, &bh->b_state)) {
				ASSERT(buffer_mapped(bh));
				if (iomap_valid)
					all_bh = 1;
				xfs_add_to_ioend(inode, bh, offset, type,
						&ioend, !iomap_valid);
				page_dirty--;
				count++;
			} else {
				iomap_valid = 0;
			}
		} else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
			   (unmapped || startio)) {
			iomap_valid = 0;
		}

		if (!iohead)
			iohead = ioend;

	} while (offset += len, ((bh = bh->b_this_page) != head));

	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (startio)
		xfs_start_page_writeback(page, wbc, 1, count);

	if (ioend && iomap_valid) {
		offset = (iomap.iomap_offset + iomap.iomap_bsize - 1) >>
					PAGE_CACHE_SHIFT;
		tlast = min_t(pgoff_t, offset, last_index);
		xfs_cluster_write(inode, page->index + 1, &iomap, &ioend,
					wbc, startio, all_bh, tlast);
	}

	if (iohead)
		xfs_submit_ioend(iohead);

	return page_dirty;

error:
	if (iohead)
		xfs_cancel_ioend(iohead);

	/*
	 * If it's delalloc and we have nowhere to put it,
	 * throw it away, unless the lower layers told
	 * us to try again.
	 */
	if (err != -EAGAIN) {
		if (!unmapped)
			block_invalidatepage(page, 0);
		ClearPageUptodate(page);
	}
	return err;
}

/*
 * writepage: Called from one of two places:
 *
 * 1. we are flushing a delalloc buffer head.
 *
 * 2. we are writing out a dirty page. Typically the page dirty
 *    state is cleared before we get here. In this case it is
 *    conceivable we have no buffer heads.
 *
 * For delalloc space on the page we need to allocate space and
 * flush it. For unmapped buffer heads on the page we should
 * allocate space if the page is uptodate. For any other dirty
 * buffer heads on the page we should flush them.
 *
 * If we detect that a transaction would be required to flush
 * the page, we have to check the process flags first: if we
 * are already in a transaction or disk I/O during allocations
 * is off, we need to fail the writepage and redirty the page.
 */

STATIC int
xfs_vm_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	int			error;
	int			need_trans;
	int			delalloc, unmapped, unwritten;
	struct inode		*inode = page->mapping->host;

	xfs_page_trace(XFS_WRITEPAGE_ENTER, inode, page, 0);

	/*
	 * We need a transaction if:
	 *  1. There are delalloc buffers on the page
	 *  2. The page is uptodate and we have unmapped buffers
	 *  3. The page is uptodate and we have no buffers
	 *  4. There are unwritten buffers on the page
	 */

	if (!page_has_buffers(page)) {
		unmapped = 1;
		need_trans = 1;
	} else {
		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
		if (!PageUptodate(page))
			unmapped = 0;
		need_trans = delalloc + unmapped + unwritten;
	}

	/*
	 * If we need a transaction and the process flags say
	 * we are already in a transaction, or no IO is allowed
	 * then mark the page dirty again and leave the page
	 * as is.
	 */
	if (current_test_flags(PF_FSTRANS) && need_trans)
		goto out_fail;

	/*
	 * Delay hooking up buffer heads until we have
	 * made our go/no-go decision.
	 */
	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << inode->i_blkbits, 0);

	/*
	 * Convert delayed allocate, unwritten or unmapped space
	 * to real space and flush out to disk.
	 */
	error = xfs_page_state_convert(inode, page, wbc, 1, unmapped);
	if (error == -EAGAIN)
		goto out_fail;
	if (unlikely(error < 0))
		goto out_unlock;

	return 0;

out_fail:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
out_unlock:
	unlock_page(page);
	return error;
}

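/*
 * Clear the XFS_ITRUNCATED flag (set by the truncate path) before
 * deferring to the generic writeback code.
 */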
STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	return generic_writepages(mapping, wbc);
}

/*
 * Called to move a page into cleanable state - and from there
 * to be released. Possibly the page is already clean. We always
 * have buffer heads in this call.
 *
 * Returns non-zero if the page's buffers were freed and it is ok to
 * release, 0 otherwise.
 *
 * Possible scenarios are:
 *
 * 1. We are being called to release a page which has been written
 *    to via regular I/O. buffer heads will be dirty and possibly
 *    delalloc. If no delalloc buffer heads in this case then we
 *    can just return zero.
 *
 * 2. We are called to release a page which has been written via
 *    mmap; all we need to do is ensure there is no delalloc
 *    state in the buffer heads. If not, we can let the caller
 *    free them and we should come back later via writepage.
 */
STATIC int
xfs_vm_releasepage(
	struct page		*page,
	gfp_t			gfp_mask)
{
	struct inode		*inode = page->mapping->host;
	int			dirty, delalloc, unmapped, unwritten;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 1,
	};

	xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, 0);

	if (!page_has_buffers(page))
		return 0;

	xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
	if (!delalloc && !unwritten)
		goto free_buffers;

	if (!(gfp_mask & __GFP_FS))
		return 0;

	/* If we are already inside a transaction or the thread cannot
	 * do I/O, we cannot release this page.
	 */
	if (current_test_flags(PF_FSTRANS))
		return 0;

	/*
	 * Convert delalloc space to real space, do not flush the
	 * data out to disk, that will be done by the caller.
	 * Never need to allocate space here - we will always
	 * come back to writepage in that case.
	 */
	dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0);
	if (dirty == 0 && !unwritten)
		goto free_buffers;
	return 0;

free_buffers:
	return try_to_free_buffers(page);
}

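/*
 * Common implementation behind the buffered and direct get_blocks
 * callbacks: map iblock through xfs_bmap() and translate the resulting
 * iomap into buffer_head state bits for the generic block layer.
 */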
Linus Torvalds1da177e2005-04-16 15:20:36 -07001314STATIC int
Nathan Scottc2536662006-03-29 10:44:40 +10001315__xfs_get_blocks(
Linus Torvalds1da177e2005-04-16 15:20:36 -07001316 struct inode *inode,
1317 sector_t iblock,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001318 struct buffer_head *bh_result,
1319 int create,
1320 int direct,
1321 bmapi_flags_t flags)
1322{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001323 xfs_iomap_t iomap;
Nathan Scottfdc7ed72005-11-02 15:13:13 +11001324 xfs_off_t offset;
1325 ssize_t size;
Nathan Scottc2536662006-03-29 10:44:40 +10001326 int niomap = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001327 int error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001328
Nathan Scottfdc7ed72005-11-02 15:13:13 +11001329 offset = (xfs_off_t)iblock << inode->i_blkbits;
Nathan Scottc2536662006-03-29 10:44:40 +10001330 ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
1331 size = bh_result->b_size;
Christoph Hellwig739bfb22007-08-29 10:58:01 +10001332 error = xfs_bmap(XFS_I(inode), offset, size,
Nathan Scott67fcaa72006-06-09 17:00:52 +10001333 create ? flags : BMAPI_READ, &iomap, &niomap);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001334 if (error)
1335 return -error;
Nathan Scottc2536662006-03-29 10:44:40 +10001336 if (niomap == 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001337 return 0;
1338
	if (iomap.iomap_bn != IOMAP_DADDR_NULL) {
		/*
		 * For unwritten extents do not report a disk address on
		 * the read case (treat as if we're reading into a hole).
		 */
		if (create || !(iomap.iomap_flags & IOMAP_UNWRITTEN)) {
			xfs_map_buffer(bh_result, &iomap, offset,
				       inode->i_blkbits);
		}
		if (create && (iomap.iomap_flags & IOMAP_UNWRITTEN)) {
			if (direct)
				bh_result->b_private = inode;
			set_buffer_unwritten(bh_result);
		}
	}

	/*
	 * If this is a realtime file, the data may be on a different
	 * device to the one currently pointed to by buffer_head b_bdev.
	 */
	bh_result->b_bdev = iomap.iomap_target->bt_bdev;

	/*
	 * If we previously allocated a block out beyond eof and we are now
	 * coming back to use it then we will need to flag it as new even
	 * if it has a disk address.
	 *
	 * With sub-block writes into unwritten extents we also need to
	 * mark the buffer as new so that the unwritten parts of the
	 * buffer get correctly zeroed.
	 */
	if (create &&
	    ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
	     (offset >= i_size_read(inode)) ||
	     (iomap.iomap_flags & (IOMAP_NEW|IOMAP_UNWRITTEN))))
		set_buffer_new(bh_result);

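	/*
	 * A delayed-allocation mapping has no disk blocks assigned yet;
	 * mark the buffer delalloc so writeback knows real space still
	 * has to be allocated.  The BUG_ON documents that direct I/O
	 * must never be given a delalloc mapping.
	 */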
	if (iomap.iomap_flags & IOMAP_DELAY) {
		BUG_ON(direct);
		if (create) {
			set_buffer_uptodate(bh_result);
			set_buffer_mapped(bh_result);
			set_buffer_delay(bh_result);
		}
	}

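	/*
	 * For direct I/O, or when a buffer_head spans multiple blocks,
	 * trim b_size to the part of the mapping that actually covers
	 * this request so the caller cannot walk past the extent.
	 */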
	if (direct || size > (1 << inode->i_blkbits)) {
		ASSERT(iomap.iomap_bsize - iomap.iomap_delta > 0);
		offset = min_t(xfs_off_t,
			       iomap.iomap_bsize - iomap.iomap_delta, size);
		bh_result->b_size = (ssize_t)min_t(xfs_off_t, LONG_MAX, offset);
	}

	return 0;
}

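/*
 * get_block callback for the buffered I/O paths (prepare_write and
 * the mpage read code); maps with BMAPI_WRITE when asked to create.
 */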
int
xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock,
				bh_result, create, 0, BMAPI_WRITE);
}

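/*
 * get_block callback for direct I/O.  BMAPI_DIRECT asks the mapping
 * layer for real allocations up front, since a direct transfer can
 * never be satisfied from a delalloc mapping.
 */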
1406STATIC int
Nathan Scotte4c573b2006-03-14 13:54:26 +11001407xfs_get_blocks_direct(
Linus Torvalds1da177e2005-04-16 15:20:36 -07001408 struct inode *inode,
1409 sector_t iblock,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001410 struct buffer_head *bh_result,
1411 int create)
1412{
Nathan Scottc2536662006-03-29 10:44:40 +10001413 return __xfs_get_blocks(inode, iblock,
Badari Pulavarty1d8fa7a2006-03-26 01:38:02 -08001414 bh_result, create, 1, BMAPI_WRITE|BMAPI_DIRECT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001415}
1416
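/*
 * Completion handler for direct I/O, called by the generic blockdev
 * direct I/O code once the data transfer has finished.
 */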
STATIC void
xfs_end_io_direct(
	struct kiocb	*iocb,
	loff_t		offset,
	ssize_t		size,
	void		*private)
{
	xfs_ioend_t	*ioend = iocb->private;

	/*
	 * Non-NULL private data means we need to issue a transaction to
	 * convert a range from unwritten to written extents.  This needs
	 * to happen from process context but aio+dio I/O completion
	 * happens from irq context so we need to defer it to a workqueue.
	 * This is not necessary for synchronous direct I/O, but we do
	 * it anyway to keep the code uniform and simpler.
	 *
	 * Well, if only it were that simple. Because synchronous direct I/O
	 * requires extent conversion to occur *before* we return to userspace,
	 * we have to wait for extent conversion to complete. Look at the
	 * iocb that has been passed to us to determine if this is AIO or
	 * not. If it is synchronous, tell xfs_finish_ioend() to kick the
	 * workqueue and wait for it to complete.
	 *
	 * The core direct I/O code might be changed to always call the
	 * completion handler in the future, in which case all this can
	 * go away.
	 */
	ioend->io_offset = offset;
	ioend->io_size = size;
	if (ioend->io_type == IOMAP_READ) {
		xfs_finish_ioend(ioend, 0);
	} else if (private && size > 0) {
		xfs_finish_ioend(ioend, is_sync_kiocb(iocb));
	} else {
		/*
		 * A direct I/O write ioend starts its life in unwritten
		 * state in case it maps an unwritten extent.  This write
		 * didn't map an unwritten extent so switch its completion
		 * handler.
		 */
		INIT_WORK(&ioend->io_work, xfs_end_bio_written);
		xfs_finish_ioend(ioend, 0);
	}

	/*
	 * blockdev_direct_IO can return an error even after the I/O
	 * completion handler was called.  Thus we need to protect
	 * against double-freeing.
	 */
	iocb->private = NULL;
}

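/*
 * The ->direct_IO address space operation.  We look up the target
 * device with a BMAPI_DEVICE bmap call (a realtime file's data may
 * live on a different block device), attach an ioend to the iocb for
 * completion handling, and hand the request to the generic blockdev
 * direct I/O code.  Writes use the "own locking" variant as XFS does
 * its own locking and flushing around direct writes.
 */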
STATIC ssize_t
xfs_vm_direct_IO(
	int			rw,
	struct kiocb		*iocb,
	const struct iovec	*iov,
	loff_t			offset,
	unsigned long		nr_segs)
{
	struct file	*file = iocb->ki_filp;
	struct inode	*inode = file->f_mapping->host;
	xfs_iomap_t	iomap;
	int		maps = 1;
	int		error;
	ssize_t		ret;

	error = xfs_bmap(XFS_I(inode), offset, 0,
			 BMAPI_DEVICE, &iomap, &maps);
	if (error)
		return -error;

	if (rw == WRITE) {
		iocb->private = xfs_alloc_ioend(inode, IOMAP_UNWRITTEN);
		ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
			iomap.iomap_target->bt_bdev,
			iov, offset, nr_segs,
			xfs_get_blocks_direct,
			xfs_end_io_direct);
	} else {
		iocb->private = xfs_alloc_ioend(inode, IOMAP_READ);
		ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
			iomap.iomap_target->bt_bdev,
			iov, offset, nr_segs,
			xfs_get_blocks_direct,
			xfs_end_io_direct);
	}

	if (unlikely(ret != -EIOCBQUEUED && iocb->private))
		xfs_destroy_ioend(iocb->private);
	return ret;
}

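/* ->prepare_write: map and set up buffer_heads for a buffered write. */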
Nathan Scottf51623b2006-03-14 13:26:27 +11001511STATIC int
Nathan Scotte4c573b2006-03-14 13:54:26 +11001512xfs_vm_prepare_write(
Nathan Scottf51623b2006-03-14 13:26:27 +11001513 struct file *file,
1514 struct page *page,
1515 unsigned int from,
1516 unsigned int to)
1517{
Nathan Scottc2536662006-03-29 10:44:40 +10001518 return block_prepare_write(page, from, to, xfs_get_blocks);
Nathan Scottf51623b2006-03-14 13:26:27 +11001519}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001520
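/*
 * The ->bmap address space operation (e.g. for the FIBMAP ioctl).
 * Flush dirty pages first so that any delalloc extents are converted
 * and generic_block_bmap() reports real disk blocks.
 */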
STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);

	vn_trace_entry(vn_from_inode(inode), __FUNCTION__,
			(inst_t *)__return_address);
	xfs_rwlock(ip, VRWLOCK_READ);
	xfs_flush_pages(ip, (xfs_off_t)0, -1, 0, FI_REMAPF);
	xfs_rwunlock(ip, VRWLOCK_READ);
	return generic_block_bmap(mapping, block, xfs_get_blocks);
}

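/* The buffered read paths hand straight off to the generic mpage code. */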
STATIC int
xfs_vm_readpage(
	struct file		*unused,
	struct page		*page)
{
	return mpage_readpage(page, xfs_get_blocks);
}

STATIC int
xfs_vm_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
}

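/*
 * ->invalidatepage is called when part or all of the page is being
 * thrown away (e.g. on truncate); trace it and let the generic buffer
 * code clean up the affected buffer_heads.
 */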
STATIC void
xfs_vm_invalidatepage(
	struct page		*page,
	unsigned long		offset)
{
	xfs_page_trace(XFS_INVALIDPAGE_ENTER,
			page->mapping->host, page, offset);
	block_invalidatepage(page, offset);
}

const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readpages		= xfs_vm_readpages,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.sync_page		= block_sync_page,
	.releasepage		= xfs_vm_releasepage,
	.invalidatepage		= xfs_vm_invalidatepage,
	.prepare_write		= xfs_vm_prepare_write,
	.commit_write		= generic_commit_write,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= xfs_vm_direct_IO,
	.migratepage		= buffer_migrate_page,
};