/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_trans.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_error.h"
#include "xfs_rw.h"
#include "xfs_iomap.h"
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>

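/*
 * Walk the buffers attached to a page and record whether any of them
 * are delalloc, unmapped or unwritten.
 */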
STATIC void
xfs_count_page_state(
	struct page		*page,
	int			*delalloc,
	int			*unmapped,
	int			*unwritten)
{
	struct buffer_head	*bh, *head;

	*delalloc = *unmapped = *unwritten = 0;

	bh = head = page_buffers(page);
	do {
		if (buffer_uptodate(bh) && !buffer_mapped(bh))
			(*unmapped) = 1;
		else if (buffer_unwritten(bh))
			(*unwritten) = 1;
		else if (buffer_delay(bh))
			(*delalloc) = 1;
	} while ((bh = bh->b_this_page) != head);
}

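/*
 * Trace page state at the rw trace points; compiled out entirely
 * when XFS_RW_TRACE is not configured.
 */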
#if defined(XFS_RW_TRACE)
void
xfs_page_trace(
	int		tag,
	struct inode	*inode,
	struct page	*page,
	unsigned long	pgoff)
{
	xfs_inode_t	*ip;
	bhv_vnode_t	*vp = vn_from_inode(inode);
	loff_t		isize = i_size_read(inode);
	loff_t		offset = page_offset(page);
	int		delalloc = -1, unmapped = -1, unwritten = -1;

	if (page_has_buffers(page))
		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);

	ip = xfs_vtoi(vp);
	if (!ip->i_rwtrace)
		return;

	ktrace_enter(ip->i_rwtrace,
		(void *)((unsigned long)tag),
		(void *)ip,
		(void *)inode,
		(void *)page,
		(void *)pgoff,
		(void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
		(void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
		(void *)((unsigned long)((isize >> 32) & 0xffffffff)),
		(void *)((unsigned long)(isize & 0xffffffff)),
		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
		(void *)((unsigned long)(offset & 0xffffffff)),
		(void *)((unsigned long)delalloc),
		(void *)((unsigned long)unmapped),
		(void *)((unsigned long)unwritten),
		(void *)((unsigned long)current_pid()),
		(void *)NULL);
}
#else
#define xfs_page_trace(tag, inode, page, pgoff)
#endif

/*
 * Schedule IO completion handling on an xfsdatad if this was
 * the final hold on this ioend.  If we are asked to wait,
 * flush the workqueue.
 */
STATIC void
xfs_finish_ioend(
	xfs_ioend_t	*ioend,
	int		wait)
{
	if (atomic_dec_and_test(&ioend->io_remaining)) {
		queue_work(xfsdatad_workqueue, &ioend->io_work);
		if (wait)
			flush_workqueue(xfsdatad_workqueue);
	}
}

/*
 * We're now finished for good with this ioend structure.
 * Update the page state via the associated buffer_heads,
 * release holds on the inode and bio, and finally free
 * up memory.  Do not use the ioend after this.
 */
STATIC void
xfs_destroy_ioend(
	xfs_ioend_t		*ioend)
{
	struct buffer_head	*bh, *next;

	for (bh = ioend->io_buffer_head; bh; bh = next) {
		next = bh->b_private;
		bh->b_end_io(bh, !ioend->io_error);
	}
	if (unlikely(ioend->io_error))
		vn_ioerror(ioend->io_vnode, ioend->io_error, __FILE__,__LINE__);
	vn_iowake(ioend->io_vnode);
	mempool_free(ioend, xfs_ioend_pool);
}

/*
 * Update on-disk file size now that data has been written to disk.
 * The current in-memory file size is i_size.  If a write is beyond
 * eof io_new_size will be the intended file size until i_size is
 * updated.  If this write does not extend all the way to the valid
 * file size then restrict this update to the end of the write.
 */
STATIC void
xfs_setfilesize(
	xfs_ioend_t		*ioend)
{
	xfs_inode_t		*ip;
	xfs_fsize_t		isize;
	xfs_fsize_t		bsize;

	ip = xfs_vtoi(ioend->io_vnode);
	if (!ip)
		return;

	ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
	ASSERT(ioend->io_type != IOMAP_READ);

	if (unlikely(ioend->io_error))
		return;

	bsize = ioend->io_offset + ioend->io_size;

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	isize = MAX(ip->i_size, ip->i_iocore.io_new_size);
	isize = MIN(isize, bsize);

	if (ip->i_d.di_size < isize) {
		ip->i_d.di_size = isize;
		ip->i_update_core = 1;
		ip->i_update_size = 1;
	}

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
}

/*
 * Buffered IO write completion for delayed allocate extents.
 */
STATIC void
xfs_end_bio_delalloc(
	struct work_struct	*work)
{
	xfs_ioend_t		*ioend =
		container_of(work, xfs_ioend_t, io_work);

	xfs_setfilesize(ioend);
	xfs_destroy_ioend(ioend);
}

/*
 * Buffered IO write completion for regular, written extents.
 */
STATIC void
xfs_end_bio_written(
	struct work_struct	*work)
{
	xfs_ioend_t		*ioend =
		container_of(work, xfs_ioend_t, io_work);

	xfs_setfilesize(ioend);
	xfs_destroy_ioend(ioend);
}

/*
 * IO write completion for unwritten extents.
 *
 * Issue transactions to convert a buffer range from unwritten
 * to written extents.
 */
STATIC void
xfs_end_bio_unwritten(
	struct work_struct	*work)
{
	xfs_ioend_t		*ioend =
		container_of(work, xfs_ioend_t, io_work);
	bhv_vnode_t		*vp = ioend->io_vnode;
	xfs_off_t		offset = ioend->io_offset;
	size_t			size = ioend->io_size;

	if (likely(!ioend->io_error)) {
		bhv_vop_bmap(vp, offset, size, BMAPI_UNWRITTEN, NULL, NULL);
		xfs_setfilesize(ioend);
	}
	xfs_destroy_ioend(ioend);
}

/*
 * IO read completion for regular, written extents.
 */
STATIC void
xfs_end_bio_read(
	struct work_struct	*work)
{
	xfs_ioend_t		*ioend =
		container_of(work, xfs_ioend_t, io_work);

	xfs_destroy_ioend(ioend);
}

/*
 * Allocate and initialise an IO completion structure.
 * We need to track unwritten extent write completion here initially.
 * We'll need to extend this for updating the ondisk inode size later
 * (vs. incore size).
 */
STATIC xfs_ioend_t *
xfs_alloc_ioend(
	struct inode		*inode,
	unsigned int		type)
{
	xfs_ioend_t		*ioend;

	ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);

	/*
	 * Set the count to 1 initially; this prevents the I/O
	 * completion callback from calling the completion routine
	 * before we have started all the I/O.
	 */
	atomic_set(&ioend->io_remaining, 1);
	ioend->io_error = 0;
	ioend->io_list = NULL;
	ioend->io_type = type;
	ioend->io_vnode = vn_from_inode(inode);
	ioend->io_buffer_head = NULL;
	ioend->io_buffer_tail = NULL;
	atomic_inc(&ioend->io_vnode->v_iocount);
	ioend->io_offset = 0;
	ioend->io_size = 0;

	if (type == IOMAP_UNWRITTEN)
		INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten);
	else if (type == IOMAP_DELAY)
		INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc);
	else if (type == IOMAP_READ)
		INIT_WORK(&ioend->io_work, xfs_end_bio_read);
	else
		INIT_WORK(&ioend->io_work, xfs_end_bio_written);

	return ioend;
}

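/*
 * Map a range of a file to disk blocks via the vnode bmap operation,
 * returning a negated error code in the Linux convention.
 */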
STATIC int
xfs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	ssize_t			count,
	xfs_iomap_t		*mapp,
	int			flags)
{
	bhv_vnode_t		*vp = vn_from_inode(inode);
	int			error, nmaps = 1;

	error = bhv_vop_bmap(vp, offset, count, flags, mapp, &nmaps);
	if (!error && (flags & (BMAPI_WRITE|BMAPI_ALLOCATE)))
		VMODIFY(vp);
	return -error;
}

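/*
 * Return whether offset lies within the extent covered by the cached
 * iomap, i.e. whether the mapping can be reused for this buffer.
 */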
STATIC_INLINE int
xfs_iomap_valid(
	xfs_iomap_t		*iomapp,
	loff_t			offset)
{
	return offset >= iomapp->iomap_offset &&
		offset < iomapp->iomap_offset + iomapp->iomap_bsize;
}

/*
 * BIO completion handler for buffered IO.
 */
STATIC int
xfs_end_bio(
	struct bio		*bio,
	unsigned int		bytes_done,
	int			error)
{
	xfs_ioend_t		*ioend = bio->bi_private;

	if (bio->bi_size)
		return 1;

	ASSERT(atomic_read(&bio->bi_cnt) >= 1);
	ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;

	/* Toss bio and pass work off to an xfsdatad thread */
	bio->bi_private = NULL;
	bio->bi_end_io = NULL;
	bio_put(bio);

	xfs_finish_ioend(ioend, 0);
	return 0;
}

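/*
 * Hook a bio up to its ioend and submit it for writing.  The extra
 * hold taken on io_remaining here is dropped by xfs_end_bio() when
 * the bio completes.
 */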
STATIC void
xfs_submit_ioend_bio(
	xfs_ioend_t	*ioend,
	struct bio	*bio)
{
	atomic_inc(&ioend->io_remaining);

	bio->bi_private = ioend;
	bio->bi_end_io = xfs_end_bio;

	submit_bio(WRITE, bio);
	ASSERT(!bio_flagged(bio, BIO_EOPNOTSUPP));
	bio_put(bio);
}

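/*
 * Allocate a bio positioned at the buffer's first block, halving the
 * requested vector count until the allocation succeeds.
 */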
STATIC struct bio *
xfs_alloc_ioend_bio(
	struct buffer_head	*bh)
{
	struct bio		*bio;
	int			nvecs = bio_get_nr_vecs(bh->b_bdev);

	do {
		bio = bio_alloc(GFP_NOIO, nvecs);
		nvecs >>= 1;
	} while (!bio);

	ASSERT(bio->bi_private == NULL);
	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	bio_get(bio);
	return bio;
}

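/*
 * Move a locked, mapped buffer into async-write state prior to I/O
 * submission.
 */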
STATIC void
xfs_start_buffer_writeback(
	struct buffer_head	*bh)
{
	ASSERT(buffer_mapped(bh));
	ASSERT(buffer_locked(bh));
	ASSERT(!buffer_delay(bh));
	ASSERT(!buffer_unwritten(bh));

	mark_buffer_async_write(bh);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
}

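/*
 * Put the page under writeback and unlock it.  If no buffers were
 * submitted, end writeback immediately and count the page as skipped.
 */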
STATIC void
xfs_start_page_writeback(
	struct page		*page,
	struct writeback_control *wbc,
	int			clear_dirty,
	int			buffers)
{
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));
	if (clear_dirty)
		clear_page_dirty_for_io(page);
	set_page_writeback(page);
	unlock_page(page);
	if (!buffers) {
		end_page_writeback(page);
		wbc->pages_skipped++;	/* We didn't write this page */
	}
}

static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}

/*
 * Submit all of the bios for all of the ioends we have saved up, covering the
 * initial writepage page and also any probed pages.
 *
 * Because we may have multiple ioends spanning a page, we need to start
 * writeback on all the buffers before we submit them for I/O.  If we mark the
 * buffers as we go, we can end up with a page that only has buffers marked
 * async write, and I/O completion can occur on it before we mark the other
 * buffers async write.
 *
 * The end result of this is that we trip a bug in end_page_writeback() because
 * we call it twice for the one page as the code in end_buffer_async_write()
 * assumes that all buffers on the page are started at the same time.
 *
 * The fix is two passes across the ioend list - one to start writeback on the
 * buffer_heads, and then submit them for I/O on the second pass.
 */
STATIC void
xfs_submit_ioend(
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*head = ioend;
	xfs_ioend_t		*next;
	struct buffer_head	*bh;
	struct bio		*bio;
	sector_t		lastblock = 0;

	/* Pass 1 - start writeback */
	do {
		next = ioend->io_list;
		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
			xfs_start_buffer_writeback(bh);
		}
	} while ((ioend = next) != NULL);

	/* Pass 2 - submit I/O */
	ioend = head;
	do {
		next = ioend->io_list;
		bio = NULL;

		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {

			if (!bio) {
 retry:
				bio = xfs_alloc_ioend_bio(bh);
			} else if (bh->b_blocknr != lastblock + 1) {
				xfs_submit_ioend_bio(ioend, bio);
				goto retry;
			}

			if (bio_add_buffer(bio, bh) != bh->b_size) {
				xfs_submit_ioend_bio(ioend, bio);
				goto retry;
			}

			lastblock = bh->b_blocknr;
		}
		if (bio)
			xfs_submit_ioend_bio(ioend, bio);
		xfs_finish_ioend(ioend, 0);
	} while ((ioend = next) != NULL);
}

/*
 * Cancel submission of all buffer_heads so far in this endio.
 * Toss the endio too.  Only ever called for the initial page
 * in a writepage request, so only ever one page.
 */
STATIC void
xfs_cancel_ioend(
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*next;
	struct buffer_head	*bh, *next_bh;

	do {
		next = ioend->io_list;
		bh = ioend->io_buffer_head;
		do {
			next_bh = bh->b_private;
			clear_buffer_async_write(bh);
			unlock_buffer(bh);
		} while ((bh = next_bh) != NULL);

		vn_iowake(ioend->io_vnode);
		mempool_free(ioend, xfs_ioend_pool);
	} while ((ioend = next) != NULL);
}

/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 * The head of the ioend chain is passed back via the result
 * parameter.
 */
STATIC void
xfs_add_to_ioend(
	struct inode		*inode,
	struct buffer_head	*bh,
	xfs_off_t		offset,
	unsigned int		type,
	xfs_ioend_t		**result,
	int			need_ioend)
{
	xfs_ioend_t		*ioend = *result;

	if (!ioend || need_ioend || type != ioend->io_type) {
		xfs_ioend_t	*previous = *result;

		ioend = xfs_alloc_ioend(inode, type);
		ioend->io_offset = offset;
		ioend->io_buffer_head = bh;
		ioend->io_buffer_tail = bh;
		if (previous)
			previous->io_list = ioend;
		*result = ioend;
	} else {
		ioend->io_buffer_tail->b_private = bh;
		ioend->io_buffer_tail = bh;
	}

	bh->b_private = NULL;
	ioend->io_size += bh->b_size;
}

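/*
 * Compute the buffer's on-disk block number from the cached iomap
 * and mark it mapped.
 */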
STATIC void
xfs_map_buffer(
	struct buffer_head	*bh,
	xfs_iomap_t		*mp,
	xfs_off_t		offset,
	uint			block_bits)
{
	sector_t		bn;

	ASSERT(mp->iomap_bn != IOMAP_DADDR_NULL);

	bn = (mp->iomap_bn >> (block_bits - BBSHIFT)) +
	      ((offset - mp->iomap_offset) >> block_bits);

	ASSERT(bn || (mp->iomap_flags & IOMAP_REALTIME));

	bh->b_blocknr = bn;
	set_buffer_mapped(bh);
}

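/*
 * Attach a newly mapped extent to the buffer, clearing the delalloc
 * and unwritten state now that real blocks back it.
 */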
STATIC void
xfs_map_at_offset(
	struct buffer_head	*bh,
	loff_t			offset,
	int			block_bits,
	xfs_iomap_t		*iomapp)
{
	ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE));
	ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY));

	lock_buffer(bh);
	xfs_map_buffer(bh, iomapp, offset, block_bits);
	bh->b_bdev = iomapp->iomap_target->bt_bdev;
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
}

/*
 * Look for a page at index that is suitable for clustering.
 */
STATIC unsigned int
xfs_probe_page(
	struct page		*page,
	unsigned int		pg_offset,
	int			mapped)
{
	int			ret = 0;

	if (PageWriteback(page))
		return 0;

	if (page->mapping && PageDirty(page)) {
		if (page_has_buffers(page)) {
			struct buffer_head	*bh, *head;

			bh = head = page_buffers(page);
			do {
				if (!buffer_uptodate(bh))
					break;
				if (mapped != buffer_mapped(bh))
					break;
				ret += bh->b_size;
				if (ret >= pg_offset)
					break;
			} while ((bh = bh->b_this_page) != head);
		} else
			ret = mapped ? 0 : PAGE_CACHE_SIZE;
	}

	return ret;
}

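/*
 * Sum the sizes of like-state buffers from bh to the end of this page
 * and then across following pages, bounded to avoid pathological scans,
 * to size the cluster we can write in one go.
 */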
STATIC size_t
xfs_probe_cluster(
	struct inode		*inode,
	struct page		*startpage,
	struct buffer_head	*bh,
	struct buffer_head	*head,
	int			mapped)
{
	struct pagevec		pvec;
	pgoff_t			tindex, tlast, tloff;
	size_t			total = 0;
	int			done = 0, i;

	/* First sum forwards in this page */
	do {
		if (!buffer_uptodate(bh) || (mapped != buffer_mapped(bh)))
			return total;
		total += bh->b_size;
	} while ((bh = bh->b_this_page) != head);

	/* if we reached the end of the page, sum forwards in following pages */
	tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
	tindex = startpage->index + 1;

	/* Prune this back to avoid pathological behavior */
	tloff = min(tlast, startpage->index + 64);

	pagevec_init(&pvec, 0);
	while (!done && tindex <= tloff) {
		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			size_t pg_offset, len = 0;

			if (tindex == tlast) {
				pg_offset =
				    i_size_read(inode) & (PAGE_CACHE_SIZE - 1);
				if (!pg_offset) {
					done = 1;
					break;
				}
			} else
				pg_offset = PAGE_CACHE_SIZE;

			if (page->index == tindex && !TestSetPageLocked(page)) {
				len = xfs_probe_page(page, pg_offset, mapped);
				unlock_page(page);
			}

			if (!len) {
				done = 1;
				break;
			}

			total += len;
			tindex++;
		}

		pagevec_release(&pvec);
		cond_resched();
	}

	return total;
}

/*
 * Test if a given page is suitable for writing as part of an unwritten
 * or delayed allocate extent.
 */
STATIC int
xfs_is_delayed_page(
	struct page		*page,
	unsigned int		type)
{
	if (PageWriteback(page))
		return 0;

	if (page->mapping && page_has_buffers(page)) {
		struct buffer_head	*bh, *head;
		int			acceptable = 0;

		bh = head = page_buffers(page);
		do {
			if (buffer_unwritten(bh))
				acceptable = (type == IOMAP_UNWRITTEN);
			else if (buffer_delay(bh))
				acceptable = (type == IOMAP_DELAY);
			else if (buffer_dirty(bh) && buffer_mapped(bh))
				acceptable = (type == IOMAP_NEW);
			else
				break;
		} while ((bh = bh->b_this_page) != head);

		if (acceptable)
			return 1;
	}

	return 0;
}

/*
 * Allocate & map buffers for page given the extent map, then write it out.
 * Except for the original page of a writepage, this is called on
 * delalloc/unwritten pages only; for the original page it is possible
 * that the page has no mapping at all.
 */
STATIC int
xfs_convert_page(
	struct inode		*inode,
	struct page		*page,
	loff_t			tindex,
	xfs_iomap_t		*mp,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc,
	int			startio,
	int			all_bh)
{
	struct buffer_head	*bh, *head;
	xfs_off_t		end_offset;
	unsigned long		p_offset;
	unsigned int		type;
	int			bbits = inode->i_blkbits;
	int			len, page_dirty;
	int			count = 0, done = 0, uptodate = 1;
	xfs_off_t		offset = page_offset(page);

	if (page->index != tindex)
		goto fail;
	if (TestSetPageLocked(page))
		goto fail;
	if (PageWriteback(page))
		goto fail_unlock_page;
	if (page->mapping != inode->i_mapping)
		goto fail_unlock_page;
	if (!xfs_is_delayed_page(page, (*ioendp)->io_type))
		goto fail_unlock_page;

	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 *
	 * Derivation:
	 *
	 * End offset is the highest offset that this page should represent.
	 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
	 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
	 * hence give us the correct page_dirty count. On any other page,
	 * it will be zero and in that case we need page_dirty to be the
	 * count of buffers on the page.
	 */
	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
			i_size_read(inode));

	len = 1 << inode->i_blkbits;
	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
					PAGE_CACHE_SIZE);
	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
	page_dirty = p_offset / len;

	bh = head = page_buffers(page);
	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh))) {
			done = 1;
			continue;
		}

		if (buffer_unwritten(bh) || buffer_delay(bh)) {
			if (buffer_unwritten(bh))
				type = IOMAP_UNWRITTEN;
			else
				type = IOMAP_DELAY;

			if (!xfs_iomap_valid(mp, offset)) {
				done = 1;
				continue;
			}

			ASSERT(!(mp->iomap_flags & IOMAP_HOLE));
			ASSERT(!(mp->iomap_flags & IOMAP_DELAY));

			xfs_map_at_offset(bh, offset, bbits, mp);
			if (startio) {
				xfs_add_to_ioend(inode, bh, offset,
						type, ioendp, done);
			} else {
				set_buffer_dirty(bh);
				unlock_buffer(bh);
				mark_buffer_dirty(bh);
			}
			page_dirty--;
			count++;
		} else {
			type = IOMAP_NEW;
			if (buffer_mapped(bh) && all_bh && startio) {
				lock_buffer(bh);
				xfs_add_to_ioend(inode, bh, offset,
						type, ioendp, done);
				count++;
				page_dirty--;
			} else {
				done = 1;
			}
		}
	} while (offset += len, (bh = bh->b_this_page) != head);

	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (startio) {
		if (count) {
			struct backing_dev_info *bdi;

			bdi = inode->i_mapping->backing_dev_info;
			wbc->nr_to_write--;
			if (bdi_write_congested(bdi)) {
				wbc->encountered_congestion = 1;
				done = 1;
			} else if (wbc->nr_to_write <= 0) {
				done = 1;
			}
		}
		xfs_start_page_writeback(page, wbc, !page_dirty, count);
	}

	return done;
 fail_unlock_page:
	unlock_page(page);
 fail:
	return 1;
}

/*
 * Convert & write out a cluster of pages in the same extent as defined
 * by mp and following the start page.
 */
STATIC void
xfs_cluster_write(
	struct inode		*inode,
	pgoff_t			tindex,
	xfs_iomap_t		*iomapp,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc,
	int			startio,
	int			all_bh,
	pgoff_t			tlast)
{
	struct pagevec		pvec;
	int			done = 0, i;

	pagevec_init(&pvec, 0);
	while (!done && tindex <= tlast) {
		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			done = xfs_convert_page(inode, pvec.pages[i], tindex++,
					iomapp, ioendp, wbc, startio, all_bh);
			if (done)
				break;
		}

		pagevec_release(&pvec);
		cond_resched();
	}
}

/*
 * Calling this without startio set means we are being asked to make a dirty
 * page ready for freeing its buffers.  When called with startio set then
 * we are coming from writepage.
 *
 * When called with startio set it is important that we write the WHOLE
 * page if possible.
 * The bh->b_state flags cannot tell us whether any particular block is
 * dirty due to mmap writes, and therefore bh uptodate is only valid if
 * the page itself isn't completely uptodate.  Some layers may clear the
 * page dirty flag prior to calling write page, under the assumption the
 * entire page will be written out; by not writing out the whole page the
 * page can be reused before all valid dirty data is written out.
 * Note: in the case of a page that has been dirtied by mapwrite but only
 * partially set up by block_prepare_write the bh->b_state flags will not
 * agree, and only the ones set up by BPW/BCW will have valid state; thus
 * the whole page must be written out.
 */

STATIC int
xfs_page_state_convert(
	struct inode	*inode,
	struct page	*page,
	struct writeback_control *wbc,
	int		startio,
	int		unmapped) /* also implies page uptodate */
{
	struct buffer_head	*bh, *head;
	xfs_iomap_t		iomap;
	xfs_ioend_t		*ioend = NULL, *iohead = NULL;
	loff_t			offset;
	unsigned long		p_offset = 0;
	unsigned int		type;
	__uint64_t		end_offset;
	pgoff_t			end_index, last_index, tlast;
	ssize_t			size, len;
	int			flags, err, iomap_valid = 0, uptodate = 1;
	int			page_dirty, count = 0;
	int			trylock = 0;
	int			all_bh = unmapped;

	if (startio) {
		if (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking)
			trylock |= BMAPI_TRYLOCK;
	}

	/* Is this page beyond the end of the file? */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_CACHE_SHIFT;
	last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
	if (page->index >= end_index) {
		if ((page->index >= end_index + 1) ||
		    !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
			if (startio)
				unlock_page(page);
			return 0;
		}
	}

	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 *
	 * Derivation:
	 *
	 * End offset is the highest offset that this page should represent.
	 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
	 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
	 * hence give us the correct page_dirty count. On any other page,
	 * it will be zero and in that case we need page_dirty to be the
	 * count of buffers on the page.
	 */
	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset);
	len = 1 << inode->i_blkbits;
	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
					PAGE_CACHE_SIZE);
	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
	page_dirty = p_offset / len;

	bh = head = page_buffers(page);
	offset = page_offset(page);
	flags = BMAPI_READ;
	type = IOMAP_NEW;

	/* TODO: cleanup count and page_dirty */

	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio) {
			/*
			 * the iomap is actually still valid, but the ioend
			 * isn't.  shouldn't happen too often.
			 */
			iomap_valid = 0;
			continue;
		}

		if (iomap_valid)
			iomap_valid = xfs_iomap_valid(&iomap, offset);

		/*
		 * First case, map an unwritten extent and prepare for
		 * extent state conversion transaction on completion.
		 *
		 * Second case, allocate space for a delalloc buffer.
		 * We can return EAGAIN here in the release page case.
		 *
		 * Third case, an unmapped buffer was found, and we are
		 * in a path where we need to write the whole page out.
		 */
		if (buffer_unwritten(bh) || buffer_delay(bh) ||
		    ((buffer_uptodate(bh) || PageUptodate(page)) &&
		     !buffer_mapped(bh) && (unmapped || startio))) {
			/*
			 * Make sure we don't use a read-only iomap
			 */
			if (flags == BMAPI_READ)
				iomap_valid = 0;

			if (buffer_unwritten(bh)) {
				type = IOMAP_UNWRITTEN;
				flags = BMAPI_WRITE | BMAPI_IGNSTATE;
			} else if (buffer_delay(bh)) {
				type = IOMAP_DELAY;
				flags = BMAPI_ALLOCATE | trylock;
			} else {
				type = IOMAP_NEW;
				flags = BMAPI_WRITE | BMAPI_MMAP;
			}

			if (!iomap_valid) {
				if (type == IOMAP_NEW) {
					size = xfs_probe_cluster(inode,
							page, bh, head, 0);
				} else {
					size = len;
				}

				err = xfs_map_blocks(inode, offset, size,
						&iomap, flags);
				if (err)
					goto error;
				iomap_valid = xfs_iomap_valid(&iomap, offset);
			}
			if (iomap_valid) {
				xfs_map_at_offset(bh, offset,
						inode->i_blkbits, &iomap);
				if (startio) {
					xfs_add_to_ioend(inode, bh, offset,
							type, &ioend,
							!iomap_valid);
				} else {
					set_buffer_dirty(bh);
					unlock_buffer(bh);
					mark_buffer_dirty(bh);
				}
				page_dirty--;
				count++;
			}
		} else if (buffer_uptodate(bh) && startio) {
			/*
			 * we got here because the buffer is already mapped.
			 * That means it must already have extents allocated
			 * underneath it. Map the extent by reading it.
			 */
			if (!iomap_valid || flags != BMAPI_READ) {
				flags = BMAPI_READ;
				size = xfs_probe_cluster(inode, page, bh,
								head, 1);
				err = xfs_map_blocks(inode, offset, size,
						&iomap, flags);
				if (err)
					goto error;
				iomap_valid = xfs_iomap_valid(&iomap, offset);
			}

			/*
			 * We set the type to IOMAP_NEW in case we are doing a
			 * small write at EOF that is extending the file but
			 * without needing an allocation. We need to update the
			 * file size on I/O completion in this case so it is
			 * the same case as having just allocated a new extent
			 * that we are writing into for the first time.
			 */
			type = IOMAP_NEW;
			if (!test_and_set_bit(BH_Lock, &bh->b_state)) {
				ASSERT(buffer_mapped(bh));
				if (iomap_valid)
					all_bh = 1;
				xfs_add_to_ioend(inode, bh, offset, type,
						&ioend, !iomap_valid);
				page_dirty--;
				count++;
			} else {
				iomap_valid = 0;
			}
		} else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
			   (unmapped || startio)) {
			iomap_valid = 0;
		}

		if (!iohead)
			iohead = ioend;

	} while (offset += len, ((bh = bh->b_this_page) != head));

	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (startio)
		xfs_start_page_writeback(page, wbc, 1, count);

	if (ioend && iomap_valid) {
		offset = (iomap.iomap_offset + iomap.iomap_bsize - 1) >>
					PAGE_CACHE_SHIFT;
		tlast = min_t(pgoff_t, offset, last_index);
		xfs_cluster_write(inode, page->index + 1, &iomap, &ioend,
					wbc, startio, all_bh, tlast);
	}

	if (iohead)
		xfs_submit_ioend(iohead);

	return page_dirty;

error:
	if (iohead)
		xfs_cancel_ioend(iohead);

	/*
	 * If it's delalloc and we have nowhere to put it,
	 * throw it away, unless the lower layers told
	 * us to try again.
	 */
	if (err != -EAGAIN) {
		if (!unmapped)
			block_invalidatepage(page, 0);
		ClearPageUptodate(page);
	}
	return err;
}

/*
 * writepage: Called from one of two places:
 *
 * 1. we are flushing a delalloc buffer head.
 *
 * 2. we are writing out a dirty page. Typically the page dirty
 *    state is cleared before we get here. In this case it is
 *    conceivable we have no buffer heads.
 *
 * For delalloc space on the page we need to allocate space and
 * flush it. For unmapped buffer heads on the page we should
 * allocate space if the page is uptodate. For any other dirty
 * buffer heads on the page we should flush them.
 *
 * If we detect that a transaction would be required to flush
 * the page, we have to check the process flags first: if we
 * are already in a transaction or disk I/O during allocations
 * is off, we need to fail the writepage and redirty the page.
 */

STATIC int
xfs_vm_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	int			error;
	int			need_trans;
	int			delalloc, unmapped, unwritten;
	struct inode		*inode = page->mapping->host;

	xfs_page_trace(XFS_WRITEPAGE_ENTER, inode, page, 0);

	/*
	 * We need a transaction if:
	 *  1. There are delalloc buffers on the page
	 *  2. The page is uptodate and we have unmapped buffers
	 *  3. The page is uptodate and we have no buffers
	 *  4. There are unwritten buffers on the page
	 */

	if (!page_has_buffers(page)) {
		unmapped = 1;
		need_trans = 1;
	} else {
		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
		if (!PageUptodate(page))
			unmapped = 0;
		need_trans = delalloc + unmapped + unwritten;
	}

	/*
	 * If we need a transaction and the process flags say
	 * we are already in a transaction, or no IO is allowed
	 * then mark the page dirty again and leave the page
	 * as is.
	 */
	if (current_test_flags(PF_FSTRANS) && need_trans)
		goto out_fail;

	/*
	 * Delay hooking up buffer heads until we have
	 * made our go/no-go decision.
	 */
	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << inode->i_blkbits, 0);

	/*
	 * Convert delayed allocate, unwritten or unmapped space
	 * to real space and flush out to disk.
	 */
	error = xfs_page_state_convert(inode, page, wbc, 1, unmapped);
	if (error == -EAGAIN)
		goto out_fail;
	if (unlikely(error < 0))
		goto out_unlock;

	return 0;

out_fail:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
out_unlock:
	unlock_page(page);
	return error;
}

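/*
 * ->writepages: clear any pending truncate flag on the vnode, then
 * defer to the generic writeback path.
 */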
STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	struct bhv_vnode	*vp = vn_from_inode(mapping->host);

	if (VN_TRUNC(vp))
		VUNTRUNCATE(vp);
	return generic_writepages(mapping, wbc);
}

/*
 * Called to move a page into cleanable state - and from there
 * to be released. Possibly the page is already clean. We always
 * have buffer heads in this call.
 *
 * Returns non-zero if the buffers were freed and the page can be
 * released, 0 otherwise.
 *
 * Possible scenarios are:
 *
 * 1. We are being called to release a page which has been written
 *    to via regular I/O. buffer heads will be dirty and possibly
 *    delalloc. If no delalloc buffer heads in this case then we
 *    can just return zero.
 *
 * 2. We are called to release a page which has been written via
 *    mmap, all we need to do is ensure there is no delalloc
 *    state in the buffer heads, if not we can let the caller
 *    free them and we should come back later via writepage.
 */
STATIC int
xfs_vm_releasepage(
	struct page		*page,
	gfp_t			gfp_mask)
{
	struct inode		*inode = page->mapping->host;
	int			dirty, delalloc, unmapped, unwritten;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 1,
	};

	xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, 0);

	if (!page_has_buffers(page))
		return 0;

	xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
	if (!delalloc && !unwritten)
		goto free_buffers;

	if (!(gfp_mask & __GFP_FS))
		return 0;

	/* If we are already inside a transaction or the thread cannot
	 * do I/O, we cannot release this page.
	 */
	if (current_test_flags(PF_FSTRANS))
		return 0;

	/*
	 * Convert delalloc space to real space, do not flush the
	 * data out to disk, that will be done by the caller.
	 * Never need to allocate space here - we will always
	 * come back to writepage in that case.
	 */
	dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0);
	if (dirty == 0 && !unwritten)
		goto free_buffers;
	return 0;

free_buffers:
	return try_to_free_buffers(page);
}

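/*
 * Common implementation of the get_blocks interface for buffered and
 * direct I/O: map the offset to disk blocks and transcribe the iomap
 * state onto the buffer_head.
 */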
Linus Torvalds1da177e2005-04-16 15:20:36 -07001305STATIC int
Nathan Scottc2536662006-03-29 10:44:40 +10001306__xfs_get_blocks(
Linus Torvalds1da177e2005-04-16 15:20:36 -07001307 struct inode *inode,
1308 sector_t iblock,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001309 struct buffer_head *bh_result,
1310 int create,
1311 int direct,
1312 bmapi_flags_t flags)
1313{
Nathan Scott67fcaa72006-06-09 17:00:52 +10001314 bhv_vnode_t *vp = vn_from_inode(inode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001315 xfs_iomap_t iomap;
Nathan Scottfdc7ed72005-11-02 15:13:13 +11001316 xfs_off_t offset;
1317 ssize_t size;
Nathan Scottc2536662006-03-29 10:44:40 +10001318 int niomap = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001319 int error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001320
Nathan Scottfdc7ed72005-11-02 15:13:13 +11001321 offset = (xfs_off_t)iblock << inode->i_blkbits;
Nathan Scottc2536662006-03-29 10:44:40 +10001322 ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
1323 size = bh_result->b_size;
Nathan Scott67fcaa72006-06-09 17:00:52 +10001324 error = bhv_vop_bmap(vp, offset, size,
1325 create ? flags : BMAPI_READ, &iomap, &niomap);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001326 if (error)
1327 return -error;
Nathan Scottc2536662006-03-29 10:44:40 +10001328 if (niomap == 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001329 return 0;
1330
1331 if (iomap.iomap_bn != IOMAP_DADDR_NULL) {
Nathan Scott87cbc492006-03-14 13:26:43 +11001332 /*
1333 * For unwritten extents do not report a disk address on
Linus Torvalds1da177e2005-04-16 15:20:36 -07001334 * the read case (treat as if we're reading into a hole).
1335 */
1336 if (create || !(iomap.iomap_flags & IOMAP_UNWRITTEN)) {
Nathan Scott87cbc492006-03-14 13:26:43 +11001337 xfs_map_buffer(bh_result, &iomap, offset,
1338 inode->i_blkbits);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001339 }
1340 if (create && (iomap.iomap_flags & IOMAP_UNWRITTEN)) {
1341 if (direct)
1342 bh_result->b_private = inode;
1343 set_buffer_unwritten(bh_result);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001344 }
1345 }
1346
Nathan Scottc2536662006-03-29 10:44:40 +10001347 /*
1348 * If this is a realtime file, data may be on a different device.
1349 * to that pointed to from the buffer_head b_bdev currently.
1350 */
Nathan Scottce8e9222006-01-11 15:39:08 +11001351 bh_result->b_bdev = iomap.iomap_target->bt_bdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001352
	/*
	 * If we previously allocated a block out beyond eof and we are now
	 * coming back to use it then we will need to flag it as new even if it
	 * has a disk address.
	 *
	 * With sub-block writes into unwritten extents we also need to mark
	 * the buffer as new so that the unwritten parts of the buffer get
	 * correctly zeroed.
	 */
	if (create &&
	    ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
	     (offset >= i_size_read(inode)) ||
	     (iomap.iomap_flags & (IOMAP_NEW|IOMAP_UNWRITTEN))))
		set_buffer_new(bh_result);

	if (iomap.iomap_flags & IOMAP_DELAY) {
		BUG_ON(direct);
		if (create) {
			set_buffer_uptodate(bh_result);
			set_buffer_mapped(bh_result);
			set_buffer_delay(bh_result);
		}
	}

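	/*
	 * For direct I/O, or when mapping more than one block at a time,
	 * report back how much of the extent is actually usable: clamp
	 * b_size to the length remaining in this extent so the caller
	 * can map as much as possible in one call.
	 */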
	if (direct || size > (1 << inode->i_blkbits)) {
		ASSERT(iomap.iomap_bsize - iomap.iomap_delta > 0);
		offset = min_t(xfs_off_t,
				iomap.iomap_bsize - iomap.iomap_delta, size);
		bh_result->b_size = (ssize_t)min_t(xfs_off_t, LONG_MAX, offset);
	}

	return 0;
}

int
xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock,
				bh_result, create, 0, BMAPI_WRITE);
}

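/*
 * Direct I/O variant: BMAPI_DIRECT requests a mapping suitable for
 * direct writes, and direct=1 tells __xfs_get_blocks() to record
 * unwritten-extent state for the direct I/O completion handler.
 */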
STATIC int
xfs_get_blocks_direct(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock,
				bh_result, create, 1, BMAPI_WRITE|BMAPI_DIRECT);
}

STATIC void
xfs_end_io_direct(
	struct kiocb		*iocb,
	loff_t			offset,
	ssize_t			size,
	void			*private)
{
	xfs_ioend_t		*ioend = iocb->private;

	/*
	 * Non-NULL private data means we need to issue a transaction to
	 * convert a range from unwritten to written extents.  This needs
	 * to happen from process context but aio+dio I/O completion
	 * happens from irq context so we need to defer it to a workqueue.
	 * This is not necessary for synchronous direct I/O, but we do
	 * it anyway to keep the code uniform and simpler.
	 *
	 * Well, if only it were that simple.  Because synchronous direct I/O
	 * requires extent conversion to occur *before* we return to userspace,
	 * we have to wait for extent conversion to complete.  Look at the
	 * iocb that has been passed to us to determine if this is AIO or
	 * not.  If it is synchronous, tell xfs_finish_ioend() to kick the
	 * workqueue and wait for it to complete.
	 *
	 * The core direct I/O code might be changed to always call the
	 * completion handler in the future, in which case all this can
	 * go away.
	 */
	ioend->io_offset = offset;
	ioend->io_size = size;
	if (ioend->io_type == IOMAP_READ) {
		xfs_finish_ioend(ioend, 0);
	} else if (private && size > 0) {
		xfs_finish_ioend(ioend, is_sync_kiocb(iocb));
	} else {
		/*
		 * A direct I/O write ioend starts its life in unwritten
		 * state in case it maps an unwritten extent.  This write
		 * didn't map an unwritten extent, so switch its completion
		 * handler.
		 */
		INIT_WORK(&ioend->io_work, xfs_end_bio_written);
		xfs_finish_ioend(ioend, 0);
	}

	/*
	 * blockdev_direct_IO can return an error even after the I/O
	 * completion handler was called.  Thus we need to protect
	 * against double-freeing.
	 */
	iocb->private = NULL;
}

STATIC ssize_t
xfs_vm_direct_IO(
	int			rw,
	struct kiocb		*iocb,
	const struct iovec	*iov,
	loff_t			offset,
	unsigned long		nr_segs)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	bhv_vnode_t		*vp = vn_from_inode(inode);
	xfs_iomap_t		iomap;
	int			maps = 1;
	int			error;
	ssize_t			ret;

	error = bhv_vop_bmap(vp, offset, 0, BMAPI_DEVICE, &iomap, &maps);
	if (error)
		return -error;

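	/*
	 * Writes use the own-locking variant since XFS handles its own
	 * inode locking for direct I/O, and start with an IOMAP_UNWRITTEN
	 * ioend in case the write lands in an unwritten extent.  Reads
	 * need no extent conversion and take the no-locking variant.
	 */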
	if (rw == WRITE) {
		iocb->private = xfs_alloc_ioend(inode, IOMAP_UNWRITTEN);
		ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
			iomap.iomap_target->bt_bdev,
			iov, offset, nr_segs,
			xfs_get_blocks_direct,
			xfs_end_io_direct);
	} else {
		iocb->private = xfs_alloc_ioend(inode, IOMAP_READ);
		ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
			iomap.iomap_target->bt_bdev,
			iov, offset, nr_segs,
			xfs_get_blocks_direct,
			xfs_end_io_direct);
	}

	if (unlikely(ret != -EIOCBQUEUED && iocb->private))
		xfs_destroy_ioend(iocb->private);
	return ret;
}

STATIC int
xfs_vm_prepare_write(
	struct file		*file,
	struct page		*page,
	unsigned int		from,
	unsigned int		to)
{
	return block_prepare_write(page, from, to, xfs_get_blocks);
}

STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct inode		*inode = (struct inode *)mapping->host;
	bhv_vnode_t		*vp = vn_from_inode(inode);

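	/*
	 * Flush delayed-allocation data to disk under the iolock before
	 * asking for block numbers, so that callers such as FIBMAP see
	 * real on-disk blocks rather than delalloc state.
	 */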
	vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
	bhv_vop_rwlock(vp, VRWLOCK_READ);
	bhv_vop_flush_pages(vp, (xfs_off_t)0, -1, 0, FI_REMAPF);
	bhv_vop_rwunlock(vp, VRWLOCK_READ);
	return generic_block_bmap(mapping, block, xfs_get_blocks);
}

STATIC int
xfs_vm_readpage(
	struct file		*unused,
	struct page		*page)
{
	return mpage_readpage(page, xfs_get_blocks);
}

STATIC int
xfs_vm_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
}

STATIC void
xfs_vm_invalidatepage(
	struct page		*page,
	unsigned long		offset)
{
	xfs_page_trace(XFS_INVALIDPAGE_ENTER,
			page->mapping->host, page, offset);
	block_invalidatepage(page, offset);
}

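/*
 * Hook the routines above into the generic pagecache and VM paths;
 * buffered reads and writes, writeback, direct I/O and page migration
 * all enter XFS through this table.
 */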
const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readpages		= xfs_vm_readpages,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.sync_page		= block_sync_page,
	.releasepage		= xfs_vm_releasepage,
	.invalidatepage		= xfs_vm_invalidatepage,
	.prepare_write		= xfs_vm_prepare_write,
	.commit_write		= generic_commit_write,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= xfs_vm_direct_IO,
	.migratepage		= buffer_migrate_page,
};