/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_trans.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_error.h"
#include "xfs_rw.h"
#include "xfs_iomap.h"
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>

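/*
 * Walk the buffers attached to a page and note whether any of them
 * are delalloc, unmapped or unwritten.
 */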
STATIC void
xfs_count_page_state(
        struct page             *page,
        int                     *delalloc,
        int                     *unmapped,
        int                     *unwritten)
{
        struct buffer_head      *bh, *head;

        *delalloc = *unmapped = *unwritten = 0;

        bh = head = page_buffers(page);
        do {
                if (buffer_uptodate(bh) && !buffer_mapped(bh))
                        (*unmapped) = 1;
                else if (buffer_unwritten(bh))
                        (*unwritten) = 1;
                else if (buffer_delay(bh))
                        (*delalloc) = 1;
        } while ((bh = bh->b_this_page) != head);
}

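/*
 * Debug trace hook for the page I/O paths below; compiled out
 * entirely unless XFS_RW_TRACE is defined.
 */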
#if defined(XFS_RW_TRACE)
void
xfs_page_trace(
        int             tag,
        struct inode    *inode,
        struct page     *page,
        unsigned long   pgoff)
{
        xfs_inode_t     *ip;
        bhv_vnode_t     *vp = vn_from_inode(inode);
        loff_t          isize = i_size_read(inode);
        loff_t          offset = page_offset(page);
        int             delalloc = -1, unmapped = -1, unwritten = -1;

        if (page_has_buffers(page))
                xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);

        ip = xfs_vtoi(vp);
        if (!ip->i_rwtrace)
                return;

        ktrace_enter(ip->i_rwtrace,
                (void *)((unsigned long)tag),
                (void *)ip,
                (void *)inode,
                (void *)page,
                (void *)pgoff,
                (void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
                (void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
                (void *)((unsigned long)((isize >> 32) & 0xffffffff)),
                (void *)((unsigned long)(isize & 0xffffffff)),
                (void *)((unsigned long)((offset >> 32) & 0xffffffff)),
                (void *)((unsigned long)(offset & 0xffffffff)),
                (void *)((unsigned long)delalloc),
                (void *)((unsigned long)unmapped),
                (void *)((unsigned long)unwritten),
                (void *)((unsigned long)current_pid()),
                (void *)NULL);
}
#else
#define xfs_page_trace(tag, inode, page, pgoff)
#endif

/*
 * Schedule I/O completion handling on an xfsdatad thread if this
 * was the final hold on the ioend.  If we are asked to wait,
 * flush the workqueue.
 */
STATIC void
xfs_finish_ioend(
        xfs_ioend_t     *ioend,
        int             wait)
{
        if (atomic_dec_and_test(&ioend->io_remaining)) {
                queue_work(xfsdatad_workqueue, &ioend->io_work);
                if (wait)
                        flush_workqueue(xfsdatad_workqueue);
        }
}

/*
 * We're now finished for good with this ioend structure.
 * Update the page state via the associated buffer_heads,
 * release holds on the inode and bio, and finally free
 * up memory.  Do not use the ioend after this.
 */
STATIC void
xfs_destroy_ioend(
        xfs_ioend_t             *ioend)
{
        struct buffer_head      *bh, *next;

        for (bh = ioend->io_buffer_head; bh; bh = next) {
                next = bh->b_private;
                bh->b_end_io(bh, !ioend->io_error);
        }
        if (unlikely(ioend->io_error))
                vn_ioerror(ioend->io_vnode, ioend->io_error, __FILE__, __LINE__);
        vn_iowake(ioend->io_vnode);
        mempool_free(ioend, xfs_ioend_pool);
}

/*
 * Update the on-disk file size now that data has been written to disk.
 * The current in-memory file size is i_size.  If a write is beyond
 * EOF, io_new_size will be the intended file size until i_size is
 * updated.  If this write does not extend all the way to the valid
 * file size then restrict this update to the end of the write.
 */
STATIC void
xfs_setfilesize(
        xfs_ioend_t             *ioend)
{
        xfs_inode_t             *ip;
        xfs_fsize_t             isize;
        xfs_fsize_t             bsize;

        ip = xfs_vtoi(ioend->io_vnode);
        if (!ip)
                return;

        ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
        ASSERT(ioend->io_type != IOMAP_READ);

        if (unlikely(ioend->io_error))
                return;

        bsize = ioend->io_offset + ioend->io_size;

        xfs_ilock(ip, XFS_ILOCK_EXCL);

        isize = MAX(ip->i_size, ip->i_iocore.io_new_size);
        isize = MIN(isize, bsize);

        if (ip->i_d.di_size < isize) {
                ip->i_d.di_size = isize;
                ip->i_update_core = 1;
                ip->i_update_size = 1;
                mark_inode_dirty_sync(vn_to_inode(ioend->io_vnode));
        }

        xfs_iunlock(ip, XFS_ILOCK_EXCL);
}

/*
 * Buffered IO write completion for delayed allocate extents.
 */
STATIC void
xfs_end_bio_delalloc(
        struct work_struct      *work)
{
        xfs_ioend_t             *ioend =
                container_of(work, xfs_ioend_t, io_work);

        xfs_setfilesize(ioend);
        xfs_destroy_ioend(ioend);
}

/*
 * Buffered IO write completion for regular, written extents.
 */
STATIC void
xfs_end_bio_written(
        struct work_struct      *work)
{
        xfs_ioend_t             *ioend =
                container_of(work, xfs_ioend_t, io_work);

        xfs_setfilesize(ioend);
        xfs_destroy_ioend(ioend);
}

/*
 * IO write completion for unwritten extents.
 *
 * Issue transactions to convert a buffer range from unwritten
 * to written extents.
 */
STATIC void
xfs_end_bio_unwritten(
        struct work_struct      *work)
{
        xfs_ioend_t             *ioend =
                container_of(work, xfs_ioend_t, io_work);
        bhv_vnode_t             *vp = ioend->io_vnode;
        xfs_off_t               offset = ioend->io_offset;
        size_t                  size = ioend->io_size;

        if (likely(!ioend->io_error)) {
                bhv_vop_bmap(vp, offset, size, BMAPI_UNWRITTEN, NULL, NULL);
                xfs_setfilesize(ioend);
        }
        xfs_destroy_ioend(ioend);
}

/*
 * IO read completion for regular, written extents.
 */
STATIC void
xfs_end_bio_read(
        struct work_struct      *work)
{
        xfs_ioend_t             *ioend =
                container_of(work, xfs_ioend_t, io_work);

        xfs_destroy_ioend(ioend);
}

/*
 * Allocate and initialise an IO completion structure.
 * We need to track unwritten extent write completion here initially.
 * We'll need to extend this for updating the on-disk inode size later
 * (vs. the in-core size).
 */
STATIC xfs_ioend_t *
xfs_alloc_ioend(
        struct inode            *inode,
        unsigned int            type)
{
        xfs_ioend_t             *ioend;

        ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);

        /*
         * Set the count to 1 initially, which will prevent an I/O
         * completion callback that happens before we have started
         * all the I/O from calling the completion routine too early.
         */
        atomic_set(&ioend->io_remaining, 1);
        ioend->io_error = 0;
        ioend->io_list = NULL;
        ioend->io_type = type;
        ioend->io_vnode = vn_from_inode(inode);
        ioend->io_buffer_head = NULL;
        ioend->io_buffer_tail = NULL;
        atomic_inc(&ioend->io_vnode->v_iocount);
        ioend->io_offset = 0;
        ioend->io_size = 0;

        if (type == IOMAP_UNWRITTEN)
                INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten);
        else if (type == IOMAP_DELAY)
                INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc);
        else if (type == IOMAP_READ)
                INIT_WORK(&ioend->io_work, xfs_end_bio_read);
        else
                INIT_WORK(&ioend->io_work, xfs_end_bio_written);

        return ioend;
}

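/*
 * Map a file range to disk blocks via the vnode bmap operation,
 * marking the vnode modified when the call may allocate space.
 */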
STATIC int
xfs_map_blocks(
        struct inode            *inode,
        loff_t                  offset,
        ssize_t                 count,
        xfs_iomap_t             *mapp,
        int                     flags)
{
        bhv_vnode_t             *vp = vn_from_inode(inode);
        int                     error, nmaps = 1;

        error = bhv_vop_bmap(vp, offset, count, flags, mapp, &nmaps);
        if (!error && (flags & (BMAPI_WRITE|BMAPI_ALLOCATE)))
                VMODIFY(vp);
        return -error;
}

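/*
 * Return true if the given file offset still falls inside the cached
 * mapping.
 */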
STATIC_INLINE int
xfs_iomap_valid(
        xfs_iomap_t             *iomapp,
        loff_t                  offset)
{
        return offset >= iomapp->iomap_offset &&
                offset < iomapp->iomap_offset + iomapp->iomap_bsize;
}

/*
 * BIO completion handler for buffered IO.
 */
STATIC void
xfs_end_bio(
        struct bio              *bio,
        int                     error)
{
        xfs_ioend_t             *ioend = bio->bi_private;

        ASSERT(atomic_read(&bio->bi_cnt) >= 1);
        ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;

        /* Toss bio and pass work off to an xfsdatad thread */
        bio->bi_private = NULL;
        bio->bi_end_io = NULL;
        bio_put(bio);

        xfs_finish_ioend(ioend, 0);
}

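/*
 * Hand one bio off to the block layer on behalf of an ioend.  The
 * extra io_remaining reference taken here is dropped by xfs_end_bio()
 * when the bio completes.
 */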
STATIC void
xfs_submit_ioend_bio(
        xfs_ioend_t     *ioend,
        struct bio      *bio)
{
        atomic_inc(&ioend->io_remaining);

        bio->bi_private = ioend;
        bio->bi_end_io = xfs_end_bio;

        submit_bio(WRITE, bio);
        ASSERT(!bio_flagged(bio, BIO_EOPNOTSUPP));
        bio_put(bio);
}

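/*
 * Allocate a bio sized for the device behind the buffer; if the
 * allocation fails, retry with half as many vectors until it
 * succeeds.
 */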
STATIC struct bio *
xfs_alloc_ioend_bio(
        struct buffer_head      *bh)
{
        struct bio              *bio;
        int                     nvecs = bio_get_nr_vecs(bh->b_bdev);

        do {
                bio = bio_alloc(GFP_NOIO, nvecs);
                nvecs >>= 1;
        } while (!bio);

        ASSERT(bio->bi_private == NULL);
        bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
        bio->bi_bdev = bh->b_bdev;
        bio_get(bio);
        return bio;
}

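/*
 * Put a buffer into the async-write state expected by the generic
 * buffer code before it goes out for I/O: async write, uptodate and
 * no longer dirty.
 */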
STATIC void
xfs_start_buffer_writeback(
        struct buffer_head      *bh)
{
        ASSERT(buffer_mapped(bh));
        ASSERT(buffer_locked(bh));
        ASSERT(!buffer_delay(bh));
        ASSERT(!buffer_unwritten(bh));

        mark_buffer_async_write(bh);
        set_buffer_uptodate(bh);
        clear_buffer_dirty(bh);
}

STATIC void
xfs_start_page_writeback(
        struct page             *page,
        struct writeback_control *wbc,
        int                     clear_dirty,
        int                     buffers)
{
        ASSERT(PageLocked(page));
        ASSERT(!PageWriteback(page));
        if (clear_dirty)
                clear_page_dirty_for_io(page);
        set_page_writeback(page);
        unlock_page(page);
        /* If no buffers on the page are to be written, finish it here */
        if (!buffers)
                end_page_writeback(page);
}

static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
        return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}

/*
 * Submit all of the bios for all of the ioends we have saved up, covering the
 * initial writepage page and also any probed pages.
 *
 * Because we may have multiple ioends spanning a page, we need to start
 * writeback on all the buffers before we submit them for I/O.  If we mark the
 * buffers as we go, we can end up with a page that only has some buffers
 * marked async write, and I/O completion can occur before we mark the
 * remaining buffers async write.
 *
 * The end result of this is that we trip a bug in end_page_writeback() because
 * we call it twice for the one page as the code in end_buffer_async_write()
 * assumes that all buffers on the page are started at the same time.
 *
 * The fix is two passes across the ioend list - one to start writeback on the
 * buffer_heads, and then submit them for I/O on the second pass.
 */
STATIC void
xfs_submit_ioend(
        xfs_ioend_t             *ioend)
{
        xfs_ioend_t             *head = ioend;
        xfs_ioend_t             *next;
        struct buffer_head      *bh;
        struct bio              *bio;
        sector_t                lastblock = 0;

        /* Pass 1 - start writeback */
        do {
                next = ioend->io_list;
                for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
                        xfs_start_buffer_writeback(bh);
                }
        } while ((ioend = next) != NULL);

        /* Pass 2 - submit I/O */
        ioend = head;
        do {
                next = ioend->io_list;
                bio = NULL;

                for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {

                        if (!bio) {
 retry:
                                bio = xfs_alloc_ioend_bio(bh);
                        } else if (bh->b_blocknr != lastblock + 1) {
                                xfs_submit_ioend_bio(ioend, bio);
                                goto retry;
                        }

                        if (bio_add_buffer(bio, bh) != bh->b_size) {
                                xfs_submit_ioend_bio(ioend, bio);
                                goto retry;
                        }

                        lastblock = bh->b_blocknr;
                }
                if (bio)
                        xfs_submit_ioend_bio(ioend, bio);
                xfs_finish_ioend(ioend, 0);
        } while ((ioend = next) != NULL);
}

/*
 * Cancel submission of all buffer_heads so far in this ioend.
 * Toss the ioend too.  Only ever called for the initial page
 * in a writepage request, so only ever one page.
 */
STATIC void
xfs_cancel_ioend(
        xfs_ioend_t             *ioend)
{
        xfs_ioend_t             *next;
        struct buffer_head      *bh, *next_bh;

        do {
                next = ioend->io_list;
                bh = ioend->io_buffer_head;
                do {
                        next_bh = bh->b_private;
                        clear_buffer_async_write(bh);
                        unlock_buffer(bh);
                } while ((bh = next_bh) != NULL);

                vn_iowake(ioend->io_vnode);
                mempool_free(ioend, xfs_ioend_pool);
        } while ((ioend = next) != NULL);
}

/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 * The new or extended ioend is returned via *result.
 */
STATIC void
xfs_add_to_ioend(
        struct inode            *inode,
        struct buffer_head      *bh,
        xfs_off_t               offset,
        unsigned int            type,
        xfs_ioend_t             **result,
        int                     need_ioend)
{
        xfs_ioend_t             *ioend = *result;

        if (!ioend || need_ioend || type != ioend->io_type) {
                xfs_ioend_t     *previous = *result;

                ioend = xfs_alloc_ioend(inode, type);
                ioend->io_offset = offset;
                ioend->io_buffer_head = bh;
                ioend->io_buffer_tail = bh;
                if (previous)
                        previous->io_list = ioend;
                *result = ioend;
        } else {
                ioend->io_buffer_tail->b_private = bh;
                ioend->io_buffer_tail = bh;
        }

        bh->b_private = NULL;
        ioend->io_size += bh->b_size;
}

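/*
 * Translate a file offset into the corresponding disk block number
 * using the cached iomap and attach it to the buffer_head.
 */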
STATIC void
xfs_map_buffer(
        struct buffer_head      *bh,
        xfs_iomap_t             *mp,
        xfs_off_t               offset,
        uint                    block_bits)
{
        sector_t                bn;

        ASSERT(mp->iomap_bn != IOMAP_DADDR_NULL);

        bn = (mp->iomap_bn >> (block_bits - BBSHIFT)) +
              ((offset - mp->iomap_offset) >> block_bits);

        ASSERT(bn || (mp->iomap_flags & IOMAP_REALTIME));

        bh->b_blocknr = bn;
        set_buffer_mapped(bh);
}

STATIC void
xfs_map_at_offset(
        struct buffer_head      *bh,
        loff_t                  offset,
        int                     block_bits,
        xfs_iomap_t             *iomapp)
{
        ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE));
        ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY));

        lock_buffer(bh);
        xfs_map_buffer(bh, iomapp, offset, block_bits);
        bh->b_bdev = iomapp->iomap_target->bt_bdev;
        set_buffer_mapped(bh);
        clear_buffer_delay(bh);
        clear_buffer_unwritten(bh);
}

/*
 * Look for a page at index that is suitable for clustering.
 */
STATIC unsigned int
xfs_probe_page(
        struct page             *page,
        unsigned int            pg_offset,
        int                     mapped)
{
        int                     ret = 0;

        if (PageWriteback(page))
                return 0;

        if (page->mapping && PageDirty(page)) {
                if (page_has_buffers(page)) {
                        struct buffer_head      *bh, *head;

                        bh = head = page_buffers(page);
                        do {
                                if (!buffer_uptodate(bh))
                                        break;
                                if (mapped != buffer_mapped(bh))
                                        break;
                                ret += bh->b_size;
                                if (ret >= pg_offset)
                                        break;
                        } while ((bh = bh->b_this_page) != head);
                } else
                        ret = mapped ? 0 : PAGE_CACHE_SIZE;
        }

        return ret;
}

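/*
 * Measure how much contiguous, similarly-mapped data sits in front of
 * the given buffer: first the remainder of the current page, then the
 * whole pages following it, stopping at the first page that is not
 * suitable for clustering.
 */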
STATIC size_t
xfs_probe_cluster(
        struct inode            *inode,
        struct page             *startpage,
        struct buffer_head      *bh,
        struct buffer_head      *head,
        int                     mapped)
{
        struct pagevec          pvec;
        pgoff_t                 tindex, tlast, tloff;
        size_t                  total = 0;
        int                     done = 0, i;

        /* First sum forwards in this page */
        do {
                if (!buffer_uptodate(bh) || (mapped != buffer_mapped(bh)))
                        return total;
                total += bh->b_size;
        } while ((bh = bh->b_this_page) != head);

        /* if we reached the end of the page, sum forwards in following pages */
        tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
        tindex = startpage->index + 1;

        /* Prune this back to avoid pathological behavior */
        tloff = min(tlast, startpage->index + 64);

        pagevec_init(&pvec, 0);
        while (!done && tindex <= tloff) {
                unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

                if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
                        break;

                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
                        size_t pg_offset, pg_len = 0;

                        if (tindex == tlast) {
                                pg_offset =
                                    i_size_read(inode) & (PAGE_CACHE_SIZE - 1);
                                if (!pg_offset) {
                                        done = 1;
                                        break;
                                }
                        } else
                                pg_offset = PAGE_CACHE_SIZE;

                        if (page->index == tindex && !TestSetPageLocked(page)) {
                                pg_len = xfs_probe_page(page, pg_offset, mapped);
                                unlock_page(page);
                        }

                        if (!pg_len) {
                                done = 1;
                                break;
                        }

                        total += pg_len;
                        tindex++;
                }

                pagevec_release(&pvec);
                cond_resched();
        }

        return total;
}

/*
 * Test if a given page is suitable for writing as part of an unwritten
 * or delayed allocate extent.
 */
STATIC int
xfs_is_delayed_page(
        struct page             *page,
        unsigned int            type)
{
        if (PageWriteback(page))
                return 0;

        if (page->mapping && page_has_buffers(page)) {
                struct buffer_head      *bh, *head;
                int                     acceptable = 0;

                bh = head = page_buffers(page);
                do {
                        if (buffer_unwritten(bh))
                                acceptable = (type == IOMAP_UNWRITTEN);
                        else if (buffer_delay(bh))
                                acceptable = (type == IOMAP_DELAY);
                        else if (buffer_dirty(bh) && buffer_mapped(bh))
                                acceptable = (type == IOMAP_NEW);
                        else
                                break;
                } while ((bh = bh->b_this_page) != head);

                if (acceptable)
                        return 1;
        }

        return 0;
}

/*
 * Allocate & map buffers for page given the extent map.  Write it out.
 * Except for the original page of a writepage, this is called on
 * delalloc/unwritten pages only; for the original page it is possible
 * that the page has no mapping at all.
 */
STATIC int
xfs_convert_page(
        struct inode            *inode,
        struct page             *page,
        loff_t                  tindex,
        xfs_iomap_t             *mp,
        xfs_ioend_t             **ioendp,
        struct writeback_control *wbc,
        int                     startio,
        int                     all_bh)
{
        struct buffer_head      *bh, *head;
        xfs_off_t               end_offset;
        unsigned long           p_offset;
        unsigned int            type;
        int                     bbits = inode->i_blkbits;
        int                     len, page_dirty;
        int                     count = 0, done = 0, uptodate = 1;
        xfs_off_t               offset = page_offset(page);

        if (page->index != tindex)
                goto fail;
        if (TestSetPageLocked(page))
                goto fail;
        if (PageWriteback(page))
                goto fail_unlock_page;
        if (page->mapping != inode->i_mapping)
                goto fail_unlock_page;
        if (!xfs_is_delayed_page(page, (*ioendp)->io_type))
                goto fail_unlock_page;

        /*
         * page_dirty is initially a count of buffers on the page before
         * EOF and is decremented as we move each into a cleanable state.
         *
         * Derivation:
         *
         * End offset is the highest offset that this page should represent.
         * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
         * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
         * hence give us the correct page_dirty count. On any other page,
         * it will be zero and in that case we need page_dirty to be the
         * count of buffers on the page.
         */
        end_offset = min_t(unsigned long long,
                        (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
                        i_size_read(inode));

        len = 1 << inode->i_blkbits;
        p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
                                        PAGE_CACHE_SIZE);
        p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
        page_dirty = p_offset / len;

        bh = head = page_buffers(page);
        do {
                if (offset >= end_offset)
                        break;
                if (!buffer_uptodate(bh))
                        uptodate = 0;
                if (!(PageUptodate(page) || buffer_uptodate(bh))) {
                        done = 1;
                        continue;
                }

                if (buffer_unwritten(bh) || buffer_delay(bh)) {
                        if (buffer_unwritten(bh))
                                type = IOMAP_UNWRITTEN;
                        else
                                type = IOMAP_DELAY;

                        if (!xfs_iomap_valid(mp, offset)) {
                                done = 1;
                                continue;
                        }

                        ASSERT(!(mp->iomap_flags & IOMAP_HOLE));
                        ASSERT(!(mp->iomap_flags & IOMAP_DELAY));

                        xfs_map_at_offset(bh, offset, bbits, mp);
                        if (startio) {
                                xfs_add_to_ioend(inode, bh, offset,
                                                type, ioendp, done);
                        } else {
                                set_buffer_dirty(bh);
                                unlock_buffer(bh);
                                mark_buffer_dirty(bh);
                        }
                        page_dirty--;
                        count++;
                } else {
                        type = IOMAP_NEW;
                        if (buffer_mapped(bh) && all_bh && startio) {
                                lock_buffer(bh);
                                xfs_add_to_ioend(inode, bh, offset,
                                                type, ioendp, done);
                                count++;
                                page_dirty--;
                        } else {
                                done = 1;
                        }
                }
        } while (offset += len, (bh = bh->b_this_page) != head);

        if (uptodate && bh == head)
                SetPageUptodate(page);

        if (startio) {
                if (count) {
                        struct backing_dev_info *bdi;

                        bdi = inode->i_mapping->backing_dev_info;
                        wbc->nr_to_write--;
                        if (bdi_write_congested(bdi)) {
                                wbc->encountered_congestion = 1;
                                done = 1;
                        } else if (wbc->nr_to_write <= 0) {
                                done = 1;
                        }
                }
                xfs_start_page_writeback(page, wbc, !page_dirty, count);
        }

        return done;
 fail_unlock_page:
        unlock_page(page);
 fail:
        return 1;
}

/*
 * Convert & write out a cluster of pages in the same extent as defined
 * by mp and following the start page.
 */
STATIC void
xfs_cluster_write(
        struct inode            *inode,
        pgoff_t                 tindex,
        xfs_iomap_t             *iomapp,
        xfs_ioend_t             **ioendp,
        struct writeback_control *wbc,
        int                     startio,
        int                     all_bh,
        pgoff_t                 tlast)
{
        struct pagevec          pvec;
        int                     done = 0, i;

        pagevec_init(&pvec, 0);
        while (!done && tindex <= tlast) {
                unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

                if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
                        break;

                for (i = 0; i < pagevec_count(&pvec); i++) {
                        done = xfs_convert_page(inode, pvec.pages[i], tindex++,
                                        iomapp, ioendp, wbc, startio, all_bh);
                        if (done)
                                break;
                }

                pagevec_release(&pvec);
                cond_resched();
        }
}

/*
 * Calling this without startio set means we are being asked to make a dirty
 * page ready for freeing its buffers.  When called with startio set then
 * we are coming from writepage.
 *
 * When called with startio set it is important that we write the WHOLE
 * page if possible.
 * The bh->b_state's cannot know if any of the blocks or which block for
 * that matter are dirty due to mmap writes, and therefore bh uptodate is
 * only valid if the page itself isn't completely uptodate.  Some layers
 * may clear the page dirty flag prior to calling write page, under the
 * assumption the entire page will be written out; by not writing out the
 * whole page the page can be reused before all valid dirty data is
 * written out.  Note: in the case of a page that has been dirtied by
 * mmap write but only partially set up by block_prepare_write the
 * bh->b_state's will not agree and only the ones set up by BPW/BCW will
 * have valid state; thus the whole page must be written out.
 */

STATIC int
xfs_page_state_convert(
        struct inode            *inode,
        struct page             *page,
        struct writeback_control *wbc,
        int                     startio,
        int                     unmapped) /* also implies page uptodate */
{
        struct buffer_head      *bh, *head;
        xfs_iomap_t             iomap;
        xfs_ioend_t             *ioend = NULL, *iohead = NULL;
        loff_t                  offset;
        unsigned long           p_offset = 0;
        unsigned int            type;
        __uint64_t              end_offset;
        pgoff_t                 end_index, last_index, tlast;
        ssize_t                 size, len;
        int                     flags, err, iomap_valid = 0, uptodate = 1;
        int                     page_dirty, count = 0;
        int                     trylock = 0;
        int                     all_bh = unmapped;

        if (startio) {
                if (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking)
                        trylock |= BMAPI_TRYLOCK;
        }

        /* Is this page beyond the end of the file? */
        offset = i_size_read(inode);
        end_index = offset >> PAGE_CACHE_SHIFT;
        last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
        if (page->index >= end_index) {
                if ((page->index >= end_index + 1) ||
                    !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
                        if (startio)
                                unlock_page(page);
                        return 0;
                }
        }

        /*
         * page_dirty is initially a count of buffers on the page before
         * EOF and is decremented as we move each into a cleanable state.
         *
         * Derivation:
         *
         * End offset is the highest offset that this page should represent.
         * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
         * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
         * hence give us the correct page_dirty count. On any other page,
         * it will be zero and in that case we need page_dirty to be the
         * count of buffers on the page.
         */
        end_offset = min_t(unsigned long long,
                        (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset);
        len = 1 << inode->i_blkbits;
        p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
                                        PAGE_CACHE_SIZE);
        p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
        page_dirty = p_offset / len;

        bh = head = page_buffers(page);
        offset = page_offset(page);
        flags = BMAPI_READ;
        type = IOMAP_NEW;

        /* TODO: cleanup count and page_dirty */

        do {
                if (offset >= end_offset)
                        break;
                if (!buffer_uptodate(bh))
                        uptodate = 0;
                if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio) {
                        /*
                         * the iomap is actually still valid, but the ioend
                         * isn't.  shouldn't happen too often.
                         */
                        iomap_valid = 0;
                        continue;
                }

                if (iomap_valid)
                        iomap_valid = xfs_iomap_valid(&iomap, offset);

                /*
                 * First case, map an unwritten extent and prepare for
                 * extent state conversion transaction on completion.
                 *
                 * Second case, allocate space for a delalloc buffer.
                 * We can return EAGAIN here in the release page case.
                 *
                 * Third case, an unmapped buffer was found, and we are
                 * in a path where we need to write the whole page out.
                 */
                if (buffer_unwritten(bh) || buffer_delay(bh) ||
                    ((buffer_uptodate(bh) || PageUptodate(page)) &&
                     !buffer_mapped(bh) && (unmapped || startio))) {
                        int new_ioend = 0;

                        /*
                         * Make sure we don't use a read-only iomap
                         */
                        if (flags == BMAPI_READ)
                                iomap_valid = 0;

                        if (buffer_unwritten(bh)) {
                                type = IOMAP_UNWRITTEN;
                                flags = BMAPI_WRITE | BMAPI_IGNSTATE;
                        } else if (buffer_delay(bh)) {
                                type = IOMAP_DELAY;
                                flags = BMAPI_ALLOCATE | trylock;
                        } else {
                                type = IOMAP_NEW;
                                flags = BMAPI_WRITE | BMAPI_MMAP;
                        }

                        if (!iomap_valid) {
                                /*
                                 * if we didn't have a valid mapping then we
                                 * need to ensure that we put the new mapping
                                 * in a new ioend structure. This needs to be
                                 * done to ensure that the ioends correctly
                                 * reflect the block mappings at io completion
                                 * for unwritten extent conversion.
                                 */
                                new_ioend = 1;
                                if (type == IOMAP_NEW) {
                                        size = xfs_probe_cluster(inode,
                                                        page, bh, head, 0);
                                } else {
                                        size = len;
                                }

                                err = xfs_map_blocks(inode, offset, size,
                                                &iomap, flags);
                                if (err)
                                        goto error;
                                iomap_valid = xfs_iomap_valid(&iomap, offset);
                        }
                        if (iomap_valid) {
                                xfs_map_at_offset(bh, offset,
                                                inode->i_blkbits, &iomap);
                                if (startio) {
                                        xfs_add_to_ioend(inode, bh, offset,
                                                        type, &ioend,
                                                        new_ioend);
                                } else {
                                        set_buffer_dirty(bh);
                                        unlock_buffer(bh);
                                        mark_buffer_dirty(bh);
                                }
                                page_dirty--;
                                count++;
                        }
                } else if (buffer_uptodate(bh) && startio) {
                        /*
                         * we got here because the buffer is already mapped.
                         * That means it must already have extents allocated
                         * underneath it. Map the extent by reading it.
                         */
                        if (!iomap_valid || flags != BMAPI_READ) {
                                flags = BMAPI_READ;
                                size = xfs_probe_cluster(inode, page, bh,
                                                                head, 1);
                                err = xfs_map_blocks(inode, offset, size,
                                                &iomap, flags);
                                if (err)
                                        goto error;
                                iomap_valid = xfs_iomap_valid(&iomap, offset);
                        }

                        /*
                         * We set the type to IOMAP_NEW in case we are doing a
                         * small write at EOF that is extending the file but
                         * without needing an allocation. We need to update the
                         * file size on I/O completion in this case so it is
                         * the same case as having just allocated a new extent
                         * that we are writing into for the first time.
                         */
                        type = IOMAP_NEW;
                        if (!test_and_set_bit(BH_Lock, &bh->b_state)) {
                                ASSERT(buffer_mapped(bh));
                                if (iomap_valid)
                                        all_bh = 1;
                                xfs_add_to_ioend(inode, bh, offset, type,
                                                &ioend, !iomap_valid);
                                page_dirty--;
                                count++;
                        } else {
                                iomap_valid = 0;
                        }
                } else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
                           (unmapped || startio)) {
                        iomap_valid = 0;
                }

                if (!iohead)
                        iohead = ioend;

        } while (offset += len, ((bh = bh->b_this_page) != head));

        if (uptodate && bh == head)
                SetPageUptodate(page);

        if (startio)
                xfs_start_page_writeback(page, wbc, 1, count);

        if (ioend && iomap_valid) {
                offset = (iomap.iomap_offset + iomap.iomap_bsize - 1) >>
                                        PAGE_CACHE_SHIFT;
                tlast = min_t(pgoff_t, offset, last_index);
                xfs_cluster_write(inode, page->index + 1, &iomap, &ioend,
                                        wbc, startio, all_bh, tlast);
        }

        if (iohead)
                xfs_submit_ioend(iohead);

        return page_dirty;

error:
        if (iohead)
                xfs_cancel_ioend(iohead);

        /*
         * If it's delalloc and we have nowhere to put it,
         * throw it away, unless the lower layers told
         * us to try again.
         */
        if (err != -EAGAIN) {
                if (!unmapped)
                        block_invalidatepage(page, 0);
                ClearPageUptodate(page);
        }
        return err;
}

/*
 * writepage: Called from one of two places:
 *
 * 1. we are flushing a delalloc buffer head.
 *
 * 2. we are writing out a dirty page. Typically the page dirty
 *    state is cleared before we get here. In this case it is
 *    conceivable we have no buffer heads.
 *
 * For delalloc space on the page we need to allocate space and
 * flush it. For unmapped buffer heads on the page we should
 * allocate space if the page is uptodate. For any other dirty
 * buffer heads on the page we should flush them.
 *
 * If we detect that a transaction would be required to flush
 * the page, we have to check the process flags first: if we
 * are already in a transaction or disk I/O during allocations
 * is off, we need to fail the writepage and redirty the page.
 */

STATIC int
xfs_vm_writepage(
        struct page             *page,
        struct writeback_control *wbc)
{
        int                     error;
        int                     need_trans;
        int                     delalloc, unmapped, unwritten;
        struct inode            *inode = page->mapping->host;

        xfs_page_trace(XFS_WRITEPAGE_ENTER, inode, page, 0);

        /*
         * We need a transaction if:
         *  1. There are delalloc buffers on the page
         *  2. The page is uptodate and we have unmapped buffers
         *  3. The page is uptodate and we have no buffers
         *  4. There are unwritten buffers on the page
         */

        if (!page_has_buffers(page)) {
                unmapped = 1;
                need_trans = 1;
        } else {
                xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
                if (!PageUptodate(page))
                        unmapped = 0;
                need_trans = delalloc + unmapped + unwritten;
        }

        /*
         * If we need a transaction and the process flags say
         * we are already in a transaction, or no IO is allowed
         * then mark the page dirty again and leave the page
         * as is.
         */
        if (current_test_flags(PF_FSTRANS) && need_trans)
                goto out_fail;

        /*
         * Delay hooking up buffer heads until we have
         * made our go/no-go decision.
         */
        if (!page_has_buffers(page))
                create_empty_buffers(page, 1 << inode->i_blkbits, 0);

        /*
         * Convert delayed allocate, unwritten or unmapped space
         * to real space and flush out to disk.
         */
        error = xfs_page_state_convert(inode, page, wbc, 1, unmapped);
        if (error == -EAGAIN)
                goto out_fail;
        if (unlikely(error < 0))
                goto out_unlock;

        return 0;

out_fail:
        redirty_page_for_writepage(wbc, page);
        unlock_page(page);
        return 0;
out_unlock:
        unlock_page(page);
        return error;
}

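/*
 * If the vnode is flagged as truncated, clear that state before
 * deferring the rest of the writeback to generic_writepages().
 */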
STATIC int
xfs_vm_writepages(
        struct address_space    *mapping,
        struct writeback_control *wbc)
{
        struct bhv_vnode        *vp = vn_from_inode(mapping->host);

        if (VN_TRUNC(vp))
                VUNTRUNCATE(vp);
        return generic_writepages(mapping, wbc);
}

/*
 * Called to move a page into cleanable state - and from there
 * to be released. Possibly the page is already clean. We always
 * have buffer heads in this call.
 *
 * Returns non-zero if the buffers were freed and the page can be
 * released, 0 otherwise.
 *
 * Possible scenarios are:
 *
 * 1. We are being called to release a page which has been written
 *    to via regular I/O. buffer heads will be dirty and possibly
 *    delalloc. If no delalloc buffer heads in this case then we
 *    can just return zero.
 *
 * 2. We are called to release a page which has been written via
 *    mmap, all we need to do is ensure there is no delalloc
 *    state in the buffer heads, if not we can let the caller
 *    free them and we should come back later via writepage.
 */
STATIC int
xfs_vm_releasepage(
        struct page             *page,
        gfp_t                   gfp_mask)
{
        struct inode            *inode = page->mapping->host;
        int                     dirty, delalloc, unmapped, unwritten;
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_ALL,
                .nr_to_write = 1,
        };

        xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, 0);

        if (!page_has_buffers(page))
                return 0;

        xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
        if (!delalloc && !unwritten)
                goto free_buffers;

        if (!(gfp_mask & __GFP_FS))
                return 0;

        /* If we are already inside a transaction or the thread cannot
         * do I/O, we cannot release this page.
         */
        if (current_test_flags(PF_FSTRANS))
                return 0;

        /*
         * Convert delalloc space to real space, do not flush the
         * data out to disk, that will be done by the caller.
         * Never need to allocate space here - we will always
         * come back to writepage in that case.
         */
        dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0);
        if (dirty == 0 && !unwritten)
                goto free_buffers;
        return 0;

free_buffers:
        return try_to_free_buffers(page);
}

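/*
 * Common implementation behind the buffered and direct get_blocks
 * callbacks: map iblock through the vnode bmap operation and fill in
 * bh_result from the returned iomap.
 */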
Linus Torvalds1da177e2005-04-16 15:20:36 -07001311STATIC int
Nathan Scottc2536662006-03-29 10:44:40 +10001312__xfs_get_blocks(
Linus Torvalds1da177e2005-04-16 15:20:36 -07001313 struct inode *inode,
1314 sector_t iblock,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001315 struct buffer_head *bh_result,
1316 int create,
1317 int direct,
1318 bmapi_flags_t flags)
1319{
Nathan Scott67fcaa72006-06-09 17:00:52 +10001320 bhv_vnode_t *vp = vn_from_inode(inode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001321 xfs_iomap_t iomap;
Nathan Scottfdc7ed72005-11-02 15:13:13 +11001322 xfs_off_t offset;
1323 ssize_t size;
Nathan Scottc2536662006-03-29 10:44:40 +10001324 int niomap = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001325 int error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001326
Nathan Scottfdc7ed72005-11-02 15:13:13 +11001327 offset = (xfs_off_t)iblock << inode->i_blkbits;
Nathan Scottc2536662006-03-29 10:44:40 +10001328 ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
1329 size = bh_result->b_size;
Nathan Scott67fcaa72006-06-09 17:00:52 +10001330 error = bhv_vop_bmap(vp, offset, size,
1331 create ? flags : BMAPI_READ, &iomap, &niomap);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001332 if (error)
1333 return -error;
Nathan Scottc2536662006-03-29 10:44:40 +10001334 if (niomap == 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001335 return 0;
1336
1337 if (iomap.iomap_bn != IOMAP_DADDR_NULL) {
Nathan Scott87cbc492006-03-14 13:26:43 +11001338 /*
1339 * For unwritten extents do not report a disk address on
Linus Torvalds1da177e2005-04-16 15:20:36 -07001340 * the read case (treat as if we're reading into a hole).
1341 */
		if (create || !(iomap.iomap_flags & IOMAP_UNWRITTEN)) {
			xfs_map_buffer(bh_result, &iomap, offset,
				       inode->i_blkbits);
		}
		if (create && (iomap.iomap_flags & IOMAP_UNWRITTEN)) {
			if (direct)
				bh_result->b_private = inode;
			set_buffer_unwritten(bh_result);
		}
	}

	/*
	 * If this is a realtime file, data may be on a different device
	 * to that pointed to by the buffer_head's b_bdev currently.
	 */
	bh_result->b_bdev = iomap.iomap_target->bt_bdev;

	/*
	 * If we previously allocated a block out beyond eof and we are now
	 * coming back to use it then we will need to flag it as new even if it
	 * has a disk address.
	 *
	 * With sub-block writes into unwritten extents we also need to mark
	 * the buffer as new so that the unwritten parts of the buffer get
	 * correctly zeroed.
	 */
	if (create &&
	    ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
	     (offset >= i_size_read(inode)) ||
	     (iomap.iomap_flags & (IOMAP_NEW|IOMAP_UNWRITTEN))))
		set_buffer_new(bh_result);

	if (iomap.iomap_flags & IOMAP_DELAY) {
		BUG_ON(direct);
		if (create) {
			set_buffer_uptodate(bh_result);
			set_buffer_mapped(bh_result);
			set_buffer_delay(bh_result);
		}
	}

	if (direct || size > (1 << inode->i_blkbits)) {
		ASSERT(iomap.iomap_bsize - iomap.iomap_delta > 0);
		offset = min_t(xfs_off_t,
			       iomap.iomap_bsize - iomap.iomap_delta, size);
		bh_result->b_size = (ssize_t)min_t(xfs_off_t, LONG_MAX, offset);
	}

	return 0;
}

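/*
 * Entry point handed to the generic buffered I/O helpers
 * (block_write_begin, the mpage read helpers and generic_block_bmap
 * below).  Note that reads still end up mapping with BMAPI_READ:
 * __xfs_get_blocks() only applies these flags when create is set.
 */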
int
xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock,
				bh_result, create, 0, BMAPI_WRITE);
}

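/*
 * As above, but for the direct I/O path: BMAPI_DIRECT makes the bmap
 * code hand back real (possibly unwritten) extents rather than
 * delalloc reservations - hence the BUG_ON(direct) in the
 * IOMAP_DELAY branch of __xfs_get_blocks() above.
 */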
STATIC int
xfs_get_blocks_direct(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock,
				bh_result, create, 1, BMAPI_WRITE|BMAPI_DIRECT);
}

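/*
 * Direct I/O completion handler, called by the generic direct I/O
 * code once all bios for a request have finished.  Any unwritten
 * extent conversion is deferred to a workqueue via
 * xfs_finish_ioend(); the comment below explains why synchronous
 * direct I/O has to wait for that work to complete.
 */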
STATIC void
xfs_end_io_direct(
	struct kiocb		*iocb,
	loff_t			offset,
	ssize_t			size,
	void			*private)
{
	xfs_ioend_t		*ioend = iocb->private;

	/*
	 * Non-NULL private data means we need to issue a transaction to
	 * convert a range from unwritten to written extents.  This needs
	 * to happen from process context but aio+dio I/O completion
	 * happens from irq context so we need to defer it to a workqueue.
	 * This is not necessary for synchronous direct I/O, but we do
	 * it anyway to keep the code uniform and simpler.
	 *
	 * Well, if only it were that simple. Because synchronous direct I/O
	 * requires extent conversion to occur *before* we return to userspace,
	 * we have to wait for extent conversion to complete. Look at the
	 * iocb that has been passed to us to determine if this is AIO or
	 * not. If it is synchronous, tell xfs_finish_ioend() to kick the
	 * workqueue and wait for it to complete.
	 *
	 * The core direct I/O code might be changed to always call the
	 * completion handler in the future, in which case all this can
	 * go away.
	 */
	ioend->io_offset = offset;
	ioend->io_size = size;
	if (ioend->io_type == IOMAP_READ) {
		xfs_finish_ioend(ioend, 0);
	} else if (private && size > 0) {
		xfs_finish_ioend(ioend, is_sync_kiocb(iocb));
	} else {
		/*
		 * A direct I/O write ioend starts its life in unwritten
		 * state in case it maps an unwritten extent.  This write
		 * didn't map an unwritten extent, so switch its completion
		 * handler.
		 */
		INIT_WORK(&ioend->io_work, xfs_end_bio_written);
		xfs_finish_ioend(ioend, 0);
	}

	/*
	 * blockdev_direct_IO can return an error even after the I/O
	 * completion handler was called.  Thus we need to protect
	 * against double-freeing.
	 */
	iocb->private = NULL;
}

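/*
 * A single bmap call with BMAPI_DEVICE picks the right block device
 * (data vs realtime); the generic direct I/O code then does the real
 * work.  Writes go through the "own locking" variant because XFS
 * does its own inode locking around direct I/O, while reads can use
 * the lockless variant.  The ioend is allocated up front so the
 * completion handler has somewhere to record the range; the handler
 * clears iocb->private, so a non-NULL pointer left after a
 * non-queued return means it never ran and the ioend must be torn
 * down here.
 */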
STATIC ssize_t
xfs_vm_direct_IO(
	int			rw,
	struct kiocb		*iocb,
	const struct iovec	*iov,
	loff_t			offset,
	unsigned long		nr_segs)
{
	struct file	*file = iocb->ki_filp;
	struct inode	*inode = file->f_mapping->host;
	bhv_vnode_t	*vp = vn_from_inode(inode);
	xfs_iomap_t	iomap;
	int		maps = 1;
	int		error;
	ssize_t		ret;

	error = bhv_vop_bmap(vp, offset, 0, BMAPI_DEVICE, &iomap, &maps);
	if (error)
		return -error;

	if (rw == WRITE) {
		iocb->private = xfs_alloc_ioend(inode, IOMAP_UNWRITTEN);
		ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
			iomap.iomap_target->bt_bdev,
			iov, offset, nr_segs,
			xfs_get_blocks_direct,
			xfs_end_io_direct);
	} else {
		iocb->private = xfs_alloc_ioend(inode, IOMAP_READ);
		ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
			iomap.iomap_target->bt_bdev,
			iov, offset, nr_segs,
			xfs_get_blocks_direct,
			xfs_end_io_direct);
	}

	if (unlikely(ret != -EIOCBQUEUED && iocb->private))
		xfs_destroy_ioend(iocb->private);
	return ret;
}

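/*
 * Buffered writes lean entirely on the generic write_begin helper;
 * xfs_get_blocks() does the block reservation, typically as a
 * delalloc mapping (see the IOMAP_DELAY handling above) so that real
 * allocation can be deferred until writeback.
 */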
STATIC int
xfs_vm_write_begin(
	struct file		*file,
	struct address_space	*mapping,
	loff_t			pos,
	unsigned		len,
	unsigned		flags,
	struct page		**pagep,
	void			**fsdata)
{
	*pagep = NULL;
	return block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				 xfs_get_blocks);
}

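/*
 * ->bmap is what the FIBMAP ioctl lands on.  An illustrative (not
 * verbatim) userspace sequence that ends up here:
 *
 *	int blk = 0;
 *	ioctl(fd, FIBMAP, &blk);	- blk now holds the disk block
 *
 * Delalloc blocks have no disk address yet, so dirty data is flushed
 * under the read-locked inode first to force real allocation before
 * the generic lookup runs.
 */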
STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct inode		*inode = (struct inode *)mapping->host;
	bhv_vnode_t		*vp = vn_from_inode(inode);

	vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
	bhv_vop_rwlock(vp, VRWLOCK_READ);
	bhv_vop_flush_pages(vp, (xfs_off_t)0, -1, 0, FI_REMAPF);
	bhv_vop_rwunlock(vp, VRWLOCK_READ);
	return generic_block_bmap(mapping, block, xfs_get_blocks);
}

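/*
 * The read paths are fully generic: the mpage helpers build large
 * bios straight from the mappings xfs_get_blocks() returns (create
 * is 0 here, so everything maps with BMAPI_READ).
 */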
STATIC int
xfs_vm_readpage(
	struct file		*unused,
	struct page		*page)
{
	return mpage_readpage(page, xfs_get_blocks);
}

STATIC int
xfs_vm_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
}

STATIC void
xfs_vm_invalidatepage(
	struct page		*page,
	unsigned long		offset)
{
	xfs_page_trace(XFS_INVALIDPAGE_ENTER,
			page->mapping->host, page, offset);
	block_invalidatepage(page, offset);
}

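/*
 * Glue between the Linux VM/VFS and the routines above.  writepage
 * and writepages are implemented earlier in this file; write_end can
 * be fully generic because write_begin already set the buffers up
 * via xfs_get_blocks().
 */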
const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readpages		= xfs_vm_readpages,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.sync_page		= block_sync_page,
	.releasepage		= xfs_vm_releasepage,
	.invalidatepage		= xfs_vm_invalidatepage,
	.write_begin		= xfs_vm_write_begin,
	.write_end		= generic_write_end,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= xfs_vm_direct_IO,
	.migratepage		= buffer_migrate_page,
};