/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_trans.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_error.h"
#include "xfs_rw.h"
#include "xfs_iomap.h"
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>

STATIC void xfs_count_page_state(struct page *, int *, int *, int *);

#if defined(XFS_RW_TRACE)
void
xfs_page_trace(
	int		tag,
	struct inode	*inode,
	struct page	*page,
	int		mask)
{
	xfs_inode_t	*ip;
	vnode_t		*vp = LINVFS_GET_VP(inode);
	loff_t		isize = i_size_read(inode);
	loff_t		offset = page_offset(page);
	int		delalloc = -1, unmapped = -1, unwritten = -1;

	if (page_has_buffers(page))
		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);

	ip = xfs_vtoi(vp);
	if (!ip->i_rwtrace)
		return;

	ktrace_enter(ip->i_rwtrace,
		(void *)((unsigned long)tag),
		(void *)ip,
		(void *)inode,
		(void *)page,
		(void *)((unsigned long)mask),
		(void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
		(void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
		(void *)((unsigned long)((isize >> 32) & 0xffffffff)),
		(void *)((unsigned long)(isize & 0xffffffff)),
		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
		(void *)((unsigned long)(offset & 0xffffffff)),
		(void *)((unsigned long)delalloc),
		(void *)((unsigned long)unmapped),
		(void *)((unsigned long)unwritten),
		(void *)NULL,
		(void *)NULL);
}
#else
#define xfs_page_trace(tag, inode, page, mask)
#endif

/*
 * Schedule IO completion handling on an xfsdatad if this was
 * the final hold on this ioend.
 */
STATIC void
xfs_finish_ioend(
	xfs_ioend_t		*ioend)
{
	if (atomic_dec_and_test(&ioend->io_remaining))
		queue_work(xfsdatad_workqueue, &ioend->io_work);
}

/*
 * We're now finished for good with this ioend structure.
 * Update the page state via the associated buffer_heads,
 * release holds on the inode and bio, and finally free
 * up memory.  Do not use the ioend after this.
 */
STATIC void
xfs_destroy_ioend(
	xfs_ioend_t		*ioend)
{
	struct buffer_head	*bh, *next;

	for (bh = ioend->io_buffer_head; bh; bh = next) {
		next = bh->b_private;
		bh->b_end_io(bh, ioend->io_uptodate);
	}

	vn_iowake(ioend->io_vnode);
	mempool_free(ioend, xfs_ioend_pool);
}

/*
 * Buffered IO write completion for delayed allocate extents.
 * TODO: Update ondisk isize now that we know the file data
 * has been flushed (i.e. the notorious "NULL file" problem).
 */
STATIC void
xfs_end_bio_delalloc(
	void			*data)
{
	xfs_ioend_t		*ioend = data;

	xfs_destroy_ioend(ioend);
}

/*
 * Buffered IO write completion for regular, written extents.
 */
STATIC void
xfs_end_bio_written(
	void			*data)
{
	xfs_ioend_t		*ioend = data;

	xfs_destroy_ioend(ioend);
}

/*
 * IO write completion for unwritten extents.
 *
 * Issue transactions to convert a buffer range from unwritten
 * to written extents.
 */
STATIC void
xfs_end_bio_unwritten(
	void			*data)
{
	xfs_ioend_t		*ioend = data;
	vnode_t			*vp = ioend->io_vnode;
	xfs_off_t		offset = ioend->io_offset;
	size_t			size = ioend->io_size;
	int			error;

	if (ioend->io_uptodate)
		VOP_BMAP(vp, offset, size, BMAPI_UNWRITTEN, NULL, NULL, error);
	xfs_destroy_ioend(ioend);
}

/*
 * Allocate and initialise an IO completion structure.
 * We need to track unwritten extent write completion here initially.
 * We'll need to extend this for updating the ondisk inode size later
 * (vs. incore size).
 */
STATIC xfs_ioend_t *
xfs_alloc_ioend(
	struct inode		*inode,
	unsigned int		type)
{
	xfs_ioend_t		*ioend;

	ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);

	/*
	 * Set the count to 1 initially, which will prevent an I/O
	 * completion callback from running before we have started
	 * all of the I/O; otherwise the completion routine could be
	 * called too early.
	 */
	atomic_set(&ioend->io_remaining, 1);
	ioend->io_uptodate = 1; /* cleared if any I/O fails */
	ioend->io_list = NULL;
	ioend->io_type = type;
	ioend->io_vnode = LINVFS_GET_VP(inode);
	ioend->io_buffer_head = NULL;
	ioend->io_buffer_tail = NULL;
	atomic_inc(&ioend->io_vnode->v_iocount);
	ioend->io_offset = 0;
	ioend->io_size = 0;

	if (type == IOMAP_UNWRITTEN)
		INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten, ioend);
	else if (type == IOMAP_DELAY)
		INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc, ioend);
	else
		INIT_WORK(&ioend->io_work, xfs_end_bio_written, ioend);

	return ioend;
}

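/*
 * Map a range of the file to disk blocks via VOP_BMAP, marking the
 * vnode modified when the mapping may allocate space.  Returns a
 * negated errno for the Linux callers.
 */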
STATIC int
xfs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	ssize_t			count,
	xfs_iomap_t		*mapp,
	int			flags)
{
	vnode_t			*vp = LINVFS_GET_VP(inode);
	int			error, nmaps = 1;

	VOP_BMAP(vp, offset, count, flags, mapp, &nmaps, error);
	if (!error && (flags & (BMAPI_WRITE|BMAPI_ALLOCATE)))
		VMODIFY(vp);
	return -error;
}

STATIC inline int
xfs_iomap_valid(
	xfs_iomap_t		*iomapp,
	loff_t			offset)
{
	return offset >= iomapp->iomap_offset &&
		offset < iomapp->iomap_offset + iomapp->iomap_bsize;
}

/*
 * BIO completion handler for buffered IO.
 */
STATIC int
xfs_end_bio(
	struct bio		*bio,
	unsigned int		bytes_done,
	int			error)
{
	xfs_ioend_t		*ioend = bio->bi_private;

	if (bio->bi_size)
		return 1;

	ASSERT(ioend);
	ASSERT(atomic_read(&bio->bi_cnt) >= 1);

	/* Toss bio and pass work off to an xfsdatad thread */
	if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
		ioend->io_uptodate = 0;
	bio->bi_private = NULL;
	bio->bi_end_io = NULL;

	bio_put(bio);
	xfs_finish_ioend(ioend);
	return 0;
}

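/*
 * Submit a bio on behalf of an ioend, taking an extra ioend reference
 * for the duration of the I/O so completion cannot run early.  The
 * bio reference taken in xfs_alloc_ioend_bio is dropped here once the
 * bio has been handed to the block layer.
 */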
STATIC void
xfs_submit_ioend_bio(
	xfs_ioend_t		*ioend,
	struct bio		*bio)
{
	atomic_inc(&ioend->io_remaining);

	bio->bi_private = ioend;
	bio->bi_end_io = xfs_end_bio;

	submit_bio(WRITE, bio);
	ASSERT(!bio_flagged(bio, BIO_EOPNOTSUPP));
	bio_put(bio);
}

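/*
 * Allocate a bio sized for the device, retrying with progressively
 * fewer vectors if the allocation fails.  An extra bio reference is
 * taken so the bio survives until it is submitted.
 */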
STATIC struct bio *
xfs_alloc_ioend_bio(
	struct buffer_head	*bh)
{
	struct bio		*bio;
	int			nvecs = bio_get_nr_vecs(bh->b_bdev);

	do {
		bio = bio_alloc(GFP_NOIO, nvecs);
		nvecs >>= 1;
	} while (!bio);

	ASSERT(bio->bi_private == NULL);
	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	bio_get(bio);
	return bio;
}

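/*
 * Mark a mapped, locked buffer as under async writeback and clean.
 */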
STATIC void
xfs_start_buffer_writeback(
	struct buffer_head	*bh)
{
	ASSERT(buffer_mapped(bh));
	ASSERT(buffer_locked(bh));
	ASSERT(!buffer_delay(bh));
	ASSERT(!buffer_unwritten(bh));

	mark_buffer_async_write(bh);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
}

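/*
 * Flip the page into writeback state and unlock it.  If none of its
 * buffers were queued for I/O, end writeback immediately and tell the
 * caller's writeback_control that the page was skipped.
 */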
STATIC void
xfs_start_page_writeback(
	struct page		*page,
	struct writeback_control *wbc,
	int			clear_dirty,
	int			buffers)
{
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));
	set_page_writeback(page);
	if (clear_dirty)
		clear_page_dirty(page);
	unlock_page(page);
	if (!buffers) {
		end_page_writeback(page);
		wbc->pages_skipped++;	/* We didn't write this page */
	}
}

static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}

/*
 * Submit all of the bios for all of the ioends we have saved up, covering the
 * initial writepage page and also any probed pages.
 *
 * Because we may have multiple ioends spanning a page, we need to start
 * writeback on all the buffers before we submit them for I/O.  If we mark
 * the buffers as we go, a page can end up with only some buffers marked
 * async write, and I/O completion on those can occur before we mark the
 * remaining buffers async write.
 *
 * The end result of this is that we trip a bug in end_page_writeback()
 * because we call it twice for the one page, as the code in
 * end_buffer_async_write() assumes that all buffers on the page are
 * started at the same time.
 *
 * The fix is two passes across the ioend list - one to start writeback
 * on the buffer_heads, and then a second to submit them for I/O.
 */
STATIC void
xfs_submit_ioend(
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*head = ioend;
	xfs_ioend_t		*next;
	struct buffer_head	*bh;
	struct bio		*bio;
	sector_t		lastblock = 0;

	/* Pass 1 - start writeback */
	do {
		next = ioend->io_list;
		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
			xfs_start_buffer_writeback(bh);
		}
	} while ((ioend = next) != NULL);

	/* Pass 2 - submit I/O */
	ioend = head;
	do {
		next = ioend->io_list;
		bio = NULL;

		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {

			if (!bio) {
 retry:
				bio = xfs_alloc_ioend_bio(bh);
			} else if (bh->b_blocknr != lastblock + 1) {
				xfs_submit_ioend_bio(ioend, bio);
				goto retry;
			}

			if (bio_add_buffer(bio, bh) != bh->b_size) {
				xfs_submit_ioend_bio(ioend, bio);
				goto retry;
			}

			lastblock = bh->b_blocknr;
		}
		if (bio)
			xfs_submit_ioend_bio(ioend, bio);
		xfs_finish_ioend(ioend);
	} while ((ioend = next) != NULL);
}

/*
 * Cancel submission of all buffer_heads so far in this ioend.
 * Toss the ioend too.  Only ever called for the initial page
 * in a writepage request, so only ever one page.
 */
STATIC void
xfs_cancel_ioend(
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*next;
	struct buffer_head	*bh, *next_bh;

	do {
		next = ioend->io_list;
		bh = ioend->io_buffer_head;
		do {
			next_bh = bh->b_private;
			clear_buffer_async_write(bh);
			unlock_buffer(bh);
		} while ((bh = next_bh) != NULL);

		vn_iowake(ioend->io_vnode);
		mempool_free(ioend, xfs_ioend_pool);
	} while ((ioend = next) != NULL);
}

/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 * A new ioend is chained onto the previous one and handed back to
 * the caller via *result.
 */
STATIC void
xfs_add_to_ioend(
	struct inode		*inode,
	struct buffer_head	*bh,
	xfs_off_t		offset,
	unsigned int		type,
	xfs_ioend_t		**result,
	int			need_ioend)
{
	xfs_ioend_t		*ioend = *result;

	if (!ioend || need_ioend || type != ioend->io_type) {
		xfs_ioend_t	*previous = *result;

		ioend = xfs_alloc_ioend(inode, type);
		ioend->io_offset = offset;
		ioend->io_buffer_head = bh;
		ioend->io_buffer_tail = bh;
		if (previous)
			previous->io_list = ioend;
		*result = ioend;
	} else {
		ioend->io_buffer_tail->b_private = bh;
		ioend->io_buffer_tail = bh;
	}

	bh->b_private = NULL;
	ioend->io_size += bh->b_size;
}

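/*
 * Copy the disk mapping for a single buffer out of the iomap: compute
 * the buffer's disk block number from the iomap's start block plus the
 * offset into the mapping, then mark the buffer mapped.
 */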
STATIC void
xfs_map_at_offset(
	struct buffer_head	*bh,
	loff_t			offset,
	int			block_bits,
	xfs_iomap_t		*iomapp)
{
	xfs_daddr_t		bn;
	int			sector_shift;

	ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE));
	ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY));
	ASSERT(iomapp->iomap_bn != IOMAP_DADDR_NULL);

	sector_shift = block_bits - BBSHIFT;
	bn = (iomapp->iomap_bn >> sector_shift) +
	      ((offset - iomapp->iomap_offset) >> block_bits);

	ASSERT(bn || (iomapp->iomap_flags & IOMAP_REALTIME));
	ASSERT((bn << sector_shift) >= iomapp->iomap_bn);

	lock_buffer(bh);
	bh->b_blocknr = bn;
	bh->b_bdev = iomapp->iomap_target->bt_bdev;
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
}

/*
 * Look for a page at index that is suitable for clustering.
 */
STATIC unsigned int
xfs_probe_page(
	struct page		*page,
	unsigned int		pg_offset,
	int			mapped)
{
	int			ret = 0;

	if (PageWriteback(page))
		return 0;

	if (page->mapping && PageDirty(page)) {
		if (page_has_buffers(page)) {
			struct buffer_head	*bh, *head;

			bh = head = page_buffers(page);
			do {
				if (!buffer_uptodate(bh))
					break;
				if (mapped != buffer_mapped(bh))
					break;
				ret += bh->b_size;
				if (ret >= pg_offset)
					break;
			} while ((bh = bh->b_this_page) != head);
		} else
			ret = mapped ? 0 : PAGE_CACHE_SIZE;
	}

	return ret;
}

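/*
 * Measure how far a run of contiguous, similarly-mapped dirty buffers
 * extends: first within the current page, then across the following
 * pages (capped to avoid pathological scans), so the caller knows how
 * large a mapping to ask for.
 */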
STATIC size_t
xfs_probe_cluster(
	struct inode		*inode,
	struct page		*startpage,
	struct buffer_head	*bh,
	struct buffer_head	*head,
	int			mapped)
{
	struct pagevec		pvec;
	pgoff_t			tindex, tlast, tloff;
	size_t			total = 0;
	int			done = 0, i;

	/* First sum forwards in this page */
	do {
		if (mapped != buffer_mapped(bh))
			return total;
		total += bh->b_size;
	} while ((bh = bh->b_this_page) != head);

	/* if we reached the end of the page, sum forwards in following pages */
	tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
	tindex = startpage->index + 1;

	/* Prune this back to avoid pathological behavior */
	tloff = min(tlast, startpage->index + 64);

	pagevec_init(&pvec, 0);
	while (!done && tindex <= tloff) {
		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			size_t pg_offset, len = 0;

			if (tindex == tlast) {
				pg_offset =
				    i_size_read(inode) & (PAGE_CACHE_SIZE - 1);
				if (!pg_offset) {
					done = 1;
					break;
				}
			} else
				pg_offset = PAGE_CACHE_SIZE;

			if (page->index == tindex && !TestSetPageLocked(page)) {
				len = xfs_probe_page(page, pg_offset, mapped);
				unlock_page(page);
			}

			if (!len) {
				done = 1;
				break;
			}

			total += len;
			tindex++;
		}

		pagevec_release(&pvec);
		cond_resched();
	}

	return total;
}

/*
 * Test if a given page is suitable for writing as part of an unwritten
 * or delayed allocate extent.
 */
STATIC int
xfs_is_delayed_page(
	struct page		*page,
	unsigned int		type)
{
	if (PageWriteback(page))
		return 0;

	if (page->mapping && page_has_buffers(page)) {
		struct buffer_head	*bh, *head;
		int			acceptable = 0;

		bh = head = page_buffers(page);
		do {
			if (buffer_unwritten(bh))
				acceptable = (type == IOMAP_UNWRITTEN);
			else if (buffer_delay(bh))
				acceptable = (type == IOMAP_DELAY);
			else if (buffer_mapped(bh))
				acceptable = (type == 0);
			else
				break;
		} while ((bh = bh->b_this_page) != head);

		if (acceptable)
			return 1;
	}

	return 0;
}

/*
 * Allocate & map buffers for page given the extent map.  Write it out.
 * Except for the original page of a writepage, this is called on
 * delalloc/unwritten pages only; for the original page it is possible
 * that the page has no mapping at all.
 */
STATIC int
xfs_convert_page(
	struct inode		*inode,
	struct page		*page,
	loff_t			tindex,
	xfs_iomap_t		*mp,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc,
	int			startio,
	int			all_bh)
{
	struct buffer_head	*bh, *head;
	xfs_off_t		end_offset;
	unsigned long		p_offset;
	unsigned int		type;
	int			bbits = inode->i_blkbits;
	int			len, page_dirty;
	int			count = 0, done = 0, uptodate = 1;
	xfs_off_t		offset = page_offset(page);

	if (page->index != tindex)
		goto fail;
	if (TestSetPageLocked(page))
		goto fail;
	if (PageWriteback(page))
		goto fail_unlock_page;
	if (page->mapping != inode->i_mapping)
		goto fail_unlock_page;
	if (!xfs_is_delayed_page(page, (*ioendp)->io_type))
		goto fail_unlock_page;

	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 *
	 * Derivation:
	 *
	 * End offset is the highest offset that this page should represent.
	 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
	 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
	 * hence give us the correct page_dirty count.  On any other page,
	 * it will be zero and in that case we need page_dirty to be the
	 * count of buffers on the page.
	 */
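	/*
	 * For example: with 4096 byte pages and 512 byte blocks, a last
	 * page where EOF falls 768 bytes in gives p_offset =
	 * roundup(768, 512) = 1024 below, so page_dirty = 1024 / 512 = 2,
	 * i.e. the two buffers that lie at least partially before EOF.
	 */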
	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
			i_size_read(inode));

	len = 1 << inode->i_blkbits;
	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
					PAGE_CACHE_SIZE);
	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
	page_dirty = p_offset / len;

	bh = head = page_buffers(page);
	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh))) {
			done = 1;
			continue;
		}

		if (buffer_unwritten(bh) || buffer_delay(bh)) {
			if (buffer_unwritten(bh))
				type = IOMAP_UNWRITTEN;
			else
				type = IOMAP_DELAY;

			if (!xfs_iomap_valid(mp, offset)) {
				done = 1;
				continue;
			}

			ASSERT(!(mp->iomap_flags & IOMAP_HOLE));
			ASSERT(!(mp->iomap_flags & IOMAP_DELAY));

			xfs_map_at_offset(bh, offset, bbits, mp);
			if (startio) {
				xfs_add_to_ioend(inode, bh, offset,
						type, ioendp, done);
			} else {
				set_buffer_dirty(bh);
				unlock_buffer(bh);
				mark_buffer_dirty(bh);
			}
			page_dirty--;
			count++;
		} else {
			type = 0;
			if (buffer_mapped(bh) && all_bh && startio) {
				lock_buffer(bh);
				xfs_add_to_ioend(inode, bh, offset,
						type, ioendp, done);
				count++;
				page_dirty--;
			} else {
				done = 1;
			}
		}
	} while (offset += len, (bh = bh->b_this_page) != head);

	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (startio) {
		if (count) {
			struct backing_dev_info *bdi;

			bdi = inode->i_mapping->backing_dev_info;
			if (bdi_write_congested(bdi)) {
				wbc->encountered_congestion = 1;
				done = 1;
			} else if (--wbc->nr_to_write <= 0) {
				done = 1;
			}
		}
		xfs_start_page_writeback(page, wbc, !page_dirty, count);
	}

	return done;
 fail_unlock_page:
	unlock_page(page);
 fail:
	return 1;
}

/*
 * Convert & write out a cluster of pages in the same extent as defined
 * by mp and following the start page.
 */
STATIC void
xfs_cluster_write(
	struct inode		*inode,
	pgoff_t			tindex,
	xfs_iomap_t		*iomapp,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc,
	int			startio,
	int			all_bh,
	pgoff_t			tlast)
{
	struct pagevec		pvec;
	int			done = 0, i;

	pagevec_init(&pvec, 0);
	while (!done && tindex <= tlast) {
		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			done = xfs_convert_page(inode, pvec.pages[i], tindex++,
					iomapp, ioendp, wbc, startio, all_bh);
			if (done)
				break;
		}

		pagevec_release(&pvec);
		cond_resched();
	}
}

/*
 * Calling this without startio set means we are being asked to make a dirty
 * page ready for freeing its buffers.  When called with startio set then
 * we are coming from writepage.
 *
 * When called with startio set it is important that we write the WHOLE
 * page if possible.
 * The bh->b_state's cannot know whether any of the blocks - or which
 * blocks, for that matter - are dirty due to mmap writes, and therefore
 * bh uptodate is only valid if the page itself isn't completely uptodate.
 * Some layers may clear the page dirty flag prior to calling writepage,
 * under the assumption the entire page will be written out; by not
 * writing out the whole page the page can be reused before all valid
 * dirty data is written out.  Note: in the case of a page that has been
 * dirtied by mmap writes but only partially set up by
 * block_prepare_write, the bh->b_states will not agree, and only the
 * ones set up by block_prepare_write/block_commit_write will have valid
 * state; thus the whole page must be written out.
 */

STATIC int
xfs_page_state_convert(
	struct inode		*inode,
	struct page		*page,
	struct writeback_control *wbc,
	int			startio,
	int			unmapped) /* also implies page uptodate */
{
	struct buffer_head	*bh, *head;
	xfs_iomap_t		iomap;
	xfs_ioend_t		*ioend = NULL, *iohead = NULL;
	loff_t			offset;
	unsigned long		p_offset = 0;
	unsigned int		type;
	__uint64_t		end_offset;
	pgoff_t			end_index, last_index, tlast;
	ssize_t			size, len;
	int			flags, err, iomap_valid = 0, uptodate = 1;
	int			page_dirty, count = 0, trylock_flag = 0;
	int			all_bh = unmapped;

	/* wait for other IO threads? */
	if (startio && (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking))
		trylock_flag |= BMAPI_TRYLOCK;

	/* Is this page beyond the end of the file? */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_CACHE_SHIFT;
	last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
	if (page->index >= end_index) {
		if ((page->index >= end_index + 1) ||
		    !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
			if (startio)
				unlock_page(page);
			return 0;
		}
	}

	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 *
	 * Derivation:
	 *
	 * End offset is the highest offset that this page should represent.
	 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
	 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
	 * hence give us the correct page_dirty count.  On any other page,
	 * it will be zero and in that case we need page_dirty to be the
	 * count of buffers on the page.
	 */
	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset);
	len = 1 << inode->i_blkbits;
	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
					PAGE_CACHE_SIZE);
	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
	page_dirty = p_offset / len;

	bh = head = page_buffers(page);
	offset = page_offset(page);
	flags = -1;
	type = 0;

	/* TODO: cleanup count and page_dirty */

	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio) {
			/*
			 * the iomap is actually still valid, but the ioend
			 * isn't.  shouldn't happen too often.
			 */
			iomap_valid = 0;
			continue;
		}

		if (iomap_valid)
			iomap_valid = xfs_iomap_valid(&iomap, offset);

		/*
		 * First case, map an unwritten extent and prepare for
		 * extent state conversion transaction on completion.
		 *
		 * Second case, allocate space for a delalloc buffer.
		 * We can return EAGAIN here in the release page case.
		 *
		 * Third case, an unmapped buffer was found, and we are
		 * in a path where we need to write the whole page out.
		 */
		if (buffer_unwritten(bh) || buffer_delay(bh) ||
		    ((buffer_uptodate(bh) || PageUptodate(page)) &&
		     !buffer_mapped(bh) && (unmapped || startio))) {
			/*
			 * Make sure we don't use a read-only iomap
			 */
			if (flags == BMAPI_READ)
				iomap_valid = 0;

			if (buffer_unwritten(bh)) {
				type = IOMAP_UNWRITTEN;
				flags = BMAPI_WRITE|BMAPI_IGNSTATE;
			} else if (buffer_delay(bh)) {
				type = IOMAP_DELAY;
				flags = BMAPI_ALLOCATE;
				if (!startio)
					flags |= trylock_flag;
			} else {
				type = IOMAP_NEW;
				flags = BMAPI_WRITE|BMAPI_MMAP;
			}

			if (!iomap_valid) {
				if (type == IOMAP_NEW) {
					size = xfs_probe_cluster(inode,
							page, bh, head, 0);
				} else {
					size = len;
				}

				err = xfs_map_blocks(inode, offset, size,
						&iomap, flags);
				if (err)
					goto error;
				iomap_valid = xfs_iomap_valid(&iomap, offset);
			}
			if (iomap_valid) {
				xfs_map_at_offset(bh, offset,
						inode->i_blkbits, &iomap);
				if (startio) {
					xfs_add_to_ioend(inode, bh, offset,
							type, &ioend,
							!iomap_valid);
				} else {
					set_buffer_dirty(bh);
					unlock_buffer(bh);
					mark_buffer_dirty(bh);
				}
				page_dirty--;
				count++;
			}
		} else if (buffer_uptodate(bh) && startio) {
			/*
			 * we got here because the buffer is already mapped.
			 * That means it must already have extents allocated
			 * underneath it.  Map the extent by reading it.
			 */
			if (!iomap_valid || type != 0) {
				flags = BMAPI_READ;
				size = xfs_probe_cluster(inode, page, bh,
								head, 1);
				err = xfs_map_blocks(inode, offset, size,
						&iomap, flags);
				if (err)
					goto error;
				iomap_valid = xfs_iomap_valid(&iomap, offset);
			}

			type = 0;
			if (!test_and_set_bit(BH_Lock, &bh->b_state)) {
				ASSERT(buffer_mapped(bh));
				if (iomap_valid)
					all_bh = 1;
				xfs_add_to_ioend(inode, bh, offset, type,
						&ioend, !iomap_valid);
				page_dirty--;
				count++;
			} else {
				iomap_valid = 0;
			}
		} else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
			   (unmapped || startio)) {
			iomap_valid = 0;
		}

		if (!iohead)
			iohead = ioend;

	} while (offset += len, ((bh = bh->b_this_page) != head));

	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (startio)
		xfs_start_page_writeback(page, wbc, 1, count);

	if (ioend && iomap_valid) {
		offset = (iomap.iomap_offset + iomap.iomap_bsize - 1) >>
					PAGE_CACHE_SHIFT;
		tlast = min_t(pgoff_t, offset, last_index);
		xfs_cluster_write(inode, page->index + 1, &iomap, &ioend,
					wbc, startio, all_bh, tlast);
	}

	if (iohead)
		xfs_submit_ioend(iohead);

	return page_dirty;

error:
	if (iohead)
		xfs_cancel_ioend(iohead);

	/*
	 * If it's delalloc and we have nowhere to put it,
	 * throw it away, unless the lower layers told
	 * us to try again.
	 */
	if (err != -EAGAIN) {
		if (!unmapped)
			block_invalidatepage(page, 0);
		ClearPageUptodate(page);
	}
	return err;
}

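/*
 * Common block mapper behind linvfs_get_block and the direct I/O
 * variant: map the request via VOP_BMAP and translate the resulting
 * xfs_iomap_t into buffer_head state for the generic block layer.
 */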
STATIC int
__linvfs_get_block(
	struct inode		*inode,
	sector_t		iblock,
	unsigned long		blocks,
	struct buffer_head	*bh_result,
	int			create,
	int			direct,
	bmapi_flags_t		flags)
{
	vnode_t			*vp = LINVFS_GET_VP(inode);
	xfs_iomap_t		iomap;
	xfs_off_t		offset;
	ssize_t			size;
	int			retpbbm = 1;
	int			error;

	offset = (xfs_off_t)iblock << inode->i_blkbits;
	if (blocks)
		size = (ssize_t) min_t(xfs_off_t, LONG_MAX,
					(xfs_off_t)blocks << inode->i_blkbits);
	else
		size = 1 << inode->i_blkbits;

	VOP_BMAP(vp, offset, size,
		create ? flags : BMAPI_READ, &iomap, &retpbbm, error);
	if (error)
		return -error;

	if (retpbbm == 0)
		return 0;

	if (iomap.iomap_bn != IOMAP_DADDR_NULL) {
		xfs_daddr_t	bn;
		xfs_off_t	delta;

		/* For unwritten extents do not report a disk address on
		 * the read case (treat as if we're reading into a hole).
		 */
		if (create || !(iomap.iomap_flags & IOMAP_UNWRITTEN)) {
			delta = offset - iomap.iomap_offset;
			delta >>= inode->i_blkbits;

			bn = iomap.iomap_bn >> (inode->i_blkbits - BBSHIFT);
			bn += delta;
			BUG_ON(!bn && !(iomap.iomap_flags & IOMAP_REALTIME));
			bh_result->b_blocknr = bn;
			set_buffer_mapped(bh_result);
		}
		if (create && (iomap.iomap_flags & IOMAP_UNWRITTEN)) {
			if (direct)
				bh_result->b_private = inode;
			set_buffer_unwritten(bh_result);
			set_buffer_delay(bh_result);
		}
	}

	/* If this is a realtime file, data might be on a new device */
	bh_result->b_bdev = iomap.iomap_target->bt_bdev;

	/* If we previously allocated a block out beyond eof and
	 * we are now coming back to use it then we will need to
	 * flag it as new even if it has a disk address.
	 */
	if (create &&
	    ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
	     (offset >= i_size_read(inode)) || (iomap.iomap_flags & IOMAP_NEW)))
		set_buffer_new(bh_result);

	if (iomap.iomap_flags & IOMAP_DELAY) {
		BUG_ON(direct);
		if (create) {
			set_buffer_uptodate(bh_result);
			set_buffer_mapped(bh_result);
			set_buffer_delay(bh_result);
		}
	}

	if (blocks) {
		ASSERT(iomap.iomap_bsize - iomap.iomap_delta > 0);
		offset = min_t(xfs_off_t,
				iomap.iomap_bsize - iomap.iomap_delta,
				(xfs_off_t)blocks << inode->i_blkbits);
		bh_result->b_size = (u32) min_t(xfs_off_t, UINT_MAX, offset);
	}

	return 0;
}

int
linvfs_get_block(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __linvfs_get_block(inode, iblock, 0, bh_result,
					create, 0, BMAPI_WRITE);
}

STATIC int
linvfs_get_blocks_direct(
	struct inode		*inode,
	sector_t		iblock,
	unsigned long		max_blocks,
	struct buffer_head	*bh_result,
	int			create)
{
	return __linvfs_get_block(inode, iblock, max_blocks, bh_result,
					create, 1, BMAPI_WRITE|BMAPI_DIRECT);
}

STATIC void
linvfs_end_io_direct(
	struct kiocb	*iocb,
	loff_t		offset,
	ssize_t		size,
	void		*private)
{
	xfs_ioend_t	*ioend = iocb->private;

	/*
	 * Non-NULL private data means we need to issue a transaction to
	 * convert a range from unwritten to written extents.  This needs
	 * to happen from process context but aio+dio I/O completion
	 * happens from irq context so we need to defer it to a workqueue.
	 * This is not necessary for synchronous direct I/O, but we do
	 * it anyway to keep the code uniform and simpler.
	 *
	 * The core direct I/O code might be changed to always call the
	 * completion handler in the future, in which case all this can
	 * go away.
	 */
	if (private && size > 0) {
		ioend->io_offset = offset;
		ioend->io_size = size;
		xfs_finish_ioend(ioend);
	} else {
		ASSERT(size >= 0);
		xfs_destroy_ioend(ioend);
	}

	/*
	 * blockdev_direct_IO can return an error even after the I/O
	 * completion handler was called.  Thus we need to protect
	 * against double-freeing.
	 */
	iocb->private = NULL;
}

STATIC ssize_t
linvfs_direct_IO(
	int			rw,
	struct kiocb		*iocb,
	const struct iovec	*iov,
	loff_t			offset,
	unsigned long		nr_segs)
{
	struct file	*file = iocb->ki_filp;
	struct inode	*inode = file->f_mapping->host;
	vnode_t		*vp = LINVFS_GET_VP(inode);
	xfs_iomap_t	iomap;
	int		maps = 1;
	int		error;
	ssize_t		ret;

	VOP_BMAP(vp, offset, 0, BMAPI_DEVICE, &iomap, &maps, error);
	if (error)
		return -error;

	iocb->private = xfs_alloc_ioend(inode, IOMAP_UNWRITTEN);

	ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
		iomap.iomap_target->bt_bdev,
		iov, offset, nr_segs,
		linvfs_get_blocks_direct,
		linvfs_end_io_direct);

	if (unlikely(ret <= 0 && iocb->private))
		xfs_destroy_ioend(iocb->private);
	return ret;
}


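/*
 * ->bmap maps a logical block to a physical block.  Flush the inode's
 * dirty and delayed-allocation data first, under the I/O lock, so that
 * generic_block_bmap sees real, allocated blocks.
 */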
STATIC sector_t
linvfs_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct inode		*inode = (struct inode *)mapping->host;
	vnode_t			*vp = LINVFS_GET_VP(inode);
	int			error;

	vn_trace_entry(vp, "linvfs_bmap", (inst_t *)__return_address);

	VOP_RWLOCK(vp, VRWLOCK_READ);
	VOP_FLUSH_PAGES(vp, (xfs_off_t)0, -1, 0, FI_REMAPF, error);
	VOP_RWUNLOCK(vp, VRWLOCK_READ);
	return generic_block_bmap(mapping, block, linvfs_get_block);
}

STATIC int
linvfs_readpage(
	struct file		*unused,
	struct page		*page)
{
	return mpage_readpage(page, linvfs_get_block);
}

STATIC int
linvfs_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, linvfs_get_block);
}

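/*
 * Walk the buffers on a page and report whether any are delalloc,
 * unmapped or unwritten.  An unwritten buffer that is not also marked
 * delayed has its unwritten flag cleared rather than being counted.
 */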
STATIC void
xfs_count_page_state(
	struct page		*page,
	int			*delalloc,
	int			*unmapped,
	int			*unwritten)
{
	struct buffer_head	*bh, *head;

	*delalloc = *unmapped = *unwritten = 0;

	bh = head = page_buffers(page);
	do {
		if (buffer_uptodate(bh) && !buffer_mapped(bh))
			(*unmapped) = 1;
		else if (buffer_unwritten(bh) && !buffer_delay(bh))
			clear_buffer_unwritten(bh);
		else if (buffer_unwritten(bh))
			(*unwritten) = 1;
		else if (buffer_delay(bh))
			(*delalloc) = 1;
	} while ((bh = bh->b_this_page) != head);
}


/*
 * writepage: Called from one of two places:
 *
 * 1. we are flushing a delalloc buffer head.
 *
 * 2. we are writing out a dirty page.  Typically the page dirty
 *    state is cleared before we get here.  In this case it is
 *    conceivable we have no buffer heads.
 *
 * For delalloc space on the page we need to allocate space and
 * flush it.  For unmapped buffer heads on the page we should
 * allocate space if the page is uptodate.  For any other dirty
 * buffer heads on the page we should flush them.
 *
 * If we detect that a transaction would be required to flush
 * the page, we have to check the process flags first; if we
 * are already in a transaction or disk I/O during allocations
 * is off, we need to fail the writepage and redirty the page.
 */

STATIC int
linvfs_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	int			error;
	int			need_trans;
	int			delalloc, unmapped, unwritten;
	struct inode		*inode = page->mapping->host;

	xfs_page_trace(XFS_WRITEPAGE_ENTER, inode, page, 0);

	/*
	 * We need a transaction if:
	 *  1. There are delalloc buffers on the page
	 *  2. The page is uptodate and we have unmapped buffers
	 *  3. The page is uptodate and we have no buffers
	 *  4. There are unwritten buffers on the page
	 */

	if (!page_has_buffers(page)) {
		unmapped = 1;
		need_trans = 1;
	} else {
		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
		if (!PageUptodate(page))
			unmapped = 0;
		need_trans = delalloc + unmapped + unwritten;
	}

	/*
	 * If we need a transaction and the process flags say
	 * we are already in a transaction, or no IO is allowed
	 * then mark the page dirty again and leave the page
	 * as is.
	 */
	if (PFLAGS_TEST_FSTRANS() && need_trans)
		goto out_fail;

	/*
	 * Delay hooking up buffer heads until we have
	 * made our go/no-go decision.
	 */
	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << inode->i_blkbits, 0);

	/*
	 * Convert delayed allocate, unwritten or unmapped space
	 * to real space and flush out to disk.
	 */
	error = xfs_page_state_convert(inode, page, wbc, 1, unmapped);
	if (error == -EAGAIN)
		goto out_fail;
	if (unlikely(error < 0))
		goto out_unlock;

	return 0;

out_fail:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
out_unlock:
	unlock_page(page);
	return error;
}

STATIC int
linvfs_invalidate_page(
	struct page		*page,
	unsigned long		offset)
{
	xfs_page_trace(XFS_INVALIDPAGE_ENTER,
			page->mapping->host, page, offset);
	return block_invalidatepage(page, offset);
}

/*
 * Called to move a page into cleanable state - and from there
 * to be released.  Possibly the page is already clean.  We always
 * have buffer heads in this call.
 *
 * Returns non-zero if the page is ok to release, 0 otherwise.
 *
 * Possible scenarios are:
 *
 * 1. We are being called to release a page which has been written
 *    to via regular I/O.  buffer heads will be dirty and possibly
 *    delalloc.  If there are no delalloc buffer heads in this case
 *    then we can just return zero.
 *
 * 2. We are called to release a page which has been written via
 *    mmap; all we need to do is ensure there is no delalloc
 *    state in the buffer heads.  If not, we can let the caller
 *    free them and we should come back later via writepage.
 */
STATIC int
linvfs_release_page(
	struct page		*page,
	gfp_t			gfp_mask)
{
	struct inode		*inode = page->mapping->host;
	int			dirty, delalloc, unmapped, unwritten;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 1,
	};

	xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, gfp_mask);

	xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
	if (!delalloc && !unwritten)
		goto free_buffers;

	if (!(gfp_mask & __GFP_FS))
		return 0;

	/* If we are already inside a transaction or the thread cannot
	 * do I/O, we cannot release this page.
	 */
	if (PFLAGS_TEST_FSTRANS())
		return 0;

	/*
	 * Convert delalloc space to real space, do not flush the
	 * data out to disk, that will be done by the caller.
	 * Never need to allocate space here - we will always
	 * come back to writepage in that case.
	 */
	dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0);
	if (dirty == 0 && !unwritten)
		goto free_buffers;
	return 0;

free_buffers:
	return try_to_free_buffers(page);
}

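/*
 * Prepare for a buffered write: use the generic helper to map (and,
 * where needed, read in) the blocks backing the given byte range.
 */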
STATIC int
linvfs_prepare_write(
	struct file		*file,
	struct page		*page,
	unsigned int		from,
	unsigned int		to)
{
	return block_prepare_write(page, from, to, linvfs_get_block);
}

struct address_space_operations linvfs_aops = {
	.readpage		= linvfs_readpage,
	.readpages		= linvfs_readpages,
	.writepage		= linvfs_writepage,
	.sync_page		= block_sync_page,
	.releasepage		= linvfs_release_page,
	.invalidatepage		= linvfs_invalidate_page,
	.prepare_write		= linvfs_prepare_write,
	.commit_write		= generic_commit_write,
	.bmap			= linvfs_bmap,
	.direct_IO		= linvfs_direct_IO,
	.migratepage		= buffer_migrate_page,
};