/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * Copyright (C) 2002, 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <asm/byteorder.h>
#include <linux/swap.h>
#include <linux/pipe_fs_i.h>

#define MLOG_MASK_PREFIX ML_FILE_IO
#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "aops.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "file.h"
#include "inode.h"
#include "journal.h"
#include "suballoc.h"
#include "super.h"
#include "symlink.h"

#include "buffer_head_io.h"

static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock,
				   struct buffer_head *bh_result, int create)
{
	int err = -EIO;
	int status;
	struct ocfs2_dinode *fe = NULL;
	struct buffer_head *bh = NULL;
	struct buffer_head *buffer_cache_bh = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	void *kaddr;

	mlog_entry("(0x%p, %llu, 0x%p, %d)\n", inode,
		   (unsigned long long)iblock, bh_result, create);

	BUG_ON(ocfs2_inode_is_fast_symlink(inode));

	if ((iblock << inode->i_sb->s_blocksize_bits) > PATH_MAX + 1) {
		mlog(ML_ERROR, "block offset > PATH_MAX: %llu",
		     (unsigned long long)iblock);
		goto bail;
	}

	status = ocfs2_read_block(OCFS2_SB(inode->i_sb),
				  OCFS2_I(inode)->ip_blkno,
				  &bh, OCFS2_BH_CACHED, inode);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}
	fe = (struct ocfs2_dinode *) bh->b_data;

	if (!OCFS2_IS_VALID_DINODE(fe)) {
		mlog(ML_ERROR, "Invalid dinode #%llu: signature = %.*s\n",
		     (unsigned long long)le64_to_cpu(fe->i_blkno), 7,
		     fe->i_signature);
		goto bail;
	}

	if ((u64)iblock >= ocfs2_clusters_to_blocks(inode->i_sb,
						    le32_to_cpu(fe->i_clusters))) {
		mlog(ML_ERROR, "block offset is outside the allocated size: "
		     "%llu\n", (unsigned long long)iblock);
		goto bail;
	}

	/* We don't use the page cache to create symlink data, so if
	 * need be, copy it over from the buffer cache. */
	if (!buffer_uptodate(bh_result) && ocfs2_inode_is_new(inode)) {
		u64 blkno = le64_to_cpu(fe->id2.i_list.l_recs[0].e_blkno) +
			    iblock;
		buffer_cache_bh = sb_getblk(osb->sb, blkno);
		if (!buffer_cache_bh) {
			mlog(ML_ERROR, "couldn't getblock for symlink!\n");
			goto bail;
		}

		/* we haven't locked out transactions, so a commit
		 * could've happened. Since we've got a reference on
		 * the bh, even if it commits while we're doing the
		 * copy, the data is still good. */
		if (buffer_jbd(buffer_cache_bh)
		    && ocfs2_inode_is_new(inode)) {
			kaddr = kmap_atomic(bh_result->b_page, KM_USER0);
			if (!kaddr) {
				mlog(ML_ERROR, "couldn't kmap!\n");
				goto bail;
			}
			memcpy(kaddr + (bh_result->b_size * iblock),
			       buffer_cache_bh->b_data,
			       bh_result->b_size);
			kunmap_atomic(kaddr, KM_USER0);
			set_buffer_uptodate(bh_result);
		}
		brelse(buffer_cache_bh);
	}

	map_bh(bh_result, inode->i_sb,
	       le64_to_cpu(fe->id2.i_list.l_recs[0].e_blkno) + iblock);

	err = 0;

bail:
	if (bh)
		brelse(bh);

	mlog_exit(err);
	return err;
}

static int ocfs2_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh_result, int create)
{
	int err = 0;
	unsigned int ext_flags;
	u64 p_blkno, past_eof;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	mlog_entry("(0x%p, %llu, 0x%p, %d)\n", inode,
		   (unsigned long long)iblock, bh_result, create);

	if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE)
		mlog(ML_NOTICE, "get_block on system inode 0x%p (%lu)\n",
		     inode, inode->i_ino);

	if (S_ISLNK(inode->i_mode)) {
		/* this always does I/O for some reason. */
		err = ocfs2_symlink_get_block(inode, iblock, bh_result, create);
		goto bail;
	}

	err = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno, NULL,
					  &ext_flags);
	if (err) {
		mlog(ML_ERROR, "Error %d from get_blocks(0x%p, %llu, 1, "
		     "%llu, NULL)\n", err, inode, (unsigned long long)iblock,
		     (unsigned long long)p_blkno);
		goto bail;
	}

	/*
	 * ocfs2 never allocates in this function - the only time we
	 * need to use BH_New is when we're extending i_size on a file
	 * system which doesn't support holes, in which case BH_New
	 * allows block_prepare_write() to zero.
	 */
	mlog_bug_on_msg(create && p_blkno == 0 && ocfs2_sparse_alloc(osb),
			"ino %lu, iblock %llu\n", inode->i_ino,
			(unsigned long long)iblock);

	/* Treat the unwritten extent as a hole for zeroing purposes. */
	if (p_blkno && !(ext_flags & OCFS2_EXT_UNWRITTEN))
		map_bh(bh_result, inode->i_sb, p_blkno);

	if (!ocfs2_sparse_alloc(osb)) {
		if (p_blkno == 0) {
			err = -EIO;
			mlog(ML_ERROR,
			     "iblock = %llu p_blkno = %llu blkno=(%llu)\n",
			     (unsigned long long)iblock,
			     (unsigned long long)p_blkno,
			     (unsigned long long)OCFS2_I(inode)->ip_blkno);
			mlog(ML_ERROR, "Size %llu, clusters %u\n",
			     (unsigned long long)i_size_read(inode),
			     OCFS2_I(inode)->ip_clusters);
			dump_stack();
		}

		past_eof = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));
		mlog(0, "Inode %lu, past_eof = %llu\n", inode->i_ino,
		     (unsigned long long)past_eof);

		if (create && (iblock >= past_eof))
			set_buffer_new(bh_result);
	}

bail:
	if (err < 0)
		err = -EIO;

	mlog_exit(err);
	return err;
}

static int ocfs2_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	loff_t start = (loff_t)page->index << PAGE_CACHE_SHIFT;
	int ret, unlock = 1;

	mlog_entry("(0x%p, %lu)\n", file, (page ? page->index : 0));

	ret = ocfs2_meta_lock_with_page(inode, NULL, 0, page);
	if (ret != 0) {
		if (ret == AOP_TRUNCATED_PAGE)
			unlock = 0;
		mlog_errno(ret);
		goto out;
	}

	if (down_read_trylock(&OCFS2_I(inode)->ip_alloc_sem) == 0) {
		ret = AOP_TRUNCATED_PAGE;
		goto out_meta_unlock;
	}

	/*
	 * i_size might have just been updated as we grabbed the meta lock.  We
	 * might now be discovering a truncate that hit on another node.
	 * block_read_full_page->get_block freaks out if it is asked to read
	 * beyond the end of a file, so we check here.  Callers
	 * (generic_file_read, vm_ops->fault) are clever enough to check i_size
	 * and notice that the page they just read isn't needed.
	 *
	 * XXX sys_readahead() seems to get that wrong?
	 */
	if (start >= i_size_read(inode)) {
		zero_user_page(page, 0, PAGE_SIZE, KM_USER0);
		SetPageUptodate(page);
		ret = 0;
		goto out_alloc;
	}

	ret = ocfs2_data_lock_with_page(inode, 0, page);
	if (ret != 0) {
		if (ret == AOP_TRUNCATED_PAGE)
			unlock = 0;
		mlog_errno(ret);
		goto out_alloc;
	}

	ret = block_read_full_page(page, ocfs2_get_block);
	unlock = 0;

	ocfs2_data_unlock(inode, 0);
out_alloc:
	up_read(&OCFS2_I(inode)->ip_alloc_sem);
out_meta_unlock:
	ocfs2_meta_unlock(inode, 0);
out:
	if (unlock)
		unlock_page(page);
	mlog_exit(ret);
	return ret;
}

/* Note: Because we don't support holes, our allocation has
 * already happened (allocation writes zeros to the file data)
 * so we don't have to worry about ordered writes in
 * ocfs2_writepage.
 *
 * ->writepage is called during the process of invalidating the page cache
 * during blocked lock processing.  It can't block on any cluster locks
 * during block mapping.  It's relying on the fact that the block
 * mapping can't have disappeared under the dirty pages that it is
 * being asked to write back.
 */
static int ocfs2_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	mlog_entry("(0x%p)\n", page);

	ret = block_write_full_page(page, ocfs2_get_block, wbc);

	mlog_exit(ret);

	return ret;
}

/*
 * This is called from ocfs2_write_zero_page() which has handled its
 * own cluster locking and has ensured allocation exists for those
 * blocks to be written.
 */
int ocfs2_prepare_write_nolock(struct inode *inode, struct page *page,
			       unsigned from, unsigned to)
{
	int ret;

	ret = block_prepare_write(page, from, to, ocfs2_get_block);

	return ret;
}

/* Taken from ext3. We don't necessarily need the full blown
 * functionality yet, but IMHO it's better to cut and paste the whole
 * thing so we can avoid introducing our own bugs (and easily pick up
 * their fixes when they happen) --Mark */
int walk_page_buffers(handle_t *handle,
		      struct buffer_head *head,
		      unsigned from,
		      unsigned to,
		      int *partial,
		      int (*fn)(handle_t *handle,
				struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	for (bh = head, block_start = 0;
	     ret == 0 && (bh != head || !block_start);
	     block_start = block_end, bh = next)
	{
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}

handle_t *ocfs2_start_walk_page_trans(struct inode *inode,
				      struct page *page,
				      unsigned from,
				      unsigned to)
{
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	handle_t *handle = NULL;
	int ret = 0;

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (!handle) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	if (ocfs2_should_order_data(inode)) {
		ret = walk_page_buffers(handle,
					page_buffers(page),
					from, to, NULL,
					ocfs2_journal_dirty_data);
		if (ret < 0)
			mlog_errno(ret);
	}
out:
	if (ret) {
		if (handle)
			ocfs2_commit_trans(osb, handle);
		handle = ERR_PTR(ret);
	}
	return handle;
}

static sector_t ocfs2_bmap(struct address_space *mapping, sector_t block)
{
	sector_t status;
	u64 p_blkno = 0;
	int err = 0;
	struct inode *inode = mapping->host;

	mlog_entry("(block = %llu)\n", (unsigned long long)block);

	/* We don't need to lock journal system files, since they aren't
	 * accessed concurrently from multiple nodes.
	 */
	if (!INODE_JOURNAL(inode)) {
		err = ocfs2_meta_lock(inode, NULL, 0);
		if (err) {
			if (err != -ENOENT)
				mlog_errno(err);
			goto bail;
		}
		down_read(&OCFS2_I(inode)->ip_alloc_sem);
	}

	err = ocfs2_extent_map_get_blocks(inode, block, &p_blkno, NULL, NULL);

	if (!INODE_JOURNAL(inode)) {
		up_read(&OCFS2_I(inode)->ip_alloc_sem);
		ocfs2_meta_unlock(inode, 0);
	}

	if (err) {
		mlog(ML_ERROR, "get_blocks() failed, block = %llu\n",
		     (unsigned long long)block);
		mlog_errno(err);
		goto bail;
	}

bail:
	status = err ? 0 : p_blkno;

	mlog_exit((int)status);

	return status;
}

/*
 * TODO: Make this into a generic get_blocks function.
 *
 * From do_direct_io in direct-io.c:
 *  "So what we do is to permit the ->get_blocks function to populate
 *   bh.b_size with the size of IO which is permitted at this offset and
 *   this i_blkbits."
 *
 * This function is called directly from get_more_blocks in direct-io.c.
 *
 * called like this: dio->get_blocks(dio->inode, fs_startblk,
 *					fs_count, map_bh, dio->rw == WRITE);
 */
static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock,
				      struct buffer_head *bh_result, int create)
{
	int ret;
	u64 p_blkno, inode_blocks, contig_blocks;
	unsigned int ext_flags;
	unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
	unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits;

	/* This function won't even be called if the request isn't all
	 * nicely aligned and of the right size, so there's no need
	 * for us to check any of that. */

	inode_blocks = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));

	/*
	 * Any write past EOF is not allowed because we'd be extending.
	 */
	if (create && (iblock + max_blocks) > inode_blocks) {
		ret = -EIO;
		goto bail;
	}

	/* This figures out the size of the next contiguous block, and
	 * our logical offset */
	ret = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno,
					  &contig_blocks, &ext_flags);
	if (ret) {
		mlog(ML_ERROR, "get_blocks() failed iblock=%llu\n",
		     (unsigned long long)iblock);
		ret = -EIO;
		goto bail;
	}

	if (!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)) && !p_blkno) {
		ocfs2_error(inode->i_sb,
			    "Inode %llu has a hole at block %llu\n",
			    (unsigned long long)OCFS2_I(inode)->ip_blkno,
			    (unsigned long long)iblock);
		ret = -EROFS;
		goto bail;
	}

	/*
	 * get_more_blocks() expects us to describe a hole by clearing
	 * the mapped bit on bh_result().
	 *
	 * Consider an unwritten extent as a hole.
	 */
	if (p_blkno && !(ext_flags & OCFS2_EXT_UNWRITTEN))
		map_bh(bh_result, inode->i_sb, p_blkno);
	else {
		/*
		 * ocfs2_prepare_inode_for_write() should have caught
		 * the case where we'd be filling a hole and triggered
		 * a buffered write instead.
		 */
		if (create) {
			ret = -EIO;
			mlog_errno(ret);
			goto bail;
		}

		clear_buffer_mapped(bh_result);
	}

	/* make sure we don't map more than max_blocks blocks here as
	   that's all the kernel will handle at this point. */
	if (max_blocks < contig_blocks)
		contig_blocks = max_blocks;
	bh_result->b_size = contig_blocks << blocksize_bits;
bail:
	return ret;
}

/*
 * ocfs2_dio_end_io is called by the dio core when a dio is finished.  We're
 * particularly interested in the aio/dio case.  Like the core uses
 * i_alloc_sem, we use the rw_lock DLM lock to protect io on one node from
 * truncation on another.
 */
static void ocfs2_dio_end_io(struct kiocb *iocb,
			     loff_t offset,
			     ssize_t bytes,
			     void *private)
{
	struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
	int level;

	/* this io's submitter should not have unlocked this before we could */
	BUG_ON(!ocfs2_iocb_is_rw_locked(iocb));

	ocfs2_iocb_clear_rw_locked(iocb);

	level = ocfs2_iocb_rw_locked_level(iocb);
	if (!level)
		up_read(&inode->i_alloc_sem);
	ocfs2_rw_unlock(inode, level);
}

/*
 * ocfs2_invalidatepage() and ocfs2_releasepage() are shamelessly stolen
 * from ext3.  PageChecked() bits have been removed as OCFS2 does not
 * do journalled data.
 */
static void ocfs2_invalidatepage(struct page *page, unsigned long offset)
{
	journal_t *journal = OCFS2_SB(page->mapping->host->i_sb)->journal->j_journal;

	journal_invalidatepage(journal, page, offset);
}

static int ocfs2_releasepage(struct page *page, gfp_t wait)
{
	journal_t *journal = OCFS2_SB(page->mapping->host->i_sb)->journal->j_journal;

	if (!page_has_buffers(page))
		return 0;
	return journal_try_to_free_buffers(journal, page, wait);
}

static ssize_t ocfs2_direct_IO(int rw,
			       struct kiocb *iocb,
			       const struct iovec *iov,
			       loff_t offset,
			       unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_path.dentry->d_inode->i_mapping->host;
	int ret;

	mlog_entry_void();

	if (!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb))) {
		/*
		 * We get PR data locks even for O_DIRECT.  This
		 * allows concurrent O_DIRECT I/O but doesn't let
		 * O_DIRECT with extending and buffered zeroing writes
		 * race.  If they did race then the buffered zeroing
		 * could be written back after the O_DIRECT I/O.  It's
		 * one thing to tell people not to mix buffered and
		 * O_DIRECT writes, but expecting them to understand
		 * that file extension is also an implicit buffered
		 * write is too much.  By getting the PR we force
		 * writeback of the buffered zeroing before
		 * proceeding.
		 */
		ret = ocfs2_data_lock(inode, 0);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}
		ocfs2_data_unlock(inode, 0);
	}

	ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
					    inode->i_sb->s_bdev, iov, offset,
					    nr_segs,
					    ocfs2_direct_IO_get_blocks,
					    ocfs2_dio_end_io);
out:
	mlog_exit(ret);
	return ret;
}

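/*
 * Worked example of the boundary math below (illustrative values, not
 * from this file): assuming 64K pages (PAGE_CACHE_SHIFT = 16) and 4K
 * clusters (s_clustersize_bits = 12), cpp = 1 << (16 - 12) = 16
 * clusters per page, so for cpos = 5 the helper returns cluster_start =
 * (5 % 16) << 12 = 20480 and cluster_end = 24576.  When a page is no
 * larger than a cluster the boundaries stay 0 and PAGE_CACHE_SIZE.
 */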
static void ocfs2_figure_cluster_boundaries(struct ocfs2_super *osb,
					    u32 cpos,
					    unsigned int *start,
					    unsigned int *end)
{
	unsigned int cluster_start = 0, cluster_end = PAGE_CACHE_SIZE;

	if (unlikely(PAGE_CACHE_SHIFT > osb->s_clustersize_bits)) {
		unsigned int cpp;

		cpp = 1 << (PAGE_CACHE_SHIFT - osb->s_clustersize_bits);

		cluster_start = cpos % cpp;
		cluster_start = cluster_start << osb->s_clustersize_bits;

		cluster_end = cluster_start + osb->s_clustersize;
	}

	BUG_ON(cluster_start > PAGE_SIZE);
	BUG_ON(cluster_end > PAGE_SIZE);

	if (start)
		*start = cluster_start;
	if (end)
		*end = cluster_end;
}

/*
 * 'from' and 'to' are the region in the page to avoid zeroing.
 *
 * If pagesize > clustersize, this function will avoid zeroing outside
 * of the cluster boundary.
 *
 * from == to == 0 is code for "zero the entire cluster region"
 */
static void ocfs2_clear_page_regions(struct page *page,
				     struct ocfs2_super *osb, u32 cpos,
				     unsigned from, unsigned to)
{
	void *kaddr;
	unsigned int cluster_start, cluster_end;

	ocfs2_figure_cluster_boundaries(osb, cpos, &cluster_start, &cluster_end);

	kaddr = kmap_atomic(page, KM_USER0);

	if (from || to) {
		if (from > cluster_start)
			memset(kaddr + cluster_start, 0, from - cluster_start);
		if (to < cluster_end)
			memset(kaddr + to, 0, cluster_end - to);
	} else {
		memset(kaddr + cluster_start, 0, cluster_end - cluster_start);
	}

	kunmap_atomic(kaddr, KM_USER0);
}

/*
 * Some of this taken from block_prepare_write(). We already have our
 * mapping by now though, and the entire write will be allocating or
 * it won't, so not much need to use BH_New.
 *
 * This will also skip zeroing, which is handled externally.
 */
int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno,
			  struct inode *inode, unsigned int from,
			  unsigned int to, int new)
{
	int ret = 0;
	struct buffer_head *head, *bh, *wait[2], **wait_bh = wait;
	unsigned int block_end, block_start;
	unsigned int bsize = 1 << inode->i_blkbits;

	if (!page_has_buffers(page))
		create_empty_buffers(page, bsize, 0);

	head = page_buffers(page);
	for (bh = head, block_start = 0; bh != head || !block_start;
	     bh = bh->b_this_page, block_start += bsize) {
		block_end = block_start + bsize;

		clear_buffer_new(bh);

		/*
		 * Ignore blocks outside of our i/o range -
		 * they may belong to unallocated clusters.
		 */
		if (block_start >= to || block_end <= from) {
			if (PageUptodate(page))
				set_buffer_uptodate(bh);
			continue;
		}

		/*
		 * For an allocating write with cluster size >= page
		 * size, we always write the entire page.
		 */
		if (new)
			set_buffer_new(bh);

		if (!buffer_mapped(bh)) {
			map_bh(bh, inode->i_sb, *p_blkno);
			unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
		}

		if (PageUptodate(page)) {
			if (!buffer_uptodate(bh))
				set_buffer_uptodate(bh);
		} else if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
			   !buffer_new(bh) &&
			   (block_start < from || block_end > to)) {
			ll_rw_block(READ, 1, &bh);
			*wait_bh++ = bh;
		}

		*p_blkno = *p_blkno + 1;
	}

	/*
	 * If we issued read requests - let them complete.
	 */
	while (wait_bh > wait) {
		wait_on_buffer(*--wait_bh);
		if (!buffer_uptodate(*wait_bh))
			ret = -EIO;
	}

	if (ret == 0 || !new)
		return ret;

	/*
	 * If we get -EIO above, zero out any newly allocated blocks
	 * to avoid exposing stale data.
	 */
	bh = head;
	block_start = 0;
	do {
		block_end = block_start + bsize;
		if (block_end <= from)
			goto next_bh;
		if (block_start >= to)
			break;

		zero_user_page(page, block_start, bh->b_size, KM_USER0);
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);

next_bh:
		block_start = block_end;
		bh = bh->b_this_page;
	} while (bh != head);

	return ret;
}

#if (PAGE_CACHE_SIZE >= OCFS2_MAX_CLUSTERSIZE)
#define OCFS2_MAX_CTXT_PAGES	1
#else
#define OCFS2_MAX_CTXT_PAGES	(OCFS2_MAX_CLUSTERSIZE / PAGE_CACHE_SIZE)
#endif

#define OCFS2_MAX_CLUSTERS_PER_PAGE	(PAGE_CACHE_SIZE / OCFS2_MIN_CLUSTERSIZE)

/*
 * Describe the state of a single cluster to be written to.
 */
struct ocfs2_write_cluster_desc {
	u32		c_cpos;
	u32		c_phys;

	/*
	 * Give this a unique field because c_phys eventually gets
	 * filled.
	 */
	unsigned	c_new;
	unsigned	c_unwritten;
};

static inline int ocfs2_should_zero_cluster(struct ocfs2_write_cluster_desc *d)
{
	return d->c_new || d->c_unwritten;
}

struct ocfs2_write_ctxt {
	/* Logical cluster position / len of write */
	u32				w_cpos;
	u32				w_clen;

	struct ocfs2_write_cluster_desc	w_desc[OCFS2_MAX_CLUSTERS_PER_PAGE];

	/*
	 * This is true if page_size > cluster_size.
	 *
	 * It triggers a set of special cases during write which might
	 * have to deal with allocating writes to partial pages.
	 */
	unsigned int			w_large_pages;

	/*
	 * Pages involved in this write.
	 *
	 * w_target_page is the page being written to by the user.
	 *
	 * w_pages is an array of pages which always contains
	 * w_target_page, and in the case of an allocating write with
	 * page_size < cluster size, it will contain zero'd and mapped
	 * pages adjacent to w_target_page which need to be written
	 * out so that future reads from that region will get zeros.
	 */
	struct page			*w_pages[OCFS2_MAX_CTXT_PAGES];
	unsigned int			w_num_pages;
	struct page			*w_target_page;

	/*
	 * ocfs2_write_end() uses this to know what the real range to
	 * write in the target should be.
	 */
	unsigned int			w_target_from;
	unsigned int			w_target_to;

	/*
	 * We could use journal_current_handle() but this is cleaner,
	 * IMHO -Mark
	 */
	handle_t			*w_handle;

	struct buffer_head		*w_di_bh;

	struct ocfs2_cached_dealloc_ctxt w_dealloc;
};

void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages)
{
	int i;

	for (i = 0; i < num_pages; i++) {
		if (pages[i]) {
			unlock_page(pages[i]);
			mark_page_accessed(pages[i]);
			page_cache_release(pages[i]);
		}
	}
}

static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc)
{
	ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages);

	brelse(wc->w_di_bh);
	kfree(wc);
}

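/*
 * The context below maps the byte range [pos, pos + len) onto whole
 * clusters.  Worked example (illustrative values): assuming 4K clusters,
 * a write of len = 3000 bytes at pos = 6000 gives w_cpos = 6000 >> 12 = 1,
 * cend = 8999 >> 12 = 2, and therefore w_clen = 2 clusters.
 */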
static int ocfs2_alloc_write_ctxt(struct ocfs2_write_ctxt **wcp,
				  struct ocfs2_super *osb, loff_t pos,
				  unsigned len, struct buffer_head *di_bh)
{
	u32 cend;
	struct ocfs2_write_ctxt *wc;

	wc = kzalloc(sizeof(struct ocfs2_write_ctxt), GFP_NOFS);
	if (!wc)
		return -ENOMEM;

	wc->w_cpos = pos >> osb->s_clustersize_bits;
	cend = (pos + len - 1) >> osb->s_clustersize_bits;
	wc->w_clen = cend - wc->w_cpos + 1;
	get_bh(di_bh);
	wc->w_di_bh = di_bh;

	if (unlikely(PAGE_CACHE_SHIFT > osb->s_clustersize_bits))
		wc->w_large_pages = 1;
	else
		wc->w_large_pages = 0;

	ocfs2_init_dealloc_ctxt(&wc->w_dealloc);

	*wcp = wc;

	return 0;
}

/*
 * If a page has any new buffers, zero them out here, and mark them uptodate
 * and dirty so they'll be written out (in order to prevent uninitialised
 * block data from leaking). And clear the new bit.
 */
static void ocfs2_zero_new_buffers(struct page *page, unsigned from, unsigned to)
{
	unsigned int block_start, block_end;
	struct buffer_head *head, *bh;

	BUG_ON(!PageLocked(page));
	if (!page_has_buffers(page))
		return;

	bh = head = page_buffers(page);
	block_start = 0;
	do {
		block_end = block_start + bh->b_size;

		if (buffer_new(bh)) {
			if (block_end > from && block_start < to) {
				if (!PageUptodate(page)) {
					unsigned start, end;

					start = max(from, block_start);
					end = min(to, block_end);

					zero_user_page(page, start, end - start, KM_USER0);
					set_buffer_uptodate(bh);
				}

				clear_buffer_new(bh);
				mark_buffer_dirty(bh);
			}
		}

		block_start = block_end;
		bh = bh->b_this_page;
	} while (bh != head);
}

/*
 * Only called when we have a failure during allocating write to write
 * zeros to the newly allocated region.
 */
static void ocfs2_write_failure(struct inode *inode,
				struct ocfs2_write_ctxt *wc,
				loff_t user_pos, unsigned user_len)
{
	int i;
	unsigned from = user_pos & (PAGE_CACHE_SIZE - 1),
		to = user_pos + user_len;
	struct page *tmppage;

	ocfs2_zero_new_buffers(wc->w_target_page, from, to);

	for (i = 0; i < wc->w_num_pages; i++) {
		tmppage = wc->w_pages[i];

		if (ocfs2_should_order_data(inode))
			walk_page_buffers(wc->w_handle, page_buffers(tmppage),
					  from, to, NULL,
					  ocfs2_journal_dirty_data);

		block_commit_write(tmppage, from, to);
	}
}

static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno,
					struct ocfs2_write_ctxt *wc,
					struct page *page, u32 cpos,
					loff_t user_pos, unsigned user_len,
					int new)
{
	int ret;
	unsigned int map_from = 0, map_to = 0;
	unsigned int cluster_start, cluster_end;
	unsigned int user_data_from = 0, user_data_to = 0;

	ocfs2_figure_cluster_boundaries(OCFS2_SB(inode->i_sb), cpos,
					&cluster_start, &cluster_end);

	if (page == wc->w_target_page) {
		map_from = user_pos & (PAGE_CACHE_SIZE - 1);
		map_to = map_from + user_len;

		if (new)
			ret = ocfs2_map_page_blocks(page, p_blkno, inode,
						    cluster_start, cluster_end,
						    new);
		else
			ret = ocfs2_map_page_blocks(page, p_blkno, inode,
						    map_from, map_to, new);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		user_data_from = map_from;
		user_data_to = map_to;
		if (new) {
			map_from = cluster_start;
			map_to = cluster_end;
		}
	} else {
		/*
		 * If we haven't allocated the new page yet, we
		 * shouldn't be writing it out without copying user
		 * data. This is likely a math error from the caller.
		 */
		BUG_ON(!new);

		map_from = cluster_start;
		map_to = cluster_end;

		ret = ocfs2_map_page_blocks(page, p_blkno, inode,
					    cluster_start, cluster_end, new);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	/*
	 * Parts of newly allocated pages need to be zero'd.
	 *
	 * Above, we have also rewritten 'to' and 'from' - as far as
	 * the rest of the function is concerned, the entire cluster
	 * range inside of a page needs to be written.
	 *
	 * We can skip this if the page is up to date - it's already
	 * been zero'd from being read in as a hole.
	 */
	if (new && !PageUptodate(page))
		ocfs2_clear_page_regions(page, OCFS2_SB(inode->i_sb),
					 cpos, user_data_from, user_data_to);

	flush_dcache_page(page);

out:
	return ret;
}

/*
 * This function will only grab one cluster's worth of pages.
 */
static int ocfs2_grab_pages_for_write(struct address_space *mapping,
				      struct ocfs2_write_ctxt *wc,
				      u32 cpos, loff_t user_pos, int new,
				      struct page *mmap_page)
{
	int ret = 0, i;
	unsigned long start, target_index, index;
	struct inode *inode = mapping->host;

	target_index = user_pos >> PAGE_CACHE_SHIFT;

	/*
	 * Figure out how many pages we'll be manipulating here. For
	 * non allocating write, we just change the one
	 * page. Otherwise, we'll need a whole cluster's worth.
	 */
	if (new) {
		wc->w_num_pages = ocfs2_pages_per_cluster(inode->i_sb);
		start = ocfs2_align_clusters_to_page_index(inode->i_sb, cpos);
	} else {
		wc->w_num_pages = 1;
		start = target_index;
	}

	for (i = 0; i < wc->w_num_pages; i++) {
		index = start + i;

		if (index == target_index && mmap_page) {
			/*
			 * ocfs2_pagemkwrite() is a little different
			 * and wants us to directly use the page
			 * passed in.
			 */
			lock_page(mmap_page);

			if (mmap_page->mapping != mapping) {
				unlock_page(mmap_page);
				/*
				 * Sanity check - the locking in
				 * ocfs2_pagemkwrite() should ensure
				 * that this code doesn't trigger.
				 */
				ret = -EINVAL;
				mlog_errno(ret);
				goto out;
			}

			page_cache_get(mmap_page);
			wc->w_pages[i] = mmap_page;
		} else {
			wc->w_pages[i] = find_or_create_page(mapping, index,
							     GFP_NOFS);
			if (!wc->w_pages[i]) {
				ret = -ENOMEM;
				mlog_errno(ret);
				goto out;
			}
		}

		if (index == target_index)
			wc->w_target_page = wc->w_pages[i];
	}
out:
	return ret;
}

/*
 * Prepare a single cluster of the file for writing.
 */
static int ocfs2_write_cluster(struct address_space *mapping,
			       u32 phys, unsigned int unwritten,
			       struct ocfs2_alloc_context *data_ac,
			       struct ocfs2_alloc_context *meta_ac,
			       struct ocfs2_write_ctxt *wc, u32 cpos,
			       loff_t user_pos, unsigned user_len)
{
	int ret, i, new, should_zero = 0;
	u64 v_blkno, p_blkno;
	struct inode *inode = mapping->host;

	new = phys == 0 ? 1 : 0;
	if (new || unwritten)
		should_zero = 1;

	if (new) {
		u32 tmp_pos;

		/*
		 * This is safe to call with the page locks - it won't take
		 * any additional semaphores or cluster locks.
		 */
		tmp_pos = cpos;
		ret = ocfs2_do_extend_allocation(OCFS2_SB(inode->i_sb), inode,
						 &tmp_pos, 1, 0, wc->w_di_bh,
						 wc->w_handle, data_ac,
						 meta_ac, NULL);
		/*
		 * This shouldn't happen because we must have already
		 * calculated the correct meta data allocation required. The
		 * internal tree allocation code should know how to increase
		 * transaction credits itself.
		 *
		 * If need be, we could handle -EAGAIN for a
		 * RESTART_TRANS here.
		 */
		mlog_bug_on_msg(ret == -EAGAIN,
				"Inode %llu: EAGAIN return during allocation.\n",
				(unsigned long long)OCFS2_I(inode)->ip_blkno);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}
	} else if (unwritten) {
		ret = ocfs2_mark_extent_written(inode, wc->w_di_bh,
						wc->w_handle, cpos, 1, phys,
						meta_ac, &wc->w_dealloc);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}
	}

	if (should_zero)
		v_blkno = ocfs2_clusters_to_blocks(inode->i_sb, cpos);
	else
		v_blkno = user_pos >> inode->i_sb->s_blocksize_bits;

	/*
	 * The only reason this should fail is due to an inability to
	 * find the extent added.
	 */
	ret = ocfs2_extent_map_get_blocks(inode, v_blkno, &p_blkno, NULL,
					  NULL);
	if (ret < 0) {
		ocfs2_error(inode->i_sb, "Corrupting extend for inode %llu, "
			    "at logical block %llu",
			    (unsigned long long)OCFS2_I(inode)->ip_blkno,
			    (unsigned long long)v_blkno);
		goto out;
	}

	BUG_ON(p_blkno == 0);

	for (i = 0; i < wc->w_num_pages; i++) {
		int tmpret;

		tmpret = ocfs2_prepare_page_for_write(inode, &p_blkno, wc,
						      wc->w_pages[i], cpos,
						      user_pos, user_len,
						      should_zero);
		if (tmpret) {
			mlog_errno(tmpret);
			if (ret == 0)
				ret = tmpret;
		}
	}

	/*
	 * We only have cleanup to do in case of allocating write.
	 */
	if (ret && new)
		ocfs2_write_failure(inode, wc, user_pos, user_len);

out:

	return ret;
}

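/*
 * The loop below clamps each pass to a single cluster.  Worked example
 * (illustrative values): assuming 4K clusters, a write of 3000 bytes at
 * pos = 6000 does a first pass of 4096 - (6000 & 4095) = 2192 bytes,
 * then a second pass of the remaining 808 bytes starting at the next
 * cluster boundary (pos = 8192).
 */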
static int ocfs2_write_cluster_by_desc(struct address_space *mapping,
				       struct ocfs2_alloc_context *data_ac,
				       struct ocfs2_alloc_context *meta_ac,
				       struct ocfs2_write_ctxt *wc,
				       loff_t pos, unsigned len)
{
	int ret, i;
	loff_t cluster_off;
	unsigned int local_len = len;
	struct ocfs2_write_cluster_desc *desc;
	struct ocfs2_super *osb = OCFS2_SB(mapping->host->i_sb);

	for (i = 0; i < wc->w_clen; i++) {
		desc = &wc->w_desc[i];

		/*
		 * We have to make sure that the total write passed in
		 * doesn't extend past a single cluster.
		 */
		local_len = len;
		cluster_off = pos & (osb->s_clustersize - 1);
		if ((cluster_off + local_len) > osb->s_clustersize)
			local_len = osb->s_clustersize - cluster_off;

		ret = ocfs2_write_cluster(mapping, desc->c_phys,
					  desc->c_unwritten, data_ac, meta_ac,
					  wc, desc->c_cpos, pos, local_len);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		len -= local_len;
		pos += local_len;
	}

	ret = 0;
out:
	return ret;
}

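/*
 * Example of the boundary logic below (illustrative values): assuming
 * 64K pages and 4K clusters, a 100-byte write at offset 8200 into a
 * hole would normally target bytes [8200, 8300) of the page, but
 * because the newly allocated cluster must be zeroed the target range
 * is widened to its cluster boundaries, [8192, 12288).
 */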
/*
 * ocfs2_write_end() wants to know which parts of the target page it
 * should complete the write on. It's easiest to compute them ahead of
 * time when a more complete view of the write is available.
 */
static void ocfs2_set_target_boundaries(struct ocfs2_super *osb,
					struct ocfs2_write_ctxt *wc,
					loff_t pos, unsigned len, int alloc)
{
	struct ocfs2_write_cluster_desc *desc;

	wc->w_target_from = pos & (PAGE_CACHE_SIZE - 1);
	wc->w_target_to = wc->w_target_from + len;

	if (alloc == 0)
		return;

	/*
	 * Allocating write - we may have different boundaries based
	 * on page size and cluster size.
	 *
	 * NOTE: We can no longer compute one value from the other as
	 * the actual write length and user provided length may be
	 * different.
	 */

	if (wc->w_large_pages) {
		/*
		 * We only care about the 1st and last cluster within
		 * our range and whether they should be zero'd or not. Either
		 * value may be extended out to the start/end of a
		 * newly allocated cluster.
		 */
		desc = &wc->w_desc[0];
		if (ocfs2_should_zero_cluster(desc))
			ocfs2_figure_cluster_boundaries(osb,
							desc->c_cpos,
							&wc->w_target_from,
							NULL);

		desc = &wc->w_desc[wc->w_clen - 1];
		if (ocfs2_should_zero_cluster(desc))
			ocfs2_figure_cluster_boundaries(osb,
							desc->c_cpos,
							NULL,
							&wc->w_target_to);
	} else {
		wc->w_target_from = 0;
		wc->w_target_to = PAGE_CACHE_SIZE;
	}
}

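/*
 * Worst-case estimate used below: writing into the middle of an
 * unwritten extent can split it into three pieces (unwritten / written /
 * unwritten), i.e. up to two extra extent records for every unwritten
 * extent touched by the write.
 */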
/*
 * Populate each single-cluster write descriptor in the write context
 * with information about the i/o to be done.
 *
 * Returns the number of clusters that will have to be allocated, as
 * well as a worst case estimate of the number of extent records that
 * would have to be created during a write to an unwritten region.
 */
static int ocfs2_populate_write_desc(struct inode *inode,
				     struct ocfs2_write_ctxt *wc,
				     unsigned int *clusters_to_alloc,
				     unsigned int *extents_to_split)
{
	int ret;
	struct ocfs2_write_cluster_desc *desc;
	unsigned int num_clusters = 0;
	unsigned int ext_flags = 0;
	u32 phys = 0;
	int i;

	*clusters_to_alloc = 0;
	*extents_to_split = 0;

	for (i = 0; i < wc->w_clen; i++) {
		desc = &wc->w_desc[i];
		desc->c_cpos = wc->w_cpos + i;

		if (num_clusters == 0) {
			/*
			 * Need to look up the next extent record.
			 */
			ret = ocfs2_get_clusters(inode, desc->c_cpos, &phys,
						 &num_clusters, &ext_flags);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}

			/*
			 * Assume worst case - that we're writing in
			 * the middle of the extent.
			 *
			 * We can assume that the write proceeds from
			 * left to right, in which case the extent
			 * insert code is smart enough to coalesce the
			 * next splits into the previous records created.
			 */
			if (ext_flags & OCFS2_EXT_UNWRITTEN)
				*extents_to_split = *extents_to_split + 2;
		} else if (phys) {
			/*
			 * Only increment phys if it doesn't describe
			 * a hole.
			 */
			phys++;
		}

		desc->c_phys = phys;
		if (phys == 0) {
			desc->c_new = 1;
			*clusters_to_alloc = *clusters_to_alloc + 1;
		}
		if (ext_flags & OCFS2_EXT_UNWRITTEN)
			desc->c_unwritten = 1;

		num_clusters--;
	}

	ret = 0;
out:
	return ret;
}

/*
 * This function only does anything for file systems which can't
 * handle sparse files.
 *
 * What we want to do here is fill in any hole between the current end
 * of allocation and the end of our write. That way the rest of the
 * write path can treat it as a non-allocating write, which has no
 * special case code for sparse/nonsparse files.
 */
static int ocfs2_expand_nonsparse_inode(struct inode *inode, loff_t pos,
					unsigned len,
					struct ocfs2_write_ctxt *wc)
{
	int ret;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	loff_t newsize = pos + len;

	if (ocfs2_sparse_alloc(osb))
		return 0;

	if (newsize <= i_size_read(inode))
		return 0;

	ret = ocfs2_extend_no_holes(inode, newsize, newsize - len);
	if (ret)
		mlog_errno(ret);

	return ret;
}

int ocfs2_write_begin_nolock(struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned flags,
			     struct page **pagep, void **fsdata,
			     struct buffer_head *di_bh, struct page *mmap_page)
{
	int ret, credits = OCFS2_INODE_UPDATE_CREDITS;
	unsigned int clusters_to_alloc, extents_to_split;
	struct ocfs2_write_ctxt *wc;
	struct inode *inode = mapping->host;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_dinode *di;
	struct ocfs2_alloc_context *data_ac = NULL;
	struct ocfs2_alloc_context *meta_ac = NULL;
	handle_t *handle;

	ret = ocfs2_alloc_write_ctxt(&wc, osb, pos, len, di_bh);
	if (ret) {
		mlog_errno(ret);
		return ret;
	}

	ret = ocfs2_expand_nonsparse_inode(inode, pos, len, wc);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_populate_write_desc(inode, wc, &clusters_to_alloc,
					&extents_to_split);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;

	/*
	 * We set w_target_from, w_target_to here so that
	 * ocfs2_write_end() knows which range in the target page to
	 * write out. An allocation requires that we write the entire
	 * cluster range.
	 */
	if (clusters_to_alloc || extents_to_split) {
		/*
		 * XXX: We are stretching the limits of
		 * ocfs2_lock_allocators(). It greatly over-estimates
		 * the work to be done.
		 */
		ret = ocfs2_lock_allocators(inode, di, clusters_to_alloc,
					    extents_to_split, &data_ac, &meta_ac);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		credits = ocfs2_calc_extend_credits(inode->i_sb, di,
						    clusters_to_alloc);

	}

	ocfs2_set_target_boundaries(osb, wc, pos, len,
				    clusters_to_alloc + extents_to_split);

	handle = ocfs2_start_trans(osb, credits);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	wc->w_handle = handle;

	/*
	 * We don't want this to fail in ocfs2_write_end(), so do it
	 * here.
	 */
	ret = ocfs2_journal_access(handle, inode, wc->w_di_bh,
				   OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	/*
	 * Fill our page array first. That way we've grabbed enough so
	 * that we can zero and flush if we error after adding the
	 * extent.
	 */
	ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos,
					 clusters_to_alloc + extents_to_split,
					 mmap_page);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	ret = ocfs2_write_cluster_by_desc(mapping, data_ac, meta_ac, wc, pos,
					  len);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	if (data_ac)
		ocfs2_free_alloc_context(data_ac);
	if (meta_ac)
		ocfs2_free_alloc_context(meta_ac);

	*pagep = wc->w_target_page;
	*fsdata = wc;
	return 0;
out_commit:
	ocfs2_commit_trans(osb, handle);

out:
	ocfs2_free_write_ctxt(wc);

	if (data_ac)
		ocfs2_free_alloc_context(data_ac);
	if (meta_ac)
		ocfs2_free_alloc_context(meta_ac);
	return ret;
}

int ocfs2_write_begin(struct file *file, struct address_space *mapping,
		      loff_t pos, unsigned len, unsigned flags,
		      struct page **pagep, void **fsdata)
{
	int ret;
	struct buffer_head *di_bh = NULL;
	struct inode *inode = mapping->host;

	ret = ocfs2_meta_lock(inode, &di_bh, 1);
	if (ret) {
		mlog_errno(ret);
		return ret;
	}

	/*
	 * Take alloc sem here to prevent concurrent lookups. That way
	 * the mapping, zeroing and tree manipulation within
	 * ocfs2_write() will be safe against ->readpage(). This
	 * should also serve to lock out allocation from a shared
	 * writeable region.
	 */
	down_write(&OCFS2_I(inode)->ip_alloc_sem);

	ret = ocfs2_data_lock(inode, 1);
	if (ret) {
		mlog_errno(ret);
		goto out_fail;
	}

	ret = ocfs2_write_begin_nolock(mapping, pos, len, flags, pagep,
				       fsdata, di_bh, NULL);
	if (ret) {
		mlog_errno(ret);
		goto out_fail_data;
	}

	brelse(di_bh);

	return 0;

out_fail_data:
	ocfs2_data_unlock(inode, 1);
out_fail:
	up_write(&OCFS2_I(inode)->ip_alloc_sem);

	brelse(di_bh);
	ocfs2_meta_unlock(inode, 1);

	return ret;
}

int ocfs2_write_end_nolock(struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct page *page, void *fsdata)
{
	int i;
	unsigned from, to, start = pos & (PAGE_CACHE_SIZE - 1);
	struct inode *inode = mapping->host;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_write_ctxt *wc = fsdata;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
	handle_t *handle = wc->w_handle;
	struct page *tmppage;

	if (unlikely(copied < len)) {
		if (!PageUptodate(wc->w_target_page))
			copied = 0;

		ocfs2_zero_new_buffers(wc->w_target_page, start + copied,
				       start + len);
	}
	flush_dcache_page(wc->w_target_page);

	for (i = 0; i < wc->w_num_pages; i++) {
		tmppage = wc->w_pages[i];

		if (tmppage == wc->w_target_page) {
			from = wc->w_target_from;
			to = wc->w_target_to;

			BUG_ON(from > PAGE_CACHE_SIZE ||
			       to > PAGE_CACHE_SIZE ||
			       to < from);
		} else {
			/*
			 * Pages adjacent to the target (if any) imply
			 * a hole-filling write in which case we want
			 * to flush their entire range.
			 */
			from = 0;
			to = PAGE_CACHE_SIZE;
		}

		if (ocfs2_should_order_data(inode))
			walk_page_buffers(wc->w_handle, page_buffers(tmppage),
					  from, to, NULL,
					  ocfs2_journal_dirty_data);

		block_commit_write(tmppage, from, to);
	}

	pos += copied;
	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		mark_inode_dirty(inode);
	}
	inode->i_blocks = ocfs2_inode_sector_count(inode);
	di->i_size = cpu_to_le64((u64)i_size_read(inode));
	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec);
	di->i_mtime_nsec = di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	ocfs2_journal_dirty(handle, wc->w_di_bh);

	ocfs2_commit_trans(osb, handle);

	ocfs2_run_deallocs(osb, &wc->w_dealloc);

	ocfs2_free_write_ctxt(wc);

	return copied;
}

int ocfs2_write_end(struct file *file, struct address_space *mapping,
		    loff_t pos, unsigned len, unsigned copied,
		    struct page *page, void *fsdata)
{
	int ret;
	struct inode *inode = mapping->host;

	ret = ocfs2_write_end_nolock(mapping, pos, len, copied, page, fsdata);

	ocfs2_data_unlock(inode, 1);
	up_write(&OCFS2_I(inode)->ip_alloc_sem);
	ocfs2_meta_unlock(inode, 1);

	return ret;
}

const struct address_space_operations ocfs2_aops = {
	.readpage	= ocfs2_readpage,
	.writepage	= ocfs2_writepage,
	.bmap		= ocfs2_bmap,
	.sync_page	= block_sync_page,
	.direct_IO	= ocfs2_direct_IO,
	.invalidatepage	= ocfs2_invalidatepage,
	.releasepage	= ocfs2_releasepage,
	.migratepage	= buffer_migrate_page,
};