/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * Copyright (C) 2002, 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <asm/byteorder.h>
#include <linux/swap.h>
#include <linux/pipe_fs_i.h>

#define MLOG_MASK_PREFIX ML_FILE_IO
#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "aops.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "file.h"
#include "inode.h"
#include "journal.h"
#include "suballoc.h"
#include "super.h"
#include "symlink.h"

#include "buffer_head_io.h"

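/*
 * get_block callback for regular (non-fast) symlink inodes.  Symlink
 * data is created through the buffer cache rather than the page cache,
 * so for a brand new link the up to date bytes may still live in a
 * journaled buffer; in that case we copy them into bh_result by hand
 * before mapping the block.
 */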
static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock,
                                   struct buffer_head *bh_result, int create)
{
        int err = -EIO;
        int status;
        struct ocfs2_dinode *fe = NULL;
        struct buffer_head *bh = NULL;
        struct buffer_head *buffer_cache_bh = NULL;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        void *kaddr;

        mlog_entry("(0x%p, %llu, 0x%p, %d)\n", inode,
                   (unsigned long long)iblock, bh_result, create);

        BUG_ON(ocfs2_inode_is_fast_symlink(inode));

        if ((iblock << inode->i_sb->s_blocksize_bits) > PATH_MAX + 1) {
                mlog(ML_ERROR, "block offset > PATH_MAX: %llu",
                     (unsigned long long)iblock);
                goto bail;
        }

        status = ocfs2_read_block(OCFS2_SB(inode->i_sb),
                                  OCFS2_I(inode)->ip_blkno,
                                  &bh, OCFS2_BH_CACHED, inode);
        if (status < 0) {
                mlog_errno(status);
                goto bail;
        }
        fe = (struct ocfs2_dinode *) bh->b_data;

        if (!OCFS2_IS_VALID_DINODE(fe)) {
                mlog(ML_ERROR, "Invalid dinode #%llu: signature = %.*s\n",
                     (unsigned long long)fe->i_blkno, 7, fe->i_signature);
                goto bail;
        }

        if ((u64)iblock >= ocfs2_clusters_to_blocks(inode->i_sb,
                                                    le32_to_cpu(fe->i_clusters))) {
                mlog(ML_ERROR, "block offset is outside the allocated size: "
                     "%llu\n", (unsigned long long)iblock);
                goto bail;
        }

        /* We don't use the page cache to create symlink data, so if
         * need be, copy it over from the buffer cache. */
        if (!buffer_uptodate(bh_result) && ocfs2_inode_is_new(inode)) {
                u64 blkno = le64_to_cpu(fe->id2.i_list.l_recs[0].e_blkno) +
                            iblock;
                buffer_cache_bh = sb_getblk(osb->sb, blkno);
                if (!buffer_cache_bh) {
                        mlog(ML_ERROR, "couldn't getblock for symlink!\n");
                        goto bail;
                }

                /* we haven't locked out transactions, so a commit
                 * could've happened. Since we've got a reference on
                 * the bh, even if it commits while we're doing the
                 * copy, the data is still good. */
                if (buffer_jbd(buffer_cache_bh)
                    && ocfs2_inode_is_new(inode)) {
                        kaddr = kmap_atomic(bh_result->b_page, KM_USER0);
                        if (!kaddr) {
                                mlog(ML_ERROR, "couldn't kmap!\n");
                                goto bail;
                        }
                        memcpy(kaddr + (bh_result->b_size * iblock),
                               buffer_cache_bh->b_data,
                               bh_result->b_size);
                        kunmap_atomic(kaddr, KM_USER0);
                        set_buffer_uptodate(bh_result);
                }
                brelse(buffer_cache_bh);
        }

        map_bh(bh_result, inode->i_sb,
               le64_to_cpu(fe->id2.i_list.l_recs[0].e_blkno) + iblock);

        err = 0;

bail:
        if (bh)
                brelse(bh);

        mlog_exit(err);
        return err;
}

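/*
 * General get_block callback, handed to the generic buffer helpers
 * (block_read_full_page(), block_write_full_page(),
 * block_prepare_write()).  This function never allocates - any write
 * which needs space is expected to have reserved it before getting
 * here.
 */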
static int ocfs2_get_block(struct inode *inode, sector_t iblock,
                           struct buffer_head *bh_result, int create)
{
        int err = 0;
        unsigned int ext_flags;
        u64 p_blkno, past_eof;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

        mlog_entry("(0x%p, %llu, 0x%p, %d)\n", inode,
                   (unsigned long long)iblock, bh_result, create);

        if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE)
                mlog(ML_NOTICE, "get_block on system inode 0x%p (%lu)\n",
                     inode, inode->i_ino);

        if (S_ISLNK(inode->i_mode)) {
                /* this always does I/O for some reason. */
                err = ocfs2_symlink_get_block(inode, iblock, bh_result, create);
                goto bail;
        }

        err = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno, NULL,
                                          &ext_flags);
        if (err) {
                mlog(ML_ERROR, "Error %d from get_blocks(0x%p, %llu, 1, "
                     "%llu, NULL)\n", err, inode, (unsigned long long)iblock,
                     (unsigned long long)p_blkno);
                goto bail;
        }

        /*
         * ocfs2 never allocates in this function - the only time we
         * need to use BH_New is when we're extending i_size on a file
         * system which doesn't support holes, in which case BH_New
         * allows block_prepare_write() to zero.
         */
        mlog_bug_on_msg(create && p_blkno == 0 && ocfs2_sparse_alloc(osb),
                        "ino %lu, iblock %llu\n", inode->i_ino,
                        (unsigned long long)iblock);

        /* Treat the unwritten extent as a hole for zeroing purposes. */
        if (p_blkno && !(ext_flags & OCFS2_EXT_UNWRITTEN))
                map_bh(bh_result, inode->i_sb, p_blkno);

        if (!ocfs2_sparse_alloc(osb)) {
                if (p_blkno == 0) {
                        err = -EIO;
                        mlog(ML_ERROR,
                             "iblock = %llu p_blkno = %llu blkno=(%llu)\n",
                             (unsigned long long)iblock,
                             (unsigned long long)p_blkno,
                             (unsigned long long)OCFS2_I(inode)->ip_blkno);
                        mlog(ML_ERROR, "Size %llu, clusters %u\n",
                             (unsigned long long)i_size_read(inode),
                             OCFS2_I(inode)->ip_clusters);
                        dump_stack();
                }

                past_eof = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));
                mlog(0, "Inode %lu, past_eof = %llu\n", inode->i_ino,
                     (unsigned long long)past_eof);

                if (create && (iblock >= past_eof))
                        set_buffer_new(bh_result);
        }

bail:
        if (err < 0)
                err = -EIO;

        mlog_exit(err);
        return err;
}

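/*
 * ->readpage() for ocfs2.  We take the meta (and then data) cluster
 * locks before filling the page so that a truncate or write on
 * another node can't change the mapping underneath us.
 * AOP_TRUNCATED_PAGE from the lock-with-page helpers means the page
 * lock was already dropped, so we must not unlock the page again on
 * the way out.
 */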
static int ocfs2_readpage(struct file *file, struct page *page)
{
        struct inode *inode = page->mapping->host;
        loff_t start = (loff_t)page->index << PAGE_CACHE_SHIFT;
        int ret, unlock = 1;

        mlog_entry("(0x%p, %lu)\n", file, (page ? page->index : 0));

        ret = ocfs2_meta_lock_with_page(inode, NULL, 0, page);
        if (ret != 0) {
                if (ret == AOP_TRUNCATED_PAGE)
                        unlock = 0;
                mlog_errno(ret);
                goto out;
        }

        down_read(&OCFS2_I(inode)->ip_alloc_sem);

        /*
         * i_size might have just been updated as we grabbed the meta lock. We
         * might now be discovering a truncate that hit on another node.
         * block_read_full_page->get_block freaks out if it is asked to read
         * beyond the end of a file, so we check here. Callers
         * (generic_file_read, fault->nopage) are clever enough to check i_size
         * and notice that the page they just read isn't needed.
         *
         * XXX sys_readahead() seems to get that wrong?
         */
        if (start >= i_size_read(inode)) {
                char *addr = kmap(page);
                memset(addr, 0, PAGE_SIZE);
                flush_dcache_page(page);
                kunmap(page);
                SetPageUptodate(page);
                ret = 0;
                goto out_alloc;
        }

        ret = ocfs2_data_lock_with_page(inode, 0, page);
        if (ret != 0) {
                if (ret == AOP_TRUNCATED_PAGE)
                        unlock = 0;
                mlog_errno(ret);
                goto out_alloc;
        }

        ret = block_read_full_page(page, ocfs2_get_block);
        unlock = 0;

        ocfs2_data_unlock(inode, 0);
out_alloc:
        up_read(&OCFS2_I(inode)->ip_alloc_sem);
        ocfs2_meta_unlock(inode, 0);
out:
        if (unlock)
                unlock_page(page);
        mlog_exit(ret);
        return ret;
}

/* Note: Because we don't support holes, our allocation has
 * already happened (allocation writes zeros to the file data)
 * so we don't have to worry about ordered writes in
 * ocfs2_writepage.
 *
 * ->writepage is called during the process of invalidating the page cache
 * during blocked lock processing.  It can't block on any cluster locks
 * during block mapping.  It's relying on the fact that the block
 * mapping can't have disappeared under the dirty pages that it is
 * being asked to write back.
 */
static int ocfs2_writepage(struct page *page, struct writeback_control *wbc)
{
        int ret;

        mlog_entry("(0x%p)\n", page);

        ret = block_write_full_page(page, ocfs2_get_block, wbc);

        mlog_exit(ret);

        return ret;
}

/*
 * This is called from ocfs2_write_zero_page() which has handled its
 * own cluster locking and has ensured allocation exists for those
 * blocks to be written.
 */
int ocfs2_prepare_write_nolock(struct inode *inode, struct page *page,
                               unsigned from, unsigned to)
{
        int ret;

        down_read(&OCFS2_I(inode)->ip_alloc_sem);

        ret = block_prepare_write(page, from, to, ocfs2_get_block);

        up_read(&OCFS2_I(inode)->ip_alloc_sem);

        return ret;
}

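/*
 * Apply fn() to each buffer_head of the page which overlaps the byte
 * range [from, to).  Buffers outside the range are skipped, with
 * *partial noting any skipped buffer that isn't up to date.  The walk
 * stops at the first error from fn().
 */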
/* Taken from ext3. We don't necessarily need the full blown
 * functionality yet, but IMHO it's better to cut and paste the whole
 * thing so we can avoid introducing our own bugs (and easily pick up
 * their fixes when they happen) --Mark */
int walk_page_buffers(  handle_t *handle,
                        struct buffer_head *head,
                        unsigned from,
                        unsigned to,
                        int *partial,
                        int (*fn)(      handle_t *handle,
                                        struct buffer_head *bh))
{
        struct buffer_head *bh;
        unsigned block_start, block_end;
        unsigned blocksize = head->b_size;
        int err, ret = 0;
        struct buffer_head *next;

        for (   bh = head, block_start = 0;
                ret == 0 && (bh != head || !block_start);
                block_start = block_end, bh = next)
        {
                next = bh->b_this_page;
                block_end = block_start + blocksize;
                if (block_end <= from || block_start >= to) {
                        if (partial && !buffer_uptodate(bh))
                                *partial = 1;
                        continue;
                }
                err = (*fn)(handle, bh);
                if (!ret)
                        ret = err;
        }
        return ret;
}

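/*
 * Start a transaction for a page write.  In data=ordered mode the
 * page's dirty buffers are attached to the handle so that the data
 * hits disk before the metadata commits.  On failure the handle is
 * cleaned up here and an ERR_PTR() is returned.
 */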
handle_t *ocfs2_start_walk_page_trans(struct inode *inode,
                                      struct page *page,
                                      unsigned from,
                                      unsigned to)
{
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        handle_t *handle = NULL;
        int ret = 0;

        handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
        if (!handle) {
                ret = -ENOMEM;
                mlog_errno(ret);
                goto out;
        }

        if (ocfs2_should_order_data(inode)) {
                ret = walk_page_buffers(handle,
                                        page_buffers(page),
                                        from, to, NULL,
                                        ocfs2_journal_dirty_data);
                if (ret < 0)
                        mlog_errno(ret);
        }
out:
        if (ret) {
                if (handle)
                        ocfs2_commit_trans(osb, handle);
                handle = ERR_PTR(ret);
        }
        return handle;
}

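/*
 * ->bmap() for ocfs2, i.e. the backend of FIBMAP.  A return value of
 * zero means "unmapped or error" - callers of bmap can't tell a hole
 * apart from a failed lookup.
 */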
static sector_t ocfs2_bmap(struct address_space *mapping, sector_t block)
{
        sector_t status;
        u64 p_blkno = 0;
        int err = 0;
        struct inode *inode = mapping->host;

        mlog_entry("(block = %llu)\n", (unsigned long long)block);

        /* We don't need to lock journal system files, since they aren't
         * accessed concurrently from multiple nodes.
         */
        if (!INODE_JOURNAL(inode)) {
                err = ocfs2_meta_lock(inode, NULL, 0);
                if (err) {
                        if (err != -ENOENT)
                                mlog_errno(err);
                        goto bail;
                }
                down_read(&OCFS2_I(inode)->ip_alloc_sem);
        }

        err = ocfs2_extent_map_get_blocks(inode, block, &p_blkno, NULL, NULL);

        if (!INODE_JOURNAL(inode)) {
                up_read(&OCFS2_I(inode)->ip_alloc_sem);
                ocfs2_meta_unlock(inode, 0);
        }

        if (err) {
                mlog(ML_ERROR, "get_blocks() failed, block = %llu\n",
                     (unsigned long long)block);
                mlog_errno(err);
                goto bail;
        }

bail:
        status = err ? 0 : p_blkno;

        mlog_exit((int)status);

        return status;
}

/*
 * TODO: Make this into a generic get_blocks function.
 *
 * From do_direct_io in direct-io.c:
 * "So what we do is to permit the ->get_blocks function to populate
 *  bh.b_size with the size of IO which is permitted at this offset and
 *  this i_blkbits."
 *
 * This function is called directly from get_more_blocks in direct-io.c.
 *
 * called like this: dio->get_blocks(dio->inode, fs_startblk,
 *                                   fs_count, map_bh, dio->rw == WRITE);
 */
static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock,
                                      struct buffer_head *bh_result, int create)
{
        int ret;
        u64 p_blkno, inode_blocks;
        int contig_blocks;
        unsigned int ext_flags;
        unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
        unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits;

        /* This function won't even be called if the request isn't all
         * nicely aligned and of the right size, so there's no need
         * for us to check any of that. */

        inode_blocks = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));

        /*
         * Any write past EOF is not allowed because we'd be extending.
         */
        if (create && (iblock + max_blocks) > inode_blocks) {
                ret = -EIO;
                goto bail;
        }

        /* This figures out the size of the next contiguous block, and
         * our logical offset */
        ret = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno,
                                          &contig_blocks, &ext_flags);
        if (ret) {
                mlog(ML_ERROR, "get_blocks() failed iblock=%llu\n",
                     (unsigned long long)iblock);
                ret = -EIO;
                goto bail;
        }

        if (!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)) && !p_blkno) {
                ocfs2_error(inode->i_sb,
                            "Inode %llu has a hole at block %llu\n",
                            (unsigned long long)OCFS2_I(inode)->ip_blkno,
                            (unsigned long long)iblock);
                ret = -EROFS;
                goto bail;
        }

        /*
         * get_more_blocks() expects us to describe a hole by clearing
         * the mapped bit on bh_result().
         *
         * Consider an unwritten extent as a hole.
         */
        if (p_blkno && !(ext_flags & OCFS2_EXT_UNWRITTEN))
                map_bh(bh_result, inode->i_sb, p_blkno);
        else {
                /*
                 * ocfs2_prepare_inode_for_write() should have caught
                 * the case where we'd be filling a hole and triggered
                 * a buffered write instead.
                 */
                if (create) {
                        ret = -EIO;
                        mlog_errno(ret);
                        goto bail;
                }

                clear_buffer_mapped(bh_result);
        }

        /* make sure we don't map more than max_blocks blocks here as
           that's all the kernel will handle at this point. */
        if (max_blocks < contig_blocks)
                contig_blocks = max_blocks;
        bh_result->b_size = contig_blocks << blocksize_bits;
bail:
        return ret;
}

/*
 * ocfs2_dio_end_io is called by the dio core when a dio is finished.  We're
 * particularly interested in the aio/dio case.  Like the core uses
 * i_alloc_sem, we use the rw_lock DLM lock to protect io on one node from
 * truncation on another.
 */
static void ocfs2_dio_end_io(struct kiocb *iocb,
                             loff_t offset,
                             ssize_t bytes,
                             void *private)
{
        struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;

        /* this io's submitter should not have unlocked this before we could */
        BUG_ON(!ocfs2_iocb_is_rw_locked(iocb));
        ocfs2_iocb_clear_rw_locked(iocb);
        up_read(&inode->i_alloc_sem);
        ocfs2_rw_unlock(inode, 0);
}

/*
 * ocfs2_invalidatepage() and ocfs2_releasepage() are shamelessly stolen
 * from ext3.  PageChecked() bits have been removed as OCFS2 does not
 * do journalled data.
 */
static void ocfs2_invalidatepage(struct page *page, unsigned long offset)
{
        journal_t *journal = OCFS2_SB(page->mapping->host->i_sb)->journal->j_journal;

        journal_invalidatepage(journal, page, offset);
}

static int ocfs2_releasepage(struct page *page, gfp_t wait)
{
        journal_t *journal = OCFS2_SB(page->mapping->host->i_sb)->journal->j_journal;

        if (!page_has_buffers(page))
                return 0;
        return journal_try_to_free_buffers(journal, page, wait);
}

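/*
 * Entry point for O_DIRECT reads and writes.  The submitting context
 * must already hold the rw cluster lock and i_alloc_sem - see the
 * BUG_ON() in ocfs2_dio_end_io(), which is where both get dropped -
 * so we can use the no-locking flavor of blockdev_direct_IO().
 */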
static ssize_t ocfs2_direct_IO(int rw,
                               struct kiocb *iocb,
                               const struct iovec *iov,
                               loff_t offset,
                               unsigned long nr_segs)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_path.dentry->d_inode->i_mapping->host;
        int ret;

        mlog_entry_void();

        if (!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb))) {
                /*
                 * We get PR data locks even for O_DIRECT.  This
                 * allows concurrent O_DIRECT I/O but doesn't let
                 * O_DIRECT with extending and buffered zeroing writes
                 * race.  If they did race then the buffered zeroing
                 * could be written back after the O_DIRECT I/O.  It's
                 * one thing to tell people not to mix buffered and
                 * O_DIRECT writes, but expecting them to understand
                 * that file extension is also an implicit buffered
                 * write is too much.  By getting the PR we force
                 * writeback of the buffered zeroing before
                 * proceeding.
                 */
                ret = ocfs2_data_lock(inode, 0);
                if (ret < 0) {
                        mlog_errno(ret);
                        goto out;
                }
                ocfs2_data_unlock(inode, 0);
        }

        ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
                                            inode->i_sb->s_bdev, iov, offset,
                                            nr_segs,
                                            ocfs2_direct_IO_get_blocks,
                                            ocfs2_dio_end_io);
out:
        mlog_exit(ret);
        return ret;
}

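/*
 * Compute the byte range [*start, *end) which cluster 'cpos' occupies
 * within its page.  This is only interesting when the cluster size is
 * smaller than the page size; otherwise one cluster covers the whole
 * page.  As a worked example with a hypothetical geometry of 64k
 * pages and 4k clusters: cpp = 16 clusters per page, so cpos 37 gives
 * cluster_start = (37 % 16) << 12 = 20480 and cluster_end = 24576.
 */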
static void ocfs2_figure_cluster_boundaries(struct ocfs2_super *osb,
                                            u32 cpos,
                                            unsigned int *start,
                                            unsigned int *end)
{
        unsigned int cluster_start = 0, cluster_end = PAGE_CACHE_SIZE;

        if (unlikely(PAGE_CACHE_SHIFT > osb->s_clustersize_bits)) {
                unsigned int cpp;

                cpp = 1 << (PAGE_CACHE_SHIFT - osb->s_clustersize_bits);

                cluster_start = cpos % cpp;
                cluster_start = cluster_start << osb->s_clustersize_bits;

                cluster_end = cluster_start + osb->s_clustersize;
        }

        BUG_ON(cluster_start > PAGE_SIZE);
        BUG_ON(cluster_end > PAGE_SIZE);

        if (start)
                *start = cluster_start;
        if (end)
                *end = cluster_end;
}

/*
 * 'from' and 'to' are the region in the page to avoid zeroing.
 *
 * If pagesize > clustersize, this function will avoid zeroing outside
 * of the cluster boundary.
 *
 * from == to == 0 is code for "zero the entire cluster region"
 */
static void ocfs2_clear_page_regions(struct page *page,
                                     struct ocfs2_super *osb, u32 cpos,
                                     unsigned from, unsigned to)
{
        void *kaddr;
        unsigned int cluster_start, cluster_end;

        ocfs2_figure_cluster_boundaries(osb, cpos, &cluster_start, &cluster_end);

        kaddr = kmap_atomic(page, KM_USER0);

        if (from || to) {
                if (from > cluster_start)
                        memset(kaddr + cluster_start, 0, from - cluster_start);
                if (to < cluster_end)
                        memset(kaddr + to, 0, cluster_end - to);
        } else {
                memset(kaddr + cluster_start, 0, cluster_end - cluster_start);
        }

        kunmap_atomic(kaddr, KM_USER0);
}

/*
 * Some of this taken from block_prepare_write().  We already have our
 * mapping by now though, and the entire write will be allocating or
 * it won't, so not much need to use BH_New.
 *
 * This will also skip zeroing, which is handled externally.
 */
int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno,
                          struct inode *inode, unsigned int from,
                          unsigned int to, int new)
{
        int ret = 0;
        struct buffer_head *head, *bh, *wait[2], **wait_bh = wait;
        unsigned int block_end, block_start;
        unsigned int bsize = 1 << inode->i_blkbits;

        if (!page_has_buffers(page))
                create_empty_buffers(page, bsize, 0);

        head = page_buffers(page);
        for (bh = head, block_start = 0; bh != head || !block_start;
             bh = bh->b_this_page, block_start += bsize) {
                block_end = block_start + bsize;

                /*
                 * Ignore blocks outside of our i/o range -
                 * they may belong to unallocated clusters.
                 */
                if (block_start >= to || block_end <= from) {
                        if (PageUptodate(page))
                                set_buffer_uptodate(bh);
                        continue;
                }

                /*
                 * For an allocating write with cluster size >= page
                 * size, we always write the entire page.
                 */

                if (buffer_new(bh))
                        clear_buffer_new(bh);

                if (!buffer_mapped(bh)) {
                        map_bh(bh, inode->i_sb, *p_blkno);
                        unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
                }

                if (PageUptodate(page)) {
                        if (!buffer_uptodate(bh))
                                set_buffer_uptodate(bh);
                } else if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
                           (block_start < from || block_end > to)) {
                        ll_rw_block(READ, 1, &bh);
                        *wait_bh++ = bh;
                }

                *p_blkno = *p_blkno + 1;
        }

        /*
         * If we issued read requests - let them complete.
         */
        while (wait_bh > wait) {
                wait_on_buffer(*--wait_bh);
                if (!buffer_uptodate(*wait_bh))
                        ret = -EIO;
        }

        if (ret == 0 || !new)
                return ret;

        /*
         * If we get -EIO above, zero out any newly allocated blocks
         * to avoid exposing stale data.
         */
        bh = head;
        block_start = 0;
        do {
                void *kaddr;

                block_end = block_start + bsize;
                if (block_end <= from)
                        goto next_bh;
                if (block_start >= to)
                        break;

                kaddr = kmap_atomic(page, KM_USER0);
                memset(kaddr + block_start, 0, bh->b_size);
                flush_dcache_page(page);
                kunmap_atomic(kaddr, KM_USER0);
                set_buffer_uptodate(bh);
                mark_buffer_dirty(bh);

next_bh:
                block_start = block_end;
                bh = bh->b_this_page;
        } while (bh != head);

        return ret;
}

/*
 * This will copy user data from the buffer page in the splice
 * context.
 *
 * For now, we ignore SPLICE_F_MOVE as that would require some extra
 * communication out all the way to ocfs2_write().
 */
int ocfs2_map_and_write_splice_data(struct inode *inode,
                                    struct ocfs2_write_ctxt *wc, u64 *p_blkno,
                                    unsigned int *ret_from, unsigned int *ret_to)
{
        int ret;
        unsigned int to, from, cluster_start, cluster_end;
        char *src, *dst;
        struct ocfs2_splice_write_priv *sp = wc->w_private;
        struct pipe_buffer *buf = sp->s_buf;
        unsigned long bytes, src_from;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

        ocfs2_figure_cluster_boundaries(osb, wc->w_cpos, &cluster_start,
                                        &cluster_end);

        from = sp->s_offset;
        src_from = sp->s_buf_offset;
        bytes = wc->w_count;

        if (wc->w_large_pages) {
                /*
                 * For cluster size < page size, we have to
                 * calculate pos within the cluster and obey
                 * the rightmost boundary.
                 */
                bytes = min(bytes, (unsigned long)(osb->s_clustersize
                                   - (wc->w_pos & (osb->s_clustersize - 1))));
        }
        to = from + bytes;

        if (wc->w_this_page_new)
                ret = ocfs2_map_page_blocks(wc->w_this_page, p_blkno, inode,
                                            cluster_start, cluster_end, 1);
        else
                ret = ocfs2_map_page_blocks(wc->w_this_page, p_blkno, inode,
                                            from, to, 0);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }

        BUG_ON(from > PAGE_CACHE_SIZE);
        BUG_ON(to > PAGE_CACHE_SIZE);
        BUG_ON(from > osb->s_clustersize);
        BUG_ON(to > osb->s_clustersize);

        src = buf->ops->map(sp->s_pipe, buf, 1);
        dst = kmap_atomic(wc->w_this_page, KM_USER1);
        memcpy(dst + from, src + src_from, bytes);
        /* kunmap_atomic() takes the mapped address, not the page */
        kunmap_atomic(dst, KM_USER1);
        buf->ops->unmap(sp->s_pipe, buf, src);

        wc->w_finished_copy = 1;

        *ret_from = from;
        *ret_to = to;
out:

        return bytes ? (unsigned int)bytes : ret;
}

/*
 * This will copy user data from the iovec in the buffered write
 * context.
 */
int ocfs2_map_and_write_user_data(struct inode *inode,
                                  struct ocfs2_write_ctxt *wc, u64 *p_blkno,
                                  unsigned int *ret_from, unsigned int *ret_to)
{
        int ret;
        unsigned int to, from, cluster_start, cluster_end;
        unsigned long bytes, src_from;
        char *dst;
        struct ocfs2_buffered_write_priv *bp = wc->w_private;
        const struct iovec *cur_iov = bp->b_cur_iov;
        char __user *buf;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

        ocfs2_figure_cluster_boundaries(osb, wc->w_cpos, &cluster_start,
                                        &cluster_end);

        buf = cur_iov->iov_base + bp->b_cur_off;
        src_from = (unsigned long)buf & ~PAGE_CACHE_MASK;

        from = wc->w_pos & (PAGE_CACHE_SIZE - 1);

        /*
         * This is a lot of comparisons, but it reads quite
         * easily, which is important here.
         */
        /* Stay within the src page */
        bytes = PAGE_SIZE - src_from;
        /* Stay within the vector */
        bytes = min(bytes,
                    (unsigned long)(cur_iov->iov_len - bp->b_cur_off));
        /* Stay within count */
        bytes = min(bytes, (unsigned long)wc->w_count);
        /*
         * For clustersize > page size, just stay within
         * target page, otherwise we have to calculate pos
         * within the cluster and obey the rightmost
         * boundary.
         */
        if (wc->w_large_pages) {
                /*
                 * For cluster size < page size, we have to
                 * calculate pos within the cluster and obey
                 * the rightmost boundary.
                 */
                bytes = min(bytes, (unsigned long)(osb->s_clustersize
                                   - (wc->w_pos & (osb->s_clustersize - 1))));
        } else {
                /*
                 * cluster size > page size is the most common
                 * case - we just stay within the target page
                 * boundary.
                 */
                bytes = min(bytes, PAGE_CACHE_SIZE - from);
        }

        to = from + bytes;

        if (wc->w_this_page_new)
                ret = ocfs2_map_page_blocks(wc->w_this_page, p_blkno, inode,
                                            cluster_start, cluster_end, 1);
        else
                ret = ocfs2_map_page_blocks(wc->w_this_page, p_blkno, inode,
                                            from, to, 0);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }

        BUG_ON(from > PAGE_CACHE_SIZE);
        BUG_ON(to > PAGE_CACHE_SIZE);
        BUG_ON(from > osb->s_clustersize);
        BUG_ON(to > osb->s_clustersize);

        dst = kmap(wc->w_this_page);
        memcpy(dst + from, bp->b_src_buf + src_from, bytes);
        kunmap(wc->w_this_page);

        /*
         * XXX: This is slow, but simple.  The caller of
         * ocfs2_buffered_write_cluster() is responsible for
         * passing through the iovecs, so it's difficult to
         * predict what our next step is in here after our
         * initial write.  A future version should be pushing
         * that iovec manipulation further down.
         *
         * By setting this, we indicate that a copy from user
         * data was done, and subsequent calls for this
         * cluster will skip copying more data.
         */
        wc->w_finished_copy = 1;

        *ret_from = from;
        *ret_to = to;
out:

        return bytes ? (unsigned int)bytes : ret;
}

/*
 * Map, fill and write a page to disk.
 *
 * The work of copying data is done via callback.  Newly allocated
 * pages which don't take user data will be zero'd (set 'new' to
 * indicate an allocating write)
 *
 * Returns a negative error code or the number of bytes copied into
 * the page.
 */
int ocfs2_write_data_page(struct inode *inode, handle_t *handle,
                          u64 *p_blkno, struct page *page,
                          struct ocfs2_write_ctxt *wc, int new)
{
        int ret, copied = 0;
        unsigned int from = 0, to = 0;
        unsigned int cluster_start, cluster_end;
        unsigned int zero_from = 0, zero_to = 0;

        ocfs2_figure_cluster_boundaries(OCFS2_SB(inode->i_sb), wc->w_cpos,
                                        &cluster_start, &cluster_end);

        if ((wc->w_pos >> PAGE_CACHE_SHIFT) == page->index
            && !wc->w_finished_copy) {

                wc->w_this_page = page;
                wc->w_this_page_new = new;
                ret = wc->w_write_data_page(inode, wc, p_blkno, &from, &to);
                if (ret < 0) {
                        mlog_errno(ret);
                        goto out;
                }

                copied = ret;

                zero_from = from;
                zero_to = to;
                if (new) {
                        from = cluster_start;
                        to = cluster_end;
                }
        } else {
                /*
                 * If we haven't allocated the new page yet, we
                 * shouldn't be writing it out without copying user
                 * data.  This is likely a math error from the caller.
                 */
                BUG_ON(!new);

                from = cluster_start;
                to = cluster_end;

                ret = ocfs2_map_page_blocks(page, p_blkno, inode,
                                            cluster_start, cluster_end, 1);
                if (ret) {
                        mlog_errno(ret);
                        goto out;
                }
        }

        /*
         * Parts of newly allocated pages need to be zero'd.
         *
         * Above, we have also rewritten 'to' and 'from' - as far as
         * the rest of the function is concerned, the entire cluster
         * range inside of a page needs to be written.
         *
         * We can skip this if the page is up to date - it's already
         * been zero'd from being read in as a hole.
         */
        if (new && !PageUptodate(page))
                ocfs2_clear_page_regions(page, OCFS2_SB(inode->i_sb),
                                         wc->w_cpos, zero_from, zero_to);

        flush_dcache_page(page);

        if (ocfs2_should_order_data(inode)) {
                ret = walk_page_buffers(handle,
                                        page_buffers(page),
                                        from, to, NULL,
                                        ocfs2_journal_dirty_data);
                if (ret < 0)
                        mlog_errno(ret);
        }

        /*
         * We don't use generic_commit_write() because we need to
         * handle our own i_size update.
         */
        ret = block_commit_write(page, from, to);
        if (ret)
                mlog_errno(ret);
out:

        return copied ? copied : ret;
}

/*
 * Do the actual write of some data into an inode.  Optionally allocate
 * in order to fulfill the write.
 *
 * cpos is the logical cluster offset within the file to write at
 *
 * 'phys' is the physical mapping of that offset.  a 'phys' value of
 * zero indicates that allocation is required.  In this case, data_ac
 * and meta_ac should be valid (meta_ac can be null if metadata
 * allocation isn't required).
 */
static ssize_t ocfs2_write(struct file *file, u32 phys, handle_t *handle,
                           struct buffer_head *di_bh,
                           struct ocfs2_alloc_context *data_ac,
                           struct ocfs2_alloc_context *meta_ac,
                           struct ocfs2_write_ctxt *wc)
{
        int ret, i, numpages = 1, new;
        unsigned int copied = 0;
        u32 tmp_pos;
        u64 v_blkno, p_blkno;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        unsigned long index, start;
        struct page **cpages;

        new = phys == 0 ? 1 : 0;

        /*
         * Figure out how many pages we'll be manipulating here.  For
         * a non-allocating write, we just change the one
         * page.  Otherwise, we'll need a whole cluster's worth.
         */
        if (new)
                numpages = ocfs2_pages_per_cluster(inode->i_sb);

        cpages = kzalloc(sizeof(*cpages) * numpages, GFP_NOFS);
        if (!cpages) {
                ret = -ENOMEM;
                mlog_errno(ret);
                return ret;
        }

        /*
         * Fill our page array first.  That way we've grabbed enough so
         * that we can zero and flush if we error after adding the
         * extent.
         */
        if (new) {
                start = ocfs2_align_clusters_to_page_index(inode->i_sb,
                                                           wc->w_cpos);
                v_blkno = ocfs2_clusters_to_blocks(inode->i_sb, wc->w_cpos);
        } else {
                start = wc->w_pos >> PAGE_CACHE_SHIFT;
                v_blkno = wc->w_pos >> inode->i_sb->s_blocksize_bits;
        }

        for(i = 0; i < numpages; i++) {
                index = start + i;

                cpages[i] = grab_cache_page(mapping, index);
                if (!cpages[i]) {
                        ret = -ENOMEM;
                        mlog_errno(ret);
                        goto out;
                }
        }

        if (new) {
                /*
                 * This is safe to call with the page locks - it won't take
                 * any additional semaphores or cluster locks.
                 */
                tmp_pos = wc->w_cpos;
                ret = ocfs2_do_extend_allocation(OCFS2_SB(inode->i_sb), inode,
                                                 &tmp_pos, 1, di_bh, handle,
                                                 data_ac, meta_ac, NULL);
                /*
                 * This shouldn't happen because we must have already
                 * calculated the correct meta data allocation required.  The
                 * internal tree allocation code should know how to increase
                 * transaction credits itself.
                 *
                 * If need be, we could handle -EAGAIN for a
                 * RESTART_TRANS here.
                 */
                mlog_bug_on_msg(ret == -EAGAIN,
                                "Inode %llu: EAGAIN return during allocation.\n",
                                (unsigned long long)OCFS2_I(inode)->ip_blkno);
                if (ret < 0) {
                        mlog_errno(ret);
                        goto out;
                }
        }

        ret = ocfs2_extent_map_get_blocks(inode, v_blkno, &p_blkno, NULL,
                                          NULL);
        if (ret < 0) {

                /*
                 * XXX: Should we go readonly here?
                 */

                mlog_errno(ret);
                goto out;
        }

        BUG_ON(p_blkno == 0);

        for(i = 0; i < numpages; i++) {
                ret = ocfs2_write_data_page(inode, handle, &p_blkno, cpages[i],
                                            wc, new);
                if (ret < 0) {
                        mlog_errno(ret);
                        goto out;
                }

                copied += ret;
        }

out:
        for(i = 0; i < numpages; i++) {
                /* grab_cache_page() may have failed partway through */
                if (!cpages[i])
                        break;
                unlock_page(cpages[i]);
                mark_page_accessed(cpages[i]);
                page_cache_release(cpages[i]);
        }
        kfree(cpages);

        return copied ? copied : ret;
}

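/*
 * Prime a write context for a single cluster's worth of work.  w_cpos
 * is just the byte position shifted down to a cluster index; with a
 * hypothetical 64k cluster size, pos = 200000 yields
 * w_cpos = 200000 >> 16 = 3.  w_large_pages flags the uncommon case
 * where one page spans several clusters.
 */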
static void ocfs2_write_ctxt_init(struct ocfs2_write_ctxt *wc,
                                  struct ocfs2_super *osb, loff_t pos,
                                  size_t count, ocfs2_page_writer *cb,
                                  void *cb_priv)
{
        wc->w_count = count;
        wc->w_pos = pos;
        wc->w_cpos = wc->w_pos >> osb->s_clustersize_bits;
        wc->w_finished_copy = 0;

        if (unlikely(PAGE_CACHE_SHIFT > osb->s_clustersize_bits))
                wc->w_large_pages = 1;
        else
                wc->w_large_pages = 0;

        wc->w_write_data_page = cb;
        wc->w_private = cb_priv;
}

/*
 * Write a cluster to an inode.  The cluster may not be allocated yet,
 * in which case it will be.  This only exists for buffered writes -
 * O_DIRECT takes a more "traditional" path through the kernel.
 *
 * The caller is responsible for incrementing pos, written counts, etc
 *
 * For file systems that don't support sparse files, pre-allocation
 * and page zeroing up until cpos should be done prior to this
 * function call.
 *
 * Callers should be holding i_mutex, and the rw cluster lock.
 *
 * Returns the number of user bytes written, or less than zero for
 * error.
 */
ssize_t ocfs2_buffered_write_cluster(struct file *file, loff_t pos,
                                     size_t count, ocfs2_page_writer *actor,
                                     void *priv)
{
        int ret, credits = OCFS2_INODE_UPDATE_CREDITS;
        ssize_t written = 0;
        u32 phys;
        struct inode *inode = file->f_mapping->host;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        struct buffer_head *di_bh = NULL;
        struct ocfs2_dinode *di;
        struct ocfs2_alloc_context *data_ac = NULL;
        struct ocfs2_alloc_context *meta_ac = NULL;
        handle_t *handle;
        struct ocfs2_write_ctxt wc;

        ocfs2_write_ctxt_init(&wc, osb, pos, count, actor, priv);

        ret = ocfs2_meta_lock(inode, &di_bh, 1);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }
        di = (struct ocfs2_dinode *)di_bh->b_data;

        /*
         * Take alloc sem here to prevent concurrent lookups.  That way
         * the mapping, zeroing and tree manipulation within
         * ocfs2_write() will be safe against ->readpage().  This
         * should also serve to lock out allocation from a shared
         * writeable region.
         */
        down_write(&OCFS2_I(inode)->ip_alloc_sem);

        ret = ocfs2_get_clusters(inode, wc.w_cpos, &phys, NULL, NULL);
        if (ret) {
                mlog_errno(ret);
                goto out_meta;
        }

        /* phys == 0 means that allocation is required. */
        if (phys == 0) {
                ret = ocfs2_lock_allocators(inode, di, 1, &data_ac, &meta_ac);
                if (ret) {
                        mlog_errno(ret);
                        goto out_meta;
                }

                credits = ocfs2_calc_extend_credits(inode->i_sb, di, 1);
        }

        ret = ocfs2_data_lock(inode, 1);
        if (ret) {
                mlog_errno(ret);
                goto out_meta;
        }

        handle = ocfs2_start_trans(osb, credits);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                mlog_errno(ret);
                goto out_data;
        }

        written = ocfs2_write(file, phys, handle, di_bh, data_ac,
                              meta_ac, &wc);
        if (written < 0) {
                ret = written;
                mlog_errno(ret);
                goto out_commit;
        }

        ret = ocfs2_journal_access(handle, inode, di_bh,
                                   OCFS2_JOURNAL_ACCESS_WRITE);
        if (ret) {
                mlog_errno(ret);
                goto out_commit;
        }

        pos += written;
        if (pos > inode->i_size) {
                i_size_write(inode, pos);
                mark_inode_dirty(inode);
        }
        inode->i_blocks = ocfs2_align_bytes_to_sectors((u64)(i_size_read(inode)));
        di->i_size = cpu_to_le64((u64)i_size_read(inode));
        inode->i_mtime = inode->i_ctime = CURRENT_TIME;
        di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec);
        di->i_mtime_nsec = di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);

        ret = ocfs2_journal_dirty(handle, di_bh);
        if (ret)
                mlog_errno(ret);

out_commit:
        ocfs2_commit_trans(osb, handle);

out_data:
        ocfs2_data_unlock(inode, 1);

out_meta:
        up_write(&OCFS2_I(inode)->ip_alloc_sem);
        ocfs2_meta_unlock(inode, 1);

out:
        brelse(di_bh);
        if (data_ac)
                ocfs2_free_alloc_context(data_ac);
        if (meta_ac)
                ocfs2_free_alloc_context(meta_ac);

        return written ? written : ret;
}

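/*
 * Note that there is no ->prepare_write()/->commit_write() pair here:
 * buffered writes are driven through ocfs2_buffered_write_cluster()
 * above rather than the generic file write path.
 */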
const struct address_space_operations ocfs2_aops = {
        .readpage       = ocfs2_readpage,
        .writepage      = ocfs2_writepage,
        .bmap           = ocfs2_bmap,
        .sync_page      = block_sync_page,
        .direct_IO      = ocfs2_direct_IO,
        .invalidatepage = ocfs2_invalidatepage,
        .releasepage    = ocfs2_releasepage,
        .migratepage    = buffer_migrate_page,
};