/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * Copyright (C) 2002, 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <asm/byteorder.h>
#include <linux/swap.h>
#include <linux/pipe_fs_i.h>
#include <linux/mpage.h>
#include <linux/quotaops.h>

#define MLOG_MASK_PREFIX ML_FILE_IO
#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "aops.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "file.h"
#include "inode.h"
#include "journal.h"
#include "suballoc.h"
#include "super.h"
#include "symlink.h"
#include "refcounttree.h"

#include "buffer_head_io.h"

static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock,
				   struct buffer_head *bh_result, int create)
{
	int err = -EIO;
	int status;
	struct ocfs2_dinode *fe = NULL;
	struct buffer_head *bh = NULL;
	struct buffer_head *buffer_cache_bh = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	void *kaddr;

	mlog_entry("(0x%p, %llu, 0x%p, %d)\n", inode,
		   (unsigned long long)iblock, bh_result, create);

	BUG_ON(ocfs2_inode_is_fast_symlink(inode));

	if ((iblock << inode->i_sb->s_blocksize_bits) > PATH_MAX + 1) {
		mlog(ML_ERROR, "block offset > PATH_MAX: %llu",
		     (unsigned long long)iblock);
		goto bail;
	}

	status = ocfs2_read_inode_block(inode, &bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}
	fe = (struct ocfs2_dinode *) bh->b_data;

	if ((u64)iblock >= ocfs2_clusters_to_blocks(inode->i_sb,
						    le32_to_cpu(fe->i_clusters))) {
		mlog(ML_ERROR, "block offset is outside the allocated size: "
		     "%llu\n", (unsigned long long)iblock);
		goto bail;
	}

	/* We don't use the page cache to create symlink data, so if
	 * need be, copy it over from the buffer cache. */
	if (!buffer_uptodate(bh_result) && ocfs2_inode_is_new(inode)) {
		u64 blkno = le64_to_cpu(fe->id2.i_list.l_recs[0].e_blkno) +
			    iblock;
		buffer_cache_bh = sb_getblk(osb->sb, blkno);
		if (!buffer_cache_bh) {
			mlog(ML_ERROR, "couldn't getblock for symlink!\n");
			goto bail;
		}

		/* we haven't locked out transactions, so a commit
		 * could've happened. Since we've got a reference on
		 * the bh, even if it commits while we're doing the
		 * copy, the data is still good. */
		if (buffer_jbd(buffer_cache_bh)
		    && ocfs2_inode_is_new(inode)) {
			kaddr = kmap_atomic(bh_result->b_page, KM_USER0);
			if (!kaddr) {
				mlog(ML_ERROR, "couldn't kmap!\n");
				goto bail;
			}
			memcpy(kaddr + (bh_result->b_size * iblock),
			       buffer_cache_bh->b_data,
			       bh_result->b_size);
			kunmap_atomic(kaddr, KM_USER0);
			set_buffer_uptodate(bh_result);
		}
		brelse(buffer_cache_bh);
	}

	map_bh(bh_result, inode->i_sb,
	       le64_to_cpu(fe->id2.i_list.l_recs[0].e_blkno) + iblock);

	err = 0;

bail:
	brelse(bh);

	mlog_exit(err);
	return err;
}

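/*
 * Summary (derived from the body below): ocfs2_get_block() maps the
 * logical block 'iblock' of 'inode' into bh_result, covering as many
 * contiguous blocks as fit in bh_result->b_size. It never allocates -
 * 'create' only matters for marking buffers new past EOF - and holes
 * and unwritten extents are reported by leaving the buffer unmapped.
 */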
int ocfs2_get_block(struct inode *inode, sector_t iblock,
		    struct buffer_head *bh_result, int create)
{
	int err = 0;
	unsigned int ext_flags;
	u64 max_blocks = bh_result->b_size >> inode->i_blkbits;
	u64 p_blkno, count, past_eof;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	mlog_entry("(0x%p, %llu, 0x%p, %d)\n", inode,
		   (unsigned long long)iblock, bh_result, create);

	if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE)
		mlog(ML_NOTICE, "get_block on system inode 0x%p (%lu)\n",
		     inode, inode->i_ino);

	if (S_ISLNK(inode->i_mode)) {
		/* this always does I/O for some reason. */
		err = ocfs2_symlink_get_block(inode, iblock, bh_result, create);
		goto bail;
	}

	err = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno, &count,
					  &ext_flags);
	if (err) {
		mlog(ML_ERROR, "Error %d from get_blocks(0x%p, %llu, 1, "
		     "%llu, NULL)\n", err, inode, (unsigned long long)iblock,
		     (unsigned long long)p_blkno);
		goto bail;
	}

	if (max_blocks < count)
		count = max_blocks;

	/*
	 * ocfs2 never allocates in this function - the only time we
	 * need to use BH_New is when we're extending i_size on a file
	 * system which doesn't support holes, in which case BH_New
	 * allows __block_write_begin() to zero.
	 *
	 * If we see this on a sparse file system, then a truncate has
	 * raced us and removed the cluster. In this case, we clear
	 * the buffer's dirty and uptodate bits and let the buffer code
	 * ignore it as a hole.
	 */
	if (create && p_blkno == 0 && ocfs2_sparse_alloc(osb)) {
		clear_buffer_dirty(bh_result);
		clear_buffer_uptodate(bh_result);
		goto bail;
	}

	/* Treat the unwritten extent as a hole for zeroing purposes. */
	if (p_blkno && !(ext_flags & OCFS2_EXT_UNWRITTEN))
		map_bh(bh_result, inode->i_sb, p_blkno);

	bh_result->b_size = count << inode->i_blkbits;

	if (!ocfs2_sparse_alloc(osb)) {
		if (p_blkno == 0) {
			err = -EIO;
			mlog(ML_ERROR,
			     "iblock = %llu p_blkno = %llu blkno=(%llu)\n",
			     (unsigned long long)iblock,
			     (unsigned long long)p_blkno,
			     (unsigned long long)OCFS2_I(inode)->ip_blkno);
			mlog(ML_ERROR, "Size %llu, clusters %u\n",
			     (unsigned long long)i_size_read(inode),
			     OCFS2_I(inode)->ip_clusters);
			dump_stack();
			goto bail;
		}
	}

	past_eof = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));
	mlog(0, "Inode %lu, past_eof = %llu\n", inode->i_ino,
	     (unsigned long long)past_eof);
	if (create && (iblock >= past_eof))
		set_buffer_new(bh_result);

bail:
	if (err < 0)
		err = -EIO;

	mlog_exit(err);
	return err;
}

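/*
 * Copy an inline-data inode's file data out of the dinode block and
 * into the given page, zeroing the remainder of the page. The caller
 * is expected to hold whatever locks make di_bh and i_size stable.
 */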
int ocfs2_read_inline_data(struct inode *inode, struct page *page,
			   struct buffer_head *di_bh)
{
	void *kaddr;
	loff_t size;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;

	if (!(le16_to_cpu(di->i_dyn_features) & OCFS2_INLINE_DATA_FL)) {
		ocfs2_error(inode->i_sb, "Inode %llu lost inline data flag",
			    (unsigned long long)OCFS2_I(inode)->ip_blkno);
		return -EROFS;
	}

	size = i_size_read(inode);

	if (size > PAGE_CACHE_SIZE ||
	    size > ocfs2_max_inline_data_with_xattr(inode->i_sb, di)) {
		ocfs2_error(inode->i_sb,
			    "Inode %llu with inline data has bad size: %Lu",
			    (unsigned long long)OCFS2_I(inode)->ip_blkno,
			    (unsigned long long)size);
		return -EROFS;
	}

	kaddr = kmap_atomic(page, KM_USER0);
	if (size)
		memcpy(kaddr, di->id2.i_data.id_data, size);
	/* Clear the remaining part of the page */
	memset(kaddr + size, 0, PAGE_CACHE_SIZE - size);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);

	SetPageUptodate(page);

	return 0;
}

static int ocfs2_readpage_inline(struct inode *inode, struct page *page)
{
	int ret;
	struct buffer_head *di_bh = NULL;

	BUG_ON(!PageLocked(page));
	BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL));

	ret = ocfs2_read_inode_block(inode, &di_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_read_inline_data(inode, page, di_bh);
out:
	unlock_page(page);

	brelse(di_bh);
	return ret;
}

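/*
 * ->readpage runs with the page already locked, so taking cluster
 * locks here risks an inversion against nodes that lock the inode
 * first and pages second. ocfs2_inode_lock_with_page() and the
 * down_read_trylock() below resolve that by bailing out with
 * AOP_TRUNCATED_PAGE (with the page unlocked), which makes the caller
 * retry the whole readpage.
 */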
static int ocfs2_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	loff_t start = (loff_t)page->index << PAGE_CACHE_SHIFT;
	int ret, unlock = 1;

	mlog_entry("(0x%p, %lu)\n", file, (page ? page->index : 0));

	ret = ocfs2_inode_lock_with_page(inode, NULL, 0, page);
	if (ret != 0) {
		if (ret == AOP_TRUNCATED_PAGE)
			unlock = 0;
		mlog_errno(ret);
		goto out;
	}

	if (down_read_trylock(&oi->ip_alloc_sem) == 0) {
		ret = AOP_TRUNCATED_PAGE;
		goto out_inode_unlock;
	}

	/*
	 * i_size might have just been updated as we grabbed the meta lock. We
	 * might now be discovering a truncate that hit on another node.
	 * block_read_full_page->get_block freaks out if it is asked to read
	 * beyond the end of a file, so we check here. Callers
	 * (generic_file_read, vm_ops->fault) are clever enough to check i_size
	 * and notice that the page they just read isn't needed.
	 *
	 * XXX sys_readahead() seems to get that wrong?
	 */
	if (start >= i_size_read(inode)) {
		zero_user(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		ret = 0;
		goto out_alloc;
	}

	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
		ret = ocfs2_readpage_inline(inode, page);
	else
		ret = block_read_full_page(page, ocfs2_get_block);
	unlock = 0;

out_alloc:
	up_read(&OCFS2_I(inode)->ip_alloc_sem);
out_inode_unlock:
	ocfs2_inode_unlock(inode, 0);
out:
	if (unlock)
		unlock_page(page);
	mlog_exit(ret);
	return ret;
}

/*
 * This is used only for read-ahead. Failures or difficult to handle
 * situations are safe to ignore.
 *
 * Right now, we don't bother with BH_Boundary - in-inode extent lists
 * are quite large (243 extents on 4k blocks), so most inodes don't
 * grow out to a tree. If need be, detecting boundary extents could
 * trivially be added in a future version of ocfs2_get_block().
 */
static int ocfs2_readpages(struct file *filp, struct address_space *mapping,
			   struct list_head *pages, unsigned nr_pages)
{
	int ret, err = -EIO;
	struct inode *inode = mapping->host;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	loff_t start;
	struct page *last;

	/*
	 * Use the nonblocking flag for the dlm code to avoid page
	 * lock inversion, but don't bother with retrying.
	 */
	ret = ocfs2_inode_lock_full(inode, NULL, 0, OCFS2_LOCK_NONBLOCK);
	if (ret)
		return err;

	if (down_read_trylock(&oi->ip_alloc_sem) == 0) {
		ocfs2_inode_unlock(inode, 0);
		return err;
	}

	/*
	 * Don't bother with inline-data. There isn't anything
	 * to read-ahead in that case anyway...
	 */
	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
		goto out_unlock;

	/*
	 * Check whether a remote node truncated this file - we just
	 * drop out in that case as it's not worth handling here.
	 */
	last = list_entry(pages->prev, struct page, lru);
	start = (loff_t)last->index << PAGE_CACHE_SHIFT;
	if (start >= i_size_read(inode))
		goto out_unlock;

	err = mpage_readpages(mapping, pages, nr_pages, ocfs2_get_block);

out_unlock:
	up_read(&oi->ip_alloc_sem);
	ocfs2_inode_unlock(inode, 0);

	return err;
}

/* Note: Because we don't support holes, our allocation has
 * already happened (allocation writes zeros to the file data)
 * so we don't have to worry about ordered writes in
 * ocfs2_writepage.
 *
 * ->writepage is called during the process of invalidating the page cache
 * during blocked lock processing. It can't block on any cluster locks
 * during block mapping. It's relying on the fact that the block
 * mapping can't have disappeared under the dirty pages that it is
 * being asked to write back.
 */
static int ocfs2_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	mlog_entry("(0x%p)\n", page);

	ret = block_write_full_page(page, ocfs2_get_block, wbc);

	mlog_exit(ret);

	return ret;
}

/* Taken from ext3. We don't necessarily need the full blown
 * functionality yet, but IMHO it's better to cut and paste the whole
 * thing so we can avoid introducing our own bugs (and easily pick up
 * their fixes when they happen) --Mark */
int walk_page_buffers(	handle_t *handle,
			struct buffer_head *head,
			unsigned from,
			unsigned to,
			int *partial,
			int (*fn)(	handle_t *handle,
					struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	for (	bh = head, block_start = 0;
		ret == 0 && (bh != head || !block_start);
		block_start = block_end, bh = next)
	{
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}

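/*
 * bmap reports a zero block for anything it cannot map: holes,
 * inline-data inodes and lookup errors all come back as 0, which is
 * the conventional "no mapping" answer for this interface.
 */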
static sector_t ocfs2_bmap(struct address_space *mapping, sector_t block)
{
	sector_t status;
	u64 p_blkno = 0;
	int err = 0;
	struct inode *inode = mapping->host;

	mlog_entry("(block = %llu)\n", (unsigned long long)block);

	/* We don't need to lock journal system files, since they aren't
	 * accessed concurrently from multiple nodes.
	 */
	if (!INODE_JOURNAL(inode)) {
		err = ocfs2_inode_lock(inode, NULL, 0);
		if (err) {
			if (err != -ENOENT)
				mlog_errno(err);
			goto bail;
		}
		down_read(&OCFS2_I(inode)->ip_alloc_sem);
	}

	if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL))
		err = ocfs2_extent_map_get_blocks(inode, block, &p_blkno, NULL,
						  NULL);

	if (!INODE_JOURNAL(inode)) {
		up_read(&OCFS2_I(inode)->ip_alloc_sem);
		ocfs2_inode_unlock(inode, 0);
	}

	if (err) {
		mlog(ML_ERROR, "get_blocks() failed, block = %llu\n",
		     (unsigned long long)block);
		mlog_errno(err);
		goto bail;
	}

bail:
	status = err ? 0 : p_blkno;

	mlog_exit((int)status);

	return status;
}

/*
 * TODO: Make this into a generic get_blocks function.
 *
 * From do_direct_io in direct-io.c:
 *  "So what we do is to permit the ->get_blocks function to populate
 *   bh.b_size with the size of IO which is permitted at this offset and
 *   this i_blkbits."
 *
 * This function is called directly from get_more_blocks in direct-io.c.
 *
 * called like this: dio->get_blocks(dio->inode, fs_startblk,
 * 					fs_count, map_bh, dio->rw == WRITE);
 *
 * Note that we never bother to allocate blocks here, and thus ignore the
 * create argument.
 */
static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock,
				      struct buffer_head *bh_result, int create)
{
	int ret;
	u64 p_blkno, inode_blocks, contig_blocks;
	unsigned int ext_flags;
	unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
	unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits;

	/* This function won't even be called if the request isn't all
	 * nicely aligned and of the right size, so there's no need
	 * for us to check any of that. */

	inode_blocks = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));

	/* This figures out the size of the next contiguous block, and
	 * our logical offset */
	ret = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno,
					  &contig_blocks, &ext_flags);
	if (ret) {
		mlog(ML_ERROR, "get_blocks() failed iblock=%llu\n",
		     (unsigned long long)iblock);
		ret = -EIO;
		goto bail;
	}

	/* We should have already CoWed the refcounted extent in case of create. */
	BUG_ON(create && (ext_flags & OCFS2_EXT_REFCOUNTED));

	/*
	 * get_more_blocks() expects us to describe a hole by clearing
	 * the mapped bit on bh_result().
	 *
	 * Consider an unwritten extent as a hole.
	 */
	if (p_blkno && !(ext_flags & OCFS2_EXT_UNWRITTEN))
		map_bh(bh_result, inode->i_sb, p_blkno);
	else
		clear_buffer_mapped(bh_result);

	/* make sure we don't map more than max_blocks blocks here as
	   that's all the kernel will handle at this point. */
	if (max_blocks < contig_blocks)
		contig_blocks = max_blocks;
	bh_result->b_size = contig_blocks << blocksize_bits;
bail:
	return ret;
}

/*
 * ocfs2_dio_end_io is called by the dio core when a dio is finished. We're
 * particularly interested in the aio/dio case. Like the core uses
 * i_alloc_sem, we use the rw_lock DLM lock to protect io on one node from
 * truncation on another.
 */
static void ocfs2_dio_end_io(struct kiocb *iocb,
			     loff_t offset,
			     ssize_t bytes,
			     void *private,
			     int ret,
			     bool is_async)
{
	struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
	int level;

	/* this io's submitter should not have unlocked this before we could */
	BUG_ON(!ocfs2_iocb_is_rw_locked(iocb));

	ocfs2_iocb_clear_rw_locked(iocb);

	level = ocfs2_iocb_rw_locked_level(iocb);
	if (!level)
		up_read(&inode->i_alloc_sem);
	ocfs2_rw_unlock(inode, level);

	if (is_async)
		aio_complete(iocb, ret, 0);
}

/*
 * ocfs2_invalidatepage() and ocfs2_releasepage() are shamelessly stolen
 * from ext3.  PageChecked() bits have been removed as OCFS2 does not
 * do journalled data.
 */
static void ocfs2_invalidatepage(struct page *page, unsigned long offset)
{
	journal_t *journal = OCFS2_SB(page->mapping->host->i_sb)->journal->j_journal;

	jbd2_journal_invalidatepage(journal, page, offset);
}

static int ocfs2_releasepage(struct page *page, gfp_t wait)
{
	journal_t *journal = OCFS2_SB(page->mapping->host->i_sb)->journal->j_journal;

	if (!page_has_buffers(page))
		return 0;
	return jbd2_journal_try_to_free_buffers(journal, page, wait);
}

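/*
 * Returning 0 from ->direct_IO without consuming any bytes is how the
 * two early-out cases below ask the caller's write path to fall back
 * to buffered I/O: inline-data inodes have no extents to map, and
 * appending writes would need allocation, which the direct I/O
 * get_blocks callback above never performs.
 */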
static ssize_t ocfs2_direct_IO(int rw,
			       struct kiocb *iocb,
			       const struct iovec *iov,
			       loff_t offset,
			       unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_path.dentry->d_inode->i_mapping->host;
	int ret;

	mlog_entry_void();

	/*
	 * Fall back to buffered I/O if we see an inode without
	 * extents.
	 */
	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
		return 0;

	/* Fall back to buffered I/O if we are appending. */
	if (i_size_read(inode) <= offset)
		return 0;

	ret = __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev,
				   iov, offset, nr_segs,
				   ocfs2_direct_IO_get_blocks,
				   ocfs2_dio_end_io, NULL, 0);

	mlog_exit(ret);
	return ret;
}

static void ocfs2_figure_cluster_boundaries(struct ocfs2_super *osb,
					    u32 cpos,
					    unsigned int *start,
					    unsigned int *end)
{
	unsigned int cluster_start = 0, cluster_end = PAGE_CACHE_SIZE;

	if (unlikely(PAGE_CACHE_SHIFT > osb->s_clustersize_bits)) {
		unsigned int cpp;

		cpp = 1 << (PAGE_CACHE_SHIFT - osb->s_clustersize_bits);

		cluster_start = cpos % cpp;
		cluster_start = cluster_start << osb->s_clustersize_bits;

		cluster_end = cluster_start + osb->s_clustersize;
	}

	BUG_ON(cluster_start > PAGE_SIZE);
	BUG_ON(cluster_end > PAGE_SIZE);

	if (start)
		*start = cluster_start;
	if (end)
		*end = cluster_end;
}

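/*
 * Worked example for the function above (illustrative numbers only):
 * on a system with 64K pages and a 4K cluster size, cpp =
 * 1 << (16 - 12) = 16 clusters per page. For cpos = 37, cluster_start
 * = (37 % 16) << 12 = 20480 and cluster_end = 24576, i.e. the sixth 4K
 * window within the page. When clusters are at least page sized, the
 * whole page (0, PAGE_CACHE_SIZE) is returned instead.
 */
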
/*
 * 'from' and 'to' are the region in the page to avoid zeroing.
 *
 * If pagesize > clustersize, this function will avoid zeroing outside
 * of the cluster boundary.
 *
 * from == to == 0 is code for "zero the entire cluster region"
 */
static void ocfs2_clear_page_regions(struct page *page,
				     struct ocfs2_super *osb, u32 cpos,
				     unsigned from, unsigned to)
{
	void *kaddr;
	unsigned int cluster_start, cluster_end;

	ocfs2_figure_cluster_boundaries(osb, cpos, &cluster_start, &cluster_end);

	kaddr = kmap_atomic(page, KM_USER0);

	if (from || to) {
		if (from > cluster_start)
			memset(kaddr + cluster_start, 0, from - cluster_start);
		if (to < cluster_end)
			memset(kaddr + to, 0, cluster_end - to);
	} else {
		memset(kaddr + cluster_start, 0, cluster_end - cluster_start);
	}

	kunmap_atomic(kaddr, KM_USER0);
}

/*
 * Nonsparse file systems fully allocate before we get to the write
 * code. This prevents ocfs2_write() from tagging the write as an
 * allocating one, which means ocfs2_map_page_blocks() might try to
 * read in the blocks at the tail of our file. Avoid reading them by
 * testing i_size against each block offset.
 */
static int ocfs2_should_read_blk(struct inode *inode, struct page *page,
				 unsigned int block_start)
{
	u64 offset = page_offset(page) + block_start;

	if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
		return 1;

	if (i_size_read(inode) > offset)
		return 1;

	return 0;
}

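/*
 * For example (illustrative numbers): on a nonsparse file system with
 * i_size = 10000 and 4K blocks, the block starting at file offset 8192
 * is still read (8192 < 10000), but the block at 12288 is skipped -
 * it lies entirely past EOF, so it holds only the zeros the allocator
 * just wrote and reading it in would be wasted I/O.
 */
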
/*
 * Some of this taken from __block_write_begin(). We already have our
 * mapping by now though, and the entire write will be allocating or
 * it won't, so not much need to use BH_New.
 *
 * This will also skip zeroing, which is handled externally.
 */
int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno,
			  struct inode *inode, unsigned int from,
			  unsigned int to, int new)
{
	int ret = 0;
	struct buffer_head *head, *bh, *wait[2], **wait_bh = wait;
	unsigned int block_end, block_start;
	unsigned int bsize = 1 << inode->i_blkbits;

	if (!page_has_buffers(page))
		create_empty_buffers(page, bsize, 0);

	head = page_buffers(page);
	for (bh = head, block_start = 0; bh != head || !block_start;
	     bh = bh->b_this_page, block_start += bsize) {
		block_end = block_start + bsize;

		clear_buffer_new(bh);

		/*
		 * Ignore blocks outside of our i/o range -
		 * they may belong to unallocated clusters.
		 */
		if (block_start >= to || block_end <= from) {
			if (PageUptodate(page))
				set_buffer_uptodate(bh);
			continue;
		}

		/*
		 * For an allocating write with cluster size >= page
		 * size, we always write the entire page.
		 */
		if (new)
			set_buffer_new(bh);

		if (!buffer_mapped(bh)) {
			map_bh(bh, inode->i_sb, *p_blkno);
			unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
		}

		if (PageUptodate(page)) {
			if (!buffer_uptodate(bh))
				set_buffer_uptodate(bh);
		} else if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
			   !buffer_new(bh) &&
			   ocfs2_should_read_blk(inode, page, block_start) &&
			   (block_start < from || block_end > to)) {
			ll_rw_block(READ, 1, &bh);
			*wait_bh++=bh;
		}

		*p_blkno = *p_blkno + 1;
	}

	/*
	 * If we issued read requests - let them complete.
	 */
	while(wait_bh > wait) {
		wait_on_buffer(*--wait_bh);
		if (!buffer_uptodate(*wait_bh))
			ret = -EIO;
	}

	if (ret == 0 || !new)
		return ret;

	/*
	 * If we get -EIO above, zero out any newly allocated blocks
	 * to avoid exposing stale data.
	 */
	bh = head;
	block_start = 0;
	do {
		block_end = block_start + bsize;
		if (block_end <= from)
			goto next_bh;
		if (block_start >= to)
			break;

		zero_user(page, block_start, bh->b_size);
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);

next_bh:
		block_start = block_end;
		bh = bh->b_this_page;
	} while (bh != head);

	return ret;
}

#if (PAGE_CACHE_SIZE >= OCFS2_MAX_CLUSTERSIZE)
#define OCFS2_MAX_CTXT_PAGES	1
#else
#define OCFS2_MAX_CTXT_PAGES	(OCFS2_MAX_CLUSTERSIZE / PAGE_CACHE_SIZE)
#endif

#define OCFS2_MAX_CLUSTERS_PER_PAGE	(PAGE_CACHE_SIZE / OCFS2_MIN_CLUSTERSIZE)

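/*
 * Sizing note (illustrative numbers): OCFS2 clusters range from 4K to
 * 1M. With 4K pages, OCFS2_MAX_CTXT_PAGES is 1M / 4K = 256 pages - the
 * most an allocating write to one cluster can touch - while
 * OCFS2_MAX_CLUSTERS_PER_PAGE is 4K / 4K = 1. The two extremes can't
 * both be large at once, which keeps the w_pages[] and w_desc[] arrays
 * in struct ocfs2_write_ctxt below at a manageable size.
 */
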
/*
 * Describe the state of a single cluster to be written to.
 */
struct ocfs2_write_cluster_desc {
	u32 c_cpos;
	u32 c_phys;
	/*
	 * Give this a unique field because c_phys eventually gets
	 * filled.
	 */
	unsigned c_new;
	unsigned c_unwritten;
	unsigned c_needs_zero;
};

struct ocfs2_write_ctxt {
	/* Logical cluster position / len of write */
	u32 w_cpos;
	u32 w_clen;

	/* First cluster allocated in a nonsparse extend */
	u32 w_first_new_cpos;

	struct ocfs2_write_cluster_desc w_desc[OCFS2_MAX_CLUSTERS_PER_PAGE];

	/*
	 * This is true if page_size > cluster_size.
	 *
	 * It triggers a set of special cases during write which might
	 * have to deal with allocating writes to partial pages.
	 */
	unsigned int w_large_pages;

	/*
	 * Pages involved in this write.
	 *
	 * w_target_page is the page being written to by the user.
	 *
	 * w_pages is an array of pages which always contains
	 * w_target_page, and in the case of an allocating write with
	 * page_size < cluster size, it will contain zero'd and mapped
	 * pages adjacent to w_target_page which need to be written
	 * out so that future reads from that region will get
	 * zeros.
	 */
	unsigned int w_num_pages;
	struct page *w_pages[OCFS2_MAX_CTXT_PAGES];
	struct page *w_target_page;

	/*
	 * ocfs2_write_end() uses this to know what the real range to
	 * write in the target should be.
	 */
	unsigned int w_target_from;
	unsigned int w_target_to;

	/*
	 * We could use journal_current_handle() but this is cleaner,
	 * IMHO -Mark
	 */
	handle_t *w_handle;

	struct buffer_head *w_di_bh;

	struct ocfs2_cached_dealloc_ctxt w_dealloc;
};

void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages)
{
	int i;

	for(i = 0; i < num_pages; i++) {
		if (pages[i]) {
			unlock_page(pages[i]);
			mark_page_accessed(pages[i]);
			page_cache_release(pages[i]);
		}
	}
}

static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc)
{
	ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages);

	brelse(wc->w_di_bh);
	kfree(wc);
}

static int ocfs2_alloc_write_ctxt(struct ocfs2_write_ctxt **wcp,
				  struct ocfs2_super *osb, loff_t pos,
				  unsigned len, struct buffer_head *di_bh)
{
	u32 cend;
	struct ocfs2_write_ctxt *wc;

	wc = kzalloc(sizeof(struct ocfs2_write_ctxt), GFP_NOFS);
	if (!wc)
		return -ENOMEM;

	wc->w_cpos = pos >> osb->s_clustersize_bits;
	wc->w_first_new_cpos = UINT_MAX;
	cend = (pos + len - 1) >> osb->s_clustersize_bits;
	wc->w_clen = cend - wc->w_cpos + 1;
	get_bh(di_bh);
	wc->w_di_bh = di_bh;

	if (unlikely(PAGE_CACHE_SHIFT > osb->s_clustersize_bits))
		wc->w_large_pages = 1;
	else
		wc->w_large_pages = 0;

	ocfs2_init_dealloc_ctxt(&wc->w_dealloc);

	*wcp = wc;

	return 0;
}

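/*
 * Example of the cluster math above (illustrative numbers): with 4K
 * clusters, a write of len = 5000 bytes at pos = 10000 gives
 * w_cpos = 10000 >> 12 = 2 and cend = (10000 + 5000 - 1) >> 12 = 3,
 * so w_clen = 3 - 2 + 1 = 2 clusters are covered by the write.
 */
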
/*
 * If a page has any new buffers, zero them out here, and mark them uptodate
 * and dirty so they'll be written out (in order to prevent uninitialised
 * block data from leaking). And clear the new bit.
 */
static void ocfs2_zero_new_buffers(struct page *page, unsigned from, unsigned to)
{
	unsigned int block_start, block_end;
	struct buffer_head *head, *bh;

	BUG_ON(!PageLocked(page));
	if (!page_has_buffers(page))
		return;

	bh = head = page_buffers(page);
	block_start = 0;
	do {
		block_end = block_start + bh->b_size;

		if (buffer_new(bh)) {
			if (block_end > from && block_start < to) {
				if (!PageUptodate(page)) {
					unsigned start, end;

					start = max(from, block_start);
					end = min(to, block_end);

					zero_user_segment(page, start, end);
					set_buffer_uptodate(bh);
				}

				clear_buffer_new(bh);
				mark_buffer_dirty(bh);
			}
		}

		block_start = block_end;
		bh = bh->b_this_page;
	} while (bh != head);
}

/*
 * Only called when we have a failure during allocating write to write
 * zeros to the newly allocated region.
 */
static void ocfs2_write_failure(struct inode *inode,
				struct ocfs2_write_ctxt *wc,
				loff_t user_pos, unsigned user_len)
{
	int i;
	unsigned from = user_pos & (PAGE_CACHE_SIZE - 1),
		to = user_pos + user_len;
	struct page *tmppage;

	ocfs2_zero_new_buffers(wc->w_target_page, from, to);

	for(i = 0; i < wc->w_num_pages; i++) {
		tmppage = wc->w_pages[i];

		if (page_has_buffers(tmppage)) {
			if (ocfs2_should_order_data(inode))
				ocfs2_jbd2_file_inode(wc->w_handle, inode);

			block_commit_write(tmppage, from, to);
		}
	}
}

static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno,
					struct ocfs2_write_ctxt *wc,
					struct page *page, u32 cpos,
					loff_t user_pos, unsigned user_len,
					int new)
{
	int ret;
	unsigned int map_from = 0, map_to = 0;
	unsigned int cluster_start, cluster_end;
	unsigned int user_data_from = 0, user_data_to = 0;

	ocfs2_figure_cluster_boundaries(OCFS2_SB(inode->i_sb), cpos,
					&cluster_start, &cluster_end);

	if (page == wc->w_target_page) {
		map_from = user_pos & (PAGE_CACHE_SIZE - 1);
		map_to = map_from + user_len;

		if (new)
			ret = ocfs2_map_page_blocks(page, p_blkno, inode,
						    cluster_start, cluster_end,
						    new);
		else
			ret = ocfs2_map_page_blocks(page, p_blkno, inode,
						    map_from, map_to, new);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		user_data_from = map_from;
		user_data_to = map_to;
		if (new) {
			map_from = cluster_start;
			map_to = cluster_end;
		}
	} else {
		/*
		 * If we haven't allocated the new page yet, we
		 * shouldn't be writing it out without copying user
		 * data. This is likely a math error from the caller.
		 */
		BUG_ON(!new);

		map_from = cluster_start;
		map_to = cluster_end;

		ret = ocfs2_map_page_blocks(page, p_blkno, inode,
					    cluster_start, cluster_end, new);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	/*
	 * Parts of newly allocated pages need to be zero'd.
	 *
	 * Above, we have also rewritten 'to' and 'from' - as far as
	 * the rest of the function is concerned, the entire cluster
	 * range inside of a page needs to be written.
	 *
	 * We can skip this if the page is up to date - it's already
	 * been zero'd from being read in as a hole.
	 */
	if (new && !PageUptodate(page))
		ocfs2_clear_page_regions(page, OCFS2_SB(inode->i_sb),
					 cpos, user_data_from, user_data_to);

	flush_dcache_page(page);

out:
	return ret;
}

/*
 * This function will only grab one cluster's worth of pages.
 */
static int ocfs2_grab_pages_for_write(struct address_space *mapping,
				      struct ocfs2_write_ctxt *wc,
				      u32 cpos, loff_t user_pos,
				      unsigned user_len, int new,
				      struct page *mmap_page)
{
	int ret = 0, i;
	unsigned long start, target_index, end_index, index;
	struct inode *inode = mapping->host;
	loff_t last_byte;

	target_index = user_pos >> PAGE_CACHE_SHIFT;

	/*
	 * Figure out how many pages we'll be manipulating here. For
	 * a non-allocating write, we just change the one
	 * page. Otherwise, we'll need a whole cluster's worth. If we're
	 * writing past i_size, we only need enough pages to cover the
	 * last page of the write.
	 */
	if (new) {
		wc->w_num_pages = ocfs2_pages_per_cluster(inode->i_sb);
		start = ocfs2_align_clusters_to_page_index(inode->i_sb, cpos);
		/*
		 * We need the index *past* the last page we could possibly
		 * touch. This is the page past the end of the write or
		 * i_size, whichever is greater.
		 */
		last_byte = max(user_pos + user_len, i_size_read(inode));
		BUG_ON(last_byte < 1);
		end_index = ((last_byte - 1) >> PAGE_CACHE_SHIFT) + 1;
		if ((start + wc->w_num_pages) > end_index)
			wc->w_num_pages = end_index - start;
	} else {
		wc->w_num_pages = 1;
		start = target_index;
	}

	for(i = 0; i < wc->w_num_pages; i++) {
		index = start + i;

		if (index == target_index && mmap_page) {
			/*
			 * ocfs2_pagemkwrite() is a little different
			 * and wants us to directly use the page
			 * passed in.
			 */
			lock_page(mmap_page);

			if (mmap_page->mapping != mapping) {
				unlock_page(mmap_page);
				/*
				 * Sanity check - the locking in
				 * ocfs2_pagemkwrite() should ensure
				 * that this code doesn't trigger.
				 */
				ret = -EINVAL;
				mlog_errno(ret);
				goto out;
			}

			page_cache_get(mmap_page);
			wc->w_pages[i] = mmap_page;
		} else {
			wc->w_pages[i] = find_or_create_page(mapping, index,
							     GFP_NOFS);
			if (!wc->w_pages[i]) {
				ret = -ENOMEM;
				mlog_errno(ret);
				goto out;
			}
		}

		if (index == target_index)
			wc->w_target_page = wc->w_pages[i];
	}
out:
	return ret;
}

/*
 * Prepare a single cluster and write it into the file.
 */
static int ocfs2_write_cluster(struct address_space *mapping,
			       u32 phys, unsigned int unwritten,
			       unsigned int should_zero,
			       struct ocfs2_alloc_context *data_ac,
			       struct ocfs2_alloc_context *meta_ac,
			       struct ocfs2_write_ctxt *wc, u32 cpos,
			       loff_t user_pos, unsigned user_len)
{
	int ret, i, new;
	u64 v_blkno, p_blkno;
	struct inode *inode = mapping->host;
	struct ocfs2_extent_tree et;

	new = phys == 0 ? 1 : 0;
	if (new) {
		u32 tmp_pos;

		/*
		 * This is safe to call with the page locks - it won't take
		 * any additional semaphores or cluster locks.
		 */
		tmp_pos = cpos;
		ret = ocfs2_add_inode_data(OCFS2_SB(inode->i_sb), inode,
					   &tmp_pos, 1, 0, wc->w_di_bh,
					   wc->w_handle, data_ac,
					   meta_ac, NULL);
		/*
		 * This shouldn't happen because we must have already
		 * calculated the correct meta data allocation required. The
		 * internal tree allocation code should know how to increase
		 * transaction credits itself.
		 *
		 * If need be, we could handle -EAGAIN for a
		 * RESTART_TRANS here.
		 */
		mlog_bug_on_msg(ret == -EAGAIN,
				"Inode %llu: EAGAIN return during allocation.\n",
				(unsigned long long)OCFS2_I(inode)->ip_blkno);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}
	} else if (unwritten) {
		ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode),
					      wc->w_di_bh);
		ret = ocfs2_mark_extent_written(inode, &et,
						wc->w_handle, cpos, 1, phys,
						meta_ac, &wc->w_dealloc);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}
	}

	if (should_zero)
		v_blkno = ocfs2_clusters_to_blocks(inode->i_sb, cpos);
	else
		v_blkno = user_pos >> inode->i_sb->s_blocksize_bits;

	/*
	 * The only reason this should fail is due to an inability to
	 * find the extent added.
	 */
	ret = ocfs2_extent_map_get_blocks(inode, v_blkno, &p_blkno, NULL,
					  NULL);
	if (ret < 0) {
		ocfs2_error(inode->i_sb, "Corrupting extent for inode %llu, "
			    "at logical block %llu",
			    (unsigned long long)OCFS2_I(inode)->ip_blkno,
			    (unsigned long long)v_blkno);
		goto out;
	}

	BUG_ON(p_blkno == 0);

	for(i = 0; i < wc->w_num_pages; i++) {
		int tmpret;

		tmpret = ocfs2_prepare_page_for_write(inode, &p_blkno, wc,
						      wc->w_pages[i], cpos,
						      user_pos, user_len,
						      should_zero);
		if (tmpret) {
			mlog_errno(tmpret);
			if (ret == 0)
				ret = tmpret;
		}
	}

	/*
	 * We only have cleanup to do in case of allocating write.
	 */
	if (ret && new)
		ocfs2_write_failure(inode, wc, user_pos, user_len);

out:

	return ret;
}

static int ocfs2_write_cluster_by_desc(struct address_space *mapping,
				       struct ocfs2_alloc_context *data_ac,
				       struct ocfs2_alloc_context *meta_ac,
				       struct ocfs2_write_ctxt *wc,
				       loff_t pos, unsigned len)
{
	int ret, i;
	loff_t cluster_off;
	unsigned int local_len = len;
	struct ocfs2_write_cluster_desc *desc;
	struct ocfs2_super *osb = OCFS2_SB(mapping->host->i_sb);

	for (i = 0; i < wc->w_clen; i++) {
		desc = &wc->w_desc[i];

		/*
		 * We have to make sure that the total write passed in
		 * doesn't extend past a single cluster.
		 */
		local_len = len;
		cluster_off = pos & (osb->s_clustersize - 1);
		if ((cluster_off + local_len) > osb->s_clustersize)
			local_len = osb->s_clustersize - cluster_off;

		ret = ocfs2_write_cluster(mapping, desc->c_phys,
					  desc->c_unwritten,
					  desc->c_needs_zero,
					  data_ac, meta_ac,
					  wc, desc->c_cpos, pos, local_len);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		len -= local_len;
		pos += local_len;
	}

	ret = 0;
out:
	return ret;
}

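/*
 * Example of the per-cluster split above (illustrative numbers): with
 * 4K clusters, a write of len = 5000 at pos = 6000 is handed to
 * ocfs2_write_cluster() twice: first with cluster_off = 6000 & 4095 =
 * 1904, so local_len = 4096 - 1904 = 2192, then with pos = 8192 and
 * the remaining local_len = 2808, which fits inside the next cluster.
 */
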
Mark Fasheh9517bac2007-02-09 20:24:12 -08001316/*
Mark Fasheh3a307ff2007-05-08 17:47:32 -07001317 * ocfs2_write_end() wants to know which parts of the target page it
1318 * should complete the write on. It's easiest to compute them ahead of
1319 * time when a more complete view of the write is available.
Mark Fasheh9517bac2007-02-09 20:24:12 -08001320 */
Mark Fasheh3a307ff2007-05-08 17:47:32 -07001321static void ocfs2_set_target_boundaries(struct ocfs2_super *osb,
1322 struct ocfs2_write_ctxt *wc,
1323 loff_t pos, unsigned len, int alloc)
Mark Fasheh9517bac2007-02-09 20:24:12 -08001324{
Mark Fasheh3a307ff2007-05-08 17:47:32 -07001325 struct ocfs2_write_cluster_desc *desc;
1326
1327 wc->w_target_from = pos & (PAGE_CACHE_SIZE - 1);
1328 wc->w_target_to = wc->w_target_from + len;
1329
1330 if (alloc == 0)
1331 return;
1332
1333 /*
1334 * Allocating write - we may have different boundaries based
1335 * on page size and cluster size.
1336 *
1337 * NOTE: We can no longer compute one value from the other as
1338 * the actual write length and user provided length may be
1339 * different.
1340 */
1341
1342 if (wc->w_large_pages) {
1343 /*
1344 * We only care about the 1st and last cluster within
Mark Fashehb27b7cb2007-06-18 11:22:56 -07001345 * our range and whether they should be zero'd or not. Either
Mark Fasheh3a307ff2007-05-08 17:47:32 -07001346 * value may be extended out to the start/end of a
1347 * newly allocated cluster.
1348 */
1349 desc = &wc->w_desc[0];
Sunil Mushrane7432672009-08-06 16:12:58 -07001350 if (desc->c_needs_zero)
Mark Fasheh3a307ff2007-05-08 17:47:32 -07001351 ocfs2_figure_cluster_boundaries(osb,
1352 desc->c_cpos,
1353 &wc->w_target_from,
1354 NULL);
1355
1356 desc = &wc->w_desc[wc->w_clen - 1];
Sunil Mushrane7432672009-08-06 16:12:58 -07001357 if (desc->c_needs_zero)
Mark Fasheh3a307ff2007-05-08 17:47:32 -07001358 ocfs2_figure_cluster_boundaries(osb,
1359 desc->c_cpos,
1360 NULL,
1361 &wc->w_target_to);
1362 } else {
1363 wc->w_target_from = 0;
1364 wc->w_target_to = PAGE_CACHE_SIZE;
1365 }
1366}
1367
Mark Fasheh0d172ba2007-05-14 18:09:54 -07001368/*
1369 * Populate each single-cluster write descriptor in the write context
1370 * with information about the i/o to be done.
Mark Fashehb27b7cb2007-06-18 11:22:56 -07001371 *
1372 * Returns the number of clusters that will have to be allocated, as
1373 * well as a worst case estimate of the number of extent records that
1374 * would have to be created during a write to an unwritten region.
Mark Fasheh0d172ba2007-05-14 18:09:54 -07001375 */
1376static int ocfs2_populate_write_desc(struct inode *inode,
1377 struct ocfs2_write_ctxt *wc,
Mark Fashehb27b7cb2007-06-18 11:22:56 -07001378 unsigned int *clusters_to_alloc,
1379 unsigned int *extents_to_split)
Mark Fasheh3a307ff2007-05-08 17:47:32 -07001380{
Mark Fasheh0d172ba2007-05-14 18:09:54 -07001381 int ret;
Mark Fasheh3a307ff2007-05-08 17:47:32 -07001382 struct ocfs2_write_cluster_desc *desc;
Mark Fasheh0d172ba2007-05-14 18:09:54 -07001383 unsigned int num_clusters = 0;
Mark Fashehb27b7cb2007-06-18 11:22:56 -07001384 unsigned int ext_flags = 0;
Mark Fasheh0d172ba2007-05-14 18:09:54 -07001385 u32 phys = 0;
1386 int i;
Mark Fasheh9517bac2007-02-09 20:24:12 -08001387
Mark Fashehb27b7cb2007-06-18 11:22:56 -07001388 *clusters_to_alloc = 0;
1389 *extents_to_split = 0;
1390
Mark Fasheh3a307ff2007-05-08 17:47:32 -07001391 for (i = 0; i < wc->w_clen; i++) {
1392 desc = &wc->w_desc[i];
1393 desc->c_cpos = wc->w_cpos + i;
1394
1395 if (num_clusters == 0) {
Mark Fashehb27b7cb2007-06-18 11:22:56 -07001396 /*
1397 * Need to look up the next extent record.
1398 */
Mark Fasheh3a307ff2007-05-08 17:47:32 -07001399 ret = ocfs2_get_clusters(inode, desc->c_cpos, &phys,
Mark Fashehb27b7cb2007-06-18 11:22:56 -07001400 &num_clusters, &ext_flags);
Mark Fasheh3a307ff2007-05-08 17:47:32 -07001401 if (ret) {
1402 mlog_errno(ret);
Mark Fasheh607d44a2007-05-09 15:14:45 -07001403 goto out;
Mark Fasheh3a307ff2007-05-08 17:47:32 -07001404 }
Mark Fashehb27b7cb2007-06-18 11:22:56 -07001405
Tao Ma293b2f72009-08-25 08:02:48 +08001406 /* We should already CoW the refcountd extent. */
1407 BUG_ON(ext_flags & OCFS2_EXT_REFCOUNTED);
1408
Mark Fashehb27b7cb2007-06-18 11:22:56 -07001409 /*
1410 * Assume worst case - that we're writing in
1411 * the middle of the extent.
1412 *
1413 * We can assume that the write proceeds from
1414 * left to right, in which case the extent
1415 * insert code is smart enough to coalesce the
1416 * next splits into the previous records created.
1417 */
1418 if (ext_flags & OCFS2_EXT_UNWRITTEN)
1419 *extents_to_split = *extents_to_split + 2;
		} else if (phys) {
			/*
			 * Only increment phys if it doesn't describe
			 * a hole.
			 */
			phys++;
		}

		/*
		 * If w_first_new_cpos is < UINT_MAX, we have a non-sparse
		 * file that got extended.  w_first_new_cpos tells us
		 * where the newly allocated clusters are so we can
		 * zero them.
		 */
		if (desc->c_cpos >= wc->w_first_new_cpos) {
			BUG_ON(phys == 0);
			desc->c_needs_zero = 1;
		}

		desc->c_phys = phys;
		if (phys == 0) {
			desc->c_new = 1;
			desc->c_needs_zero = 1;
			*clusters_to_alloc = *clusters_to_alloc + 1;
		}

		if (ext_flags & OCFS2_EXT_UNWRITTEN) {
			desc->c_unwritten = 1;
			desc->c_needs_zero = 1;
		}

		num_clusters--;
	}

	ret = 0;
out:
	return ret;
}

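/*
 * Prepare an inline-data write: pin the inode's single data page,
 * start a transaction, and get journal access on the dinode so that
 * the write_end path can copy the data straight into the dinode.
 */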
static int ocfs2_write_begin_inline(struct address_space *mapping,
				    struct inode *inode,
				    struct ocfs2_write_ctxt *wc)
{
	int ret;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct page *page;
	handle_t *handle;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;

	page = find_or_create_page(mapping, 0, GFP_NOFS);
	if (!page) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}
	/*
	 * If we don't set w_num_pages then this page won't get unlocked
	 * and freed on cleanup of the write context.
	 */
	wc->w_pages[0] = wc->w_target_page = page;
	wc->w_num_pages = 1;

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), wc->w_di_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		ocfs2_commit_trans(osb, handle);

		mlog_errno(ret);
		goto out;
	}

	if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL))
		ocfs2_set_inode_data_inline(inode, di);

	if (!PageUptodate(page)) {
		ret = ocfs2_read_inline_data(inode, page, wc->w_di_bh);
		if (ret) {
			ocfs2_commit_trans(osb, handle);

			goto out;
		}
	}

	wc->w_handle = handle;
out:
	return ret;
}

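/*
 * Returns 1 if a file of new_size bytes still fits in the dinode's
 * inline data area (id_count), 0 otherwise.
 */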
int ocfs2_size_fits_inline_data(struct buffer_head *di_bh, u64 new_size)
{
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;

	if (new_size <= le16_to_cpu(di->id2.i_data.id_count))
		return 1;
	return 0;
}

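/*
 * Decide whether this write can be serviced from inline data.
 *
 * Returns 1 if the write context was set up for an inline write, 0 if
 * the caller should fall back to the regular extent-based write path,
 * and a negative errno on failure.
 */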
static int ocfs2_try_to_write_inline_data(struct address_space *mapping,
					  struct inode *inode, loff_t pos,
					  unsigned len, struct page *mmap_page,
					  struct ocfs2_write_ctxt *wc)
{
	int ret, written = 0;
	loff_t end = pos + len;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_dinode *di = NULL;

	mlog(0, "Inode %llu, write of %u bytes at off %llu. features: 0x%x\n",
	     (unsigned long long)oi->ip_blkno, len, (unsigned long long)pos,
	     oi->ip_dyn_features);

	/*
	 * Handle inodes which already have inline data first.
	 */
	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
		if (mmap_page == NULL &&
		    ocfs2_size_fits_inline_data(wc->w_di_bh, end))
			goto do_inline_write;

		/*
		 * The write won't fit - we have to give this inode an
		 * inline extent list now.
		 */
		ret = ocfs2_convert_inline_data_to_extents(inode, wc->w_di_bh);
		if (ret)
			mlog_errno(ret);
		goto out;
	}

	/*
	 * Check whether the inode can accept inline data.
	 */
	if (oi->ip_clusters != 0 || i_size_read(inode) != 0)
		return 0;

	/*
	 * Check whether the write can fit.
	 */
	di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
	if (mmap_page ||
	    end > ocfs2_max_inline_data_with_xattr(inode->i_sb, di))
		return 0;

do_inline_write:
	ret = ocfs2_write_begin_inline(mapping, inode, wc);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * This signals to the caller that the data can be written
	 * inline.
	 */
	written = 1;
out:
	return written ? written : ret;
}

/*
 * This function only does anything for file systems which can't
 * handle sparse files.
 *
 * What we want to do here is fill in any hole between the current end
 * of allocation and the end of our write. That way the rest of the
 * write path can treat it as a non-allocating write, which has no
 * special case code for sparse/nonsparse files.
 */
static int ocfs2_expand_nonsparse_inode(struct inode *inode,
					struct buffer_head *di_bh,
					loff_t pos, unsigned len,
					struct ocfs2_write_ctxt *wc)
{
	int ret;
	loff_t newsize = pos + len;

	BUG_ON(ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)));

	if (newsize <= i_size_read(inode))
		return 0;

	ret = ocfs2_extend_no_holes(inode, di_bh, newsize, pos);
	if (ret)
		mlog_errno(ret);

	wc->w_first_new_cpos =
		ocfs2_clusters_for_bytes(inode->i_sb, i_size_read(inode));

	return ret;
}

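/*
 * Sparse counterpart to the above: when the write starts beyond
 * i_size, zero the allocated tail between the old size and the write
 * offset so that stale data is never exposed.
 */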
static int ocfs2_zero_tail(struct inode *inode, struct buffer_head *di_bh,
			   loff_t pos)
{
	int ret = 0;

	BUG_ON(!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)));
	if (pos > i_size_read(inode))
		ret = ocfs2_zero_extend(inode, di_bh, pos);

	return ret;
}

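/*
 * Core of write_begin. The caller must already hold the inode lock
 * and ip_alloc_sem and pass the locked dinode buffer in di_bh; see
 * ocfs2_write_begin() below. mmap_page, when non-NULL, is the page
 * handed to us by a mmap write fault.
 */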
int ocfs2_write_begin_nolock(struct file *filp,
			     struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned flags,
			     struct page **pagep, void **fsdata,
			     struct buffer_head *di_bh, struct page *mmap_page)
{
	int ret, cluster_of_pages, credits = OCFS2_INODE_UPDATE_CREDITS;
	unsigned int clusters_to_alloc, extents_to_split;
	struct ocfs2_write_ctxt *wc;
	struct inode *inode = mapping->host;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_dinode *di;
	struct ocfs2_alloc_context *data_ac = NULL;
	struct ocfs2_alloc_context *meta_ac = NULL;
	handle_t *handle;
	struct ocfs2_extent_tree et;

	ret = ocfs2_alloc_write_ctxt(&wc, osb, pos, len, di_bh);
	if (ret) {
		mlog_errno(ret);
		return ret;
	}

	if (ocfs2_supports_inline_data(osb)) {
		ret = ocfs2_try_to_write_inline_data(mapping, inode, pos, len,
						     mmap_page, wc);
		if (ret == 1) {
			ret = 0;
			goto success;
		}
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}
	}

	if (ocfs2_sparse_alloc(osb))
		ret = ocfs2_zero_tail(inode, di_bh, pos);
	else
		ret = ocfs2_expand_nonsparse_inode(inode, di_bh, pos, len,
						   wc);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_check_range_for_refcount(inode, pos, len);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	} else if (ret == 1) {
		ret = ocfs2_refcount_cow(inode, filp, di_bh,
					 wc->w_cpos, wc->w_clen, UINT_MAX);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	ret = ocfs2_populate_write_desc(inode, wc, &clusters_to_alloc,
					&extents_to_split);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;

	/*
	 * We set w_target_from, w_target_to here so that
	 * ocfs2_write_end() knows which range in the target page to
	 * write out. An allocation requires that we write the entire
	 * cluster range.
	 */
	if (clusters_to_alloc || extents_to_split) {
		/*
		 * XXX: We are stretching the limits of
		 * ocfs2_lock_allocators(). It greatly over-estimates
		 * the work to be done.
		 */
		mlog(0, "extend inode %llu, i_size = %lld, di->i_clusters = %u,"
		     " clusters_to_add = %u, extents_to_split = %u\n",
		     (unsigned long long)OCFS2_I(inode)->ip_blkno,
		     (long long)i_size_read(inode), le32_to_cpu(di->i_clusters),
		     clusters_to_alloc, extents_to_split);

		ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode),
					      wc->w_di_bh);
		ret = ocfs2_lock_allocators(inode, &et,
					    clusters_to_alloc, extents_to_split,
					    &data_ac, &meta_ac);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		if (data_ac)
			data_ac->ac_resv = &OCFS2_I(inode)->ip_la_data_resv;

		credits = ocfs2_calc_extend_credits(inode->i_sb,
						    &di->id2.i_list,
						    clusters_to_alloc);

	}

	/*
	 * We have to zero sparse allocated clusters, unwritten extent clusters,
	 * and non-sparse clusters we just extended. For non-sparse writes,
	 * we know zeros will only be needed in the first and/or last cluster.
	 */
	if (clusters_to_alloc || extents_to_split ||
	    (wc->w_clen && (wc->w_desc[0].c_needs_zero ||
			    wc->w_desc[wc->w_clen - 1].c_needs_zero)))
		cluster_of_pages = 1;
	else
		cluster_of_pages = 0;

	ocfs2_set_target_boundaries(osb, wc, pos, len, cluster_of_pages);

	handle = ocfs2_start_trans(osb, credits);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	wc->w_handle = handle;

	if (clusters_to_alloc) {
		ret = dquot_alloc_space_nodirty(inode,
			ocfs2_clusters_to_bytes(osb->sb, clusters_to_alloc));
		if (ret)
			goto out_commit;
	}
	/*
	 * We don't want this to fail in ocfs2_write_end(), so do it
	 * here.
	 */
	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), wc->w_di_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out_quota;
	}

	/*
	 * Fill our page array first. That way we've grabbed enough so
	 * that we can zero and flush if we error after adding the
	 * extent.
	 */
	ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos, len,
					 cluster_of_pages, mmap_page);
	if (ret) {
		mlog_errno(ret);
		goto out_quota;
	}

	ret = ocfs2_write_cluster_by_desc(mapping, data_ac, meta_ac, wc, pos,
					  len);
	if (ret) {
		mlog_errno(ret);
		goto out_quota;
	}

	if (data_ac)
		ocfs2_free_alloc_context(data_ac);
	if (meta_ac)
		ocfs2_free_alloc_context(meta_ac);

success:
	*pagep = wc->w_target_page;
	*fsdata = wc;
	return 0;
out_quota:
	if (clusters_to_alloc)
		dquot_free_space(inode,
			  ocfs2_clusters_to_bytes(osb->sb, clusters_to_alloc));
out_commit:
	ocfs2_commit_trans(osb, handle);

out:
	ocfs2_free_write_ctxt(wc);

	if (data_ac)
		ocfs2_free_alloc_context(data_ac);
	if (meta_ac)
		ocfs2_free_alloc_context(meta_ac);
	return ret;
}

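/*
 * Note that on success the inode lock and ip_alloc_sem are
 * deliberately left held; ocfs2_write_end() drops them once the data
 * has been copied in.
 */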
static int ocfs2_write_begin(struct file *file, struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned flags,
			     struct page **pagep, void **fsdata)
{
	int ret;
	struct buffer_head *di_bh = NULL;
	struct inode *inode = mapping->host;

	ret = ocfs2_inode_lock(inode, &di_bh, 1);
	if (ret) {
		mlog_errno(ret);
		return ret;
	}

	/*
	 * Take alloc sem here to prevent concurrent lookups. That way
	 * the mapping, zeroing and tree manipulation within
	 * ocfs2_write() will be safe against ->readpage(). This
	 * should also serve to lock out allocation from a shared
	 * writeable region.
	 */
	down_write(&OCFS2_I(inode)->ip_alloc_sem);

	ret = ocfs2_write_begin_nolock(file, mapping, pos, len, flags, pagep,
				       fsdata, di_bh, NULL);
	if (ret) {
		mlog_errno(ret);
		goto out_fail;
	}

	brelse(di_bh);

	return 0;

out_fail:
	up_write(&OCFS2_I(inode)->ip_alloc_sem);

	brelse(di_bh);
	ocfs2_inode_unlock(inode, 1);

	return ret;
}

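/*
 * Inline-data counterpart of the regular write_end path: copy what
 * was written in the target page back into the dinode's inline data
 * area.
 */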
static void ocfs2_write_end_inline(struct inode *inode, loff_t pos,
				   unsigned len, unsigned *copied,
				   struct ocfs2_dinode *di,
				   struct ocfs2_write_ctxt *wc)
{
	void *kaddr;

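	/*
	 * If we copied less than we were asked to and the page was
	 * never brought uptodate, we can't trust its contents; report
	 * that nothing was copied rather than publish stale data.
	 */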
	if (unlikely(*copied < len)) {
		if (!PageUptodate(wc->w_target_page)) {
			*copied = 0;
			return;
		}
	}

	kaddr = kmap_atomic(wc->w_target_page, KM_USER0);
	memcpy(di->id2.i_data.id_data + pos, kaddr + pos, *copied);
	kunmap_atomic(kaddr, KM_USER0);

	mlog(0, "Data written to inode at offset %llu. "
	     "id_count = %u, copied = %u, i_dyn_features = 0x%x\n",
	     (unsigned long long)pos, *copied,
	     le16_to_cpu(di->id2.i_data.id_count),
	     le16_to_cpu(di->i_dyn_features));
}

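/*
 * Commit a write: flush the written range out to the page cache
 * buffers, update i_size and the dinode fields, dirty the dinode in
 * the journal and tear down the write context.  Returns the number
 * of bytes actually copied.
 */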
int ocfs2_write_end_nolock(struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct page *page, void *fsdata)
{
	int i;
	unsigned from, to, start = pos & (PAGE_CACHE_SIZE - 1);
	struct inode *inode = mapping->host;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_write_ctxt *wc = fsdata;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
	handle_t *handle = wc->w_handle;
	struct page *tmppage;

	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
		ocfs2_write_end_inline(inode, pos, len, &copied, di, wc);
		goto out_write_size;
	}

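	/*
	 * On a short copy, any new buffers past the copied region were
	 * left unwritten; zero them so uninitialized data never reaches
	 * disk, and drop the copy entirely if the page was never
	 * uptodate.
	 */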
	if (unlikely(copied < len)) {
		if (!PageUptodate(wc->w_target_page))
			copied = 0;

		ocfs2_zero_new_buffers(wc->w_target_page, start+copied,
				       start+len);
	}
	flush_dcache_page(wc->w_target_page);

	for(i = 0; i < wc->w_num_pages; i++) {
		tmppage = wc->w_pages[i];

		if (tmppage == wc->w_target_page) {
			from = wc->w_target_from;
			to = wc->w_target_to;

			BUG_ON(from > PAGE_CACHE_SIZE ||
			       to > PAGE_CACHE_SIZE ||
			       to < from);
		} else {
			/*
			 * Pages adjacent to the target (if any) imply
			 * a hole-filling write, in which case we want
			 * to flush their entire range.
			 */
			from = 0;
			to = PAGE_CACHE_SIZE;
		}

		if (page_has_buffers(tmppage)) {
			if (ocfs2_should_order_data(inode))
				ocfs2_jbd2_file_inode(wc->w_handle, inode);
			block_commit_write(tmppage, from, to);
		}
	}

out_write_size:
	pos += copied;
	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		mark_inode_dirty(inode);
	}
	inode->i_blocks = ocfs2_inode_sector_count(inode);
	di->i_size = cpu_to_le64((u64)i_size_read(inode));
	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec);
	di->i_mtime_nsec = di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	ocfs2_journal_dirty(handle, wc->w_di_bh);

	ocfs2_commit_trans(osb, handle);

	ocfs2_run_deallocs(osb, &wc->w_dealloc);

	ocfs2_free_write_ctxt(wc);

	return copied;
}

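/*
 * ->write_end() wrapper: commits the write, then drops the locks
 * taken in ocfs2_write_begin().
 */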
static int ocfs2_write_end(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct page *page, void *fsdata)
{
	int ret;
	struct inode *inode = mapping->host;

	ret = ocfs2_write_end_nolock(mapping, pos, len, copied, page, fsdata);

	up_write(&OCFS2_I(inode)->ip_alloc_sem);
	ocfs2_inode_unlock(inode, 1);

	return ret;
}

const struct address_space_operations ocfs2_aops = {
	.readpage		= ocfs2_readpage,
	.readpages		= ocfs2_readpages,
	.writepage		= ocfs2_writepage,
	.write_begin		= ocfs2_write_begin,
	.write_end		= ocfs2_write_end,
	.bmap			= ocfs2_bmap,
	.sync_page		= block_sync_page,
	.direct_IO		= ocfs2_direct_IO,
	.invalidatepage		= ocfs2_invalidatepage,
	.releasepage		= ocfs2_releasepage,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};