/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * Copyright (C) 2002, 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <asm/byteorder.h>
#include <linux/swap.h>
#include <linux/pipe_fs_i.h>
#include <linux/mpage.h>
#include <linux/quotaops.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "aops.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "file.h"
#include "inode.h"
#include "journal.h"
#include "suballoc.h"
#include "super.h"
#include "symlink.h"
#include "refcounttree.h"
#include "ocfs2_trace.h"

#include "buffer_head_io.h"
#include "dir.h"
#include "namei.h"
#include "sysfile.h"

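/*
 * get_block callback for extent-list symlinks. Fast (inline) symlinks
 * never come through here, as the BUG_ON below asserts.
 */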
static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock,
				   struct buffer_head *bh_result, int create)
{
	int err = -EIO;
	int status;
	struct ocfs2_dinode *fe = NULL;
	struct buffer_head *bh = NULL;
	struct buffer_head *buffer_cache_bh = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	void *kaddr;

	trace_ocfs2_symlink_get_block(
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			(unsigned long long)iblock, bh_result, create);

	BUG_ON(ocfs2_inode_is_fast_symlink(inode));

	if ((iblock << inode->i_sb->s_blocksize_bits) > PATH_MAX + 1) {
		mlog(ML_ERROR, "block offset > PATH_MAX: %llu",
		     (unsigned long long)iblock);
		goto bail;
	}

	status = ocfs2_read_inode_block(inode, &bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}
	fe = (struct ocfs2_dinode *) bh->b_data;

	if ((u64)iblock >= ocfs2_clusters_to_blocks(inode->i_sb,
						    le32_to_cpu(fe->i_clusters))) {
		err = -ENOMEM;
		mlog(ML_ERROR, "block offset is outside the allocated size: "
		     "%llu\n", (unsigned long long)iblock);
		goto bail;
	}

	/* We don't use the page cache to create symlink data, so if
	 * need be, copy it over from the buffer cache. */
	if (!buffer_uptodate(bh_result) && ocfs2_inode_is_new(inode)) {
		u64 blkno = le64_to_cpu(fe->id2.i_list.l_recs[0].e_blkno) +
			    iblock;
		buffer_cache_bh = sb_getblk(osb->sb, blkno);
		if (!buffer_cache_bh) {
			err = -ENOMEM;
			mlog(ML_ERROR, "couldn't getblock for symlink!\n");
			goto bail;
		}

		/* we haven't locked out transactions, so a commit
		 * could've happened. Since we've got a reference on
		 * the bh, even if it commits while we're doing the
		 * copy, the data is still good. */
		if (buffer_jbd(buffer_cache_bh)
		    && ocfs2_inode_is_new(inode)) {
			kaddr = kmap_atomic(bh_result->b_page);
			if (!kaddr) {
				mlog(ML_ERROR, "couldn't kmap!\n");
				goto bail;
			}
			memcpy(kaddr + (bh_result->b_size * iblock),
			       buffer_cache_bh->b_data,
			       bh_result->b_size);
			kunmap_atomic(kaddr);
			set_buffer_uptodate(bh_result);
		}
		brelse(buffer_cache_bh);
	}

	map_bh(bh_result, inode->i_sb,
	       le64_to_cpu(fe->id2.i_list.l_recs[0].e_blkno) + iblock);

	err = 0;

bail:
	brelse(bh);

	return err;
}

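/*
 * Generic get_block callback for ocfs2's buffered I/O paths. It never
 * allocates; it only maps existing blocks, and treats unwritten
 * extents as holes so that they get zeroed rather than read.
 */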
int ocfs2_get_block(struct inode *inode, sector_t iblock,
		    struct buffer_head *bh_result, int create)
{
	int err = 0;
	unsigned int ext_flags;
	u64 max_blocks = bh_result->b_size >> inode->i_blkbits;
	u64 p_blkno, count, past_eof;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	trace_ocfs2_get_block((unsigned long long)OCFS2_I(inode)->ip_blkno,
			      (unsigned long long)iblock, bh_result, create);

	if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE)
		mlog(ML_NOTICE, "get_block on system inode 0x%p (%lu)\n",
		     inode, inode->i_ino);

	if (S_ISLNK(inode->i_mode)) {
		/* this always does I/O for some reason. */
		err = ocfs2_symlink_get_block(inode, iblock, bh_result, create);
		goto bail;
	}

	err = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno, &count,
					  &ext_flags);
	if (err) {
		mlog(ML_ERROR, "Error %d from get_blocks(0x%p, %llu, 1, "
		     "%llu, NULL)\n", err, inode, (unsigned long long)iblock,
		     (unsigned long long)p_blkno);
		goto bail;
	}

	if (max_blocks < count)
		count = max_blocks;

	/*
	 * ocfs2 never allocates in this function - the only time we
	 * need to use BH_New is when we're extending i_size on a file
	 * system which doesn't support holes, in which case BH_New
	 * allows __block_write_begin() to zero.
	 *
	 * If we see this on a sparse file system, then a truncate has
	 * raced us and removed the cluster. In this case, we clear
	 * the buffers dirty and uptodate bits and let the buffer code
	 * ignore it as a hole.
	 */
	if (create && p_blkno == 0 && ocfs2_sparse_alloc(osb)) {
		clear_buffer_dirty(bh_result);
		clear_buffer_uptodate(bh_result);
		goto bail;
	}

	/* Treat the unwritten extent as a hole for zeroing purposes. */
	if (p_blkno && !(ext_flags & OCFS2_EXT_UNWRITTEN))
		map_bh(bh_result, inode->i_sb, p_blkno);

	bh_result->b_size = count << inode->i_blkbits;

	if (!ocfs2_sparse_alloc(osb)) {
		if (p_blkno == 0) {
			err = -EIO;
			mlog(ML_ERROR,
			     "iblock = %llu p_blkno = %llu blkno=(%llu)\n",
			     (unsigned long long)iblock,
			     (unsigned long long)p_blkno,
			     (unsigned long long)OCFS2_I(inode)->ip_blkno);
			mlog(ML_ERROR, "Size %llu, clusters %u\n", (unsigned long long)i_size_read(inode), OCFS2_I(inode)->ip_clusters);
			dump_stack();
			goto bail;
		}
	}

	past_eof = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));

	trace_ocfs2_get_block_end((unsigned long long)OCFS2_I(inode)->ip_blkno,
				  (unsigned long long)past_eof);
	if (create && (iblock >= past_eof))
		set_buffer_new(bh_result);

bail:
	if (err < 0)
		err = -EIO;

	return err;
}

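/*
 * Copy the file data of an inline-data inode out of its dinode and
 * into @page, zeroing the remainder of the page. The flag and size
 * checks below treat inconsistent inline data as on-disk corruption
 * (-EROFS).
 */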
int ocfs2_read_inline_data(struct inode *inode, struct page *page,
			   struct buffer_head *di_bh)
{
	void *kaddr;
	loff_t size;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;

	if (!(le16_to_cpu(di->i_dyn_features) & OCFS2_INLINE_DATA_FL)) {
		ocfs2_error(inode->i_sb, "Inode %llu lost inline data flag\n",
			    (unsigned long long)OCFS2_I(inode)->ip_blkno);
		return -EROFS;
	}

	size = i_size_read(inode);

	if (size > PAGE_CACHE_SIZE ||
	    size > ocfs2_max_inline_data_with_xattr(inode->i_sb, di)) {
		ocfs2_error(inode->i_sb,
			    "Inode %llu with inline data has bad size: %Lu\n",
			    (unsigned long long)OCFS2_I(inode)->ip_blkno,
			    (unsigned long long)size);
		return -EROFS;
	}

	kaddr = kmap_atomic(page);
	if (size)
		memcpy(kaddr, di->id2.i_data.id_data, size);
	/* Clear the remaining part of the page */
	memset(kaddr + size, 0, PAGE_CACHE_SIZE - size);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);

	SetPageUptodate(page);

	return 0;
}

static int ocfs2_readpage_inline(struct inode *inode, struct page *page)
{
	int ret;
	struct buffer_head *di_bh = NULL;

	BUG_ON(!PageLocked(page));
	BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL));

	ret = ocfs2_read_inode_block(inode, &di_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_read_inline_data(inode, page, di_bh);
out:
	unlock_page(page);

	brelse(di_bh);
	return ret;
}

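/*
 * ->readpage for ocfs2. The cluster lock is taken via
 * ocfs2_inode_lock_with_page() and ip_alloc_sem is only trylocked,
 * both to avoid deadlocking against the page lock held by our caller.
 */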
static int ocfs2_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	loff_t start = (loff_t)page->index << PAGE_CACHE_SHIFT;
	int ret, unlock = 1;

	trace_ocfs2_readpage((unsigned long long)oi->ip_blkno,
			     (page ? page->index : 0));

	ret = ocfs2_inode_lock_with_page(inode, NULL, 0, page);
	if (ret != 0) {
		if (ret == AOP_TRUNCATED_PAGE)
			unlock = 0;
		mlog_errno(ret);
		goto out;
	}

	if (down_read_trylock(&oi->ip_alloc_sem) == 0) {
		/*
		 * Unlock the page and cycle ip_alloc_sem so that we don't
		 * busyloop waiting for ip_alloc_sem to unlock
		 */
		ret = AOP_TRUNCATED_PAGE;
		unlock_page(page);
		unlock = 0;
		down_read(&oi->ip_alloc_sem);
		up_read(&oi->ip_alloc_sem);
		goto out_inode_unlock;
	}

	/*
	 * i_size might have just been updated as we grabbed the meta lock. We
	 * might now be discovering a truncate that hit on another node.
	 * block_read_full_page->get_block freaks out if it is asked to read
	 * beyond the end of a file, so we check here. Callers
	 * (generic_file_read, vm_ops->fault) are clever enough to check i_size
	 * and notice that the page they just read isn't needed.
	 *
	 * XXX sys_readahead() seems to get that wrong?
	 */
	if (start >= i_size_read(inode)) {
		zero_user(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		ret = 0;
		goto out_alloc;
	}

	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
		ret = ocfs2_readpage_inline(inode, page);
	else
		ret = block_read_full_page(page, ocfs2_get_block);
	unlock = 0;

out_alloc:
	up_read(&OCFS2_I(inode)->ip_alloc_sem);
out_inode_unlock:
	ocfs2_inode_unlock(inode, 0);
out:
	if (unlock)
		unlock_page(page);
	return ret;
}

/*
 * This is used only for read-ahead. Failures or difficult to handle
 * situations are safe to ignore.
 *
 * Right now, we don't bother with BH_Boundary - in-inode extent lists
 * are quite large (243 extents on 4k blocks), so most inodes don't
 * grow out to a tree. If need be, detecting boundary extents could
 * trivially be added in a future version of ocfs2_get_block().
 */
static int ocfs2_readpages(struct file *filp, struct address_space *mapping,
			   struct list_head *pages, unsigned nr_pages)
{
	int ret, err = -EIO;
	struct inode *inode = mapping->host;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	loff_t start;
	struct page *last;

	/*
	 * Use the nonblocking flag for the dlm code to avoid page
	 * lock inversion, but don't bother with retrying.
	 */
	ret = ocfs2_inode_lock_full(inode, NULL, 0, OCFS2_LOCK_NONBLOCK);
	if (ret)
		return err;

	if (down_read_trylock(&oi->ip_alloc_sem) == 0) {
		ocfs2_inode_unlock(inode, 0);
		return err;
	}

	/*
	 * Don't bother with inline-data. There isn't anything
	 * to read-ahead in that case anyway...
	 */
	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
		goto out_unlock;

	/*
	 * Check whether a remote node truncated this file - we just
	 * drop out in that case as it's not worth handling here.
	 */
	last = list_entry(pages->prev, struct page, lru);
	start = (loff_t)last->index << PAGE_CACHE_SHIFT;
	if (start >= i_size_read(inode))
		goto out_unlock;

	err = mpage_readpages(mapping, pages, nr_pages, ocfs2_get_block);

out_unlock:
	up_read(&oi->ip_alloc_sem);
	ocfs2_inode_unlock(inode, 0);

	return err;
}

/* Note: Because we don't support holes, our allocation has
 * already happened (allocation writes zeros to the file data)
 * so we don't have to worry about ordered writes in
 * ocfs2_writepage.
 *
 * ->writepage is called during the process of invalidating the page cache
 * during blocked lock processing.  It can't block on any cluster locks
 * during block mapping.  It's relying on the fact that the block
 * mapping can't have disappeared under the dirty pages that it is
 * being asked to write back.
 */
static int ocfs2_writepage(struct page *page, struct writeback_control *wbc)
{
	trace_ocfs2_writepage(
		(unsigned long long)OCFS2_I(page->mapping->host)->ip_blkno,
		page->index);

	return block_write_full_page(page, ocfs2_get_block, wbc);
}

/* Taken from ext3. We don't necessarily need the full blown
 * functionality yet, but IMHO it's better to cut and paste the whole
 * thing so we can avoid introducing our own bugs (and easily pick up
 * their fixes when they happen) --Mark */
int walk_page_buffers(	handle_t *handle,
			struct buffer_head *head,
			unsigned from,
			unsigned to,
			int *partial,
			int (*fn)(	handle_t *handle,
					struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	for (	bh = head, block_start = 0;
		ret == 0 && (bh != head || !block_start);
		block_start = block_end, bh = next)
	{
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}

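/*
 * ->bmap for ocfs2: translate a logical file block to a physical block
 * number. Returns 0 for both errors and holes, which is all the
 * address_space op allows us to report.
 */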
static sector_t ocfs2_bmap(struct address_space *mapping, sector_t block)
{
	sector_t status;
	u64 p_blkno = 0;
	int err = 0;
	struct inode *inode = mapping->host;

	trace_ocfs2_bmap((unsigned long long)OCFS2_I(inode)->ip_blkno,
			 (unsigned long long)block);

	/* We don't need to lock journal system files, since they aren't
	 * accessed concurrently from multiple nodes.
	 */
	if (!INODE_JOURNAL(inode)) {
		err = ocfs2_inode_lock(inode, NULL, 0);
		if (err) {
			if (err != -ENOENT)
				mlog_errno(err);
			goto bail;
		}
		down_read(&OCFS2_I(inode)->ip_alloc_sem);
	}

	if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL))
		err = ocfs2_extent_map_get_blocks(inode, block, &p_blkno, NULL,
						  NULL);

	if (!INODE_JOURNAL(inode)) {
		up_read(&OCFS2_I(inode)->ip_alloc_sem);
		ocfs2_inode_unlock(inode, 0);
	}

	if (err) {
		mlog(ML_ERROR, "get_blocks() failed, block = %llu\n",
		     (unsigned long long)block);
		mlog_errno(err);
		goto bail;
	}

bail:
	status = err ? 0 : p_blkno;

	return status;
}

/*
 * TODO: Make this into a generic get_blocks function.
 *
 * From do_direct_io in direct-io.c:
 *  "So what we do is to permit the ->get_blocks function to populate
 *   bh.b_size with the size of IO which is permitted at this offset and
 *   this i_blkbits."
 *
 * This function is called directly from get_more_blocks in direct-io.c.
 *
 * called like this: dio->get_blocks(dio->inode, fs_startblk,
 * 					fs_count, map_bh, dio->rw == WRITE);
 */
static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock,
				     struct buffer_head *bh_result, int create)
{
	int ret;
	u32 cpos = 0;
	int alloc_locked = 0;
	u64 p_blkno, inode_blocks, contig_blocks;
	unsigned int ext_flags;
	unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
	unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits;
	unsigned long len = bh_result->b_size;
	unsigned int clusters_to_alloc = 0, contig_clusters = 0;

	cpos = ocfs2_blocks_to_clusters(inode->i_sb, iblock);

	/* This function won't even be called if the request isn't all
	 * nicely aligned and of the right size, so there's no need
	 * for us to check any of that. */

	inode_blocks = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));

	down_read(&OCFS2_I(inode)->ip_alloc_sem);

	/* This figures out the size of the next contiguous block, and
	 * our logical offset */
	ret = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno,
					  &contig_blocks, &ext_flags);
	up_read(&OCFS2_I(inode)->ip_alloc_sem);

	if (ret) {
		mlog(ML_ERROR, "get_blocks() failed iblock=%llu\n",
		     (unsigned long long)iblock);
		ret = -EIO;
		goto bail;
	}

	/* We should already CoW the refcounted extent in case of create. */
	BUG_ON(create && (ext_flags & OCFS2_EXT_REFCOUNTED));

	/* allocate blocks if no p_blkno is found, and create == 1 */
	if (!p_blkno && create) {
		ret = ocfs2_inode_lock(inode, NULL, 1);
		if (ret < 0) {
			mlog_errno(ret);
			goto bail;
		}

		alloc_locked = 1;

		down_write(&OCFS2_I(inode)->ip_alloc_sem);

		/* fill the hole; the blocks allocated here can't be
		 * larger than the size of the hole */
		clusters_to_alloc = ocfs2_clusters_for_bytes(inode->i_sb, len);
		contig_clusters = ocfs2_clusters_for_blocks(inode->i_sb,
				contig_blocks);
		if (clusters_to_alloc > contig_clusters)
			clusters_to_alloc = contig_clusters;

		/* allocate extents and insert them into the extent tree */
		ret = ocfs2_extend_allocation(inode, cpos,
				clusters_to_alloc, 0);
		if (ret < 0) {
			up_write(&OCFS2_I(inode)->ip_alloc_sem);
			mlog_errno(ret);
			goto bail;
		}

		ret = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno,
				&contig_blocks, &ext_flags);
		if (ret < 0) {
			up_write(&OCFS2_I(inode)->ip_alloc_sem);
			mlog(ML_ERROR, "get_blocks() failed iblock=%llu\n",
					(unsigned long long)iblock);
			ret = -EIO;
			goto bail;
		}
		up_write(&OCFS2_I(inode)->ip_alloc_sem);
	}

	/*
	 * get_more_blocks() expects us to describe a hole by clearing
	 * the mapped bit on bh_result().
	 *
	 * Consider an unwritten extent as a hole.
	 */
	if (p_blkno && !(ext_flags & OCFS2_EXT_UNWRITTEN))
		map_bh(bh_result, inode->i_sb, p_blkno);
	else
		clear_buffer_mapped(bh_result);

	/* make sure we don't map more than max_blocks blocks here as
	   that's all the kernel will handle at this point. */
	if (max_blocks < contig_blocks)
		contig_blocks = max_blocks;
	bh_result->b_size = contig_blocks << blocksize_bits;
bail:
	if (alloc_locked)
		ocfs2_inode_unlock(inode, 1);
	return ret;
}

/*
 * ocfs2_dio_end_io is called by the dio core when a dio is finished.  We're
 * particularly interested in the aio/dio case.  We use the rw_lock DLM lock
 * to protect io on one node from truncation on another.
 */
static void ocfs2_dio_end_io(struct kiocb *iocb,
			     loff_t offset,
			     ssize_t bytes,
			     void *private)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	int level;

	/* this io's submitter should not have unlocked this before we could */
	BUG_ON(!ocfs2_iocb_is_rw_locked(iocb));

	if (ocfs2_iocb_is_unaligned_aio(iocb)) {
		ocfs2_iocb_clear_unaligned_aio(iocb);

		mutex_unlock(&OCFS2_I(inode)->ip_unaligned_aio);
	}

	/* Leave the rw unlock for later, to protect an appending direct
	 * io write */
	if (offset + bytes <= i_size_read(inode)) {
		ocfs2_iocb_clear_rw_locked(iocb);

		level = ocfs2_iocb_rw_locked_level(iocb);
		ocfs2_rw_unlock(inode, level);
	}
}

static int ocfs2_releasepage(struct page *page, gfp_t wait)
{
	if (!page_has_buffers(page))
		return 0;
	return try_to_free_buffers(page);
}

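/*
 * Return 1 if the cluster covering @offset is allocated and written,
 * so that a direct write there is a plain overwrite; return 0 for a
 * hole or unwritten extent, or a negative error from the cluster
 * lookup.
 */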
static int ocfs2_is_overwrite(struct ocfs2_super *osb,
		struct inode *inode, loff_t offset)
{
	int ret = 0;
	u32 v_cpos = 0;
	u32 p_cpos = 0;
	unsigned int num_clusters = 0;
	unsigned int ext_flags = 0;

	v_cpos = ocfs2_bytes_to_clusters(osb->sb, offset);
	ret = ocfs2_get_clusters(inode, v_cpos, &p_cpos,
			&num_clusters, &ext_flags);
	if (ret < 0) {
		mlog_errno(ret);
		return ret;
	}

	if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN))
		return 1;

	return 0;
}

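/*
 * Zero the unzeroed tail of the cluster holding i_size before an
 * appending direct write lands beyond it, using
 * blkdev_issue_zeroout(). A no-op unless @offset extends the file and
 * the tail is not already cluster aligned.
 */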
static int ocfs2_direct_IO_zero_extend(struct ocfs2_super *osb,
		struct inode *inode, loff_t offset,
		u64 zero_len, int cluster_align)
{
	u32 p_cpos = 0;
	u32 v_cpos = ocfs2_bytes_to_clusters(osb->sb, i_size_read(inode));
	unsigned int num_clusters = 0;
	unsigned int ext_flags = 0;
	int ret = 0;

	if (offset <= i_size_read(inode) || cluster_align)
		return 0;

	ret = ocfs2_get_clusters(inode, v_cpos, &p_cpos, &num_clusters,
			&ext_flags);
	if (ret < 0) {
		mlog_errno(ret);
		return ret;
	}

	if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) {
		u64 s = i_size_read(inode);
		sector_t sector = ((u64)p_cpos << (osb->s_clustersize_bits - 9)) +
			(do_div(s, osb->s_clustersize) >> 9);

		ret = blkdev_issue_zeroout(osb->sb->s_bdev, sector,
				zero_len >> 9, GFP_NOFS, false);
		if (ret < 0)
			mlog_errno(ret);
	}

	return ret;
}

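/*
 * Counterpart of the above for file systems that don't allow holes:
 * extend the allocation out toward @offset and zero the newly covered
 * byte range between i_size and @offset on disk, cluster by cluster.
 */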
static int ocfs2_direct_IO_extend_no_holes(struct ocfs2_super *osb,
		struct inode *inode, loff_t offset)
{
	u64 zero_start, zero_len, total_zero_len;
	u32 p_cpos = 0, clusters_to_add;
	u32 v_cpos = ocfs2_bytes_to_clusters(osb->sb, i_size_read(inode));
	unsigned int num_clusters = 0;
	unsigned int ext_flags = 0;
	u32 size_div, offset_div;
	int ret = 0;

	{
		u64 o = offset;
		u64 s = i_size_read(inode);

		offset_div = do_div(o, osb->s_clustersize);
		size_div = do_div(s, osb->s_clustersize);
	}

	if (offset <= i_size_read(inode))
		return 0;

	clusters_to_add = ocfs2_bytes_to_clusters(inode->i_sb, offset) -
		ocfs2_bytes_to_clusters(inode->i_sb, i_size_read(inode));
	total_zero_len = offset - i_size_read(inode);
	if (clusters_to_add)
		total_zero_len -= offset_div;

	/* Allocate clusters to fill out holes; this is only needed
	 * when we add more than one cluster. Otherwise the cluster
	 * will be allocated during direct IO */
	if (clusters_to_add > 1) {
		ret = ocfs2_extend_allocation(inode,
				OCFS2_I(inode)->ip_clusters,
				clusters_to_add - 1, 0);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	while (total_zero_len) {
		ret = ocfs2_get_clusters(inode, v_cpos, &p_cpos, &num_clusters,
				&ext_flags);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}

		zero_start = ocfs2_clusters_to_bytes(osb->sb, p_cpos) +
			size_div;
		zero_len = ocfs2_clusters_to_bytes(osb->sb, num_clusters) -
			size_div;
		zero_len = min(total_zero_len, zero_len);

		if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) {
			ret = blkdev_issue_zeroout(osb->sb->s_bdev,
					zero_start >> 9, zero_len >> 9,
					GFP_NOFS, false);
			if (ret < 0) {
				mlog_errno(ret);
				goto out;
			}
		}

		total_zero_len -= zero_len;
		v_cpos += ocfs2_bytes_to_clusters(osb->sb, zero_len + size_div);

		/* Only the first iteration can be cluster-unaligned, so
		 * set size_div to 0 for the rest */
		size_div = 0;
	}

out:
	return ret;
}

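/*
 * The write half of ocfs2 direct I/O. Appending writes are the
 * delicate case: the inode is added to the orphan directory first so
 * that allocation past i_size can be cleaned up if the write never
 * completes, cluster head/tail zeroing brackets the call to
 * __blockdev_direct_IO(), and the orphan entry is removed (updating
 * i_size as needed) once the outcome is known.
 */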
static ssize_t ocfs2_direct_IO_write(struct kiocb *iocb,
		struct iov_iter *iter,
		loff_t offset)
{
	ssize_t ret = 0;
	ssize_t written = 0;
	bool orphaned = false;
	int is_overwrite = 0;
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file)->i_mapping->host;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct buffer_head *di_bh = NULL;
	size_t count = iter->count;
	journal_t *journal = osb->journal->j_journal;
	u64 zero_len_head, zero_len_tail;
	int cluster_align_head, cluster_align_tail;
	loff_t final_size = offset + count;
	int append_write = offset >= i_size_read(inode) ? 1 : 0;
	unsigned int num_clusters = 0;
	unsigned int ext_flags = 0;

	{
		u64 o = offset;
		u64 s = i_size_read(inode);

		zero_len_head = do_div(o, 1 << osb->s_clustersize_bits);
		cluster_align_head = !zero_len_head;

		zero_len_tail = osb->s_clustersize -
			do_div(s, osb->s_clustersize);
		if ((offset - i_size_read(inode)) < zero_len_tail)
			zero_len_tail = offset - i_size_read(inode);
		cluster_align_tail = !zero_len_tail;
	}

	/*
	 * when final_size > inode->i_size, inode->i_size will be
	 * updated after direct write, so add the inode to orphan
	 * dir first.
	 */
	if (final_size > i_size_read(inode)) {
		ret = ocfs2_add_inode_to_orphan(osb, inode);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}
		orphaned = true;
	}

	if (append_write) {
		ret = ocfs2_inode_lock(inode, NULL, 1);
		if (ret < 0) {
			mlog_errno(ret);
			goto clean_orphan;
		}

		/* zero out the tail of the previously allocated cluster
		 * that was not zeroed */
		if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb))) {
			down_read(&OCFS2_I(inode)->ip_alloc_sem);
			ret = ocfs2_direct_IO_zero_extend(osb, inode, offset,
					zero_len_tail, cluster_align_tail);
			up_read(&OCFS2_I(inode)->ip_alloc_sem);
		} else {
			down_write(&OCFS2_I(inode)->ip_alloc_sem);
			ret = ocfs2_direct_IO_extend_no_holes(osb, inode,
					offset);
			up_write(&OCFS2_I(inode)->ip_alloc_sem);
		}
		if (ret < 0) {
			mlog_errno(ret);
			ocfs2_inode_unlock(inode, 1);
			goto clean_orphan;
		}

		is_overwrite = ocfs2_is_overwrite(osb, inode, offset);
		if (is_overwrite < 0) {
			mlog_errno(is_overwrite);
			ret = is_overwrite;
			ocfs2_inode_unlock(inode, 1);
			goto clean_orphan;
		}

		ocfs2_inode_unlock(inode, 1);
	}

	written = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
				       offset, ocfs2_direct_IO_get_blocks,
				       ocfs2_dio_end_io, NULL, 0);
	/* overwrite aio may return -EIOCBQUEUED, and it is not an error */
	if ((written < 0) && (written != -EIOCBQUEUED)) {
		loff_t i_size = i_size_read(inode);

		if (offset + count > i_size) {
			ret = ocfs2_inode_lock(inode, &di_bh, 1);
			if (ret < 0) {
				mlog_errno(ret);
				goto clean_orphan;
			}

			if (i_size == i_size_read(inode)) {
				ret = ocfs2_truncate_file(inode, di_bh,
						i_size);
				if (ret < 0) {
					if (ret != -ENOSPC)
						mlog_errno(ret);

					ocfs2_inode_unlock(inode, 1);
					brelse(di_bh);
					di_bh = NULL;
					goto clean_orphan;
				}
			}

			ocfs2_inode_unlock(inode, 1);
			brelse(di_bh);
			di_bh = NULL;

			ret = jbd2_journal_force_commit(journal);
			if (ret < 0)
				mlog_errno(ret);
		}
	} else if (written > 0 && append_write && !is_overwrite &&
			!cluster_align_head) {
		/* zero out the allocated cluster head */
		u32 p_cpos = 0;
		u32 v_cpos = ocfs2_bytes_to_clusters(osb->sb, offset);

		ret = ocfs2_inode_lock(inode, NULL, 0);
		if (ret < 0) {
			mlog_errno(ret);
			goto clean_orphan;
		}

		ret = ocfs2_get_clusters(inode, v_cpos, &p_cpos,
				&num_clusters, &ext_flags);
		if (ret < 0) {
			mlog_errno(ret);
			ocfs2_inode_unlock(inode, 0);
			goto clean_orphan;
		}

		BUG_ON(!p_cpos || (ext_flags & OCFS2_EXT_UNWRITTEN));

		ret = blkdev_issue_zeroout(osb->sb->s_bdev,
				(u64)p_cpos << (osb->s_clustersize_bits - 9),
				zero_len_head >> 9, GFP_NOFS, false);
		if (ret < 0)
			mlog_errno(ret);

		ocfs2_inode_unlock(inode, 0);
	}

clean_orphan:
	if (orphaned) {
		int tmp_ret;
		int update_isize = written > 0 ? 1 : 0;
		loff_t end = update_isize ? offset + written : 0;

		tmp_ret = ocfs2_inode_lock(inode, &di_bh, 1);
		if (tmp_ret < 0) {
			ret = tmp_ret;
			mlog_errno(ret);
			goto out;
		}

		tmp_ret = ocfs2_del_inode_from_orphan(osb, inode, di_bh,
				update_isize, end);
		if (tmp_ret < 0) {
			ret = tmp_ret;
			mlog_errno(ret);
			brelse(di_bh);
			goto out;
		}

		ocfs2_inode_unlock(inode, 1);
		brelse(di_bh);

		tmp_ret = jbd2_journal_force_commit(journal);
		if (tmp_ret < 0) {
			ret = tmp_ret;
			mlog_errno(tmp_ret);
		}
	}

out:
	if (ret >= 0)
		ret = written;
	return ret;
}

static ssize_t ocfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
			       loff_t offset)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file)->i_mapping->host;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	int full_coherency = !(osb->s_mount_opt &
			OCFS2_MOUNT_COHERENCY_BUFFERED);

	/*
	 * Fallback to buffered I/O if we see an inode without
	 * extents.
	 */
	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
		return 0;

	/* Fallback to buffered I/O if we are appending and
	 * concurrent O_DIRECT writes are allowed.
	 */
	if (i_size_read(inode) <= offset && !full_coherency)
		return 0;

	if (iov_iter_rw(iter) == READ)
		return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
					    iter, offset,
					    ocfs2_direct_IO_get_blocks,
					    ocfs2_dio_end_io, NULL, 0);
	else
		return ocfs2_direct_IO_write(iocb, iter, offset);
}

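/*
 * Compute the byte range within a page that cluster @cpos occupies.
 * Only interesting when clusters are smaller than pages; otherwise the
 * whole page belongs to the cluster.
 */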
static void ocfs2_figure_cluster_boundaries(struct ocfs2_super *osb,
					    u32 cpos,
					    unsigned int *start,
					    unsigned int *end)
{
	unsigned int cluster_start = 0, cluster_end = PAGE_CACHE_SIZE;

	if (unlikely(PAGE_CACHE_SHIFT > osb->s_clustersize_bits)) {
		unsigned int cpp;

		cpp = 1 << (PAGE_CACHE_SHIFT - osb->s_clustersize_bits);

		cluster_start = cpos % cpp;
		cluster_start = cluster_start << osb->s_clustersize_bits;

		cluster_end = cluster_start + osb->s_clustersize;
	}

	BUG_ON(cluster_start > PAGE_SIZE);
	BUG_ON(cluster_end > PAGE_SIZE);

	if (start)
		*start = cluster_start;
	if (end)
		*end = cluster_end;
}

/*
 * 'from' and 'to' are the region in the page to avoid zeroing.
 *
 * If pagesize > clustersize, this function will avoid zeroing outside
 * of the cluster boundary.
 *
 * from == to == 0 is code for "zero the entire cluster region"
 */
static void ocfs2_clear_page_regions(struct page *page,
				     struct ocfs2_super *osb, u32 cpos,
				     unsigned from, unsigned to)
{
	void *kaddr;
	unsigned int cluster_start, cluster_end;

	ocfs2_figure_cluster_boundaries(osb, cpos, &cluster_start, &cluster_end);

	kaddr = kmap_atomic(page);

	if (from || to) {
		if (from > cluster_start)
			memset(kaddr + cluster_start, 0, from - cluster_start);
		if (to < cluster_end)
			memset(kaddr + to, 0, cluster_end - to);
	} else {
		memset(kaddr + cluster_start, 0, cluster_end - cluster_start);
	}

	kunmap_atomic(kaddr);
}

/*
 * Nonsparse file systems fully allocate before we get to the write
 * code. This prevents ocfs2_write() from tagging the write as an
 * allocating one, which means ocfs2_map_page_blocks() might try to
 * read-in the blocks at the tail of our file. Avoid reading them by
 * testing i_size against each block offset.
 */
static int ocfs2_should_read_blk(struct inode *inode, struct page *page,
				 unsigned int block_start)
{
	u64 offset = page_offset(page) + block_start;

	if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
		return 1;

	if (i_size_read(inode) > offset)
		return 1;

	return 0;
}

/*
 * Some of this taken from __block_write_begin(). We already have our
 * mapping by now though, and the entire write will be allocating or
 * it won't, so not much need to use BH_New.
 *
 * This will also skip zeroing, which is handled externally.
 */
int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno,
			  struct inode *inode, unsigned int from,
			  unsigned int to, int new)
{
	int ret = 0;
	struct buffer_head *head, *bh, *wait[2], **wait_bh = wait;
	unsigned int block_end, block_start;
	unsigned int bsize = 1 << inode->i_blkbits;

	if (!page_has_buffers(page))
		create_empty_buffers(page, bsize, 0);

	head = page_buffers(page);
	for (bh = head, block_start = 0; bh != head || !block_start;
	     bh = bh->b_this_page, block_start += bsize) {
		block_end = block_start + bsize;

		clear_buffer_new(bh);

		/*
		 * Ignore blocks outside of our i/o range -
		 * they may belong to unallocated clusters.
		 */
		if (block_start >= to || block_end <= from) {
			if (PageUptodate(page))
				set_buffer_uptodate(bh);
			continue;
		}

		/*
		 * For an allocating write with cluster size >= page
		 * size, we always write the entire page.
		 */
		if (new)
			set_buffer_new(bh);

		if (!buffer_mapped(bh)) {
			map_bh(bh, inode->i_sb, *p_blkno);
			unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
		}

		if (PageUptodate(page)) {
			if (!buffer_uptodate(bh))
				set_buffer_uptodate(bh);
		} else if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
			   !buffer_new(bh) &&
			   ocfs2_should_read_blk(inode, page, block_start) &&
			   (block_start < from || block_end > to)) {
			ll_rw_block(READ, 1, &bh);
			*wait_bh++=bh;
		}

		*p_blkno = *p_blkno + 1;
	}

	/*
	 * If we issued read requests - let them complete.
	 */
	while(wait_bh > wait) {
		wait_on_buffer(*--wait_bh);
		if (!buffer_uptodate(*wait_bh))
			ret = -EIO;
	}

	if (ret == 0 || !new)
		return ret;

	/*
	 * If we get -EIO above, zero out any newly allocated blocks
	 * to avoid exposing stale data.
	 */
	bh = head;
	block_start = 0;
	do {
		block_end = block_start + bsize;
		if (block_end <= from)
			goto next_bh;
		if (block_start >= to)
			break;

		zero_user(page, block_start, bh->b_size);
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);

next_bh:
		block_start = block_end;
		bh = bh->b_this_page;
	} while (bh != head);

	return ret;
}

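/*
 * Bound the number of pages a single write context can span: one page
 * when pages are at least cluster sized, otherwise enough pages to
 * cover one maximally sized cluster.
 */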
#if (PAGE_CACHE_SIZE >= OCFS2_MAX_CLUSTERSIZE)
#define OCFS2_MAX_CTXT_PAGES	1
#else
#define OCFS2_MAX_CTXT_PAGES	(OCFS2_MAX_CLUSTERSIZE / PAGE_CACHE_SIZE)
#endif

#define OCFS2_MAX_CLUSTERS_PER_PAGE	(PAGE_CACHE_SIZE / OCFS2_MIN_CLUSTERSIZE)

/*
 * Describe the state of a single cluster to be written to.
 */
struct ocfs2_write_cluster_desc {
	u32		c_cpos;
	u32		c_phys;
	/*
	 * Give this a unique field because c_phys eventually gets
	 * filled.
	 */
	unsigned	c_new;
	unsigned	c_unwritten;
	unsigned	c_needs_zero;
};

struct ocfs2_write_ctxt {
	/* Logical cluster position / len of write */
	u32				w_cpos;
	u32				w_clen;

	/* First cluster allocated in a nonsparse extend */
	u32				w_first_new_cpos;

	struct ocfs2_write_cluster_desc	w_desc[OCFS2_MAX_CLUSTERS_PER_PAGE];

	/*
	 * This is true if page_size > cluster_size.
	 *
	 * It triggers a set of special cases during write which might
	 * have to deal with allocating writes to partial pages.
	 */
	unsigned int			w_large_pages;

	/*
	 * Pages involved in this write.
	 *
	 * w_target_page is the page being written to by the user.
	 *
	 * w_pages is an array of pages which always contains
	 * w_target_page, and in the case of an allocating write with
	 * page_size < cluster size, it will contain zero'd and mapped
	 * pages adjacent to w_target_page which need to be written
	 * out so that future reads from that region will get zeros.
	 */
	unsigned int			w_num_pages;
	struct page			*w_pages[OCFS2_MAX_CTXT_PAGES];
	struct page			*w_target_page;

	/*
	 * w_target_locked is used for the page_mkwrite path, indicating
	 * that w_target_page must not be unlocked in
	 * ocfs2_write_end_nolock().
	 */
	unsigned int			w_target_locked:1;

	/*
	 * ocfs2_write_end() uses this to know what the real range to
	 * write in the target should be.
	 */
	unsigned int			w_target_from;
	unsigned int			w_target_to;

	/*
	 * We could use journal_current_handle() but this is cleaner,
	 * IMHO -Mark
	 */
	handle_t			*w_handle;

	struct buffer_head		*w_di_bh;

	struct ocfs2_cached_dealloc_ctxt w_dealloc;
};

void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages)
{
	int i;

	for(i = 0; i < num_pages; i++) {
		if (pages[i]) {
			unlock_page(pages[i]);
			mark_page_accessed(pages[i]);
			page_cache_release(pages[i]);
		}
	}
}

static void ocfs2_unlock_pages(struct ocfs2_write_ctxt *wc)
{
	int i;

	/*
	 * w_target_locked is only set to true in the page_mkwrite() case.
	 * The intent is to allow us to lock the target page from write_begin()
	 * to write_end(). The caller must hold a ref on w_target_page.
	 */
	if (wc->w_target_locked) {
		BUG_ON(!wc->w_target_page);
		for (i = 0; i < wc->w_num_pages; i++) {
			if (wc->w_target_page == wc->w_pages[i]) {
				wc->w_pages[i] = NULL;
				break;
			}
		}
		mark_page_accessed(wc->w_target_page);
		page_cache_release(wc->w_target_page);
	}
	ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages);
}

static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc)
{
	ocfs2_unlock_pages(wc);
	brelse(wc->w_di_bh);
	kfree(wc);
}

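/*
 * Allocate and initialize a write context for @len bytes at @pos:
 * record the cluster span of the write and pin the inode's dinode
 * buffer with an extra reference.
 */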
static int ocfs2_alloc_write_ctxt(struct ocfs2_write_ctxt **wcp,
				  struct ocfs2_super *osb, loff_t pos,
				  unsigned len, struct buffer_head *di_bh)
{
	u32 cend;
	struct ocfs2_write_ctxt *wc;

	wc = kzalloc(sizeof(struct ocfs2_write_ctxt), GFP_NOFS);
	if (!wc)
		return -ENOMEM;

	wc->w_cpos = pos >> osb->s_clustersize_bits;
	wc->w_first_new_cpos = UINT_MAX;
	cend = (pos + len - 1) >> osb->s_clustersize_bits;
	wc->w_clen = cend - wc->w_cpos + 1;
	get_bh(di_bh);
	wc->w_di_bh = di_bh;

	if (unlikely(PAGE_CACHE_SHIFT > osb->s_clustersize_bits))
		wc->w_large_pages = 1;
	else
		wc->w_large_pages = 0;

	ocfs2_init_dealloc_ctxt(&wc->w_dealloc);

	*wcp = wc;

	return 0;
}

/*
 * If a page has any new buffers, zero them out here, and mark them uptodate
 * and dirty so they'll be written out (in order to prevent uninitialised
 * block data from leaking). And clear the new bit.
 */
static void ocfs2_zero_new_buffers(struct page *page, unsigned from, unsigned to)
{
	unsigned int block_start, block_end;
	struct buffer_head *head, *bh;

	BUG_ON(!PageLocked(page));
	if (!page_has_buffers(page))
		return;

	bh = head = page_buffers(page);
	block_start = 0;
	do {
		block_end = block_start + bh->b_size;

		if (buffer_new(bh)) {
			if (block_end > from && block_start < to) {
				if (!PageUptodate(page)) {
					unsigned start, end;

					start = max(from, block_start);
					end = min(to, block_end);

					zero_user_segment(page, start, end);
					set_buffer_uptodate(bh);
				}

				clear_buffer_new(bh);
				mark_buffer_dirty(bh);
			}
		}

		block_start = block_end;
		bh = bh->b_this_page;
	} while (bh != head);
}

/*
 * Only called when we have a failure during an allocating write, to
 * write zeros to the newly allocated region.
 */
static void ocfs2_write_failure(struct inode *inode,
				struct ocfs2_write_ctxt *wc,
				loff_t user_pos, unsigned user_len)
{
	int i;
	unsigned from = user_pos & (PAGE_CACHE_SIZE - 1),
		to = user_pos + user_len;
	struct page *tmppage;

	ocfs2_zero_new_buffers(wc->w_target_page, from, to);

	for(i = 0; i < wc->w_num_pages; i++) {
		tmppage = wc->w_pages[i];

		if (page_has_buffers(tmppage)) {
			if (ocfs2_should_order_data(inode))
				ocfs2_jbd2_file_inode(wc->w_handle, inode);

			block_commit_write(tmppage, from, to);
		}
	}
}

static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno,
					struct ocfs2_write_ctxt *wc,
					struct page *page, u32 cpos,
					loff_t user_pos, unsigned user_len,
					int new)
{
	int ret;
	unsigned int map_from = 0, map_to = 0;
	unsigned int cluster_start, cluster_end;
	unsigned int user_data_from = 0, user_data_to = 0;

	ocfs2_figure_cluster_boundaries(OCFS2_SB(inode->i_sb), cpos,
					&cluster_start, &cluster_end);

	/* treat the write as new if a hole/lseek spanned across
	 * the page boundary.
	 */
	new = new | ((i_size_read(inode) <= page_offset(page)) &&
			(page_offset(page) <= user_pos));

	if (page == wc->w_target_page) {
		map_from = user_pos & (PAGE_CACHE_SIZE - 1);
		map_to = map_from + user_len;

		if (new)
			ret = ocfs2_map_page_blocks(page, p_blkno, inode,
						    cluster_start, cluster_end,
						    new);
		else
			ret = ocfs2_map_page_blocks(page, p_blkno, inode,
						    map_from, map_to, new);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		user_data_from = map_from;
		user_data_to = map_to;
		if (new) {
			map_from = cluster_start;
			map_to = cluster_end;
		}
	} else {
		/*
		 * If we haven't allocated the new page yet, we
		 * shouldn't be writing it out without copying user
		 * data. This is likely a math error from the caller.
		 */
		BUG_ON(!new);

		map_from = cluster_start;
		map_to = cluster_end;

		ret = ocfs2_map_page_blocks(page, p_blkno, inode,
					    cluster_start, cluster_end, new);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	/*
	 * Parts of newly allocated pages need to be zero'd.
	 *
	 * Above, we have also rewritten 'to' and 'from' - as far as
	 * the rest of the function is concerned, the entire cluster
	 * range inside of a page needs to be written.
	 *
	 * We can skip this if the page is up to date - it's already
	 * been zero'd from being read in as a hole.
	 */
	if (new && !PageUptodate(page))
		ocfs2_clear_page_regions(page, OCFS2_SB(inode->i_sb),
					 cpos, user_data_from, user_data_to);

	flush_dcache_page(page);

out:
	return ret;
}

1492/*
Mark Fasheh3a307ff2007-05-08 17:47:32 -07001493 * This function will only grab one cluster's worth of pages.
Mark Fasheh9517bac2007-02-09 20:24:12 -08001494 */
Mark Fasheh3a307ff2007-05-08 17:47:32 -07001495static int ocfs2_grab_pages_for_write(struct address_space *mapping,
1496 struct ocfs2_write_ctxt *wc,
Joel Becker693c2412010-07-02 17:20:27 -07001497 u32 cpos, loff_t user_pos,
1498 unsigned user_len, int new,
Mark Fasheh7307de82007-05-09 15:16:19 -07001499 struct page *mmap_page)
Mark Fasheh9517bac2007-02-09 20:24:12 -08001500{
Mark Fasheh3a307ff2007-05-08 17:47:32 -07001501 int ret = 0, i;
Joel Becker693c2412010-07-02 17:20:27 -07001502 unsigned long start, target_index, end_index, index;
Mark Fasheh9517bac2007-02-09 20:24:12 -08001503 struct inode *inode = mapping->host;
Joel Becker693c2412010-07-02 17:20:27 -07001504 loff_t last_byte;
Mark Fasheh9517bac2007-02-09 20:24:12 -08001505
Mark Fasheh3a307ff2007-05-08 17:47:32 -07001506 target_index = user_pos >> PAGE_CACHE_SHIFT;
Mark Fasheh9517bac2007-02-09 20:24:12 -08001507
1508 /*
1509 * Figure out how many pages we'll be manipulating here. For
Mark Fasheh60b11392007-02-16 11:46:50 -08001510 * a non-allocating write, we just change the one
Joel Becker693c2412010-07-02 17:20:27 -07001511 * page. Otherwise, we'll need a whole cluster's worth. If we're
1512 * writing past i_size, we only need enough pages to cover the
1513 * last page of the write.
Mark Fasheh9517bac2007-02-09 20:24:12 -08001514 */
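	/* A worked example, assuming 4K pages and a 32K cluster size:
	 * ocfs2_pages_per_cluster() yields 8, so an allocating write touches
	 * up to 8 pages, while a non-allocating write touches only the
	 * target page.
	 */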
Mark Fasheh9517bac2007-02-09 20:24:12 -08001515 if (new) {
Mark Fasheh3a307ff2007-05-08 17:47:32 -07001516 wc->w_num_pages = ocfs2_pages_per_cluster(inode->i_sb);
1517 start = ocfs2_align_clusters_to_page_index(inode->i_sb, cpos);
Joel Becker693c2412010-07-02 17:20:27 -07001518 /*
1519 * We need the index *past* the last page we could possibly
1520 * touch. This is the page past the end of the write or
1521 * i_size, whichever is greater.
1522 */
1523 last_byte = max(user_pos + user_len, i_size_read(inode));
1524 BUG_ON(last_byte < 1);
1525 end_index = ((last_byte - 1) >> PAGE_CACHE_SHIFT) + 1;
1526 if ((start + wc->w_num_pages) > end_index)
1527 wc->w_num_pages = end_index - start;
Mark Fasheh9517bac2007-02-09 20:24:12 -08001528 } else {
Mark Fasheh3a307ff2007-05-08 17:47:32 -07001529 wc->w_num_pages = 1;
1530 start = target_index;
Mark Fasheh9517bac2007-02-09 20:24:12 -08001531 }
1532
Mark Fasheh3a307ff2007-05-08 17:47:32 -07001533 for(i = 0; i < wc->w_num_pages; i++) {
Mark Fasheh9517bac2007-02-09 20:24:12 -08001534 index = start + i;
1535
Mark Fasheh7307de82007-05-09 15:16:19 -07001536 if (index == target_index && mmap_page) {
1537 /*
1538		 * ocfs2_page_mkwrite() is a little different
1539 * and wants us to directly use the page
1540 * passed in.
1541 */
1542 lock_page(mmap_page);
1543
Wengang Wang5cffff92011-07-24 10:36:54 -07001544 /* Exit and let the caller retry */
Mark Fasheh7307de82007-05-09 15:16:19 -07001545 if (mmap_page->mapping != mapping) {
Wengang Wang5cffff92011-07-24 10:36:54 -07001546 WARN_ON(mmap_page->mapping);
Mark Fasheh7307de82007-05-09 15:16:19 -07001547 unlock_page(mmap_page);
Wengang Wang5cffff92011-07-24 10:36:54 -07001548 ret = -EAGAIN;
Mark Fasheh7307de82007-05-09 15:16:19 -07001549 goto out;
1550 }
1551
1552 page_cache_get(mmap_page);
1553 wc->w_pages[i] = mmap_page;
Wengang Wang5cffff92011-07-24 10:36:54 -07001554 wc->w_target_locked = true;
Mark Fasheh7307de82007-05-09 15:16:19 -07001555 } else {
1556 wc->w_pages[i] = find_or_create_page(mapping, index,
1557 GFP_NOFS);
1558 if (!wc->w_pages[i]) {
1559 ret = -ENOMEM;
1560 mlog_errno(ret);
1561 goto out;
1562 }
Mark Fasheh9517bac2007-02-09 20:24:12 -08001563 }
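		/* If the backing device requires stable page writes, wait for
		 * any writeback in flight before the page is modified again.
		 */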
Jan Kara12695292013-02-21 16:42:57 -08001564 wait_for_stable_page(wc->w_pages[i]);
Mark Fasheh3a307ff2007-05-08 17:47:32 -07001565
1566 if (index == target_index)
1567 wc->w_target_page = wc->w_pages[i];
Mark Fasheh9517bac2007-02-09 20:24:12 -08001568 }
Mark Fasheh3a307ff2007-05-08 17:47:32 -07001569out:
Wengang Wang5cffff92011-07-24 10:36:54 -07001570 if (ret)
1571 wc->w_target_locked = false;
Mark Fasheh3a307ff2007-05-08 17:47:32 -07001572 return ret;
1573}
1574
1575/*
1576 * Prepare a single cluster for writing into the file.
1577 */
1578static int ocfs2_write_cluster(struct address_space *mapping,
Mark Fashehb27b7cb2007-06-18 11:22:56 -07001579 u32 phys, unsigned int unwritten,
Sunil Mushrane7432672009-08-06 16:12:58 -07001580 unsigned int should_zero,
Mark Fashehb27b7cb2007-06-18 11:22:56 -07001581 struct ocfs2_alloc_context *data_ac,
Mark Fasheh3a307ff2007-05-08 17:47:32 -07001582 struct ocfs2_alloc_context *meta_ac,
1583 struct ocfs2_write_ctxt *wc, u32 cpos,
1584 loff_t user_pos, unsigned user_len)
1585{
Sunil Mushrane7432672009-08-06 16:12:58 -07001586 int ret, i, new;
Mark Fasheh3a307ff2007-05-08 17:47:32 -07001587 u64 v_blkno, p_blkno;
1588 struct inode *inode = mapping->host;
Joel Beckerf99b9b72008-08-20 19:36:33 -07001589 struct ocfs2_extent_tree et;
Mark Fasheh3a307ff2007-05-08 17:47:32 -07001590
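	/* A physical cluster of zero means this logical cluster falls in a
	 * hole, so the write has to allocate before pages can be mapped.
	 */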
1591 new = phys == 0 ? 1 : 0;
Mark Fasheh9517bac2007-02-09 20:24:12 -08001592 if (new) {
Mark Fasheh3a307ff2007-05-08 17:47:32 -07001593 u32 tmp_pos;
1594
Mark Fasheh9517bac2007-02-09 20:24:12 -08001595 /*
1596 * This is safe to call with the page locks - it won't take
1597 * any additional semaphores or cluster locks.
1598 */
Mark Fasheh3a307ff2007-05-08 17:47:32 -07001599 tmp_pos = cpos;
Tao Ma0eb8d472008-08-18 17:38:45 +08001600 ret = ocfs2_add_inode_data(OCFS2_SB(inode->i_sb), inode,
1601 &tmp_pos, 1, 0, wc->w_di_bh,
1602 wc->w_handle, data_ac,
1603 meta_ac, NULL);
Mark Fasheh9517bac2007-02-09 20:24:12 -08001604 /*
1605 * This shouldn't happen because we must have already
1606 * calculated the correct meta data allocation required. The
1607 * internal tree allocation code should know how to increase
1608 * transaction credits itself.
1609 *
1610 * If need be, we could handle -EAGAIN for a
1611 * RESTART_TRANS here.
1612 */
1613 mlog_bug_on_msg(ret == -EAGAIN,
1614 "Inode %llu: EAGAIN return during allocation.\n",
1615 (unsigned long long)OCFS2_I(inode)->ip_blkno);
1616 if (ret < 0) {
1617 mlog_errno(ret);
1618 goto out;
1619 }
Mark Fashehb27b7cb2007-06-18 11:22:56 -07001620 } else if (unwritten) {
Joel Becker5e404e92009-02-13 03:54:22 -08001621 ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode),
1622 wc->w_di_bh);
Joel Beckerf99b9b72008-08-20 19:36:33 -07001623 ret = ocfs2_mark_extent_written(inode, &et,
Mark Fashehb27b7cb2007-06-18 11:22:56 -07001624 wc->w_handle, cpos, 1, phys,
Joel Beckerf99b9b72008-08-20 19:36:33 -07001625 meta_ac, &wc->w_dealloc);
Mark Fashehb27b7cb2007-06-18 11:22:56 -07001626 if (ret < 0) {
1627 mlog_errno(ret);
1628 goto out;
1629 }
Mark Fasheh9517bac2007-02-09 20:24:12 -08001630 }
1631
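	/* When zeroing is needed, the whole cluster gets written, so map from
	 * its first block. Otherwise only the blocks backing the user's byte
	 * range matter, so map from the block containing user_pos.
	 */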
Mark Fashehb27b7cb2007-06-18 11:22:56 -07001632 if (should_zero)
1633 v_blkno = ocfs2_clusters_to_blocks(inode->i_sb, cpos);
1634 else
1635 v_blkno = user_pos >> inode->i_sb->s_blocksize_bits;
1636
Mark Fasheh3a307ff2007-05-08 17:47:32 -07001637 /*
1638 * The only reason this should fail is due to an inability to
1639 * find the extent added.
1640 */
Mark Fasheh49cb8d22007-03-09 16:21:46 -08001641 ret = ocfs2_extent_map_get_blocks(inode, v_blkno, &p_blkno, NULL,
1642 NULL);
Mark Fasheh9517bac2007-02-09 20:24:12 -08001643 if (ret < 0) {
jiangyiwen61fb9ea2014-12-10 15:42:02 -08001644 mlog(ML_ERROR, "Get physical blkno failed for inode %llu, "
Mark Fasheh3a307ff2007-05-08 17:47:32 -07001645 "at logical block %llu",
1646 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1647 (unsigned long long)v_blkno);
Mark Fasheh9517bac2007-02-09 20:24:12 -08001648 goto out;
1649 }
1650
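	/* We either just allocated this cluster or found an existing extent,
	 * so a physical block number of zero here would mean a corrupt or
	 * inconsistent extent map.
	 */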
1651 BUG_ON(p_blkno == 0);
1652
Mark Fasheh3a307ff2007-05-08 17:47:32 -07001653 for(i = 0; i < wc->w_num_pages; i++) {
1654 int tmpret;
Mark Fasheh9517bac2007-02-09 20:24:12 -08001655
Mark Fasheh3a307ff2007-05-08 17:47:32 -07001656 tmpret = ocfs2_prepare_page_for_write(inode, &p_blkno, wc,
1657 wc->w_pages[i], cpos,
Mark Fashehb27b7cb2007-06-18 11:22:56 -07001658 user_pos, user_len,
1659 should_zero);
Mark Fasheh3a307ff2007-05-08 17:47:32 -07001660 if (tmpret) {
1661 mlog_errno(tmpret);
1662 if (ret == 0)
Wengang Wangcbfa9632009-07-13 11:38:23 +08001663 ret = tmpret;
Mark Fasheh3a307ff2007-05-08 17:47:32 -07001664 }
Mark Fasheh9517bac2007-02-09 20:24:12 -08001665 }
1666
Mark Fasheh3a307ff2007-05-08 17:47:32 -07001667 /*
1668 * We only have cleanup to do in case of allocating write.
1669 */
1670 if (ret && new)
1671 ocfs2_write_failure(inode, wc, user_pos, user_len);
1672
Mark Fasheh9517bac2007-02-09 20:24:12 -08001673out:
Mark Fasheh9517bac2007-02-09 20:24:12 -08001674
Mark Fasheh3a307ff2007-05-08 17:47:32 -07001675 return ret;
Mark Fasheh9517bac2007-02-09 20:24:12 -08001676}
1677
Mark Fasheh0d172ba2007-05-14 18:09:54 -07001678static int ocfs2_write_cluster_by_desc(struct address_space *mapping,
1679 struct ocfs2_alloc_context *data_ac,
1680 struct ocfs2_alloc_context *meta_ac,
1681 struct ocfs2_write_ctxt *wc,
1682 loff_t pos, unsigned len)
1683{
1684 int ret, i;
Mark Fashehdb562462007-09-17 09:06:29 -07001685 loff_t cluster_off;
1686 unsigned int local_len = len;
Mark Fasheh0d172ba2007-05-14 18:09:54 -07001687 struct ocfs2_write_cluster_desc *desc;
Mark Fashehdb562462007-09-17 09:06:29 -07001688 struct ocfs2_super *osb = OCFS2_SB(mapping->host->i_sb);
Mark Fasheh0d172ba2007-05-14 18:09:54 -07001689
1690 for (i = 0; i < wc->w_clen; i++) {
1691 desc = &wc->w_desc[i];
1692
Mark Fashehdb562462007-09-17 09:06:29 -07001693 /*
1694 * We have to make sure that the total write passed in
1695 * doesn't extend past a single cluster.
1696 */
1697 local_len = len;
1698 cluster_off = pos & (osb->s_clustersize - 1);
1699 if ((cluster_off + local_len) > osb->s_clustersize)
1700 local_len = osb->s_clustersize - cluster_off;
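		/* Illustrative example (assumed sizes): with a 32K cluster, a
		 * write of len = 10K at pos = 30K has cluster_off = 30K, so
		 * local_len is clamped to 2K above; the remaining 8K is
		 * handled by the next loop iteration.
		 */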
1701
Mark Fashehb27b7cb2007-06-18 11:22:56 -07001702 ret = ocfs2_write_cluster(mapping, desc->c_phys,
Sunil Mushrane7432672009-08-06 16:12:58 -07001703 desc->c_unwritten,
1704 desc->c_needs_zero,
1705 data_ac, meta_ac,
Mark Fashehdb562462007-09-17 09:06:29 -07001706 wc, desc->c_cpos, pos, local_len);
Mark Fasheh0d172ba2007-05-14 18:09:54 -07001707 if (ret) {
1708 mlog_errno(ret);
1709 goto out;
1710 }
Mark Fashehdb562462007-09-17 09:06:29 -07001711
1712 len -= local_len;
1713 pos += local_len;
Mark Fasheh0d172ba2007-05-14 18:09:54 -07001714 }
1715
1716 ret = 0;
1717out:
1718 return ret;
1719}
1720
Mark Fasheh9517bac2007-02-09 20:24:12 -08001721/*
Mark Fasheh3a307ff2007-05-08 17:47:32 -07001722 * ocfs2_write_end() wants to know which parts of the target page it
1723 * should complete the write on. It's easiest to compute them ahead of
1724 * time when a more complete view of the write is available.
Mark Fasheh9517bac2007-02-09 20:24:12 -08001725 */
Mark Fasheh3a307ff2007-05-08 17:47:32 -07001726static void ocfs2_set_target_boundaries(struct ocfs2_super *osb,
1727 struct ocfs2_write_ctxt *wc,
1728 loff_t pos, unsigned len, int alloc)
Mark Fasheh9517bac2007-02-09 20:24:12 -08001729{
Mark Fasheh3a307ff2007-05-08 17:47:32 -07001730 struct ocfs2_write_cluster_desc *desc;
1731
1732 wc->w_target_from = pos & (PAGE_CACHE_SIZE - 1);
1733 wc->w_target_to = wc->w_target_from + len;
1734
1735 if (alloc == 0)
1736 return;
1737
1738 /*
1739 * Allocating write - we may have different boundaries based
1740 * on page size and cluster size.
1741 *
1742 * NOTE: We can no longer compute one value from the other as
1743 * the actual write length and user provided length may be
1744 * different.
1745 */
1746
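	/* For example (assumed sizes): with 64K pages and 4K clusters,
	 * w_large_pages is set and one page spans 16 clusters, so the target
	 * range below may be widened out to the boundaries of newly
	 * allocated clusters.
	 */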
1747 if (wc->w_large_pages) {
1748 /*
1749 * We only care about the 1st and last cluster within
Mark Fashehb27b7cb2007-06-18 11:22:56 -07001750 * our range and whether they should be zero'd or not. Either
Mark Fasheh3a307ff2007-05-08 17:47:32 -07001751 * value may be extended out to the start/end of a
1752 * newly allocated cluster.
1753 */
1754 desc = &wc->w_desc[0];
Sunil Mushrane7432672009-08-06 16:12:58 -07001755 if (desc->c_needs_zero)
Mark Fasheh3a307ff2007-05-08 17:47:32 -07001756 ocfs2_figure_cluster_boundaries(osb,
1757 desc->c_cpos,
1758 &wc->w_target_from,
1759 NULL);
1760
1761 desc = &wc->w_desc[wc->w_clen - 1];
Sunil Mushrane7432672009-08-06 16:12:58 -07001762 if (desc->c_needs_zero)
Mark Fasheh3a307ff2007-05-08 17:47:32 -07001763 ocfs2_figure_cluster_boundaries(osb,
1764 desc->c_cpos,
1765 NULL,
1766 &wc->w_target_to);
1767 } else {
1768 wc->w_target_from = 0;
1769 wc->w_target_to = PAGE_CACHE_SIZE;
1770 }
1771}
1772
Mark Fasheh0d172ba2007-05-14 18:09:54 -07001773/*
1774 * Populate each single-cluster write descriptor in the write context
1775 * with information about the i/o to be done.
Mark Fashehb27b7cb2007-06-18 11:22:56 -07001776 *
1777 * Returns the number of clusters that will have to be allocated, as
1778 * well as a worst case estimate of the number of extent records that
1779 * would have to be created during a write to an unwritten region.
Mark Fasheh0d172ba2007-05-14 18:09:54 -07001780 */
1781static int ocfs2_populate_write_desc(struct inode *inode,
1782 struct ocfs2_write_ctxt *wc,
Mark Fashehb27b7cb2007-06-18 11:22:56 -07001783 unsigned int *clusters_to_alloc,
1784 unsigned int *extents_to_split)
Mark Fasheh3a307ff2007-05-08 17:47:32 -07001785{
Mark Fasheh0d172ba2007-05-14 18:09:54 -07001786 int ret;
Mark Fasheh3a307ff2007-05-08 17:47:32 -07001787 struct ocfs2_write_cluster_desc *desc;
Mark Fasheh0d172ba2007-05-14 18:09:54 -07001788 unsigned int num_clusters = 0;
Mark Fashehb27b7cb2007-06-18 11:22:56 -07001789 unsigned int ext_flags = 0;
Mark Fasheh0d172ba2007-05-14 18:09:54 -07001790 u32 phys = 0;
1791 int i;
Mark Fasheh9517bac2007-02-09 20:24:12 -08001792
Mark Fashehb27b7cb2007-06-18 11:22:56 -07001793 *clusters_to_alloc = 0;
1794 *extents_to_split = 0;
1795
Mark Fasheh3a307ff2007-05-08 17:47:32 -07001796 for (i = 0; i < wc->w_clen; i++) {
1797 desc = &wc->w_desc[i];
1798 desc->c_cpos = wc->w_cpos + i;
1799
1800 if (num_clusters == 0) {
Mark Fashehb27b7cb2007-06-18 11:22:56 -07001801 /*
1802 * Need to look up the next extent record.
1803 */
Mark Fasheh3a307ff2007-05-08 17:47:32 -07001804 ret = ocfs2_get_clusters(inode, desc->c_cpos, &phys,
Mark Fashehb27b7cb2007-06-18 11:22:56 -07001805 &num_clusters, &ext_flags);
Mark Fasheh3a307ff2007-05-08 17:47:32 -07001806 if (ret) {
1807 mlog_errno(ret);
Mark Fasheh607d44a2007-05-09 15:14:45 -07001808 goto out;
Mark Fasheh3a307ff2007-05-08 17:47:32 -07001809 }
Mark Fashehb27b7cb2007-06-18 11:22:56 -07001810
Tao Ma293b2f72009-08-25 08:02:48 +08001811			/* We should have already CoWed the refcounted extent. */
1812 BUG_ON(ext_flags & OCFS2_EXT_REFCOUNTED);
1813
Mark Fashehb27b7cb2007-06-18 11:22:56 -07001814 /*
1815 * Assume worst case - that we're writing in
1816 * the middle of the extent.
1817 *
1818 * We can assume that the write proceeds from
1819 * left to right, in which case the extent
1820 * insert code is smart enough to coalesce the
1821 * next splits into the previous records created.
1822 */
1823 if (ext_flags & OCFS2_EXT_UNWRITTEN)
1824 *extents_to_split = *extents_to_split + 2;
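			/* Writing into the middle of an unwritten extent can
			 * split it into three pieces - unwritten head, written
			 * middle, unwritten tail - i.e. up to two additional
			 * extent records, hence the 2 above.
			 */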
Mark Fasheh3a307ff2007-05-08 17:47:32 -07001825 } else if (phys) {
1826 /*
1827 * Only increment phys if it doesn't describe
1828 * a hole.
1829 */
1830 phys++;
1831 }
1832
Sunil Mushrane7432672009-08-06 16:12:58 -07001833 /*
1834 * If w_first_new_cpos is < UINT_MAX, we have a non-sparse
1835 * file that got extended. w_first_new_cpos tells us
1836 * where the newly allocated clusters are so we can
1837 * zero them.
1838 */
1839 if (desc->c_cpos >= wc->w_first_new_cpos) {
1840 BUG_ON(phys == 0);
1841 desc->c_needs_zero = 1;
1842 }
1843
Mark Fasheh3a307ff2007-05-08 17:47:32 -07001844 desc->c_phys = phys;
1845 if (phys == 0) {
1846 desc->c_new = 1;
Sunil Mushrane7432672009-08-06 16:12:58 -07001847 desc->c_needs_zero = 1;
Mark Fasheh0d172ba2007-05-14 18:09:54 -07001848 *clusters_to_alloc = *clusters_to_alloc + 1;
Mark Fasheh3a307ff2007-05-08 17:47:32 -07001849 }
Sunil Mushrane7432672009-08-06 16:12:58 -07001850
1851 if (ext_flags & OCFS2_EXT_UNWRITTEN) {
Mark Fashehb27b7cb2007-06-18 11:22:56 -07001852 desc->c_unwritten = 1;
Sunil Mushrane7432672009-08-06 16:12:58 -07001853 desc->c_needs_zero = 1;
1854 }
Mark Fasheh3a307ff2007-05-08 17:47:32 -07001855
1856 num_clusters--;
Mark Fasheh9517bac2007-02-09 20:24:12 -08001857 }
1858
Mark Fasheh0d172ba2007-05-14 18:09:54 -07001859 ret = 0;
1860out:
1861 return ret;
1862}
1863
Mark Fasheh1afc32b2007-09-07 14:46:51 -07001864static int ocfs2_write_begin_inline(struct address_space *mapping,
1865 struct inode *inode,
1866 struct ocfs2_write_ctxt *wc)
1867{
1868 int ret;
1869 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1870 struct page *page;
1871 handle_t *handle;
1872 struct ocfs2_dinode *di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
1873
Junxiao Bif775da22014-10-09 15:25:15 -07001874 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
1875 if (IS_ERR(handle)) {
1876 ret = PTR_ERR(handle);
1877 mlog_errno(ret);
1878 goto out;
1879 }
1880
Mark Fasheh1afc32b2007-09-07 14:46:51 -07001881 page = find_or_create_page(mapping, 0, GFP_NOFS);
1882 if (!page) {
Junxiao Bif775da22014-10-09 15:25:15 -07001883 ocfs2_commit_trans(osb, handle);
Mark Fasheh1afc32b2007-09-07 14:46:51 -07001884 ret = -ENOMEM;
1885 mlog_errno(ret);
1886 goto out;
1887 }
1888 /*
1889 * If we don't set w_num_pages then this page won't get unlocked
1890 * and freed on cleanup of the write context.
1891 */
1892 wc->w_pages[0] = wc->w_target_page = page;
1893 wc->w_num_pages = 1;
1894
Joel Becker0cf2f762009-02-12 16:41:25 -08001895 ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), wc->w_di_bh,
Joel Becker13723d02008-10-17 19:25:01 -07001896 OCFS2_JOURNAL_ACCESS_WRITE);
Mark Fasheh1afc32b2007-09-07 14:46:51 -07001897 if (ret) {
1898 ocfs2_commit_trans(osb, handle);
1899
1900 mlog_errno(ret);
1901 goto out;
1902 }
1903
1904 if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL))
1905 ocfs2_set_inode_data_inline(inode, di);
1906
1907 if (!PageUptodate(page)) {
1908 ret = ocfs2_read_inline_data(inode, page, wc->w_di_bh);
1909 if (ret) {
1910 ocfs2_commit_trans(osb, handle);
1911
1912 goto out;
1913 }
1914 }
1915
1916 wc->w_handle = handle;
1917out:
1918 return ret;
1919}
1920
1921int ocfs2_size_fits_inline_data(struct buffer_head *di_bh, u64 new_size)
1922{
1923 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
1924
Mark Fasheh0d8a4e02007-11-20 11:48:41 -08001925 if (new_size <= le16_to_cpu(di->id2.i_data.id_count))
Mark Fasheh1afc32b2007-09-07 14:46:51 -07001926 return 1;
1927 return 0;
1928}
1929
1930static int ocfs2_try_to_write_inline_data(struct address_space *mapping,
1931 struct inode *inode, loff_t pos,
1932 unsigned len, struct page *mmap_page,
1933 struct ocfs2_write_ctxt *wc)
1934{
1935 int ret, written = 0;
1936 loff_t end = pos + len;
1937 struct ocfs2_inode_info *oi = OCFS2_I(inode);
Tiger Yangd9ae49d2009-03-05 11:06:15 +08001938 struct ocfs2_dinode *di = NULL;
Mark Fasheh1afc32b2007-09-07 14:46:51 -07001939
Tao Ma95581562011-02-22 21:33:59 +08001940 trace_ocfs2_try_to_write_inline_data((unsigned long long)oi->ip_blkno,
1941 len, (unsigned long long)pos,
1942 oi->ip_dyn_features);
Mark Fasheh1afc32b2007-09-07 14:46:51 -07001943
1944 /*
1945	 * Handle inodes which already have inline data first.
1946 */
1947 if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
1948 if (mmap_page == NULL &&
1949 ocfs2_size_fits_inline_data(wc->w_di_bh, end))
1950 goto do_inline_write;
1951
1952 /*
1953 * The write won't fit - we have to give this inode an
1954 * inline extent list now.
1955 */
1956 ret = ocfs2_convert_inline_data_to_extents(inode, wc->w_di_bh);
1957 if (ret)
1958 mlog_errno(ret);
1959 goto out;
1960 }
1961
1962 /*
1963 * Check whether the inode can accept inline data.
1964 */
1965 if (oi->ip_clusters != 0 || i_size_read(inode) != 0)
1966 return 0;
1967
1968 /*
1969 * Check whether the write can fit.
1970 */
Tiger Yangd9ae49d2009-03-05 11:06:15 +08001971 di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
1972 if (mmap_page ||
1973 end > ocfs2_max_inline_data_with_xattr(inode->i_sb, di))
Mark Fasheh1afc32b2007-09-07 14:46:51 -07001974 return 0;
1975
1976do_inline_write:
1977 ret = ocfs2_write_begin_inline(mapping, inode, wc);
1978 if (ret) {
1979 mlog_errno(ret);
1980 goto out;
1981 }
1982
1983 /*
1984 * This signals to the caller that the data can be written
1985 * inline.
1986 */
1987 written = 1;
1988out:
1989 return written ? written : ret;
1990}
1991
Mark Fasheh65ed39d2007-08-28 17:13:23 -07001992/*
1993 * This function only does anything for file systems which can't
1994 * handle sparse files.
1995 *
1996 * What we want to do here is fill in any hole between the current end
1997 * of allocation and the end of our write. That way the rest of the
1998 * write path can treat it as a non-allocating write, which has no
1999 * special case code for sparse/nonsparse files.
2000 */
Joel Becker56934862010-07-01 15:13:31 -07002001static int ocfs2_expand_nonsparse_inode(struct inode *inode,
2002 struct buffer_head *di_bh,
2003 loff_t pos, unsigned len,
Mark Fasheh65ed39d2007-08-28 17:13:23 -07002004 struct ocfs2_write_ctxt *wc)
2005{
2006 int ret;
Mark Fasheh65ed39d2007-08-28 17:13:23 -07002007 loff_t newsize = pos + len;
2008
Joel Becker56934862010-07-01 15:13:31 -07002009 BUG_ON(ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)));
Mark Fasheh65ed39d2007-08-28 17:13:23 -07002010
2011 if (newsize <= i_size_read(inode))
2012 return 0;
2013
Joel Becker56934862010-07-01 15:13:31 -07002014 ret = ocfs2_extend_no_holes(inode, di_bh, newsize, pos);
Mark Fasheh65ed39d2007-08-28 17:13:23 -07002015 if (ret)
2016 mlog_errno(ret);
2017
Sunil Mushrane7432672009-08-06 16:12:58 -07002018 wc->w_first_new_cpos =
2019 ocfs2_clusters_for_bytes(inode->i_sb, i_size_read(inode));
2020
Mark Fasheh65ed39d2007-08-28 17:13:23 -07002021 return ret;
2022}
2023
Joel Becker56934862010-07-01 15:13:31 -07002024static int ocfs2_zero_tail(struct inode *inode, struct buffer_head *di_bh,
2025 loff_t pos)
2026{
2027 int ret = 0;
2028
2029 BUG_ON(!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)));
2030 if (pos > i_size_read(inode))
2031 ret = ocfs2_zero_extend(inode, di_bh, pos);
2032
2033 return ret;
2034}
2035
Tao Ma50308d82010-11-04 15:14:11 +08002036/*
2037 * Try to flush the truncate log if we can free enough clusters from it.
2038 * As for the return value, "< 0" means error, "0" means no space, and "1"
2039 * means we have freed enough space; the caller may try to allocate again.
2040 */
2041static int ocfs2_try_to_free_truncate_log(struct ocfs2_super *osb,
2042 unsigned int needed)
2043{
2044 tid_t target;
2045 int ret = 0;
2046 unsigned int truncated_clusters;
2047
2048 mutex_lock(&osb->osb_tl_inode->i_mutex);
2049 truncated_clusters = osb->truncated_clusters;
2050 mutex_unlock(&osb->osb_tl_inode->i_mutex);
2051
2052 /*
2053 * Check whether we can succeed in allocating if we free
2054 * the truncate log.
2055 */
2056 if (truncated_clusters < needed)
2057 goto out;
2058
2059 ret = ocfs2_flush_truncate_log(osb);
2060 if (ret) {
2061 mlog_errno(ret);
2062 goto out;
2063 }
2064
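	/* Flushing the truncate log only starts a journal transaction; the
	 * freed clusters become allocatable once that transaction commits,
	 * so kick the commit and wait for it to complete.
	 */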
2065 if (jbd2_journal_start_commit(osb->journal->j_journal, &target)) {
2066 jbd2_log_wait_commit(osb->journal->j_journal, target);
2067 ret = 1;
2068 }
2069out:
2070 return ret;
2071}
2072
Tao Ma0378da0f2010-08-12 10:25:28 +08002073int ocfs2_write_begin_nolock(struct file *filp,
2074 struct address_space *mapping,
Mark Fasheh0d172ba2007-05-14 18:09:54 -07002075 loff_t pos, unsigned len, unsigned flags,
2076 struct page **pagep, void **fsdata,
2077 struct buffer_head *di_bh, struct page *mmap_page)
2078{
Sunil Mushrane7432672009-08-06 16:12:58 -07002079 int ret, cluster_of_pages, credits = OCFS2_INODE_UPDATE_CREDITS;
Tao Ma50308d82010-11-04 15:14:11 +08002080 unsigned int clusters_to_alloc, extents_to_split, clusters_need = 0;
Mark Fasheh0d172ba2007-05-14 18:09:54 -07002081 struct ocfs2_write_ctxt *wc;
2082 struct inode *inode = mapping->host;
2083 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2084 struct ocfs2_dinode *di;
2085 struct ocfs2_alloc_context *data_ac = NULL;
2086 struct ocfs2_alloc_context *meta_ac = NULL;
2087 handle_t *handle;
Joel Beckerf99b9b72008-08-20 19:36:33 -07002088 struct ocfs2_extent_tree et;
Tao Ma50308d82010-11-04 15:14:11 +08002089 int try_free = 1, ret1;
Mark Fasheh0d172ba2007-05-14 18:09:54 -07002090
Tao Ma50308d82010-11-04 15:14:11 +08002091try_again:
Mark Fasheh0d172ba2007-05-14 18:09:54 -07002092 ret = ocfs2_alloc_write_ctxt(&wc, osb, pos, len, di_bh);
2093 if (ret) {
2094 mlog_errno(ret);
2095 return ret;
2096 }
2097
Mark Fasheh1afc32b2007-09-07 14:46:51 -07002098 if (ocfs2_supports_inline_data(osb)) {
2099 ret = ocfs2_try_to_write_inline_data(mapping, inode, pos, len,
2100 mmap_page, wc);
2101 if (ret == 1) {
2102 ret = 0;
2103 goto success;
2104 }
2105 if (ret < 0) {
2106 mlog_errno(ret);
2107 goto out;
2108 }
2109 }
2110
Joel Becker56934862010-07-01 15:13:31 -07002111 if (ocfs2_sparse_alloc(osb))
2112 ret = ocfs2_zero_tail(inode, di_bh, pos);
2113 else
2114 ret = ocfs2_expand_nonsparse_inode(inode, di_bh, pos, len,
2115 wc);
Mark Fasheh65ed39d2007-08-28 17:13:23 -07002116 if (ret) {
2117 mlog_errno(ret);
2118 goto out;
2119 }
2120
Tao Ma293b2f72009-08-25 08:02:48 +08002121 ret = ocfs2_check_range_for_refcount(inode, pos, len);
2122 if (ret < 0) {
2123 mlog_errno(ret);
2124 goto out;
2125 } else if (ret == 1) {
Tao Ma50308d82010-11-04 15:14:11 +08002126 clusters_need = wc->w_clen;
Tiger Yangc7dd3392013-08-13 16:00:58 -07002127 ret = ocfs2_refcount_cow(inode, di_bh,
Tao Ma37f8a2b2009-08-26 09:47:28 +08002128 wc->w_cpos, wc->w_clen, UINT_MAX);
Tao Ma293b2f72009-08-25 08:02:48 +08002129 if (ret) {
2130 mlog_errno(ret);
2131 goto out;
2132 }
2133 }
2134
Mark Fashehb27b7cb2007-06-18 11:22:56 -07002135 ret = ocfs2_populate_write_desc(inode, wc, &clusters_to_alloc,
2136 &extents_to_split);
Mark Fasheh0d172ba2007-05-14 18:09:54 -07002137 if (ret) {
2138 mlog_errno(ret);
2139 goto out;
2140 }
Tao Ma50308d82010-11-04 15:14:11 +08002141 clusters_need += clusters_to_alloc;
Mark Fasheh0d172ba2007-05-14 18:09:54 -07002142
2143 di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
2144
Tao Ma95581562011-02-22 21:33:59 +08002145 trace_ocfs2_write_begin_nolock(
2146 (unsigned long long)OCFS2_I(inode)->ip_blkno,
2147 (long long)i_size_read(inode),
2148 le32_to_cpu(di->i_clusters),
2149 pos, len, flags, mmap_page,
2150 clusters_to_alloc, extents_to_split);
2151
Mark Fasheh3a307ff2007-05-08 17:47:32 -07002152 /*
2153 * We set w_target_from, w_target_to here so that
2154 * ocfs2_write_end() knows which range in the target page to
2155 * write out. An allocation requires that we write the entire
2156 * cluster range.
2157 */
Mark Fashehb27b7cb2007-06-18 11:22:56 -07002158 if (clusters_to_alloc || extents_to_split) {
Mark Fasheh3a307ff2007-05-08 17:47:32 -07002159 /*
2160 * XXX: We are stretching the limits of
Mark Fashehb27b7cb2007-06-18 11:22:56 -07002161 * ocfs2_lock_allocators(). It greatly over-estimates
Mark Fasheh3a307ff2007-05-08 17:47:32 -07002162 * the work to be done.
2163 */
Joel Becker5e404e92009-02-13 03:54:22 -08002164 ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode),
2165 wc->w_di_bh);
Joel Beckerf99b9b72008-08-20 19:36:33 -07002166 ret = ocfs2_lock_allocators(inode, &et,
Tao Ma231b87d2008-08-18 17:38:42 +08002167 clusters_to_alloc, extents_to_split,
Joel Beckerf99b9b72008-08-20 19:36:33 -07002168 &data_ac, &meta_ac);
Mark Fasheh9517bac2007-02-09 20:24:12 -08002169 if (ret) {
2170 mlog_errno(ret);
Mark Fasheh607d44a2007-05-09 15:14:45 -07002171 goto out;
Mark Fasheh9517bac2007-02-09 20:24:12 -08002172 }
2173
Mark Fasheh4fe370a2009-12-07 13:15:40 -08002174 if (data_ac)
2175 data_ac->ac_resv = &OCFS2_I(inode)->ip_la_data_resv;
2176
Tao Ma811f9332008-08-18 17:38:43 +08002177 credits = ocfs2_calc_extend_credits(inode->i_sb,
Goldwyn Rodrigues06f9da62013-11-12 15:06:52 -08002178 &di->id2.i_list);
Mark Fasheh3a307ff2007-05-08 17:47:32 -07002179
Mark Fasheh9517bac2007-02-09 20:24:12 -08002180 }
2181
Sunil Mushrane7432672009-08-06 16:12:58 -07002182 /*
2183 * We have to zero sparse allocated clusters, unwritten extent clusters,
2184 * and non-sparse clusters we just extended. For non-sparse writes,
2185 * we know zeros will only be needed in the first and/or last cluster.
2186 */
2187 if (clusters_to_alloc || extents_to_split ||
Sunil Mushran8379e7c2009-09-04 11:12:01 -07002188 (wc->w_clen && (wc->w_desc[0].c_needs_zero ||
2189 wc->w_desc[wc->w_clen - 1].c_needs_zero)))
Sunil Mushrane7432672009-08-06 16:12:58 -07002190 cluster_of_pages = 1;
2191 else
2192 cluster_of_pages = 0;
2193
2194 ocfs2_set_target_boundaries(osb, wc, pos, len, cluster_of_pages);
Mark Fasheh3a307ff2007-05-08 17:47:32 -07002195
Mark Fasheh9517bac2007-02-09 20:24:12 -08002196 handle = ocfs2_start_trans(osb, credits);
2197 if (IS_ERR(handle)) {
2198 ret = PTR_ERR(handle);
2199 mlog_errno(ret);
Mark Fasheh607d44a2007-05-09 15:14:45 -07002200 goto out;
Mark Fasheh9517bac2007-02-09 20:24:12 -08002201 }
2202
Mark Fasheh3a307ff2007-05-08 17:47:32 -07002203 wc->w_handle = handle;
Mark Fasheh9517bac2007-02-09 20:24:12 -08002204
Christoph Hellwig5dd40562010-03-03 09:05:00 -05002205 if (clusters_to_alloc) {
2206 ret = dquot_alloc_space_nodirty(inode,
2207 ocfs2_clusters_to_bytes(osb->sb, clusters_to_alloc));
2208 if (ret)
2209 goto out_commit;
Jan Karaa90714c2008-10-09 19:38:40 +02002210 }
yangwenfang7f27ec92015-09-04 15:44:45 -07002211
Joel Becker0cf2f762009-02-12 16:41:25 -08002212 ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), wc->w_di_bh,
Joel Becker13723d02008-10-17 19:25:01 -07002213 OCFS2_JOURNAL_ACCESS_WRITE);
Mark Fasheh9517bac2007-02-09 20:24:12 -08002214 if (ret) {
2215 mlog_errno(ret);
Jan Karaa90714c2008-10-09 19:38:40 +02002216 goto out_quota;
Mark Fasheh9517bac2007-02-09 20:24:12 -08002217 }
2218
Mark Fasheh3a307ff2007-05-08 17:47:32 -07002219 /*
2220 * Fill our page array first. That way we've grabbed enough so
2221 * that we can zero and flush if we error after adding the
2222 * extent.
2223 */
Joel Becker693c2412010-07-02 17:20:27 -07002224 ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos, len,
Sunil Mushrane7432672009-08-06 16:12:58 -07002225 cluster_of_pages, mmap_page);
Wengang Wang5cffff92011-07-24 10:36:54 -07002226 if (ret && ret != -EAGAIN) {
Mark Fasheh9517bac2007-02-09 20:24:12 -08002227 mlog_errno(ret);
Jan Karaa90714c2008-10-09 19:38:40 +02002228 goto out_quota;
Mark Fasheh3a307ff2007-05-08 17:47:32 -07002229 }
Mark Fasheh9517bac2007-02-09 20:24:12 -08002230
Wengang Wang5cffff92011-07-24 10:36:54 -07002231 /*
2232 * ocfs2_grab_pages_for_write() returns -EAGAIN if it could not lock
2233 * the target page. In this case, we exit with no error and no target
2234 * page. This will trigger the caller, page_mkwrite(), to re-try
2235 * the operation.
2236 */
2237 if (ret == -EAGAIN) {
2238 BUG_ON(wc->w_target_page);
2239 ret = 0;
2240 goto out_quota;
2241 }
2242
Mark Fasheh0d172ba2007-05-14 18:09:54 -07002243 ret = ocfs2_write_cluster_by_desc(mapping, data_ac, meta_ac, wc, pos,
2244 len);
2245 if (ret) {
2246 mlog_errno(ret);
Jan Karaa90714c2008-10-09 19:38:40 +02002247 goto out_quota;
Mark Fasheh3a307ff2007-05-08 17:47:32 -07002248 }
2249
2250 if (data_ac)
2251 ocfs2_free_alloc_context(data_ac);
2252 if (meta_ac)
2253 ocfs2_free_alloc_context(meta_ac);
2254
Mark Fasheh1afc32b2007-09-07 14:46:51 -07002255success:
Mark Fasheh3a307ff2007-05-08 17:47:32 -07002256 *pagep = wc->w_target_page;
2257 *fsdata = wc;
2258 return 0;
Jan Karaa90714c2008-10-09 19:38:40 +02002259out_quota:
2260 if (clusters_to_alloc)
Christoph Hellwig5dd40562010-03-03 09:05:00 -05002261 dquot_free_space(inode,
Jan Karaa90714c2008-10-09 19:38:40 +02002262 ocfs2_clusters_to_bytes(osb->sb, clusters_to_alloc));
Mark Fasheh9517bac2007-02-09 20:24:12 -08002263out_commit:
2264 ocfs2_commit_trans(osb, handle);
2265
Mark Fasheh9517bac2007-02-09 20:24:12 -08002266out:
Mark Fasheh3a307ff2007-05-08 17:47:32 -07002267 ocfs2_free_write_ctxt(wc);
2268
Xue jiufeib1214e42013-11-12 15:07:06 -08002269 if (data_ac) {
Mark Fasheh9517bac2007-02-09 20:24:12 -08002270 ocfs2_free_alloc_context(data_ac);
Xue jiufeib1214e42013-11-12 15:07:06 -08002271 data_ac = NULL;
2272 }
2273 if (meta_ac) {
Mark Fasheh9517bac2007-02-09 20:24:12 -08002274 ocfs2_free_alloc_context(meta_ac);
Xue jiufeib1214e42013-11-12 15:07:06 -08002275 meta_ac = NULL;
2276 }
Tao Ma50308d82010-11-04 15:14:11 +08002277
2278 if (ret == -ENOSPC && try_free) {
2279 /*
2280		 * Try to free some clusters from the truncate log so that
2281		 * we have enough clusters to allocate.
2282 */
2283 try_free = 0;
2284
2285 ret1 = ocfs2_try_to_free_truncate_log(osb, clusters_need);
2286 if (ret1 == 1)
2287 goto try_again;
2288
2289 if (ret1 < 0)
2290 mlog_errno(ret1);
2291 }
2292
Mark Fasheh3a307ff2007-05-08 17:47:32 -07002293 return ret;
2294}
Mark Fasheh9517bac2007-02-09 20:24:12 -08002295
Nick Pigginb6af1bc2007-10-16 01:25:24 -07002296static int ocfs2_write_begin(struct file *file, struct address_space *mapping,
2297 loff_t pos, unsigned len, unsigned flags,
2298 struct page **pagep, void **fsdata)
Mark Fasheh607d44a2007-05-09 15:14:45 -07002299{
2300 int ret;
2301 struct buffer_head *di_bh = NULL;
2302 struct inode *inode = mapping->host;
2303
Mark Fashehe63aecb62007-10-18 15:30:42 -07002304 ret = ocfs2_inode_lock(inode, &di_bh, 1);
Mark Fasheh607d44a2007-05-09 15:14:45 -07002305 if (ret) {
2306 mlog_errno(ret);
2307 return ret;
2308 }
2309
2310 /*
2311 * Take alloc sem here to prevent concurrent lookups. That way
2312 * the mapping, zeroing and tree manipulation within
2313 * ocfs2_write() will be safe against ->readpage(). This
2314 * should also serve to lock out allocation from a shared
2315 * writeable region.
2316 */
2317 down_write(&OCFS2_I(inode)->ip_alloc_sem);
2318
Tao Ma0378da0f2010-08-12 10:25:28 +08002319 ret = ocfs2_write_begin_nolock(file, mapping, pos, len, flags, pagep,
Mark Fasheh7307de82007-05-09 15:16:19 -07002320 fsdata, di_bh, NULL);
Mark Fasheh607d44a2007-05-09 15:14:45 -07002321 if (ret) {
2322 mlog_errno(ret);
Mark Fashehc934a922007-10-18 15:23:46 -07002323 goto out_fail;
Mark Fasheh607d44a2007-05-09 15:14:45 -07002324 }
2325
2326 brelse(di_bh);
2327
2328 return 0;
2329
Mark Fasheh607d44a2007-05-09 15:14:45 -07002330out_fail:
2331 up_write(&OCFS2_I(inode)->ip_alloc_sem);
2332
2333 brelse(di_bh);
Mark Fashehe63aecb62007-10-18 15:30:42 -07002334 ocfs2_inode_unlock(inode, 1);
Mark Fasheh607d44a2007-05-09 15:14:45 -07002335
2336 return ret;
2337}
2338
Mark Fasheh1afc32b2007-09-07 14:46:51 -07002339static void ocfs2_write_end_inline(struct inode *inode, loff_t pos,
2340 unsigned len, unsigned *copied,
2341 struct ocfs2_dinode *di,
2342 struct ocfs2_write_ctxt *wc)
2343{
2344 void *kaddr;
2345
2346 if (unlikely(*copied < len)) {
2347 if (!PageUptodate(wc->w_target_page)) {
2348 *copied = 0;
2349 return;
2350 }
2351 }
2352
Cong Wangc4bc8dc2011-11-25 23:14:34 +08002353 kaddr = kmap_atomic(wc->w_target_page);
Mark Fasheh1afc32b2007-09-07 14:46:51 -07002354 memcpy(di->id2.i_data.id_data + pos, kaddr + pos, *copied);
Cong Wangc4bc8dc2011-11-25 23:14:34 +08002355 kunmap_atomic(kaddr);
Mark Fasheh1afc32b2007-09-07 14:46:51 -07002356
Tao Ma95581562011-02-22 21:33:59 +08002357 trace_ocfs2_write_end_inline(
2358 (unsigned long long)OCFS2_I(inode)->ip_blkno,
Mark Fasheh1afc32b2007-09-07 14:46:51 -07002359 (unsigned long long)pos, *copied,
2360 le16_to_cpu(di->id2.i_data.id_count),
2361 le16_to_cpu(di->i_dyn_features));
2362}
2363
Mark Fasheh7307de82007-05-09 15:16:19 -07002364int ocfs2_write_end_nolock(struct address_space *mapping,
2365 loff_t pos, unsigned len, unsigned copied,
2366 struct page *page, void *fsdata)
Mark Fasheh3a307ff2007-05-08 17:47:32 -07002367{
yangwenfang7f27ec92015-09-04 15:44:45 -07002368 int i, ret;
Mark Fasheh3a307ff2007-05-08 17:47:32 -07002369 unsigned from, to, start = pos & (PAGE_CACHE_SIZE - 1);
2370 struct inode *inode = mapping->host;
2371 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2372 struct ocfs2_write_ctxt *wc = fsdata;
2373 struct ocfs2_dinode *di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
2374 handle_t *handle = wc->w_handle;
2375 struct page *tmppage;
2376
yangwenfang7f27ec92015-09-04 15:44:45 -07002377 ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), wc->w_di_bh,
2378 OCFS2_JOURNAL_ACCESS_WRITE);
2379 if (ret) {
2380 copied = ret;
2381 mlog_errno(ret);
2382 goto out;
2383 }
2384
Mark Fasheh1afc32b2007-09-07 14:46:51 -07002385 if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
2386 ocfs2_write_end_inline(inode, pos, len, &copied, di, wc);
2387 goto out_write_size;
2388 }
2389
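	/* A short copy from userspace may have left part of the target range
	 * uninitialized; zero whatever was not copied, and report that
	 * nothing was copied at all if the page never became uptodate.
	 */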
Mark Fasheh3a307ff2007-05-08 17:47:32 -07002390 if (unlikely(copied < len)) {
2391 if (!PageUptodate(wc->w_target_page))
2392 copied = 0;
2393
2394 ocfs2_zero_new_buffers(wc->w_target_page, start+copied,
2395 start+len);
2396 }
2397 flush_dcache_page(wc->w_target_page);
2398
2399 for(i = 0; i < wc->w_num_pages; i++) {
2400 tmppage = wc->w_pages[i];
2401
2402 if (tmppage == wc->w_target_page) {
2403 from = wc->w_target_from;
2404 to = wc->w_target_to;
2405
2406 BUG_ON(from > PAGE_CACHE_SIZE ||
2407 to > PAGE_CACHE_SIZE ||
2408 to < from);
2409 } else {
2410 /*
2411 * Pages adjacent to the target (if any) imply
2412 * a hole-filling write in which case we want
2413 * to flush their entire range.
2414 */
2415 from = 0;
2416 to = PAGE_CACHE_SIZE;
2417 }
2418
Sunil Mushran961cecb2008-07-16 17:22:22 -07002419 if (page_has_buffers(tmppage)) {
Mark Fasheh53ef99c2008-11-18 16:53:43 -08002420 if (ocfs2_should_order_data(inode))
Joel Becker2b4e30f2008-09-03 20:03:41 -07002421 ocfs2_jbd2_file_inode(wc->w_handle, inode);
Sunil Mushran961cecb2008-07-16 17:22:22 -07002422 block_commit_write(tmppage, from, to);
2423 }
Mark Fasheh3a307ff2007-05-08 17:47:32 -07002424 }
2425
Mark Fasheh1afc32b2007-09-07 14:46:51 -07002426out_write_size:
Mark Fasheh3a307ff2007-05-08 17:47:32 -07002427 pos += copied;
Junxiao Bif17c20d2013-09-11 14:19:45 -07002428 if (pos > i_size_read(inode)) {
Mark Fasheh3a307ff2007-05-08 17:47:32 -07002429 i_size_write(inode, pos);
2430 mark_inode_dirty(inode);
2431 }
2432 inode->i_blocks = ocfs2_inode_sector_count(inode);
2433 di->i_size = cpu_to_le64((u64)i_size_read(inode));
2434 inode->i_mtime = inode->i_ctime = CURRENT_TIME;
2435 di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec);
2436 di->i_mtime_nsec = di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
Darrick J. Wong2931cdc2014-04-03 14:46:48 -07002437 ocfs2_update_inode_fsync_trans(handle, inode, 1);
Mark Fasheh3a307ff2007-05-08 17:47:32 -07002438 ocfs2_journal_dirty(handle, wc->w_di_bh);
2439
yangwenfang7f27ec92015-09-04 15:44:45 -07002440out:
Junxiao Bi136f49b2014-12-18 16:17:37 -08002441	/* Unlock pages before running deallocs, since dealloc needs to acquire
 2442	 * the j_trans_barrier lock; otherwise we could deadlock, because the
 2443	 * journal commit thread holds that lock and will ask for the page lock
 2444	 * when flushing the data. Keep this here to preserve the unlock order.
2445 */
2446 ocfs2_unlock_pages(wc);
2447
Mark Fasheh3a307ff2007-05-08 17:47:32 -07002448 ocfs2_commit_trans(osb, handle);
Mark Fasheh59a5e412007-06-22 15:52:36 -07002449
Mark Fashehb27b7cb2007-06-18 11:22:56 -07002450 ocfs2_run_deallocs(osb, &wc->w_dealloc);
2451
Junxiao Bi136f49b2014-12-18 16:17:37 -08002452 brelse(wc->w_di_bh);
2453 kfree(wc);
Mark Fasheh3a307ff2007-05-08 17:47:32 -07002454
2455 return copied;
Mark Fasheh9517bac2007-02-09 20:24:12 -08002456}
2457
Nick Pigginb6af1bc2007-10-16 01:25:24 -07002458static int ocfs2_write_end(struct file *file, struct address_space *mapping,
2459 loff_t pos, unsigned len, unsigned copied,
2460 struct page *page, void *fsdata)
Mark Fasheh607d44a2007-05-09 15:14:45 -07002461{
2462 int ret;
2463 struct inode *inode = mapping->host;
2464
2465 ret = ocfs2_write_end_nolock(mapping, pos, len, copied, page, fsdata);
2466
Mark Fasheh607d44a2007-05-09 15:14:45 -07002467 up_write(&OCFS2_I(inode)->ip_alloc_sem);
Mark Fashehe63aecb62007-10-18 15:30:42 -07002468 ocfs2_inode_unlock(inode, 1);
Mark Fasheh607d44a2007-05-09 15:14:45 -07002469
2470 return ret;
2471}
2472
Christoph Hellwigf5e54d62006-06-28 04:26:44 -07002473const struct address_space_operations ocfs2_aops = {
Hisashi Hifumi1fca3a02009-03-05 17:22:21 +09002474 .readpage = ocfs2_readpage,
2475 .readpages = ocfs2_readpages,
2476 .writepage = ocfs2_writepage,
2477 .write_begin = ocfs2_write_begin,
2478 .write_end = ocfs2_write_end,
2479 .bmap = ocfs2_bmap,
Hisashi Hifumi1fca3a02009-03-05 17:22:21 +09002480 .direct_IO = ocfs2_direct_IO,
Jan Kara41ecc342013-11-12 15:07:08 -08002481 .invalidatepage = block_invalidatepage,
Hisashi Hifumi1fca3a02009-03-05 17:22:21 +09002482 .releasepage = ocfs2_releasepage,
2483 .migratepage = buffer_migrate_page,
2484 .is_partially_uptodate = block_is_partially_uptodate,
Andi Kleenaa261f52009-09-16 11:50:16 +02002485 .error_remove_page = generic_error_remove_page,
Mark Fashehccd979b2005-12-15 14:31:24 -08002486};