/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * Copyright (C) 2002, 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <asm/byteorder.h>
#include <linux/swap.h>
#include <linux/pipe_fs_i.h>
#include <linux/mpage.h>
#include <linux/quotaops.h>
#include <linux/blkdev.h>

#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "aops.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "file.h"
#include "inode.h"
#include "journal.h"
#include "suballoc.h"
#include "super.h"
#include "symlink.h"
#include "refcounttree.h"
#include "ocfs2_trace.h"

#include "buffer_head_io.h"
#include "dir.h"
#include "namei.h"
#include "sysfile.h"

static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock,
				   struct buffer_head *bh_result, int create)
{
	int err = -EIO;
	int status;
	struct ocfs2_dinode *fe = NULL;
	struct buffer_head *bh = NULL;
	struct buffer_head *buffer_cache_bh = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	void *kaddr;

	trace_ocfs2_symlink_get_block(
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			(unsigned long long)iblock, bh_result, create);

	BUG_ON(ocfs2_inode_is_fast_symlink(inode));

	if ((iblock << inode->i_sb->s_blocksize_bits) > PATH_MAX + 1) {
		mlog(ML_ERROR, "block offset > PATH_MAX: %llu",
		     (unsigned long long)iblock);
		goto bail;
	}

	status = ocfs2_read_inode_block(inode, &bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}
	fe = (struct ocfs2_dinode *) bh->b_data;

	if ((u64)iblock >= ocfs2_clusters_to_blocks(inode->i_sb,
						    le32_to_cpu(fe->i_clusters))) {
		err = -ENOMEM;
		mlog(ML_ERROR, "block offset is outside the allocated size: "
		     "%llu\n", (unsigned long long)iblock);
		goto bail;
	}

	/* We don't use the page cache to create symlink data, so if
	 * need be, copy it over from the buffer cache. */
	if (!buffer_uptodate(bh_result) && ocfs2_inode_is_new(inode)) {
		u64 blkno = le64_to_cpu(fe->id2.i_list.l_recs[0].e_blkno) +
			    iblock;
		buffer_cache_bh = sb_getblk(osb->sb, blkno);
		if (!buffer_cache_bh) {
			err = -ENOMEM;
			mlog(ML_ERROR, "couldn't getblock for symlink!\n");
			goto bail;
		}

		/* we haven't locked out transactions, so a commit
		 * could've happened. Since we've got a reference on
		 * the bh, even if it commits while we're doing the
		 * copy, the data is still good. */
		if (buffer_jbd(buffer_cache_bh)
		    && ocfs2_inode_is_new(inode)) {
			kaddr = kmap_atomic(bh_result->b_page);
			if (!kaddr) {
				mlog(ML_ERROR, "couldn't kmap!\n");
				goto bail;
			}
			memcpy(kaddr + (bh_result->b_size * iblock),
			       buffer_cache_bh->b_data,
			       bh_result->b_size);
			kunmap_atomic(kaddr);
			set_buffer_uptodate(bh_result);
		}
		brelse(buffer_cache_bh);
	}

	map_bh(bh_result, inode->i_sb,
	       le64_to_cpu(fe->id2.i_list.l_recs[0].e_blkno) + iblock);

	err = 0;

bail:
	brelse(bh);

	return err;
}

int ocfs2_get_block(struct inode *inode, sector_t iblock,
		    struct buffer_head *bh_result, int create)
{
	int err = 0;
	unsigned int ext_flags;
	u64 max_blocks = bh_result->b_size >> inode->i_blkbits;
	u64 p_blkno, count, past_eof;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	trace_ocfs2_get_block((unsigned long long)OCFS2_I(inode)->ip_blkno,
			      (unsigned long long)iblock, bh_result, create);

	if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE)
		mlog(ML_NOTICE, "get_block on system inode 0x%p (%lu)\n",
		     inode, inode->i_ino);

	if (S_ISLNK(inode->i_mode)) {
		/* this always does I/O for some reason. */
		err = ocfs2_symlink_get_block(inode, iblock, bh_result, create);
		goto bail;
	}

	err = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno, &count,
					  &ext_flags);
	if (err) {
		mlog(ML_ERROR, "Error %d from get_blocks(0x%p, %llu, 1, "
		     "%llu, NULL)\n", err, inode, (unsigned long long)iblock,
		     (unsigned long long)p_blkno);
		goto bail;
	}

	if (max_blocks < count)
		count = max_blocks;

	/*
	 * ocfs2 never allocates in this function - the only time we
	 * need to use BH_New is when we're extending i_size on a file
	 * system which doesn't support holes, in which case BH_New
	 * allows __block_write_begin() to zero.
	 *
	 * If we see this on a sparse file system, then a truncate has
	 * raced us and removed the cluster. In this case, we clear
	 * the buffers dirty and uptodate bits and let the buffer code
	 * ignore it as a hole.
	 */
	if (create && p_blkno == 0 && ocfs2_sparse_alloc(osb)) {
		clear_buffer_dirty(bh_result);
		clear_buffer_uptodate(bh_result);
		goto bail;
	}

	/* Treat the unwritten extent as a hole for zeroing purposes. */
	if (p_blkno && !(ext_flags & OCFS2_EXT_UNWRITTEN))
		map_bh(bh_result, inode->i_sb, p_blkno);

	bh_result->b_size = count << inode->i_blkbits;

	if (!ocfs2_sparse_alloc(osb)) {
		if (p_blkno == 0) {
			err = -EIO;
			mlog(ML_ERROR,
			     "iblock = %llu p_blkno = %llu blkno=(%llu)\n",
			     (unsigned long long)iblock,
			     (unsigned long long)p_blkno,
			     (unsigned long long)OCFS2_I(inode)->ip_blkno);
			mlog(ML_ERROR, "Size %llu, clusters %u\n", (unsigned long long)i_size_read(inode), OCFS2_I(inode)->ip_clusters);
			dump_stack();
			goto bail;
		}
	}

	past_eof = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));

	trace_ocfs2_get_block_end((unsigned long long)OCFS2_I(inode)->ip_blkno,
				  (unsigned long long)past_eof);
	if (create && (iblock >= past_eof))
		set_buffer_new(bh_result);

bail:
	if (err < 0)
		err = -EIO;

	return err;
}

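/*
 * Note on the ->get_block contract as used above: on entry, bh_result->b_size
 * tells us how many blocks the caller can take (max_blocks); on exit it is
 * shrunk to the number of contiguous blocks actually mapped
 * (count << i_blkbits), which lets callers such as the mpage read-ahead and
 * direct I/O paths work an extent at a time instead of a block at a time.
 */
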
int ocfs2_read_inline_data(struct inode *inode, struct page *page,
			   struct buffer_head *di_bh)
{
	void *kaddr;
	loff_t size;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;

	if (!(le16_to_cpu(di->i_dyn_features) & OCFS2_INLINE_DATA_FL)) {
		ocfs2_error(inode->i_sb, "Inode %llu lost inline data flag",
			    (unsigned long long)OCFS2_I(inode)->ip_blkno);
		return -EROFS;
	}

	size = i_size_read(inode);

	if (size > PAGE_CACHE_SIZE ||
	    size > ocfs2_max_inline_data_with_xattr(inode->i_sb, di)) {
		ocfs2_error(inode->i_sb,
			    "Inode %llu has inline data with a bad size: %Lu",
			    (unsigned long long)OCFS2_I(inode)->ip_blkno,
			    (unsigned long long)size);
		return -EROFS;
	}

	kaddr = kmap_atomic(page);
	if (size)
		memcpy(kaddr, di->id2.i_data.id_data, size);
	/* Clear the remaining part of the page */
	memset(kaddr + size, 0, PAGE_CACHE_SIZE - size);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);

	SetPageUptodate(page);

	return 0;
}

static int ocfs2_readpage_inline(struct inode *inode, struct page *page)
{
	int ret;
	struct buffer_head *di_bh = NULL;

	BUG_ON(!PageLocked(page));
	BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL));

	ret = ocfs2_read_inode_block(inode, &di_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_read_inline_data(inode, page, di_bh);
out:
	unlock_page(page);

	brelse(di_bh);
	return ret;
}

static int ocfs2_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	loff_t start = (loff_t)page->index << PAGE_CACHE_SHIFT;
	int ret, unlock = 1;

	trace_ocfs2_readpage((unsigned long long)oi->ip_blkno,
			     (page ? page->index : 0));

	ret = ocfs2_inode_lock_with_page(inode, NULL, 0, page);
	if (ret != 0) {
		if (ret == AOP_TRUNCATED_PAGE)
			unlock = 0;
		mlog_errno(ret);
		goto out;
	}

	if (down_read_trylock(&oi->ip_alloc_sem) == 0) {
		/*
		 * Unlock the page and cycle ip_alloc_sem so that we don't
		 * busyloop waiting for ip_alloc_sem to unlock
		 */
		ret = AOP_TRUNCATED_PAGE;
		unlock_page(page);
		unlock = 0;
		down_read(&oi->ip_alloc_sem);
		up_read(&oi->ip_alloc_sem);
		goto out_inode_unlock;
	}

	/*
	 * i_size might have just been updated as we grabbed the meta lock. We
	 * might now be discovering a truncate that hit on another node.
	 * block_read_full_page->get_block freaks out if it is asked to read
	 * beyond the end of a file, so we check here. Callers
	 * (generic_file_read, vm_ops->fault) are clever enough to check i_size
	 * and notice that the page they just read isn't needed.
	 *
	 * XXX sys_readahead() seems to get that wrong?
	 */
	if (start >= i_size_read(inode)) {
		zero_user(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		ret = 0;
		goto out_alloc;
	}

	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
		ret = ocfs2_readpage_inline(inode, page);
	else
		ret = block_read_full_page(page, ocfs2_get_block);
	unlock = 0;

out_alloc:
	up_read(&OCFS2_I(inode)->ip_alloc_sem);
out_inode_unlock:
	ocfs2_inode_unlock(inode, 0);
out:
	if (unlock)
		unlock_page(page);
	return ret;
}

/*
 * This is used only for read-ahead. Failures or difficult to handle
 * situations are safe to ignore.
 *
 * Right now, we don't bother with BH_Boundary - in-inode extent lists
 * are quite large (243 extents on 4k blocks), so most inodes don't
 * grow out to a tree. If need be, detecting boundary extents could
 * trivially be added in a future version of ocfs2_get_block().
 */
static int ocfs2_readpages(struct file *filp, struct address_space *mapping,
			   struct list_head *pages, unsigned nr_pages)
{
	int ret, err = -EIO;
	struct inode *inode = mapping->host;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	loff_t start;
	struct page *last;

	/*
	 * Use the nonblocking flag for the dlm code to avoid page
	 * lock inversion, but don't bother with retrying.
	 */
	ret = ocfs2_inode_lock_full(inode, NULL, 0, OCFS2_LOCK_NONBLOCK);
	if (ret)
		return err;

	if (down_read_trylock(&oi->ip_alloc_sem) == 0) {
		ocfs2_inode_unlock(inode, 0);
		return err;
	}

	/*
	 * Don't bother with inline-data. There isn't anything
	 * to read-ahead in that case anyway...
	 */
	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
		goto out_unlock;

	/*
	 * Check whether a remote node truncated this file - we just
	 * drop out in that case as it's not worth handling here.
	 */
	last = list_entry(pages->prev, struct page, lru);
	start = (loff_t)last->index << PAGE_CACHE_SHIFT;
	if (start >= i_size_read(inode))
		goto out_unlock;

	err = mpage_readpages(mapping, pages, nr_pages, ocfs2_get_block);

out_unlock:
	up_read(&oi->ip_alloc_sem);
	ocfs2_inode_unlock(inode, 0);

	return err;
}

/* Note: Because we don't support holes, our allocation has
 * already happened (allocation writes zeros to the file data)
 * so we don't have to worry about ordered writes in
 * ocfs2_writepage.
 *
 * ->writepage is called during the process of invalidating the page cache
 * during blocked lock processing.  It can't block on any cluster locks
 * during block mapping.  It's relying on the fact that the block
 * mapping can't have disappeared under the dirty pages that it is
 * being asked to write back.
 */
static int ocfs2_writepage(struct page *page, struct writeback_control *wbc)
{
	trace_ocfs2_writepage(
		(unsigned long long)OCFS2_I(page->mapping->host)->ip_blkno,
		page->index);

	return block_write_full_page(page, ocfs2_get_block, wbc);
}

/* Taken from ext3. We don't necessarily need the full blown
 * functionality yet, but IMHO it's better to cut and paste the whole
 * thing so we can avoid introducing our own bugs (and easily pick up
 * their fixes when they happen) --Mark */
int walk_page_buffers(	handle_t *handle,
			struct buffer_head *head,
			unsigned from,
			unsigned to,
			int *partial,
			int (*fn)(	handle_t *handle,
					struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	for (	bh = head, block_start = 0;
		ret == 0 && (bh != head || !block_start);
		block_start = block_end, bh = next)
	{
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}

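/*
 * In other words, walk_page_buffers() applies fn() to every buffer_head of
 * the page that overlaps [from, to), sets *partial if any buffer outside
 * that range is not uptodate, and stops at (and returns) the first error
 * that fn() reports.
 */
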
static sector_t ocfs2_bmap(struct address_space *mapping, sector_t block)
{
	sector_t status;
	u64 p_blkno = 0;
	int err = 0;
	struct inode *inode = mapping->host;

	trace_ocfs2_bmap((unsigned long long)OCFS2_I(inode)->ip_blkno,
			 (unsigned long long)block);

	/* We don't need to lock journal system files, since they aren't
	 * accessed concurrently from multiple nodes.
	 */
	if (!INODE_JOURNAL(inode)) {
		err = ocfs2_inode_lock(inode, NULL, 0);
		if (err) {
			if (err != -ENOENT)
				mlog_errno(err);
			goto bail;
		}
		down_read(&OCFS2_I(inode)->ip_alloc_sem);
	}

	if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL))
		err = ocfs2_extent_map_get_blocks(inode, block, &p_blkno, NULL,
						  NULL);

	if (!INODE_JOURNAL(inode)) {
		up_read(&OCFS2_I(inode)->ip_alloc_sem);
		ocfs2_inode_unlock(inode, 0);
	}

	if (err) {
		mlog(ML_ERROR, "get_blocks() failed, block = %llu\n",
		     (unsigned long long)block);
		mlog_errno(err);
		goto bail;
	}

bail:
	status = err ? 0 : p_blkno;

	return status;
}

/*
 * TODO: Make this into a generic get_blocks function.
 *
 * From do_direct_io in direct-io.c:
 *  "So what we do is to permit the ->get_blocks function to populate
 *   bh.b_size with the size of IO which is permitted at this offset and
 *   this i_blkbits."
 *
 * This function is called directly from get_more_blocks in direct-io.c.
 *
 * called like this: dio->get_blocks(dio->inode, fs_startblk,
 * 					fs_count, map_bh, dio->rw == WRITE);
 */
static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock,
				     struct buffer_head *bh_result, int create)
{
	int ret;
	u32 cpos = 0;
	int alloc_locked = 0;
	u64 p_blkno, inode_blocks, contig_blocks;
	unsigned int ext_flags;
	unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
	unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits;
	unsigned long len = bh_result->b_size;
	unsigned int clusters_to_alloc = 0;

	cpos = ocfs2_blocks_to_clusters(inode->i_sb, iblock);

	/* This function won't even be called if the request isn't all
	 * nicely aligned and of the right size, so there's no need
	 * for us to check any of that. */

	inode_blocks = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));

	/* This figures out the size of the next contiguous block, and
	 * our logical offset */
	ret = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno,
					  &contig_blocks, &ext_flags);
	if (ret) {
		mlog(ML_ERROR, "get_blocks() failed iblock=%llu\n",
		     (unsigned long long)iblock);
		ret = -EIO;
		goto bail;
	}

	/* We should already CoW the refcounted extent in case of create. */
	BUG_ON(create && (ext_flags & OCFS2_EXT_REFCOUNTED));

	/* allocate blocks if no p_blkno is found, and create == 1 */
	if (!p_blkno && create) {
		ret = ocfs2_inode_lock(inode, NULL, 1);
		if (ret < 0) {
			mlog_errno(ret);
			goto bail;
		}

		alloc_locked = 1;

		/* fill hole, allocate blocks can't be larger than the size
		 * of the hole */
		clusters_to_alloc = ocfs2_clusters_for_bytes(inode->i_sb, len);
		if (clusters_to_alloc > contig_blocks)
			clusters_to_alloc = contig_blocks;

		/* allocate extent and insert them into the extent tree */
		ret = ocfs2_extend_allocation(inode, cpos,
				clusters_to_alloc, 0);
		if (ret < 0) {
			mlog_errno(ret);
			goto bail;
		}

		ret = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno,
				&contig_blocks, &ext_flags);
		if (ret < 0) {
			mlog(ML_ERROR, "get_blocks() failed iblock=%llu\n",
					(unsigned long long)iblock);
			ret = -EIO;
			goto bail;
		}
	}

	/*
	 * get_more_blocks() expects us to describe a hole by clearing
	 * the mapped bit on bh_result().
	 *
	 * Consider an unwritten extent as a hole.
	 */
	if (p_blkno && !(ext_flags & OCFS2_EXT_UNWRITTEN))
		map_bh(bh_result, inode->i_sb, p_blkno);
	else
		clear_buffer_mapped(bh_result);

	/* make sure we don't map more than max_blocks blocks here as
	   that's all the kernel will handle at this point. */
	if (max_blocks < contig_blocks)
		contig_blocks = max_blocks;
	bh_result->b_size = contig_blocks << blocksize_bits;
bail:
	if (alloc_locked)
		ocfs2_inode_unlock(inode, 1);
	return ret;
}

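/*
 * Unlike ocfs2_get_block() above, this get_blocks variant may allocate:
 * when create is set and the block has no backing cluster, it takes the
 * inode lock exclusively, extends the allocation for the I/O (clamped so
 * it does not spill past the hole), and then re-queries the extent map.
 * Holes and unwritten extents are still reported to the dio core by
 * leaving bh_result unmapped.
 */
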
/*
 * ocfs2_dio_end_io is called by the dio core when a dio is finished.  We're
 * particularly interested in the aio/dio case.  We use the rw_lock DLM lock
 * to protect io on one node from truncation on another.
 */
static void ocfs2_dio_end_io(struct kiocb *iocb,
			     loff_t offset,
			     ssize_t bytes,
			     void *private)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	int level;

	/* this io's submitter should not have unlocked this before we could */
	BUG_ON(!ocfs2_iocb_is_rw_locked(iocb));

	if (ocfs2_iocb_is_sem_locked(iocb))
		ocfs2_iocb_clear_sem_locked(iocb);

	if (ocfs2_iocb_is_unaligned_aio(iocb)) {
		ocfs2_iocb_clear_unaligned_aio(iocb);

		mutex_unlock(&OCFS2_I(inode)->ip_unaligned_aio);
	}

	ocfs2_iocb_clear_rw_locked(iocb);

	level = ocfs2_iocb_rw_locked_level(iocb);
	ocfs2_rw_unlock(inode, level);
}

static int ocfs2_releasepage(struct page *page, gfp_t wait)
{
	if (!page_has_buffers(page))
		return 0;
	return try_to_free_buffers(page);
}

static int ocfs2_is_overwrite(struct ocfs2_super *osb,
		struct inode *inode, loff_t offset)
{
	int ret = 0;
	u32 v_cpos = 0;
	u32 p_cpos = 0;
	unsigned int num_clusters = 0;
	unsigned int ext_flags = 0;

	v_cpos = ocfs2_bytes_to_clusters(osb->sb, offset);
	ret = ocfs2_get_clusters(inode, v_cpos, &p_cpos,
			&num_clusters, &ext_flags);
	if (ret < 0) {
		mlog_errno(ret);
		return ret;
	}

	if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN))
		return 1;

	return 0;
}

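/*
 * Direct write path, in outline: if the write extends i_size, the inode is
 * first added to the orphan dir so a crash mid-write can be cleaned up; for
 * an append, the allocation is extended (or zero-extended on sparse file
 * systems) under the inode lock before the blockdev dio is issued; if the
 * dio fails, the file is truncated back and the journal force-committed; on
 * a successful unaligned append, the portion of the first new cluster that
 * precedes the write offset is zeroed with blkdev_issue_zeroout(); finally
 * the inode is removed from the orphan dir, updating i_size if anything was
 * written.
 */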
static ssize_t ocfs2_direct_IO_write(struct kiocb *iocb,
		struct iov_iter *iter,
		loff_t offset)
{
	ssize_t ret = 0;
	ssize_t written = 0;
	bool orphaned = false;
	int is_overwrite = 0;
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file)->i_mapping->host;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct buffer_head *di_bh = NULL;
	size_t count = iter->count;
	journal_t *journal = osb->journal->j_journal;
	u32 zero_len;
	int cluster_align;
	loff_t final_size = offset + count;
	int append_write = offset >= i_size_read(inode) ? 1 : 0;
	unsigned int num_clusters = 0;
	unsigned int ext_flags = 0;

	{
		u64 o = offset;

		zero_len = do_div(o, 1 << osb->s_clustersize_bits);
		cluster_align = !zero_len;
	}

	/*
	 * when final_size > inode->i_size, inode->i_size will be
	 * updated after direct write, so add the inode to orphan
	 * dir first.
	 */
	if (final_size > i_size_read(inode)) {
		ret = ocfs2_add_inode_to_orphan(osb, inode);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}
		orphaned = true;
	}

	if (append_write) {
		ret = ocfs2_inode_lock(inode, NULL, 1);
		if (ret < 0) {
			mlog_errno(ret);
			goto clean_orphan;
		}

		if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
			ret = ocfs2_zero_extend(inode, di_bh, offset);
		else
			ret = ocfs2_extend_no_holes(inode, di_bh, offset,
					offset);
		if (ret < 0) {
			mlog_errno(ret);
			ocfs2_inode_unlock(inode, 1);
			goto clean_orphan;
		}

		is_overwrite = ocfs2_is_overwrite(osb, inode, offset);
		if (is_overwrite < 0) {
			mlog_errno(is_overwrite);
			ocfs2_inode_unlock(inode, 1);
			goto clean_orphan;
		}

		ocfs2_inode_unlock(inode, 1);
	}

	written = __blockdev_direct_IO(WRITE, iocb, inode, inode->i_sb->s_bdev,
			iter, offset,
			ocfs2_direct_IO_get_blocks,
			ocfs2_dio_end_io, NULL, 0);
	if (unlikely(written < 0)) {
		loff_t i_size = i_size_read(inode);

		if (offset + count > i_size) {
			ret = ocfs2_inode_lock(inode, &di_bh, 1);
			if (ret < 0) {
				mlog_errno(ret);
				goto clean_orphan;
			}

			if (i_size == i_size_read(inode)) {
				ret = ocfs2_truncate_file(inode, di_bh,
						i_size);
				if (ret < 0) {
					if (ret != -ENOSPC)
						mlog_errno(ret);

					ocfs2_inode_unlock(inode, 1);
					brelse(di_bh);
					goto clean_orphan;
				}
			}

			ocfs2_inode_unlock(inode, 1);
			brelse(di_bh);

			ret = jbd2_journal_force_commit(journal);
			if (ret < 0)
				mlog_errno(ret);
		}
	} else if (written > 0 && append_write && !is_overwrite &&
			!cluster_align) {
		u32 p_cpos = 0;
		u32 v_cpos = ocfs2_bytes_to_clusters(osb->sb, offset);

		ret = ocfs2_get_clusters(inode, v_cpos, &p_cpos,
				&num_clusters, &ext_flags);
		if (ret < 0) {
			mlog_errno(ret);
			goto clean_orphan;
		}

		BUG_ON(!p_cpos || (ext_flags & OCFS2_EXT_UNWRITTEN));

		ret = blkdev_issue_zeroout(osb->sb->s_bdev,
				p_cpos << (osb->s_clustersize_bits - 9),
				zero_len >> 9, GFP_KERNEL, false);
		if (ret < 0)
			mlog_errno(ret);
	}

clean_orphan:
	if (orphaned) {
		int tmp_ret;
		int update_isize = written > 0 ? 1 : 0;
		loff_t end = update_isize ? offset + written : 0;

		tmp_ret = ocfs2_del_inode_from_orphan(osb, inode,
				update_isize, end);
		if (tmp_ret < 0) {
			ret = tmp_ret;
			goto out;
		}

		tmp_ret = jbd2_journal_force_commit(journal);
		if (tmp_ret < 0) {
			ret = tmp_ret;
			mlog_errno(tmp_ret);
		}
	}

out:
	if (ret >= 0)
		ret = written;
	return ret;
}

static ssize_t ocfs2_direct_IO(int rw,
			       struct kiocb *iocb,
			       struct iov_iter *iter,
			       loff_t offset)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file)->i_mapping->host;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	int full_coherency = !(osb->s_mount_opt &
			OCFS2_MOUNT_COHERENCY_BUFFERED);

	/*
	 * Fallback to buffered I/O if we see an inode without
	 * extents.
	 */
	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
		return 0;

	/* Fallback to buffered I/O if we are appending and
	 * concurrent O_DIRECT writes are allowed.
	 */
	if (i_size_read(inode) <= offset && !full_coherency)
		return 0;

	if (rw == READ)
		return __blockdev_direct_IO(rw, iocb, inode,
				    inode->i_sb->s_bdev,
				    iter, offset,
				    ocfs2_direct_IO_get_blocks,
				    ocfs2_dio_end_io, NULL, 0);
	else
		return ocfs2_direct_IO_write(iocb, iter, offset);
}

static void ocfs2_figure_cluster_boundaries(struct ocfs2_super *osb,
					    u32 cpos,
					    unsigned int *start,
					    unsigned int *end)
{
	unsigned int cluster_start = 0, cluster_end = PAGE_CACHE_SIZE;

	if (unlikely(PAGE_CACHE_SHIFT > osb->s_clustersize_bits)) {
		unsigned int cpp;

		cpp = 1 << (PAGE_CACHE_SHIFT - osb->s_clustersize_bits);

		cluster_start = cpos % cpp;
		cluster_start = cluster_start << osb->s_clustersize_bits;

		cluster_end = cluster_start + osb->s_clustersize;
	}

	BUG_ON(cluster_start > PAGE_SIZE);
	BUG_ON(cluster_end > PAGE_SIZE);

	if (start)
		*start = cluster_start;
	if (end)
		*end = cluster_end;
}

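/*
 * Worked example of the boundary math above (assuming 4KB pages and 1KB
 * clusters, so cpp = 4): cpos 5 lands in slot 5 % 4 = 1 of its page, giving
 * cluster_start = 1 << 10 = 1024 and cluster_end = 2048.  When the cluster
 * size is >= the page size, the whole page [0, PAGE_CACHE_SIZE) is returned.
 */
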
/*
 * 'from' and 'to' are the region in the page to avoid zeroing.
 *
 * If pagesize > clustersize, this function will avoid zeroing outside
 * of the cluster boundary.
 *
 * from == to == 0 is code for "zero the entire cluster region"
 */
static void ocfs2_clear_page_regions(struct page *page,
				     struct ocfs2_super *osb, u32 cpos,
				     unsigned from, unsigned to)
{
	void *kaddr;
	unsigned int cluster_start, cluster_end;

	ocfs2_figure_cluster_boundaries(osb, cpos, &cluster_start, &cluster_end);

	kaddr = kmap_atomic(page);

	if (from || to) {
		if (from > cluster_start)
			memset(kaddr + cluster_start, 0, from - cluster_start);
		if (to < cluster_end)
			memset(kaddr + to, 0, cluster_end - to);
	} else {
		memset(kaddr + cluster_start, 0, cluster_end - cluster_start);
	}

	kunmap_atomic(kaddr);
}

/*
 * Nonsparse file systems fully allocate before we get to the write
 * code. This prevents ocfs2_write() from tagging the write as an
 * allocating one, which means ocfs2_map_page_blocks() might try to
 * read-in the blocks at the tail of our file. Avoid reading them by
 * testing i_size against each block offset.
 */
static int ocfs2_should_read_blk(struct inode *inode, struct page *page,
				 unsigned int block_start)
{
	u64 offset = page_offset(page) + block_start;

	if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
		return 1;

	if (i_size_read(inode) > offset)
		return 1;

	return 0;
}

/*
 * Some of this taken from __block_write_begin(). We already have our
 * mapping by now though, and the entire write will be allocating or
 * it won't, so not much need to use BH_New.
 *
 * This will also skip zeroing, which is handled externally.
 */
int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno,
			  struct inode *inode, unsigned int from,
			  unsigned int to, int new)
{
	int ret = 0;
	struct buffer_head *head, *bh, *wait[2], **wait_bh = wait;
	unsigned int block_end, block_start;
	unsigned int bsize = 1 << inode->i_blkbits;

	if (!page_has_buffers(page))
		create_empty_buffers(page, bsize, 0);

	head = page_buffers(page);
	for (bh = head, block_start = 0; bh != head || !block_start;
	     bh = bh->b_this_page, block_start += bsize) {
		block_end = block_start + bsize;

		clear_buffer_new(bh);

		/*
		 * Ignore blocks outside of our i/o range -
		 * they may belong to unallocated clusters.
		 */
		if (block_start >= to || block_end <= from) {
			if (PageUptodate(page))
				set_buffer_uptodate(bh);
			continue;
		}

		/*
		 * For an allocating write with cluster size >= page
		 * size, we always write the entire page.
		 */
		if (new)
			set_buffer_new(bh);

		if (!buffer_mapped(bh)) {
			map_bh(bh, inode->i_sb, *p_blkno);
			unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
		}

		if (PageUptodate(page)) {
			if (!buffer_uptodate(bh))
				set_buffer_uptodate(bh);
		} else if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
			   !buffer_new(bh) &&
			   ocfs2_should_read_blk(inode, page, block_start) &&
			   (block_start < from || block_end > to)) {
			ll_rw_block(READ, 1, &bh);
			*wait_bh++=bh;
		}

		*p_blkno = *p_blkno + 1;
	}

	/*
	 * If we issued read requests - let them complete.
	 */
	while(wait_bh > wait) {
		wait_on_buffer(*--wait_bh);
		if (!buffer_uptodate(*wait_bh))
			ret = -EIO;
	}

	if (ret == 0 || !new)
		return ret;

	/*
	 * If we get -EIO above, zero out any newly allocated blocks
	 * to avoid exposing stale data.
	 */
	bh = head;
	block_start = 0;
	do {
		block_end = block_start + bsize;
		if (block_end <= from)
			goto next_bh;
		if (block_start >= to)
			break;

		zero_user(page, block_start, bh->b_size);
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);

next_bh:
		block_start = block_end;
		bh = bh->b_this_page;
	} while (bh != head);

	return ret;
}

#if (PAGE_CACHE_SIZE >= OCFS2_MAX_CLUSTERSIZE)
#define OCFS2_MAX_CTXT_PAGES	1
#else
#define OCFS2_MAX_CTXT_PAGES	(OCFS2_MAX_CLUSTERSIZE / PAGE_CACHE_SIZE)
#endif

#define OCFS2_MAX_CLUSTERS_PER_PAGE	(PAGE_CACHE_SIZE / OCFS2_MIN_CLUSTERSIZE)

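/*
 * For example, assuming 4KB pages and the usual 4KB minimum / 1MB maximum
 * cluster sizes: OCFS2_MAX_CTXT_PAGES is 1MB / 4KB = 256 and
 * OCFS2_MAX_CLUSTERS_PER_PAGE is 4KB / 4KB = 1, so a single write context
 * below covers at most one page worth of clusters (w_desc) and at most one
 * cluster worth of pages (w_pages).
 */
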
/*
 * Describe the state of a single cluster to be written to.
 */
struct ocfs2_write_cluster_desc {
	u32		c_cpos;
	u32		c_phys;
	/*
	 * Give this a unique field because c_phys eventually gets
	 * filled.
	 */
	unsigned	c_new;
	unsigned	c_unwritten;
	unsigned	c_needs_zero;
};

struct ocfs2_write_ctxt {
	/* Logical cluster position / len of write */
	u32				w_cpos;
	u32				w_clen;

	/* First cluster allocated in a nonsparse extend */
	u32				w_first_new_cpos;

	struct ocfs2_write_cluster_desc	w_desc[OCFS2_MAX_CLUSTERS_PER_PAGE];

	/*
	 * This is true if page_size > cluster_size.
	 *
	 * It triggers a set of special cases during write which might
	 * have to deal with allocating writes to partial pages.
	 */
	unsigned int			w_large_pages;

	/*
	 * Pages involved in this write.
	 *
	 * w_target_page is the page being written to by the user.
	 *
	 * w_pages is an array of pages which always contains
	 * w_target_page, and in the case of an allocating write with
	 * page_size < cluster size, it will contain zero'd and mapped
	 * pages adjacent to w_target_page which need to be written
	 * out so that future reads from that region will get
	 * zeros.
	 */
	unsigned int			w_num_pages;
	struct page			*w_pages[OCFS2_MAX_CTXT_PAGES];
	struct page			*w_target_page;

	/*
	 * w_target_locked is used for the page_mkwrite path, indicating no
	 * unlocking against w_target_page in ocfs2_write_end_nolock.
	 */
	unsigned int			w_target_locked:1;

	/*
	 * ocfs2_write_end() uses this to know what the real range to
	 * write in the target should be.
	 */
	unsigned int			w_target_from;
	unsigned int			w_target_to;

	/*
	 * We could use journal_current_handle() but this is cleaner,
	 * IMHO -Mark
	 */
	handle_t			*w_handle;

	struct buffer_head		*w_di_bh;

	struct ocfs2_cached_dealloc_ctxt w_dealloc;
};

Mark Fasheh3a307ff2007-05-08 17:47:32 -07001111{
1112 int i;
1113
Mark Fasheh1d410a62007-09-07 14:20:45 -07001114 for(i = 0; i < num_pages; i++) {
1115 if (pages[i]) {
1116 unlock_page(pages[i]);
1117 mark_page_accessed(pages[i]);
1118 page_cache_release(pages[i]);
1119 }
Mark Fasheh3a307ff2007-05-08 17:47:32 -07001120 }
Mark Fasheh1d410a62007-09-07 14:20:45 -07001121}
1122
static void ocfs2_unlock_pages(struct ocfs2_write_ctxt *wc)
{
	int i;

	/*
	 * w_target_locked is only set to true in the page_mkwrite() case.
	 * The intent is to allow us to lock the target page from write_begin()
	 * to write_end(). The caller must hold a ref on w_target_page.
	 */
	if (wc->w_target_locked) {
		BUG_ON(!wc->w_target_page);
		for (i = 0; i < wc->w_num_pages; i++) {
			if (wc->w_target_page == wc->w_pages[i]) {
				wc->w_pages[i] = NULL;
				break;
			}
		}
		mark_page_accessed(wc->w_target_page);
		page_cache_release(wc->w_target_page);
	}
	ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages);
}

static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc)
{
	ocfs2_unlock_pages(wc);
	brelse(wc->w_di_bh);
	kfree(wc);
}

static int ocfs2_alloc_write_ctxt(struct ocfs2_write_ctxt **wcp,
				  struct ocfs2_super *osb, loff_t pos,
				  unsigned len, struct buffer_head *di_bh)
{
	u32 cend;
	struct ocfs2_write_ctxt *wc;

	wc = kzalloc(sizeof(struct ocfs2_write_ctxt), GFP_NOFS);
	if (!wc)
		return -ENOMEM;

	wc->w_cpos = pos >> osb->s_clustersize_bits;
	wc->w_first_new_cpos = UINT_MAX;
	cend = (pos + len - 1) >> osb->s_clustersize_bits;
	wc->w_clen = cend - wc->w_cpos + 1;
	get_bh(di_bh);
	wc->w_di_bh = di_bh;

	if (unlikely(PAGE_CACHE_SHIFT > osb->s_clustersize_bits))
		wc->w_large_pages = 1;
	else
		wc->w_large_pages = 0;

	ocfs2_init_dealloc_ctxt(&wc->w_dealloc);

	*wcp = wc;

	return 0;
}

/*
 * If a page has any new buffers, zero them out here, and mark them uptodate
 * and dirty so they'll be written out (in order to prevent uninitialised
 * block data from leaking). And clear the new bit.
 */
static void ocfs2_zero_new_buffers(struct page *page, unsigned from, unsigned to)
{
	unsigned int block_start, block_end;
	struct buffer_head *head, *bh;

	BUG_ON(!PageLocked(page));
	if (!page_has_buffers(page))
		return;

	bh = head = page_buffers(page);
	block_start = 0;
	do {
		block_end = block_start + bh->b_size;

		if (buffer_new(bh)) {
			if (block_end > from && block_start < to) {
				if (!PageUptodate(page)) {
					unsigned start, end;

					start = max(from, block_start);
					end = min(to, block_end);

					zero_user_segment(page, start, end);
					set_buffer_uptodate(bh);
				}

				clear_buffer_new(bh);
				mark_buffer_dirty(bh);
			}
		}

		block_start = block_end;
		bh = bh->b_this_page;
	} while (bh != head);
}

/*
 * Only called when we have a failure during an allocating write, to write
 * zeros to the newly allocated region.
 */
static void ocfs2_write_failure(struct inode *inode,
				struct ocfs2_write_ctxt *wc,
				loff_t user_pos, unsigned user_len)
{
	int i;
	unsigned from = user_pos & (PAGE_CACHE_SIZE - 1),
		to = user_pos + user_len;
	struct page *tmppage;

	ocfs2_zero_new_buffers(wc->w_target_page, from, to);

	for(i = 0; i < wc->w_num_pages; i++) {
		tmppage = wc->w_pages[i];

		if (page_has_buffers(tmppage)) {
			if (ocfs2_should_order_data(inode))
				ocfs2_jbd2_file_inode(wc->w_handle, inode);

			block_commit_write(tmppage, from, to);
		}
	}
}

static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno,
					struct ocfs2_write_ctxt *wc,
					struct page *page, u32 cpos,
					loff_t user_pos, unsigned user_len,
					int new)
{
	int ret;
	unsigned int map_from = 0, map_to = 0;
	unsigned int cluster_start, cluster_end;
	unsigned int user_data_from = 0, user_data_to = 0;

	ocfs2_figure_cluster_boundaries(OCFS2_SB(inode->i_sb), cpos,
					&cluster_start, &cluster_end);

	/* treat the write as new if a hole/lseek spanned across
	 * the page boundary.
	 */
	new = new | ((i_size_read(inode) <= page_offset(page)) &&
			(page_offset(page) <= user_pos));

	if (page == wc->w_target_page) {
		map_from = user_pos & (PAGE_CACHE_SIZE - 1);
		map_to = map_from + user_len;

		if (new)
			ret = ocfs2_map_page_blocks(page, p_blkno, inode,
						    cluster_start, cluster_end,
						    new);
		else
			ret = ocfs2_map_page_blocks(page, p_blkno, inode,
						    map_from, map_to, new);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		user_data_from = map_from;
		user_data_to = map_to;
		if (new) {
			map_from = cluster_start;
			map_to = cluster_end;
		}
	} else {
		/*
		 * If we haven't allocated the new page yet, we
		 * shouldn't be writing it out without copying user
		 * data. This is likely a math error from the caller.
		 */
		BUG_ON(!new);

		map_from = cluster_start;
		map_to = cluster_end;

		ret = ocfs2_map_page_blocks(page, p_blkno, inode,
					    cluster_start, cluster_end, new);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	/*
	 * Parts of newly allocated pages need to be zero'd.
	 *
	 * Above, we have also rewritten 'to' and 'from' - as far as
	 * the rest of the function is concerned, the entire cluster
	 * range inside of a page needs to be written.
	 *
	 * We can skip this if the page is up to date - it's already
	 * been zero'd from being read in as a hole.
	 */
	if (new && !PageUptodate(page))
		ocfs2_clear_page_regions(page, OCFS2_SB(inode->i_sb),
					 cpos, user_data_from, user_data_to);

	flush_dcache_page(page);

out:
	return ret;
}

/*
 * This function will only grab one cluster's worth of pages.
 */
static int ocfs2_grab_pages_for_write(struct address_space *mapping,
				      struct ocfs2_write_ctxt *wc,
				      u32 cpos, loff_t user_pos,
				      unsigned user_len, int new,
				      struct page *mmap_page)
{
	int ret = 0, i;
	unsigned long start, target_index, end_index, index;
	struct inode *inode = mapping->host;
	loff_t last_byte;

	target_index = user_pos >> PAGE_CACHE_SHIFT;

	/*
	 * Figure out how many pages we'll be manipulating here. For
	 * a non allocating write, we just change the one
	 * page. Otherwise, we'll need a whole cluster's worth.  If we're
	 * writing past i_size, we only need enough pages to cover the
	 * last page of the write.
	 */
	if (new) {
		wc->w_num_pages = ocfs2_pages_per_cluster(inode->i_sb);
		start = ocfs2_align_clusters_to_page_index(inode->i_sb, cpos);
		/*
		 * We need the index *past* the last page we could possibly
		 * touch.  This is the page past the end of the write or
		 * i_size, whichever is greater.
		 */
		last_byte = max(user_pos + user_len, i_size_read(inode));
		BUG_ON(last_byte < 1);
		end_index = ((last_byte - 1) >> PAGE_CACHE_SHIFT) + 1;
		if ((start + wc->w_num_pages) > end_index)
			wc->w_num_pages = end_index - start;
	} else {
		wc->w_num_pages = 1;
		start = target_index;
	}

	for(i = 0; i < wc->w_num_pages; i++) {
		index = start + i;

		if (index == target_index && mmap_page) {
			/*
			 * ocfs2_pagemkwrite() is a little different
			 * and wants us to directly use the page
			 * passed in.
			 */
			lock_page(mmap_page);

			/* Exit and let the caller retry */
			if (mmap_page->mapping != mapping) {
				WARN_ON(mmap_page->mapping);
				unlock_page(mmap_page);
				ret = -EAGAIN;
				goto out;
			}

			page_cache_get(mmap_page);
			wc->w_pages[i] = mmap_page;
			wc->w_target_locked = true;
		} else {
			wc->w_pages[i] = find_or_create_page(mapping, index,
							     GFP_NOFS);
			if (!wc->w_pages[i]) {
				ret = -ENOMEM;
				mlog_errno(ret);
				goto out;
			}
		}
		wait_for_stable_page(wc->w_pages[i]);

		if (index == target_index)
			wc->w_target_page = wc->w_pages[i];
	}
out:
	if (ret)
		wc->w_target_locked = false;
	return ret;
}

/*
 * Prepare a single cluster for writing into the file.
 */
static int ocfs2_write_cluster(struct address_space *mapping,
			       u32 phys, unsigned int unwritten,
			       unsigned int should_zero,
			       struct ocfs2_alloc_context *data_ac,
			       struct ocfs2_alloc_context *meta_ac,
			       struct ocfs2_write_ctxt *wc, u32 cpos,
			       loff_t user_pos, unsigned user_len)
{
	int ret, i, new;
	u64 v_blkno, p_blkno;
	struct inode *inode = mapping->host;
	struct ocfs2_extent_tree et;

	new = phys == 0 ? 1 : 0;
	if (new) {
		u32 tmp_pos;

		/*
		 * This is safe to call with the page locks - it won't take
		 * any additional semaphores or cluster locks.
		 */
		tmp_pos = cpos;
		ret = ocfs2_add_inode_data(OCFS2_SB(inode->i_sb), inode,
					   &tmp_pos, 1, 0, wc->w_di_bh,
					   wc->w_handle, data_ac,
					   meta_ac, NULL);
		/*
		 * This shouldn't happen because we must have already
		 * calculated the correct metadata allocation required. The
		 * internal tree allocation code should know how to increase
		 * transaction credits itself.
		 *
		 * If need be, we could handle -EAGAIN for a
		 * RESTART_TRANS here.
		 */
		mlog_bug_on_msg(ret == -EAGAIN,
				"Inode %llu: EAGAIN return during allocation.\n",
				(unsigned long long)OCFS2_I(inode)->ip_blkno);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}
	} else if (unwritten) {
		ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode),
					      wc->w_di_bh);
		ret = ocfs2_mark_extent_written(inode, &et,
						wc->w_handle, cpos, 1, phys,
						meta_ac, &wc->w_dealloc);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}
	}

	if (should_zero)
		v_blkno = ocfs2_clusters_to_blocks(inode->i_sb, cpos);
	else
		v_blkno = user_pos >> inode->i_sb->s_blocksize_bits;

	/*
	 * The only reason this should fail is due to an inability to
	 * find the extent added.
	 */
	ret = ocfs2_extent_map_get_blocks(inode, v_blkno, &p_blkno, NULL,
					  NULL);
	if (ret < 0) {
		mlog(ML_ERROR, "Get physical blkno failed for inode %llu, "
		     "at logical block %llu",
		     (unsigned long long)OCFS2_I(inode)->ip_blkno,
		     (unsigned long long)v_blkno);
		goto out;
	}

	BUG_ON(p_blkno == 0);

	for(i = 0; i < wc->w_num_pages; i++) {
		int tmpret;

		tmpret = ocfs2_prepare_page_for_write(inode, &p_blkno, wc,
						      wc->w_pages[i], cpos,
						      user_pos, user_len,
						      should_zero);
		if (tmpret) {
			mlog_errno(tmpret);
			if (ret == 0)
				ret = tmpret;
		}
	}

	/*
	 * We only have cleanup to do in the case of an allocating write.
	 */
	if (ret && new)
		ocfs2_write_failure(inode, wc, user_pos, user_len);

out:

	return ret;
}

static int ocfs2_write_cluster_by_desc(struct address_space *mapping,
				       struct ocfs2_alloc_context *data_ac,
				       struct ocfs2_alloc_context *meta_ac,
				       struct ocfs2_write_ctxt *wc,
				       loff_t pos, unsigned len)
{
	int ret, i;
	loff_t cluster_off;
	unsigned int local_len = len;
	struct ocfs2_write_cluster_desc *desc;
	struct ocfs2_super *osb = OCFS2_SB(mapping->host->i_sb);

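	/*
	 * Walk the per-cluster descriptors, clamping each pass so that
	 * no single call to ocfs2_write_cluster() crosses a cluster
	 * boundary. pos and len advance as each piece is written.
	 */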
	for (i = 0; i < wc->w_clen; i++) {
		desc = &wc->w_desc[i];

		/*
		 * We have to make sure that the total write passed in
		 * doesn't extend past a single cluster.
		 */
		local_len = len;
		cluster_off = pos & (osb->s_clustersize - 1);
		if ((cluster_off + local_len) > osb->s_clustersize)
			local_len = osb->s_clustersize - cluster_off;

		ret = ocfs2_write_cluster(mapping, desc->c_phys,
					  desc->c_unwritten,
					  desc->c_needs_zero,
					  data_ac, meta_ac,
					  wc, desc->c_cpos, pos, local_len);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		len -= local_len;
		pos += local_len;
	}

	ret = 0;
out:
	return ret;
}

/*
 * ocfs2_write_end() wants to know which parts of the target page it
 * should complete the write on. It's easiest to compute them ahead of
 * time when a more complete view of the write is available.
 */
static void ocfs2_set_target_boundaries(struct ocfs2_super *osb,
					struct ocfs2_write_ctxt *wc,
					loff_t pos, unsigned len, int alloc)
{
	struct ocfs2_write_cluster_desc *desc;

	wc->w_target_from = pos & (PAGE_CACHE_SIZE - 1);
	wc->w_target_to = wc->w_target_from + len;

	if (alloc == 0)
		return;

	/*
	 * Allocating write - we may have different boundaries based
	 * on page size and cluster size.
	 *
	 * NOTE: We can no longer compute one value from the other as
	 * the actual write length and user provided length may be
	 * different.
	 */

	if (wc->w_large_pages) {
		/*
		 * We only care about the 1st and last cluster within
		 * our range and whether they should be zero'd or not. Either
		 * value may be extended out to the start/end of a
		 * newly allocated cluster.
		 */
		desc = &wc->w_desc[0];
		if (desc->c_needs_zero)
			ocfs2_figure_cluster_boundaries(osb,
							desc->c_cpos,
							&wc->w_target_from,
							NULL);

		desc = &wc->w_desc[wc->w_clen - 1];
		if (desc->c_needs_zero)
			ocfs2_figure_cluster_boundaries(osb,
							desc->c_cpos,
							NULL,
							&wc->w_target_to);
	} else {
		wc->w_target_from = 0;
		wc->w_target_to = PAGE_CACHE_SIZE;
	}
}

/*
 * Populate each single-cluster write descriptor in the write context
 * with information about the i/o to be done.
 *
 * Returns the number of clusters that will have to be allocated, as
 * well as a worst case estimate of the number of extent records that
 * would have to be created during a write to an unwritten region.
 */
static int ocfs2_populate_write_desc(struct inode *inode,
				     struct ocfs2_write_ctxt *wc,
				     unsigned int *clusters_to_alloc,
				     unsigned int *extents_to_split)
{
	int ret;
	struct ocfs2_write_cluster_desc *desc;
	unsigned int num_clusters = 0;
	unsigned int ext_flags = 0;
	u32 phys = 0;
	int i;

	*clusters_to_alloc = 0;
	*extents_to_split = 0;

	for (i = 0; i < wc->w_clen; i++) {
		desc = &wc->w_desc[i];
		desc->c_cpos = wc->w_cpos + i;

		if (num_clusters == 0) {
			/*
			 * Need to look up the next extent record.
			 */
			ret = ocfs2_get_clusters(inode, desc->c_cpos, &phys,
						 &num_clusters, &ext_flags);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}

			/* We should have already CoW'd the refcounted extent. */
			BUG_ON(ext_flags & OCFS2_EXT_REFCOUNTED);

			/*
			 * Assume worst case - that we're writing in
			 * the middle of the extent.
			 *
			 * We can assume that the write proceeds from
			 * left to right, in which case the extent
			 * insert code is smart enough to coalesce the
			 * next splits into the previous records created.
			 */
			if (ext_flags & OCFS2_EXT_UNWRITTEN)
				*extents_to_split = *extents_to_split + 2;
		} else if (phys) {
			/*
			 * Only increment phys if it doesn't describe
			 * a hole.
			 */
			phys++;
		}

		/*
		 * If w_first_new_cpos is < UINT_MAX, we have a non-sparse
		 * file that got extended. w_first_new_cpos tells us
		 * where the newly allocated clusters are so we can
		 * zero them.
		 */
		if (desc->c_cpos >= wc->w_first_new_cpos) {
			BUG_ON(phys == 0);
			desc->c_needs_zero = 1;
		}

		desc->c_phys = phys;
		if (phys == 0) {
			desc->c_new = 1;
			desc->c_needs_zero = 1;
			*clusters_to_alloc = *clusters_to_alloc + 1;
		}

		if (ext_flags & OCFS2_EXT_UNWRITTEN) {
			desc->c_unwritten = 1;
			desc->c_needs_zero = 1;
		}

		num_clusters--;
	}

	ret = 0;
out:
	return ret;
}

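/*
 * Begin a write to an inline-data inode: start a transaction, pin
 * page 0 as the single target page and get journal access to the
 * dinode so that the data can be copied in at write_end time.
 */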
static int ocfs2_write_begin_inline(struct address_space *mapping,
				    struct inode *inode,
				    struct ocfs2_write_ctxt *wc)
{
	int ret;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct page *page;
	handle_t *handle;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	page = find_or_create_page(mapping, 0, GFP_NOFS);
	if (!page) {
		ocfs2_commit_trans(osb, handle);
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}
	/*
	 * If we don't set w_num_pages then this page won't get unlocked
	 * and freed on cleanup of the write context.
	 */
	wc->w_pages[0] = wc->w_target_page = page;
	wc->w_num_pages = 1;

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), wc->w_di_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		ocfs2_commit_trans(osb, handle);

		mlog_errno(ret);
		goto out;
	}

	if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL))
		ocfs2_set_inode_data_inline(inode, di);

	if (!PageUptodate(page)) {
		ret = ocfs2_read_inline_data(inode, page, wc->w_di_bh);
		if (ret) {
			ocfs2_commit_trans(osb, handle);

			goto out;
		}
	}

	wc->w_handle = handle;
out:
	return ret;
}

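/*
 * Returns 1 if a write ending at new_size would still fit in the
 * dinode's inline data area, 0 otherwise.
 */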
int ocfs2_size_fits_inline_data(struct buffer_head *di_bh, u64 new_size)
{
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;

	if (new_size <= le16_to_cpu(di->id2.i_data.id_count))
		return 1;
	return 0;
}

static int ocfs2_try_to_write_inline_data(struct address_space *mapping,
					  struct inode *inode, loff_t pos,
					  unsigned len, struct page *mmap_page,
					  struct ocfs2_write_ctxt *wc)
{
	int ret, written = 0;
	loff_t end = pos + len;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_dinode *di = NULL;

	trace_ocfs2_try_to_write_inline_data((unsigned long long)oi->ip_blkno,
					     len, (unsigned long long)pos,
					     oi->ip_dyn_features);

	/*
	 * Handle inodes which already have inline data first.
	 */
	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
		if (mmap_page == NULL &&
		    ocfs2_size_fits_inline_data(wc->w_di_bh, end))
			goto do_inline_write;

		/*
		 * The write won't fit - we have to give this inode an
		 * inline extent list now.
		 */
		ret = ocfs2_convert_inline_data_to_extents(inode, wc->w_di_bh);
		if (ret)
			mlog_errno(ret);
		goto out;
	}

	/*
	 * Check whether the inode can accept inline data.
	 */
	if (oi->ip_clusters != 0 || i_size_read(inode) != 0)
		return 0;

	/*
	 * Check whether the write can fit.
	 */
	di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
	if (mmap_page ||
	    end > ocfs2_max_inline_data_with_xattr(inode->i_sb, di))
		return 0;

do_inline_write:
	ret = ocfs2_write_begin_inline(mapping, inode, wc);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * This signals to the caller that the data can be written
	 * inline.
	 */
	written = 1;
out:
	return written ? written : ret;
}

/*
 * This function only does anything for file systems which can't
 * handle sparse files.
 *
 * What we want to do here is fill in any hole between the current end
 * of allocation and the end of our write. That way the rest of the
 * write path can treat it as a non-allocating write, which has no
 * special case code for sparse/nonsparse files.
 */
static int ocfs2_expand_nonsparse_inode(struct inode *inode,
					struct buffer_head *di_bh,
					loff_t pos, unsigned len,
					struct ocfs2_write_ctxt *wc)
{
	int ret;
	loff_t newsize = pos + len;

	BUG_ON(ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)));

	if (newsize <= i_size_read(inode))
		return 0;

	ret = ocfs2_extend_no_holes(inode, di_bh, newsize, pos);
	if (ret)
		mlog_errno(ret);

	wc->w_first_new_cpos =
		ocfs2_clusters_for_bytes(inode->i_sb, i_size_read(inode));

	return ret;
}

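/*
 * The sparse counterpart to ocfs2_expand_nonsparse_inode(): a write
 * past i_size only needs any already-allocated space between the old
 * i_size and the write position zeroed - holes can be left alone.
 */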
static int ocfs2_zero_tail(struct inode *inode, struct buffer_head *di_bh,
			   loff_t pos)
{
	int ret = 0;

	BUG_ON(!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)));
	if (pos > i_size_read(inode))
		ret = ocfs2_zero_extend(inode, di_bh, pos);

	return ret;
}

/*
 * Try to flush the truncate log if doing so could free enough clusters.
 * Return value: < 0 means error, 0 means no space could be freed, and
 * 1 means we have freed enough space for the caller to retry the
 * allocation.
 */
static int ocfs2_try_to_free_truncate_log(struct ocfs2_super *osb,
					  unsigned int needed)
{
	tid_t target;
	int ret = 0;
	unsigned int truncated_clusters;

	mutex_lock(&osb->osb_tl_inode->i_mutex);
	truncated_clusters = osb->truncated_clusters;
	mutex_unlock(&osb->osb_tl_inode->i_mutex);

	/*
	 * Check whether we can succeed in allocating if we free
	 * the truncate log.
	 */
	if (truncated_clusters < needed)
		goto out;

	ret = ocfs2_flush_truncate_log(osb);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	if (jbd2_journal_start_commit(osb->journal->j_journal, &target)) {
		jbd2_log_wait_commit(osb->journal->j_journal, target);
		ret = 1;
	}
out:
	return ret;
}

int ocfs2_write_begin_nolock(struct file *filp,
			     struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned flags,
			     struct page **pagep, void **fsdata,
			     struct buffer_head *di_bh, struct page *mmap_page)
{
	int ret, cluster_of_pages, credits = OCFS2_INODE_UPDATE_CREDITS;
	unsigned int clusters_to_alloc, extents_to_split, clusters_need = 0;
	struct ocfs2_write_ctxt *wc;
	struct inode *inode = mapping->host;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_dinode *di;
	struct ocfs2_alloc_context *data_ac = NULL;
	struct ocfs2_alloc_context *meta_ac = NULL;
	handle_t *handle;
	struct ocfs2_extent_tree et;
	int try_free = 1, ret1;

try_again:
	ret = ocfs2_alloc_write_ctxt(&wc, osb, pos, len, di_bh);
	if (ret) {
		mlog_errno(ret);
		return ret;
	}

	if (ocfs2_supports_inline_data(osb)) {
		ret = ocfs2_try_to_write_inline_data(mapping, inode, pos, len,
						     mmap_page, wc);
		if (ret == 1) {
			ret = 0;
			goto success;
		}
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}
	}

	if (ocfs2_sparse_alloc(osb))
		ret = ocfs2_zero_tail(inode, di_bh, pos);
	else
		ret = ocfs2_expand_nonsparse_inode(inode, di_bh, pos, len,
						   wc);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_check_range_for_refcount(inode, pos, len);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	} else if (ret == 1) {
		clusters_need = wc->w_clen;
		ret = ocfs2_refcount_cow(inode, di_bh,
					 wc->w_cpos, wc->w_clen, UINT_MAX);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	ret = ocfs2_populate_write_desc(inode, wc, &clusters_to_alloc,
					&extents_to_split);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}
	clusters_need += clusters_to_alloc;

	di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;

	trace_ocfs2_write_begin_nolock(
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			(long long)i_size_read(inode),
			le32_to_cpu(di->i_clusters),
			pos, len, flags, mmap_page,
			clusters_to_alloc, extents_to_split);

	/*
	 * We set w_target_from, w_target_to here so that
	 * ocfs2_write_end() knows which range in the target page to
	 * write out. An allocation requires that we write the entire
	 * cluster range.
	 */
	if (clusters_to_alloc || extents_to_split) {
		/*
		 * XXX: We are stretching the limits of
		 * ocfs2_lock_allocators(). It greatly over-estimates
		 * the work to be done.
		 */
		ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode),
					      wc->w_di_bh);
		ret = ocfs2_lock_allocators(inode, &et,
					    clusters_to_alloc, extents_to_split,
					    &data_ac, &meta_ac);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		if (data_ac)
			data_ac->ac_resv = &OCFS2_I(inode)->ip_la_data_resv;

		credits = ocfs2_calc_extend_credits(inode->i_sb,
						    &di->id2.i_list);

	}

	/*
	 * We have to zero sparse allocated clusters, unwritten extent clusters,
	 * and non-sparse clusters we just extended. For non-sparse writes,
	 * we know zeros will only be needed in the first and/or last cluster.
	 */
	if (clusters_to_alloc || extents_to_split ||
	    (wc->w_clen && (wc->w_desc[0].c_needs_zero ||
			    wc->w_desc[wc->w_clen - 1].c_needs_zero)))
		cluster_of_pages = 1;
	else
		cluster_of_pages = 0;

	ocfs2_set_target_boundaries(osb, wc, pos, len, cluster_of_pages);

	handle = ocfs2_start_trans(osb, credits);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	wc->w_handle = handle;

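	/*
	 * Charge quota for the clusters we are about to allocate while
	 * the transaction is running, so that a failure further down
	 * can release the reservation before committing.
	 */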
	if (clusters_to_alloc) {
		ret = dquot_alloc_space_nodirty(inode,
			ocfs2_clusters_to_bytes(osb->sb, clusters_to_alloc));
		if (ret)
			goto out_commit;
	}
	/*
	 * We don't want this to fail in ocfs2_write_end(), so do it
	 * here.
	 */
	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), wc->w_di_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out_quota;
	}

	/*
	 * Fill our page array first. That way we've grabbed enough so
	 * that we can zero and flush if we error after adding the
	 * extent.
	 */
	ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos, len,
					 cluster_of_pages, mmap_page);
	if (ret && ret != -EAGAIN) {
		mlog_errno(ret);
		goto out_quota;
	}

	/*
	 * ocfs2_grab_pages_for_write() returns -EAGAIN if it could not lock
	 * the target page. In this case, we exit with no error and no target
	 * page. This will trigger the caller, page_mkwrite(), to re-try
	 * the operation.
	 */
	if (ret == -EAGAIN) {
		BUG_ON(wc->w_target_page);
		ret = 0;
		goto out_quota;
	}

	ret = ocfs2_write_cluster_by_desc(mapping, data_ac, meta_ac, wc, pos,
					  len);
	if (ret) {
		mlog_errno(ret);
		goto out_quota;
	}

	if (data_ac)
		ocfs2_free_alloc_context(data_ac);
	if (meta_ac)
		ocfs2_free_alloc_context(meta_ac);

success:
	*pagep = wc->w_target_page;
	*fsdata = wc;
	return 0;
out_quota:
	if (clusters_to_alloc)
		dquot_free_space(inode,
			  ocfs2_clusters_to_bytes(osb->sb, clusters_to_alloc));
out_commit:
	ocfs2_commit_trans(osb, handle);

out:
	ocfs2_free_write_ctxt(wc);

	if (data_ac) {
		ocfs2_free_alloc_context(data_ac);
		data_ac = NULL;
	}
	if (meta_ac) {
		ocfs2_free_alloc_context(meta_ac);
		meta_ac = NULL;
	}

	if (ret == -ENOSPC && try_free) {
		/*
		 * Try to free some clusters from the truncate log so
		 * that the allocation can be retried.
		 */
		try_free = 0;

		ret1 = ocfs2_try_to_free_truncate_log(osb, clusters_need);
		if (ret1 == 1)
			goto try_again;

		if (ret1 < 0)
			mlog_errno(ret1);
	}

	return ret;
}

static int ocfs2_write_begin(struct file *file, struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned flags,
			     struct page **pagep, void **fsdata)
{
	int ret;
	struct buffer_head *di_bh = NULL;
	struct inode *inode = mapping->host;

	ret = ocfs2_inode_lock(inode, &di_bh, 1);
	if (ret) {
		mlog_errno(ret);
		return ret;
	}

	/*
	 * Take alloc sem here to prevent concurrent lookups. That way
	 * the mapping, zeroing and tree manipulation within
	 * ocfs2_write() will be safe against ->readpage(). This
	 * should also serve to lock out allocation from a shared
	 * writeable region.
	 */
	down_write(&OCFS2_I(inode)->ip_alloc_sem);

	ret = ocfs2_write_begin_nolock(file, mapping, pos, len, flags, pagep,
				       fsdata, di_bh, NULL);
	if (ret) {
		mlog_errno(ret);
		goto out_fail;
	}

	brelse(di_bh);

	return 0;

out_fail:
	up_write(&OCFS2_I(inode)->ip_alloc_sem);

	brelse(di_bh);
	ocfs2_inode_unlock(inode, 1);

	return ret;
}

static void ocfs2_write_end_inline(struct inode *inode, loff_t pos,
				   unsigned len, unsigned *copied,
				   struct ocfs2_dinode *di,
				   struct ocfs2_write_ctxt *wc)
{
	void *kaddr;

	if (unlikely(*copied < len)) {
		if (!PageUptodate(wc->w_target_page)) {
			*copied = 0;
			return;
		}
	}

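	/*
	 * Copy whatever made it into the page straight into the inline
	 * data area of the dinode.
	 */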
	kaddr = kmap_atomic(wc->w_target_page);
	memcpy(di->id2.i_data.id_data + pos, kaddr + pos, *copied);
	kunmap_atomic(kaddr);

	trace_ocfs2_write_end_inline(
	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
	     (unsigned long long)pos, *copied,
	     le16_to_cpu(di->id2.i_data.id_count),
	     le16_to_cpu(di->i_dyn_features));
}

int ocfs2_write_end_nolock(struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct page *page, void *fsdata)
{
	int i;
	unsigned from, to, start = pos & (PAGE_CACHE_SIZE - 1);
	struct inode *inode = mapping->host;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_write_ctxt *wc = fsdata;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
	handle_t *handle = wc->w_handle;
	struct page *tmppage;

	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
		ocfs2_write_end_inline(inode, pos, len, &copied, di, wc);
		goto out_write_size;
	}

	if (unlikely(copied < len)) {
		if (!PageUptodate(wc->w_target_page))
			copied = 0;

		ocfs2_zero_new_buffers(wc->w_target_page, start+copied,
				       start+len);
	}
	flush_dcache_page(wc->w_target_page);

	for(i = 0; i < wc->w_num_pages; i++) {
		tmppage = wc->w_pages[i];

		if (tmppage == wc->w_target_page) {
			from = wc->w_target_from;
			to = wc->w_target_to;

			BUG_ON(from > PAGE_CACHE_SIZE ||
			       to > PAGE_CACHE_SIZE ||
			       to < from);
		} else {
			/*
			 * Pages adjacent to the target (if any) imply
			 * a hole-filling write in which case we want
			 * to flush their entire range.
			 */
			from = 0;
			to = PAGE_CACHE_SIZE;
		}

		if (page_has_buffers(tmppage)) {
			if (ocfs2_should_order_data(inode))
				ocfs2_jbd2_file_inode(wc->w_handle, inode);
			block_commit_write(tmppage, from, to);
		}
	}

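	/*
	 * Push the new size and timestamps into the on-disk inode
	 * while the transaction is still running.
	 */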
out_write_size:
	pos += copied;
	if (pos > i_size_read(inode)) {
		i_size_write(inode, pos);
		mark_inode_dirty(inode);
	}
	inode->i_blocks = ocfs2_inode_sector_count(inode);
	di->i_size = cpu_to_le64((u64)i_size_read(inode));
	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec);
	di->i_mtime_nsec = di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	ocfs2_update_inode_fsync_trans(handle, inode, 1);
	ocfs2_journal_dirty(handle, wc->w_di_bh);

	/*
	 * Unlock the pages before running deallocs, since dealloc needs
	 * to acquire the j_trans_barrier lock. Otherwise we can deadlock:
	 * the journal commit thread holds that lock and will ask for the
	 * page lock when flushing the data. Unlocking here preserves the
	 * required ordering.
	 */
	ocfs2_unlock_pages(wc);

	ocfs2_commit_trans(osb, handle);

	ocfs2_run_deallocs(osb, &wc->w_dealloc);

	brelse(wc->w_di_bh);
	kfree(wc);

	return copied;
}

static int ocfs2_write_end(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct page *page, void *fsdata)
{
	int ret;
	struct inode *inode = mapping->host;

	ret = ocfs2_write_end_nolock(mapping, pos, len, copied, page, fsdata);

	up_write(&OCFS2_I(inode)->ip_alloc_sem);
	ocfs2_inode_unlock(inode, 1);

	return ret;
}

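/*
 * The address_space operations wired up for ocfs2 data mappings.
 */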
const struct address_space_operations ocfs2_aops = {
	.readpage		= ocfs2_readpage,
	.readpages		= ocfs2_readpages,
	.writepage		= ocfs2_writepage,
	.write_begin		= ocfs2_write_begin,
	.write_end		= ocfs2_write_end,
	.bmap			= ocfs2_bmap,
	.direct_IO		= ocfs2_direct_IO,
	.invalidatepage		= block_invalidatepage,
	.releasepage		= ocfs2_releasepage,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};