/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * file.c
 *
 * File open, close, extend, truncate
 *
 * Copyright (C) 2002, 2004 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/sched.h>
#include <linux/splice.h>
#include <linux/mount.h>
#include <linux/writeback.h>
#include <linux/falloc.h>
#include <linux/quotaops.h>
#include <linux/blkdev.h>

#define MLOG_MASK_PREFIX ML_INODE
#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "aops.h"
#include "dir.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "file.h"
#include "sysfile.h"
#include "inode.h"
#include "ioctl.h"
#include "journal.h"
#include "locks.h"
#include "mmap.h"
#include "suballoc.h"
#include "super.h"
#include "xattr.h"
#include "acl.h"
#include "quota.h"
#include "refcounttree.h"

#include "buffer_head_io.h"

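/*
 * Per-open private state: holds the lock resource used for cluster-aware
 * flock on this descriptor. Allocated at open time and hung off
 * file->private_data.
 */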
static int ocfs2_init_file_private(struct inode *inode, struct file *file)
{
        struct ocfs2_file_private *fp;

        fp = kzalloc(sizeof(struct ocfs2_file_private), GFP_KERNEL);
        if (!fp)
                return -ENOMEM;

        fp->fp_file = file;
        mutex_init(&fp->fp_mutex);
        ocfs2_file_lock_res_init(&fp->fp_flock, fp);
        file->private_data = fp;

        return 0;
}

static void ocfs2_free_file_private(struct inode *inode, struct file *file)
{
        struct ocfs2_file_private *fp = file->private_data;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

        if (fp) {
                ocfs2_simple_drop_lockres(osb, &fp->fp_flock);
                ocfs2_lock_res_free(&fp->fp_flock);
                kfree(fp);
                file->private_data = NULL;
        }
}

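/*
 * ->open() for regular files. Bail out if another node has already
 * deleted the inode; otherwise bump ip_open_count and set up the
 * per-file private data.
 */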
static int ocfs2_file_open(struct inode *inode, struct file *file)
{
        int status;
        int mode = file->f_flags;
        struct ocfs2_inode_info *oi = OCFS2_I(inode);

        mlog(0, "(0x%p, 0x%p, '%.*s')\n", inode, file,
             file->f_path.dentry->d_name.len, file->f_path.dentry->d_name.name);

        if (file->f_mode & FMODE_WRITE)
                dquot_initialize(inode);

        spin_lock(&oi->ip_lock);

        /* Check that the inode hasn't been wiped from disk by another
         * node. If it hasn't then we're safe as long as we hold the
         * spin lock until our increment of open count. */
        if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED) {
                spin_unlock(&oi->ip_lock);

                status = -ENOENT;
                goto leave;
        }

        if (mode & O_DIRECT)
                oi->ip_flags |= OCFS2_INODE_OPEN_DIRECT;

        oi->ip_open_count++;
        spin_unlock(&oi->ip_lock);

        status = ocfs2_init_file_private(inode, file);
        if (status) {
                /*
                 * We want to set open count back if we're failing the
                 * open.
                 */
                spin_lock(&oi->ip_lock);
                oi->ip_open_count--;
                spin_unlock(&oi->ip_lock);
        }

leave:
        return status;
}

static int ocfs2_file_release(struct inode *inode, struct file *file)
{
        struct ocfs2_inode_info *oi = OCFS2_I(inode);

        mlog(0, "(0x%p, 0x%p, '%.*s')\n", inode, file,
             file->f_path.dentry->d_name.len,
             file->f_path.dentry->d_name.name);

        spin_lock(&oi->ip_lock);
        if (!--oi->ip_open_count)
                oi->ip_flags &= ~OCFS2_INODE_OPEN_DIRECT;
        spin_unlock(&oi->ip_lock);

        ocfs2_free_file_private(inode, file);

        return 0;
}

static int ocfs2_dir_open(struct inode *inode, struct file *file)
{
        return ocfs2_init_file_private(inode, file);
}

static int ocfs2_dir_release(struct inode *inode, struct file *file)
{
        ocfs2_free_file_private(inode, file);
        return 0;
}

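/*
 * ->fsync()/->fdatasync(). A datasync with no dirty metadata only needs a
 * device cache flush; everything else forces a journal commit.
 */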
static int ocfs2_sync_file(struct file *file, int datasync)
{
        int err = 0;
        journal_t *journal;
        struct inode *inode = file->f_mapping->host;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

        mlog(0, "(0x%p, %d, 0x%p, '%.*s')\n", file, datasync,
             file->f_path.dentry, file->f_path.dentry->d_name.len,
             file->f_path.dentry->d_name.name);

        if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) {
                /*
                 * We still have to flush the drive's caches to get the
                 * data to the platter.
                 */
                if (osb->s_mount_opt & OCFS2_MOUNT_BARRIER)
                        blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
                goto bail;
        }

        journal = osb->journal->j_journal;
        err = jbd2_journal_force_commit(journal);

bail:
        if (err)
                mlog_errno(err);

        return (err < 0) ? -EIO : 0;
}

int ocfs2_should_update_atime(struct inode *inode,
                              struct vfsmount *vfsmnt)
{
        struct timespec now;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

        if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
                return 0;

        if ((inode->i_flags & S_NOATIME) ||
            ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode)))
                return 0;

        /*
         * We can be called with no vfsmnt structure - NFSD will
         * sometimes do this.
         *
         * Note that our action here is different than touch_atime() -
         * if we can't tell whether this is a noatime mount, then we
         * don't know whether to trust the value of s_atime_quantum.
         */
        if (vfsmnt == NULL)
                return 0;

        if ((vfsmnt->mnt_flags & MNT_NOATIME) ||
            ((vfsmnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode)))
                return 0;

        if (vfsmnt->mnt_flags & MNT_RELATIME) {
                if ((timespec_compare(&inode->i_atime, &inode->i_mtime) <= 0) ||
                    (timespec_compare(&inode->i_atime, &inode->i_ctime) <= 0))
                        return 1;

                return 0;
        }

        now = CURRENT_TIME;
        if ((now.tv_sec - inode->i_atime.tv_sec <= osb->s_atime_quantum))
                return 0;
        else
                return 1;
}

int ocfs2_update_inode_atime(struct inode *inode,
                             struct buffer_head *bh)
{
        int ret;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        handle_t *handle;
        struct ocfs2_dinode *di = (struct ocfs2_dinode *) bh->b_data;

        handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                mlog_errno(ret);
                goto out;
        }

        ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
                                      OCFS2_JOURNAL_ACCESS_WRITE);
        if (ret) {
                mlog_errno(ret);
                goto out_commit;
        }

        /*
         * Don't use ocfs2_mark_inode_dirty() here as we don't always
         * have i_mutex to guard against concurrent changes to other
         * inode fields.
         */
        inode->i_atime = CURRENT_TIME;
        di->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
        di->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
        ocfs2_journal_dirty(handle, bh);

out_commit:
        ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
out:
        return ret;
}

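/* Update both the in-memory and on-disk i_size inside an existing transaction. */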
static int ocfs2_set_inode_size(handle_t *handle,
                                struct inode *inode,
                                struct buffer_head *fe_bh,
                                u64 new_i_size)
{
        int status;

        i_size_write(inode, new_i_size);
        inode->i_blocks = ocfs2_inode_sector_count(inode);
        inode->i_ctime = inode->i_mtime = CURRENT_TIME;

        status = ocfs2_mark_inode_dirty(handle, inode, fe_bh);
        if (status < 0) {
                mlog_errno(status);
                goto bail;
        }

bail:
        return status;
}

int ocfs2_simple_size_update(struct inode *inode,
                             struct buffer_head *di_bh,
                             u64 new_i_size)
{
        int ret;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        handle_t *handle = NULL;

        handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                mlog_errno(ret);
                goto out;
        }

        ret = ocfs2_set_inode_size(handle, inode, di_bh,
                                   new_i_size);
        if (ret < 0)
                mlog_errno(ret);

        ocfs2_commit_trans(osb, handle);
out:
        return ret;
}

static int ocfs2_cow_file_pos(struct inode *inode,
                              struct buffer_head *fe_bh,
                              u64 offset)
{
        int status;
        u32 phys, cpos = offset >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
        unsigned int num_clusters = 0;
        unsigned int ext_flags = 0;

        /*
         * If the new offset is cluster-aligned, there is nothing for
         * ocfs2_zero_range_for_truncate to fill, so there is no need to
         * CoW either.
         */
        if ((offset & (OCFS2_SB(inode->i_sb)->s_clustersize - 1)) == 0)
                return 0;

        status = ocfs2_get_clusters(inode, cpos, &phys,
                                    &num_clusters, &ext_flags);
        if (status) {
                mlog_errno(status);
                goto out;
        }

        if (!(ext_flags & OCFS2_EXT_REFCOUNTED))
                goto out;

        return ocfs2_refcount_cow(inode, NULL, fe_bh, cpos, 1, cpos+1);

out:
        return status;
}

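/*
 * Zero the tail of the cluster containing the new size and push the new
 * i_size to disk in one transaction, ahead of the actual extent removal.
 */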
static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb,
                                     struct inode *inode,
                                     struct buffer_head *fe_bh,
                                     u64 new_i_size)
{
        int status;
        handle_t *handle;
        struct ocfs2_dinode *di;
        u64 cluster_bytes;

        /*
         * We need to CoW the cluster containing the offset if it is
         * reflinked, since we will call ocfs2_zero_range_for_truncate
         * later, which will write "0" from offset to the end of the
         * cluster.
         */
        status = ocfs2_cow_file_pos(inode, fe_bh, new_i_size);
        if (status) {
                mlog_errno(status);
                return status;
        }

        /* TODO: This needs to actually orphan the inode in this
         * transaction. */

        handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out;
        }

        status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), fe_bh,
                                         OCFS2_JOURNAL_ACCESS_WRITE);
        if (status < 0) {
                mlog_errno(status);
                goto out_commit;
        }

        /*
         * Do this before setting i_size.
         */
        cluster_bytes = ocfs2_align_bytes_to_clusters(inode->i_sb, new_i_size);
        status = ocfs2_zero_range_for_truncate(inode, handle, new_i_size,
                                               cluster_bytes);
        if (status) {
                mlog_errno(status);
                goto out_commit;
        }

        i_size_write(inode, new_i_size);
        inode->i_ctime = inode->i_mtime = CURRENT_TIME;

        di = (struct ocfs2_dinode *) fe_bh->b_data;
        di->i_size = cpu_to_le64(new_i_size);
        di->i_ctime = di->i_mtime = cpu_to_le64(inode->i_ctime.tv_sec);
        di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);

        ocfs2_journal_dirty(handle, fe_bh);

out_commit:
        ocfs2_commit_trans(osb, handle);
out:
        return status;
}

static int ocfs2_truncate_file(struct inode *inode,
                               struct buffer_head *di_bh,
                               u64 new_i_size)
{
        int status = 0;
        struct ocfs2_dinode *fe = NULL;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

        mlog(0, "(inode = %llu, new_i_size = %llu)\n",
             (unsigned long long)OCFS2_I(inode)->ip_blkno,
             (unsigned long long)new_i_size);

        /* We trust di_bh because it comes from ocfs2_inode_lock(), which
         * already validated it */
        fe = (struct ocfs2_dinode *) di_bh->b_data;

        mlog_bug_on_msg(le64_to_cpu(fe->i_size) != i_size_read(inode),
                        "Inode %llu, inode i_size = %lld != di "
                        "i_size = %llu, i_flags = 0x%x\n",
                        (unsigned long long)OCFS2_I(inode)->ip_blkno,
                        i_size_read(inode),
                        (unsigned long long)le64_to_cpu(fe->i_size),
                        le32_to_cpu(fe->i_flags));

        if (new_i_size > le64_to_cpu(fe->i_size)) {
                mlog(0, "asked to truncate file with size (%llu) to size (%llu)!\n",
                     (unsigned long long)le64_to_cpu(fe->i_size),
                     (unsigned long long)new_i_size);
                status = -EINVAL;
                mlog_errno(status);
                goto bail;
        }

        mlog(0, "inode %llu, i_size = %llu, new_i_size = %llu\n",
             (unsigned long long)le64_to_cpu(fe->i_blkno),
             (unsigned long long)le64_to_cpu(fe->i_size),
             (unsigned long long)new_i_size);

        /* let's handle the simple truncate cases before doing any more
         * cluster locking. */
        if (new_i_size == le64_to_cpu(fe->i_size))
                goto bail;

        down_write(&OCFS2_I(inode)->ip_alloc_sem);

        ocfs2_resv_discard(&osb->osb_la_resmap,
                           &OCFS2_I(inode)->ip_la_data_resv);

        /*
         * The inode lock forced other nodes to sync and drop their
         * pages, which (correctly) happens even if we have a truncate
         * without allocation change - ocfs2 cluster sizes can be much
         * greater than page size, so we have to truncate them
         * anyway.
         */
        unmap_mapping_range(inode->i_mapping, new_i_size + PAGE_SIZE - 1, 0, 1);
        truncate_inode_pages(inode->i_mapping, new_i_size);

        if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
                status = ocfs2_truncate_inline(inode, di_bh, new_i_size,
                                               i_size_read(inode), 1);
                if (status)
                        mlog_errno(status);

                goto bail_unlock_sem;
        }

        /* alright, we're going to need to do a full blown alloc size
         * change. Orphan the inode so that recovery can complete the
         * truncate if necessary. This does the task of marking
         * i_size. */
        status = ocfs2_orphan_for_truncate(osb, inode, di_bh, new_i_size);
        if (status < 0) {
                mlog_errno(status);
                goto bail_unlock_sem;
        }

        status = ocfs2_commit_truncate(osb, inode, di_bh);
        if (status < 0) {
                mlog_errno(status);
                goto bail_unlock_sem;
        }

        /* TODO: orphan dir cleanup here. */
bail_unlock_sem:
        up_write(&OCFS2_I(inode)->ip_alloc_sem);

bail:
        if (!status && OCFS2_I(inode)->ip_clusters == 0)
                status = ocfs2_try_remove_refcount_tree(inode, di_bh);

        return status;
}

/*
 * extend file allocation only here.
 * we'll update all the disk stuff, and oip->alloc_size
 *
 * expect stuff to be locked, a transaction started and enough data /
 * metadata reservations in the contexts.
 *
 * Will return -EAGAIN, and a reason if a restart is needed.
 * If passed in, *reason will always be set, even in error.
 */
int ocfs2_add_inode_data(struct ocfs2_super *osb,
                         struct inode *inode,
                         u32 *logical_offset,
                         u32 clusters_to_add,
                         int mark_unwritten,
                         struct buffer_head *fe_bh,
                         handle_t *handle,
                         struct ocfs2_alloc_context *data_ac,
                         struct ocfs2_alloc_context *meta_ac,
                         enum ocfs2_alloc_restarted *reason_ret)
{
        int ret;
        struct ocfs2_extent_tree et;

        ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), fe_bh);
        ret = ocfs2_add_clusters_in_btree(handle, &et, logical_offset,
                                          clusters_to_add, mark_unwritten,
                                          data_ac, meta_ac, reason_ret);

        return ret;
}

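/*
 * Add clusters_to_add clusters to the inode, starting at logical_start.
 * The transaction is extended, or the whole function restarted, when the
 * allocation code asks for it via the ocfs2_alloc_restarted reason.
 */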
static int __ocfs2_extend_allocation(struct inode *inode, u32 logical_start,
                                     u32 clusters_to_add, int mark_unwritten)
{
        int status = 0;
        int restart_func = 0;
        int credits;
        u32 prev_clusters;
        struct buffer_head *bh = NULL;
        struct ocfs2_dinode *fe = NULL;
        handle_t *handle = NULL;
        struct ocfs2_alloc_context *data_ac = NULL;
        struct ocfs2_alloc_context *meta_ac = NULL;
        enum ocfs2_alloc_restarted why;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        struct ocfs2_extent_tree et;
        int did_quota = 0;

        mlog(0, "(clusters_to_add = %u)\n", clusters_to_add);

        /*
         * This function only exists for file systems which don't
         * support holes.
         */
        BUG_ON(mark_unwritten && !ocfs2_sparse_alloc(osb));

        status = ocfs2_read_inode_block(inode, &bh);
        if (status < 0) {
                mlog_errno(status);
                goto leave;
        }
        fe = (struct ocfs2_dinode *) bh->b_data;

restart_all:
        BUG_ON(le32_to_cpu(fe->i_clusters) != OCFS2_I(inode)->ip_clusters);

        mlog(0, "extend inode %llu, i_size = %lld, di->i_clusters = %u, "
             "clusters_to_add = %u\n",
             (unsigned long long)OCFS2_I(inode)->ip_blkno,
             (long long)i_size_read(inode), le32_to_cpu(fe->i_clusters),
             clusters_to_add);
        ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), bh);
        status = ocfs2_lock_allocators(inode, &et, clusters_to_add, 0,
                                       &data_ac, &meta_ac);
        if (status) {
                mlog_errno(status);
                goto leave;
        }

        credits = ocfs2_calc_extend_credits(osb->sb, &fe->id2.i_list,
                                            clusters_to_add);
        handle = ocfs2_start_trans(osb, credits);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                handle = NULL;
                mlog_errno(status);
                goto leave;
        }

restarted_transaction:
        status = dquot_alloc_space_nodirty(inode,
                        ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
        if (status)
                goto leave;
        did_quota = 1;

        /* reserve a write to the file entry early on - that way if we
         * run out of credits in the allocation path, we can still
         * update i_size. */
        status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
                                         OCFS2_JOURNAL_ACCESS_WRITE);
        if (status < 0) {
                mlog_errno(status);
                goto leave;
        }

        prev_clusters = OCFS2_I(inode)->ip_clusters;

        status = ocfs2_add_inode_data(osb,
                                      inode,
                                      &logical_start,
                                      clusters_to_add,
                                      mark_unwritten,
                                      bh,
                                      handle,
                                      data_ac,
                                      meta_ac,
                                      &why);
        if ((status < 0) && (status != -EAGAIN)) {
                if (status != -ENOSPC)
                        mlog_errno(status);
                goto leave;
        }

        ocfs2_journal_dirty(handle, bh);

        spin_lock(&OCFS2_I(inode)->ip_lock);
        clusters_to_add -= (OCFS2_I(inode)->ip_clusters - prev_clusters);
        spin_unlock(&OCFS2_I(inode)->ip_lock);
        /* Release unused quota reservation */
        dquot_free_space(inode,
                        ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
        did_quota = 0;

        if (why != RESTART_NONE && clusters_to_add) {
                if (why == RESTART_META) {
                        mlog(0, "restarting function.\n");
                        restart_func = 1;
                        status = 0;
                } else {
                        BUG_ON(why != RESTART_TRANS);

                        mlog(0, "restarting transaction.\n");
                        /* TODO: This can be more intelligent. */
                        credits = ocfs2_calc_extend_credits(osb->sb,
                                                            &fe->id2.i_list,
                                                            clusters_to_add);
                        status = ocfs2_extend_trans(handle, credits);
                        if (status < 0) {
                                /* handle still has to be committed at
                                 * this point. */
                                status = -ENOMEM;
                                mlog_errno(status);
                                goto leave;
                        }
                        goto restarted_transaction;
                }
        }

        mlog(0, "fe: i_clusters = %u, i_size=%llu\n",
             le32_to_cpu(fe->i_clusters),
             (unsigned long long)le64_to_cpu(fe->i_size));
        mlog(0, "inode: ip_clusters=%u, i_size=%lld\n",
             OCFS2_I(inode)->ip_clusters, (long long)i_size_read(inode));

leave:
        if (status < 0 && did_quota)
                dquot_free_space(inode,
                        ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
        if (handle) {
                ocfs2_commit_trans(osb, handle);
                handle = NULL;
        }
        if (data_ac) {
                ocfs2_free_alloc_context(data_ac);
                data_ac = NULL;
        }
        if (meta_ac) {
                ocfs2_free_alloc_context(meta_ac);
                meta_ac = NULL;
        }
        if ((!status) && restart_func) {
                restart_func = 0;
                goto restart_all;
        }
        brelse(bh);
        bh = NULL;

        return status;
}

/*
 * While a write will already be ordering the data, a truncate will not.
 * Thus, we need to explicitly order the zeroed pages.
 */
static handle_t *ocfs2_zero_start_ordered_transaction(struct inode *inode)
{
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        handle_t *handle = NULL;
        int ret = 0;

        if (!ocfs2_should_order_data(inode))
                goto out;

        handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
        if (IS_ERR(handle)) {
                ret = -ENOMEM;
                mlog_errno(ret);
                goto out;
        }

        ret = ocfs2_jbd2_file_inode(handle, inode);
        if (ret < 0)
                mlog_errno(ret);

out:
        if (ret) {
                if (!IS_ERR(handle))
                        ocfs2_commit_trans(osb, handle);
                handle = ERR_PTR(ret);
        }
        return handle;
}

/* Some parts of this taken from generic_cont_expand, which turned out
 * to be too fragile to do exactly what we need without us having to
 * worry about recursive locking in ->write_begin() and ->write_end(). */
static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
                                 u64 abs_to)
{
        struct address_space *mapping = inode->i_mapping;
        struct page *page;
        unsigned long index = abs_from >> PAGE_CACHE_SHIFT;
        handle_t *handle = NULL;
        int ret = 0;
        unsigned zero_from, zero_to, block_start, block_end;

        BUG_ON(abs_from >= abs_to);
        BUG_ON(abs_to > (((u64)index + 1) << PAGE_CACHE_SHIFT));
        BUG_ON(abs_from & (inode->i_blkbits - 1));

        page = find_or_create_page(mapping, index, GFP_NOFS);
        if (!page) {
                ret = -ENOMEM;
                mlog_errno(ret);
                goto out;
        }

        /* Get the offsets within the page that we want to zero */
        zero_from = abs_from & (PAGE_CACHE_SIZE - 1);
        zero_to = abs_to & (PAGE_CACHE_SIZE - 1);
        if (!zero_to)
                zero_to = PAGE_CACHE_SIZE;

        mlog(0,
             "abs_from = %llu, abs_to = %llu, index = %lu, zero_from = %u, zero_to = %u\n",
             (unsigned long long)abs_from, (unsigned long long)abs_to,
             index, zero_from, zero_to);

        /* We know that zero_from is block aligned */
        for (block_start = zero_from; block_start < zero_to;
             block_start = block_end) {
                block_end = block_start + (1 << inode->i_blkbits);

                /*
                 * block_start is block-aligned. Bump it by one to force
                 * __block_write_begin and block_commit_write to zero the
                 * whole block.
                 */
                ret = __block_write_begin(page, block_start + 1, 0,
                                          ocfs2_get_block);
                if (ret < 0) {
                        mlog_errno(ret);
                        goto out_unlock;
                }

                if (!handle) {
                        handle = ocfs2_zero_start_ordered_transaction(inode);
                        if (IS_ERR(handle)) {
                                ret = PTR_ERR(handle);
                                handle = NULL;
                                break;
                        }
                }

                /* must not update i_size! */
                ret = block_commit_write(page, block_start + 1,
                                         block_start + 1);
                if (ret < 0)
                        mlog_errno(ret);
                else
                        ret = 0;
        }

        if (handle)
                ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);

out_unlock:
        unlock_page(page);
        page_cache_release(page);
out:
        return ret;
}

/*
 * Find the next range to zero. We do this in terms of bytes because
 * that's what ocfs2_zero_extend() wants, and it is dealing with the
 * pagecache. We may return multiple extents.
 *
 * zero_start and zero_end are ocfs2_zero_extend()'s current idea of what
 * needs to be zeroed. range_start and range_end return the next zeroing
 * range. A subsequent call should pass the previous range_end as its
 * zero_start. If range_end is 0, there's nothing to do.
 *
 * Unwritten extents are skipped over. Refcounted extents are CoWed.
 */
static int ocfs2_zero_extend_get_range(struct inode *inode,
                                       struct buffer_head *di_bh,
                                       u64 zero_start, u64 zero_end,
                                       u64 *range_start, u64 *range_end)
{
        int rc = 0, needs_cow = 0;
        u32 p_cpos, zero_clusters = 0;
        u32 zero_cpos =
                zero_start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
        u32 last_cpos = ocfs2_clusters_for_bytes(inode->i_sb, zero_end);
        unsigned int num_clusters = 0;
        unsigned int ext_flags = 0;

        while (zero_cpos < last_cpos) {
                rc = ocfs2_get_clusters(inode, zero_cpos, &p_cpos,
                                        &num_clusters, &ext_flags);
                if (rc) {
                        mlog_errno(rc);
                        goto out;
                }

                if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) {
                        zero_clusters = num_clusters;
                        if (ext_flags & OCFS2_EXT_REFCOUNTED)
                                needs_cow = 1;
                        break;
                }

                zero_cpos += num_clusters;
        }
        if (!zero_clusters) {
                *range_end = 0;
                goto out;
        }

        while ((zero_cpos + zero_clusters) < last_cpos) {
                rc = ocfs2_get_clusters(inode, zero_cpos + zero_clusters,
                                        &p_cpos, &num_clusters,
                                        &ext_flags);
                if (rc) {
                        mlog_errno(rc);
                        goto out;
                }

                if (!p_cpos || (ext_flags & OCFS2_EXT_UNWRITTEN))
                        break;
                if (ext_flags & OCFS2_EXT_REFCOUNTED)
                        needs_cow = 1;
                zero_clusters += num_clusters;
        }
        if ((zero_cpos + zero_clusters) > last_cpos)
                zero_clusters = last_cpos - zero_cpos;

        if (needs_cow) {
                rc = ocfs2_refcount_cow(inode, NULL, di_bh, zero_cpos,
                                        zero_clusters, UINT_MAX);
                if (rc) {
                        mlog_errno(rc);
                        goto out;
                }
        }

        *range_start = ocfs2_clusters_to_bytes(inode->i_sb, zero_cpos);
        *range_end = ocfs2_clusters_to_bytes(inode->i_sb,
                                             zero_cpos + zero_clusters);

out:
        return rc;
}

/*
 * Zero one range returned from ocfs2_zero_extend_get_range(). The caller
 * has made sure that the entire range needs zeroing.
 */
static int ocfs2_zero_extend_range(struct inode *inode, u64 range_start,
                                   u64 range_end)
{
        int rc = 0;
        u64 next_pos;
        u64 zero_pos = range_start;

        mlog(0, "range_start = %llu, range_end = %llu\n",
             (unsigned long long)range_start,
             (unsigned long long)range_end);
        BUG_ON(range_start >= range_end);

        while (zero_pos < range_end) {
                next_pos = (zero_pos & PAGE_CACHE_MASK) + PAGE_CACHE_SIZE;
                if (next_pos > range_end)
                        next_pos = range_end;
                rc = ocfs2_write_zero_page(inode, zero_pos, next_pos);
                if (rc < 0) {
                        mlog_errno(rc);
                        break;
                }
                zero_pos = next_pos;

                /*
                 * Very large extends have the potential to lock up
                 * the cpu for extended periods of time.
                 */
                cond_resched();
        }

        return rc;
}

int ocfs2_zero_extend(struct inode *inode, struct buffer_head *di_bh,
                      loff_t zero_to_size)
{
        int ret = 0;
        u64 zero_start, range_start = 0, range_end = 0;
        struct super_block *sb = inode->i_sb;

        zero_start = ocfs2_align_bytes_to_blocks(sb, i_size_read(inode));
        mlog(0, "zero_start %llu for i_size %llu\n",
             (unsigned long long)zero_start,
             (unsigned long long)i_size_read(inode));
        while (zero_start < zero_to_size) {
                ret = ocfs2_zero_extend_get_range(inode, di_bh, zero_start,
                                                  zero_to_size,
                                                  &range_start,
                                                  &range_end);
                if (ret) {
                        mlog_errno(ret);
                        break;
                }
                if (!range_end)
                        break;
                /* Trim the ends */
                if (range_start < zero_start)
                        range_start = zero_start;
                if (range_end > zero_to_size)
                        range_end = zero_to_size;

                ret = ocfs2_zero_extend_range(inode, range_start,
                                              range_end);
                if (ret) {
                        mlog_errno(ret);
                        break;
                }
                zero_start = range_end;
        }

        return ret;
}

int ocfs2_extend_no_holes(struct inode *inode, struct buffer_head *di_bh,
                          u64 new_i_size, u64 zero_to)
{
        int ret;
        u32 clusters_to_add;
        struct ocfs2_inode_info *oi = OCFS2_I(inode);

        /*
         * Only quota files call this without a bh, and they can't be
         * refcounted.
         */
        BUG_ON(!di_bh && (oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));
        BUG_ON(!di_bh && !(oi->ip_flags & OCFS2_INODE_SYSTEM_FILE));

        clusters_to_add = ocfs2_clusters_for_bytes(inode->i_sb, new_i_size);
        if (clusters_to_add < oi->ip_clusters)
                clusters_to_add = 0;
        else
                clusters_to_add -= oi->ip_clusters;

        if (clusters_to_add) {
                ret = __ocfs2_extend_allocation(inode, oi->ip_clusters,
                                                clusters_to_add, 0);
                if (ret) {
                        mlog_errno(ret);
                        goto out;
                }
        }

        /*
         * Call this even if we don't add any clusters to the tree. We
         * still need to zero the area between the old i_size and the
         * new i_size.
         */
        ret = ocfs2_zero_extend(inode, di_bh, zero_to);
        if (ret < 0)
                mlog_errno(ret);

out:
        return ret;
}

static int ocfs2_extend_file(struct inode *inode,
                             struct buffer_head *di_bh,
                             u64 new_i_size)
{
        int ret = 0;
        struct ocfs2_inode_info *oi = OCFS2_I(inode);

        BUG_ON(!di_bh);

        /* setattr sometimes calls us like this. */
        if (new_i_size == 0)
                goto out;

        if (i_size_read(inode) == new_i_size)
                goto out;
        BUG_ON(new_i_size < i_size_read(inode));

        /*
         * The alloc sem blocks people in read/write from reading our
         * allocation until we're done changing it. We depend on
         * i_mutex to block other extend/truncate calls while we're
         * here. We even have to hold it for sparse files because there
         * might be some tail zeroing.
         */
        down_write(&oi->ip_alloc_sem);

        if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
                /*
                 * We can optimize small extends by keeping the inode's
                 * inline data.
                 */
                if (ocfs2_size_fits_inline_data(di_bh, new_i_size)) {
                        up_write(&oi->ip_alloc_sem);
                        goto out_update_size;
                }

                ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
                if (ret) {
                        up_write(&oi->ip_alloc_sem);
                        mlog_errno(ret);
                        goto out;
                }
        }

        if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
                ret = ocfs2_zero_extend(inode, di_bh, new_i_size);
        else
                ret = ocfs2_extend_no_holes(inode, di_bh, new_i_size,
                                            new_i_size);

        up_write(&oi->ip_alloc_sem);

        if (ret < 0) {
                mlog_errno(ret);
                goto out;
        }

out_update_size:
        ret = ocfs2_simple_size_update(inode, di_bh, new_i_size);
        if (ret < 0)
                mlog_errno(ret);

out:
        return ret;
}

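/*
 * ->setattr(). Size changes run under the rw and inode cluster locks via
 * ocfs2_truncate_file()/ocfs2_extend_file(); uid/gid changes may also
 * transfer quota to the new owner.
 */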
int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
{
        int status = 0, size_change;
        struct inode *inode = dentry->d_inode;
        struct super_block *sb = inode->i_sb;
        struct ocfs2_super *osb = OCFS2_SB(sb);
        struct buffer_head *bh = NULL;
        handle_t *handle = NULL;
        struct dquot *transfer_to[MAXQUOTAS] = { };
        int qtype;

        mlog(0, "(0x%p, '%.*s')\n", dentry,
             dentry->d_name.len, dentry->d_name.name);

        /* ensuring we don't even attempt to truncate a symlink */
        if (S_ISLNK(inode->i_mode))
                attr->ia_valid &= ~ATTR_SIZE;

        if (attr->ia_valid & ATTR_MODE)
                mlog(0, "mode change: %d\n", attr->ia_mode);
        if (attr->ia_valid & ATTR_UID)
                mlog(0, "uid change: %d\n", attr->ia_uid);
        if (attr->ia_valid & ATTR_GID)
                mlog(0, "gid change: %d\n", attr->ia_gid);
        if (attr->ia_valid & ATTR_SIZE)
                mlog(0, "size change...\n");
        if (attr->ia_valid & (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME))
                mlog(0, "time change...\n");

#define OCFS2_VALID_ATTRS (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME | ATTR_SIZE \
                           | ATTR_GID | ATTR_UID | ATTR_MODE)
        if (!(attr->ia_valid & OCFS2_VALID_ATTRS)) {
                mlog(0, "can't handle attrs: 0x%x\n", attr->ia_valid);
                return 0;
        }

        status = inode_change_ok(inode, attr);
        if (status)
                return status;

        if (is_quota_modification(inode, attr))
                dquot_initialize(inode);
        size_change = S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE;
        if (size_change) {
                status = ocfs2_rw_lock(inode, 1);
                if (status < 0) {
                        mlog_errno(status);
                        goto bail;
                }
        }

        status = ocfs2_inode_lock(inode, &bh, 1);
        if (status < 0) {
                if (status != -ENOENT)
                        mlog_errno(status);
                goto bail_unlock_rw;
        }

        if (size_change && attr->ia_size != i_size_read(inode)) {
                status = inode_newsize_ok(inode, attr->ia_size);
                if (status)
                        goto bail_unlock;

                if (i_size_read(inode) > attr->ia_size) {
                        if (ocfs2_should_order_data(inode)) {
                                status = ocfs2_begin_ordered_truncate(inode,
                                                                      attr->ia_size);
                                if (status)
                                        goto bail_unlock;
                        }
                        status = ocfs2_truncate_file(inode, bh, attr->ia_size);
                } else
                        status = ocfs2_extend_file(inode, bh, attr->ia_size);
                if (status < 0) {
                        if (status != -ENOSPC)
                                mlog_errno(status);
                        status = -ENOSPC;
                        goto bail_unlock;
                }
        }

        if ((attr->ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
            (attr->ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
                /*
                 * Gather pointers to quota structures so that allocation /
                 * freeing of quota structures happens here and not inside
                 * dquot_transfer() where we have problems with lock ordering
                 */
                if (attr->ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid
                    && OCFS2_HAS_RO_COMPAT_FEATURE(sb,
                    OCFS2_FEATURE_RO_COMPAT_USRQUOTA)) {
                        transfer_to[USRQUOTA] = dqget(sb, attr->ia_uid,
                                                      USRQUOTA);
                        if (!transfer_to[USRQUOTA]) {
                                status = -ESRCH;
                                goto bail_unlock;
                        }
                }
                if (attr->ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid
                    && OCFS2_HAS_RO_COMPAT_FEATURE(sb,
                    OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)) {
                        transfer_to[GRPQUOTA] = dqget(sb, attr->ia_gid,
                                                      GRPQUOTA);
                        if (!transfer_to[GRPQUOTA]) {
                                status = -ESRCH;
                                goto bail_unlock;
                        }
                }
                handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS +
                                           2 * ocfs2_quota_trans_credits(sb));
                if (IS_ERR(handle)) {
                        status = PTR_ERR(handle);
                        mlog_errno(status);
                        goto bail_unlock;
                }
                status = __dquot_transfer(inode, transfer_to);
                if (status < 0)
                        goto bail_commit;
        } else {
                handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
                if (IS_ERR(handle)) {
                        status = PTR_ERR(handle);
                        mlog_errno(status);
                        goto bail_unlock;
                }
        }

        /*
         * This will intentionally not wind up calling truncate_setsize(),
         * since all the work for a size change has been done above.
         * Otherwise, we could get into problems with truncate as
         * ip_alloc_sem is used there to protect against i_size
         * changes.
         *
         * XXX: this means the conditional below can probably be removed.
         */
        if ((attr->ia_valid & ATTR_SIZE) &&
            attr->ia_size != i_size_read(inode)) {
                status = vmtruncate(inode, attr->ia_size);
                if (status) {
                        mlog_errno(status);
                        goto bail_commit;
                }
        }

        setattr_copy(inode, attr);
        mark_inode_dirty(inode);

        status = ocfs2_mark_inode_dirty(handle, inode, bh);
        if (status < 0)
                mlog_errno(status);

bail_commit:
        ocfs2_commit_trans(osb, handle);
bail_unlock:
        ocfs2_inode_unlock(inode, 1);
bail_unlock_rw:
        if (size_change)
                ocfs2_rw_unlock(inode, 1);
bail:
        brelse(bh);

        /* Release quota pointers in case we acquired them */
        for (qtype = 0; qtype < MAXQUOTAS; qtype++)
                dqput(transfer_to[qtype]);

        if (!status && attr->ia_valid & ATTR_MODE) {
                status = ocfs2_acl_chmod(inode);
                if (status < 0)
                        mlog_errno(status);
        }

        return status;
}

int ocfs2_getattr(struct vfsmount *mnt,
                  struct dentry *dentry,
                  struct kstat *stat)
{
        struct inode *inode = dentry->d_inode;
        struct super_block *sb = dentry->d_inode->i_sb;
        struct ocfs2_super *osb = sb->s_fs_info;
        int err;

        err = ocfs2_inode_revalidate(dentry);
        if (err) {
                if (err != -ENOENT)
                        mlog_errno(err);
                goto bail;
        }

        generic_fillattr(inode, stat);

        /* We set the blksize from the cluster size for performance */
        stat->blksize = osb->s_clustersize;

bail:
        return err;
}

int ocfs2_permission(struct inode *inode, int mask, unsigned int flags)
{
        int ret;

        if (flags & IPERM_FLAG_RCU)
                return -ECHILD;

        ret = ocfs2_inode_lock(inode, NULL, 0);
        if (ret) {
                if (ret != -ENOENT)
                        mlog_errno(ret);
                goto out;
        }

        ret = generic_permission(inode, mask, flags, ocfs2_check_acl);

        ocfs2_inode_unlock(inode, 0);
out:
        return ret;
}

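/* Clear the setuid/setgid bits on write, journalling the mode update. */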
static int __ocfs2_write_remove_suid(struct inode *inode,
                                     struct buffer_head *bh)
{
        int ret;
        handle_t *handle;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        struct ocfs2_dinode *di;

        mlog(0, "(Inode %llu, mode 0%o)\n",
             (unsigned long long)OCFS2_I(inode)->ip_blkno, inode->i_mode);

        handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                mlog_errno(ret);
                goto out;
        }

        ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
                                      OCFS2_JOURNAL_ACCESS_WRITE);
        if (ret < 0) {
                mlog_errno(ret);
                goto out_trans;
        }

        inode->i_mode &= ~S_ISUID;
        if ((inode->i_mode & S_ISGID) && (inode->i_mode & S_IXGRP))
                inode->i_mode &= ~S_ISGID;

        di = (struct ocfs2_dinode *) bh->b_data;
        di->i_mode = cpu_to_le16(inode->i_mode);

        ocfs2_journal_dirty(handle, bh);

out_trans:
        ocfs2_commit_trans(osb, handle);
out:
        return ret;
}

/*
 * Will look for holes and unwritten extents in the range starting at
 * pos for count bytes (inclusive).
 */
static int ocfs2_check_range_for_holes(struct inode *inode, loff_t pos,
                                       size_t count)
{
        int ret = 0;
        unsigned int extent_flags;
        u32 cpos, clusters, extent_len, phys_cpos;
        struct super_block *sb = inode->i_sb;

        cpos = pos >> OCFS2_SB(sb)->s_clustersize_bits;
        clusters = ocfs2_clusters_for_bytes(sb, pos + count) - cpos;

        while (clusters) {
                ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &extent_len,
                                         &extent_flags);
                if (ret < 0) {
                        mlog_errno(ret);
                        goto out;
                }

                if (phys_cpos == 0 || (extent_flags & OCFS2_EXT_UNWRITTEN)) {
                        ret = 1;
                        break;
                }

                if (extent_len > clusters)
                        extent_len = clusters;

                clusters -= extent_len;
                cpos += extent_len;
        }
out:
        return ret;
}

static int ocfs2_write_remove_suid(struct inode *inode)
{
        int ret;
        struct buffer_head *bh = NULL;

        ret = ocfs2_read_inode_block(inode, &bh);
        if (ret < 0) {
                mlog_errno(ret);
                goto out;
        }

        ret = __ocfs2_write_remove_suid(inode, bh);
out:
        brelse(bh);
        return ret;
}

Mark Fasheh2ae99a62007-03-09 16:43:28 -08001408/*
1409 * Allocate enough extents to cover the region starting at byte offset
1410 * start for len bytes. Existing extents are skipped, any extents
1411 * added are marked as "unwritten".
1412 */
1413static int ocfs2_allocate_unwritten_extents(struct inode *inode,
1414 u64 start, u64 len)
1415{
1416 int ret;
1417 u32 cpos, phys_cpos, clusters, alloc_size;
Mark Fasheh1afc32b2007-09-07 14:46:51 -07001418 u64 end = start + len;
1419 struct buffer_head *di_bh = NULL;
1420
1421 if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
Joel Beckerb657c952008-11-13 14:49:11 -08001422 ret = ocfs2_read_inode_block(inode, &di_bh);
Mark Fasheh1afc32b2007-09-07 14:46:51 -07001423 if (ret) {
1424 mlog_errno(ret);
1425 goto out;
1426 }
1427
1428 /*
1429 * Nothing to do if the requested reservation range
1430 * fits within the inode.
1431 */
1432 if (ocfs2_size_fits_inline_data(di_bh, end))
1433 goto out;
1434
1435 ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
1436 if (ret) {
1437 mlog_errno(ret);
1438 goto out;
1439 }
1440 }
Mark Fasheh2ae99a62007-03-09 16:43:28 -08001441
1442 /*
1443 * We consider both start and len to be inclusive.
1444 */
1445 cpos = start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
1446 clusters = ocfs2_clusters_for_bytes(inode->i_sb, start + len);
1447 clusters -= cpos;
1448
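	/*
	 * Walk the cluster range: regions that are already allocated are
	 * skipped, and each hole is filled with up to alloc_size clusters
	 * of unwritten extents per iteration.
	 */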
1449 while (clusters) {
1450 ret = ocfs2_get_clusters(inode, cpos, &phys_cpos,
1451 &alloc_size, NULL);
1452 if (ret) {
1453 mlog_errno(ret);
1454 goto out;
1455 }
1456
1457 /*
1458 * Hole or existing extent len can be arbitrary, so
1459 * cap it to our own allocation request.
1460 */
1461 if (alloc_size > clusters)
1462 alloc_size = clusters;
1463
1464 if (phys_cpos) {
1465 /*
1466 * We already have an allocation at this
1467 * region so we can safely skip it.
1468 */
1469 goto next;
1470 }
1471
1472 ret = __ocfs2_extend_allocation(inode, cpos, alloc_size, 1);
1473 if (ret) {
1474 if (ret != -ENOSPC)
1475 mlog_errno(ret);
1476 goto out;
1477 }
1478
1479next:
1480 cpos += alloc_size;
1481 clusters -= alloc_size;
1482 }
1483
1484 ret = 0;
1485out:
Mark Fasheh1afc32b2007-09-07 14:46:51 -07001486
1487 brelse(di_bh);
Mark Fasheh2ae99a62007-03-09 16:43:28 -08001488 return ret;
1489}
1490
Mark Fasheh063c4562007-07-03 13:34:11 -07001491/*
1492 * Truncate a byte range, avoiding pages within partial clusters. This
1493 * preserves those pages for the zeroing code to write to.
1494 */
1495static void ocfs2_truncate_cluster_pages(struct inode *inode, u64 byte_start,
1496 u64 byte_len)
1497{
1498 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1499 loff_t start, end;
1500 struct address_space *mapping = inode->i_mapping;
1501
1502 start = (loff_t)ocfs2_align_bytes_to_clusters(inode->i_sb, byte_start);
1503 end = byte_start + byte_len;
1504 end = end & ~(osb->s_clustersize - 1);
1505
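	/*
	 * For example, with a 4K cluster size, byte_start = 1000 and
	 * byte_len = 10000 give start = 4096 and end = 8192, so only the
	 * pages backing the fully covered cluster [4096, 8191] are
	 * dropped; the partial clusters at either end are kept for the
	 * zeroing code.
	 */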
1506 if (start < end) {
1507 unmap_mapping_range(mapping, start, end - start, 0);
1508 truncate_inode_pages_range(mapping, start, end - 1);
1509 }
1510}
1511
1512static int ocfs2_zero_partial_clusters(struct inode *inode,
1513 u64 start, u64 len)
1514{
1515 int ret = 0;
1516 u64 tmpend, end = start + len;
1517 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1518 unsigned int csize = osb->s_clustersize;
1519 handle_t *handle;
1520
1521 /*
1522 * The "start" and "end" values are NOT necessarily part of
1523 * the range whose allocation is being deleted. Rather, this
1524 * is what the user passed in with the request. We must zero
1525 * partial clusters here. There's no need to worry about
1526 * physical allocation - the zeroing code knows to skip holes.
1527 */
1528 mlog(0, "byte start: %llu, end: %llu\n",
1529 (unsigned long long)start, (unsigned long long)end);
1530
1531 /*
1532 * If both edges are on a cluster boundary then there's no
1533 * zeroing required as the region is part of the allocation to
1534 * be truncated.
1535 */
1536 if ((start & (csize - 1)) == 0 && (end & (csize - 1)) == 0)
1537 goto out;
1538
1539 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
Jan Karafa38e922008-10-20 19:23:51 +02001540 if (IS_ERR(handle)) {
1541 ret = PTR_ERR(handle);
Mark Fasheh063c4562007-07-03 13:34:11 -07001542 mlog_errno(ret);
1543 goto out;
1544 }
1545
1546 /*
1547 * We want to get the byte offset of the end of the 1st cluster.
1548 */
1549 tmpend = (u64)osb->s_clustersize + (start & ~(osb->s_clustersize - 1));
1550 if (tmpend > end)
1551 tmpend = end;
1552
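	/*
	 * For example, with a 4K cluster size, start = 1000 and end = 9000
	 * give tmpend = 4096: this first pass zeroes from byte 1000 up to
	 * the cluster boundary, and the second pass below zeroes from byte
	 * 8192 up to 9000.
	 */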
1553 mlog(0, "1st range: start: %llu, tmpend: %llu\n",
1554 (unsigned long long)start, (unsigned long long)tmpend);
1555
1556 ret = ocfs2_zero_range_for_truncate(inode, handle, start, tmpend);
1557 if (ret)
1558 mlog_errno(ret);
1559
1560 if (tmpend < end) {
1561 /*
1562 * This may make start and end equal, but the zeroing
1563 * code will skip any work in that case so there's no
1564 * need to catch it up here.
1565 */
1566 start = end & ~(osb->s_clustersize - 1);
1567
1568 mlog(0, "2nd range: start: %llu, end: %llu\n",
1569 (unsigned long long)start, (unsigned long long)end);
1570
1571 ret = ocfs2_zero_range_for_truncate(inode, handle, start, end);
1572 if (ret)
1573 mlog_errno(ret);
1574 }
1575
1576 ocfs2_commit_trans(osb, handle);
1577out:
1578 return ret;
1579}
1580
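/*
 * Return the index of the last extent record in 'el' whose cpos is
 * below 'pos', or -1 if no such record exists (in which case the
 * caller has to move to the previous leaf block).
 */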
Tristan Yec1631d42010-05-11 17:54:45 +08001581static int ocfs2_find_rec(struct ocfs2_extent_list *el, u32 pos)
1582{
1583 int i;
1584 struct ocfs2_extent_rec *rec = NULL;
1585
1586 for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) {
1587
1588 rec = &el->l_recs[i];
1589
1590 if (le32_to_cpu(rec->e_cpos) < pos)
1591 break;
1592 }
1593
1594 return i;
1595}
1596
1597/*
 1598 * Helper to calculate the punching position and length in one pass.
 1599 * We handle the following three cases in order:
1600 *
1601 * - remove the entire record
1602 * - remove a partial record
1603 * - no record needs to be removed (hole-punching completed)
 1604 */
1605static void ocfs2_calc_trunc_pos(struct inode *inode,
1606 struct ocfs2_extent_list *el,
1607 struct ocfs2_extent_rec *rec,
1608 u32 trunc_start, u32 *trunc_cpos,
1609 u32 *trunc_len, u32 *trunc_end,
1610 u64 *blkno, int *done)
1611{
1612 int ret = 0;
1613 u32 coff, range;
1614
1615 range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec);
1616
1617 if (le32_to_cpu(rec->e_cpos) >= trunc_start) {
1618 *trunc_cpos = le32_to_cpu(rec->e_cpos);
1619 /*
1620 * Skip holes if any.
1621 */
1622 if (range < *trunc_end)
1623 *trunc_end = range;
1624 *trunc_len = *trunc_end - le32_to_cpu(rec->e_cpos);
1625 *blkno = le64_to_cpu(rec->e_blkno);
1626 *trunc_end = le32_to_cpu(rec->e_cpos);
1627 } else if (range > trunc_start) {
1628 *trunc_cpos = trunc_start;
1629 *trunc_len = *trunc_end - trunc_start;
1630 coff = trunc_start - le32_to_cpu(rec->e_cpos);
1631 *blkno = le64_to_cpu(rec->e_blkno) +
1632 ocfs2_clusters_to_blocks(inode->i_sb, coff);
1633 *trunc_end = trunc_start;
1634 } else {
1635 /*
 1636	 * There are two possibilities:
1637 *
1638 * - last record has been removed
1639 * - trunc_start was within a hole
1640 *
 1641	 * Either case means hole punching is complete.
1642 */
1643 ret = 1;
1644 }
1645
1646 *done = ret;
1647}
1648
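/*
 * Punch a hole of byte_len bytes at byte_start. Inline data is
 * truncated directly; otherwise refcounted extents touching the hole
 * are CoWed, the partial clusters at either edge are zeroed, and the
 * whole clusters in between are removed from the extent tree by
 * walking it from right to left.
 */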
Mark Fasheh063c4562007-07-03 13:34:11 -07001649static int ocfs2_remove_inode_range(struct inode *inode,
1650 struct buffer_head *di_bh, u64 byte_start,
1651 u64 byte_len)
1652{
Tristan Yec1631d42010-05-11 17:54:45 +08001653 int ret = 0, flags = 0, done = 0, i;
1654 u32 trunc_start, trunc_len, trunc_end, trunc_cpos, phys_cpos;
1655 u32 cluster_in_el;
Mark Fasheh063c4562007-07-03 13:34:11 -07001656 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1657 struct ocfs2_cached_dealloc_ctxt dealloc;
Mark Fashehb1967d02007-11-20 11:56:39 -08001658 struct address_space *mapping = inode->i_mapping;
Mark Fashehfecc0112008-11-12 15:16:38 -08001659 struct ocfs2_extent_tree et;
Tristan Yec1631d42010-05-11 17:54:45 +08001660 struct ocfs2_path *path = NULL;
1661 struct ocfs2_extent_list *el = NULL;
1662 struct ocfs2_extent_rec *rec = NULL;
Tristan Yee8aec062010-05-11 17:54:43 +08001663 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
Tristan Yec1631d42010-05-11 17:54:45 +08001664 u64 blkno, refcount_loc = le64_to_cpu(di->i_refcount_loc);
Mark Fasheh063c4562007-07-03 13:34:11 -07001665
Joel Becker5e404e92009-02-13 03:54:22 -08001666 ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh);
Mark Fasheh063c4562007-07-03 13:34:11 -07001667 ocfs2_init_dealloc_ctxt(&dealloc);
1668
1669 if (byte_len == 0)
1670 return 0;
1671
Mark Fasheh1afc32b2007-09-07 14:46:51 -07001672 if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
1673 ret = ocfs2_truncate_inline(inode, di_bh, byte_start,
Mark Fashehb1967d02007-11-20 11:56:39 -08001674 byte_start + byte_len, 0);
1675 if (ret) {
Mark Fasheh1afc32b2007-09-07 14:46:51 -07001676 mlog_errno(ret);
Mark Fashehb1967d02007-11-20 11:56:39 -08001677 goto out;
1678 }
1679 /*
1680 * There's no need to get fancy with the page cache
1681 * truncate of an inline-data inode. We're talking
1682 * about less than a page here, which will be cached
1683 * in the dinode buffer anyway.
1684 */
1685 unmap_mapping_range(mapping, 0, 0, 0);
1686 truncate_inode_pages(mapping, 0);
1687 goto out;
Mark Fasheh1afc32b2007-09-07 14:46:51 -07001688 }
1689
Tristan Yee8aec062010-05-11 17:54:43 +08001690 /*
 1691	 * For reflinks, we may need to CoW the two clusters which might be
 1692	 * partially zeroed later, if the hole's start and end offsets fall
 1693	 * within a cluster (i.e. are not exactly aligned to the cluster size).
1694 */
Mark Fasheh063c4562007-07-03 13:34:11 -07001695
Tristan Yee8aec062010-05-11 17:54:43 +08001696 if (OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL) {
1697
1698 ret = ocfs2_cow_file_pos(inode, di_bh, byte_start);
1699 if (ret) {
1700 mlog_errno(ret);
1701 goto out;
1702 }
1703
1704 ret = ocfs2_cow_file_pos(inode, di_bh, byte_start + byte_len);
1705 if (ret) {
1706 mlog_errno(ret);
1707 goto out;
1708 }
1709 }
1710
Mark Fasheh063c4562007-07-03 13:34:11 -07001711 trunc_start = ocfs2_clusters_for_bytes(osb->sb, byte_start);
Tristan Yec1631d42010-05-11 17:54:45 +08001712 trunc_end = (byte_start + byte_len) >> osb->s_clustersize_bits;
1713 cluster_in_el = trunc_end;
Mark Fasheh063c4562007-07-03 13:34:11 -07001714
Tristan Yec1631d42010-05-11 17:54:45 +08001715 mlog(0, "Inode: %llu, start: %llu, len: %llu, cstart: %u, cend: %u\n",
Mark Fasheh063c4562007-07-03 13:34:11 -07001716 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1717 (unsigned long long)byte_start,
Tristan Yec1631d42010-05-11 17:54:45 +08001718 (unsigned long long)byte_len, trunc_start, trunc_end);
Mark Fasheh063c4562007-07-03 13:34:11 -07001719
1720 ret = ocfs2_zero_partial_clusters(inode, byte_start, byte_len);
1721 if (ret) {
1722 mlog_errno(ret);
1723 goto out;
1724 }
1725
Tristan Yec1631d42010-05-11 17:54:45 +08001726 path = ocfs2_new_path_from_et(&et);
1727 if (!path) {
1728 ret = -ENOMEM;
1729 mlog_errno(ret);
1730 goto out;
1731 }
1732
1733 while (trunc_end > trunc_start) {
1734
1735 ret = ocfs2_find_path(INODE_CACHE(inode), path,
1736 cluster_in_el);
Mark Fasheh063c4562007-07-03 13:34:11 -07001737 if (ret) {
1738 mlog_errno(ret);
1739 goto out;
1740 }
1741
Tristan Yec1631d42010-05-11 17:54:45 +08001742 el = path_leaf_el(path);
Mark Fasheh063c4562007-07-03 13:34:11 -07001743
Tristan Yec1631d42010-05-11 17:54:45 +08001744 i = ocfs2_find_rec(el, trunc_end);
1745 /*
1746 * Need to go to previous extent block.
1747 */
1748 if (i < 0) {
1749 if (path->p_tree_depth == 0)
1750 break;
1751
1752 ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb,
1753 path,
1754 &cluster_in_el);
Mark Fasheh063c4562007-07-03 13:34:11 -07001755 if (ret) {
1756 mlog_errno(ret);
1757 goto out;
1758 }
Tristan Yec1631d42010-05-11 17:54:45 +08001759
1760 /*
 1761			 * We've reached the leftmost extent block;
1762 * it's safe to leave.
1763 */
1764 if (cluster_in_el == 0)
1765 break;
1766
1767 /*
 1768			 * The cpos used to search for the previous extent block
 1769			 * is always one cluster less than the actual trunc_end.
1770 */
1771 trunc_end = cluster_in_el + 1;
1772
1773 ocfs2_reinit_path(path, 1);
1774
1775 continue;
1776
1777 } else
1778 rec = &el->l_recs[i];
1779
1780 ocfs2_calc_trunc_pos(inode, el, rec, trunc_start, &trunc_cpos,
1781 &trunc_len, &trunc_end, &blkno, &done);
1782 if (done)
1783 break;
1784
1785 flags = rec->e_flags;
1786 phys_cpos = ocfs2_blocks_to_clusters(inode->i_sb, blkno);
1787
1788 ret = ocfs2_remove_btree_range(inode, &et, trunc_cpos,
1789 phys_cpos, trunc_len, flags,
1790 &dealloc, refcount_loc);
1791 if (ret < 0) {
1792 mlog_errno(ret);
1793 goto out;
Mark Fasheh063c4562007-07-03 13:34:11 -07001794 }
1795
Tristan Yec1631d42010-05-11 17:54:45 +08001796 cluster_in_el = trunc_end;
1797
1798 ocfs2_reinit_path(path, 1);
Mark Fasheh063c4562007-07-03 13:34:11 -07001799 }
1800
1801 ocfs2_truncate_cluster_pages(inode, byte_start, byte_len);
1802
1803out:
1804 ocfs2_schedule_truncate_log_flush(osb, 1);
1805 ocfs2_run_deallocs(osb, &dealloc);
1806
1807 return ret;
1808}
1809
Mark Fashehb2580102007-03-09 16:53:21 -08001810/*
1811 * Parts of this function taken from xfs_change_file_space()
1812 */
Mark Fasheh385820a2007-07-19 00:14:38 -07001813static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
1814 loff_t f_pos, unsigned int cmd,
1815 struct ocfs2_space_resv *sr,
1816 int change_size)
Mark Fashehb2580102007-03-09 16:53:21 -08001817{
1818 int ret;
1819 s64 llen;
Mark Fasheh385820a2007-07-19 00:14:38 -07001820 loff_t size;
Mark Fashehb2580102007-03-09 16:53:21 -08001821 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1822 struct buffer_head *di_bh = NULL;
1823 handle_t *handle;
Mark Fasheha00cce32007-07-20 11:28:30 -07001824 unsigned long long max_off = inode->i_sb->s_maxbytes;
Mark Fashehb2580102007-03-09 16:53:21 -08001825
Mark Fashehb2580102007-03-09 16:53:21 -08001826 if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
1827 return -EROFS;
1828
1829 mutex_lock(&inode->i_mutex);
1830
1831 /*
1832 * This prevents concurrent writes on other nodes
1833 */
1834 ret = ocfs2_rw_lock(inode, 1);
1835 if (ret) {
1836 mlog_errno(ret);
1837 goto out;
1838 }
1839
Mark Fashehe63aecb62007-10-18 15:30:42 -07001840 ret = ocfs2_inode_lock(inode, &di_bh, 1);
Mark Fashehb2580102007-03-09 16:53:21 -08001841 if (ret) {
1842 mlog_errno(ret);
1843 goto out_rw_unlock;
1844 }
1845
1846 if (inode->i_flags & (S_IMMUTABLE|S_APPEND)) {
1847 ret = -EPERM;
Mark Fashehe63aecb62007-10-18 15:30:42 -07001848 goto out_inode_unlock;
Mark Fashehb2580102007-03-09 16:53:21 -08001849 }
1850
1851 switch (sr->l_whence) {
1852 case 0: /*SEEK_SET*/
1853 break;
1854 case 1: /*SEEK_CUR*/
Mark Fasheh385820a2007-07-19 00:14:38 -07001855 sr->l_start += f_pos;
Mark Fashehb2580102007-03-09 16:53:21 -08001856 break;
1857 case 2: /*SEEK_END*/
1858 sr->l_start += i_size_read(inode);
1859 break;
1860 default:
1861 ret = -EINVAL;
Mark Fashehe63aecb62007-10-18 15:30:42 -07001862 goto out_inode_unlock;
Mark Fashehb2580102007-03-09 16:53:21 -08001863 }
1864 sr->l_whence = 0;
1865
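	/*
	 * llen is the offset of the request's last byte relative to
	 * l_start (or the raw l_len when it is not positive); it is only
	 * used for the range checks below.
	 */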
1866 llen = sr->l_len > 0 ? sr->l_len - 1 : sr->l_len;
1867
1868 if (sr->l_start < 0
1869 || sr->l_start > max_off
1870 || (sr->l_start + llen) < 0
1871 || (sr->l_start + llen) > max_off) {
1872 ret = -EINVAL;
Mark Fashehe63aecb62007-10-18 15:30:42 -07001873 goto out_inode_unlock;
Mark Fashehb2580102007-03-09 16:53:21 -08001874 }
Mark Fasheh385820a2007-07-19 00:14:38 -07001875 size = sr->l_start + sr->l_len;
Mark Fashehb2580102007-03-09 16:53:21 -08001876
1877 if (cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64) {
1878 if (sr->l_len <= 0) {
1879 ret = -EINVAL;
Mark Fashehe63aecb62007-10-18 15:30:42 -07001880 goto out_inode_unlock;
Mark Fashehb2580102007-03-09 16:53:21 -08001881 }
1882 }
1883
Mark Fasheh385820a2007-07-19 00:14:38 -07001884 if (file && should_remove_suid(file->f_path.dentry)) {
Mark Fashehb2580102007-03-09 16:53:21 -08001885 ret = __ocfs2_write_remove_suid(inode, di_bh);
1886 if (ret) {
1887 mlog_errno(ret);
Mark Fashehe63aecb62007-10-18 15:30:42 -07001888 goto out_inode_unlock;
Mark Fashehb2580102007-03-09 16:53:21 -08001889 }
1890 }
1891
1892 down_write(&OCFS2_I(inode)->ip_alloc_sem);
1893 switch (cmd) {
1894 case OCFS2_IOC_RESVSP:
1895 case OCFS2_IOC_RESVSP64:
1896 /*
1897 * This takes unsigned offsets, but the signed ones we
1898 * pass have been checked against overflow above.
1899 */
1900 ret = ocfs2_allocate_unwritten_extents(inode, sr->l_start,
1901 sr->l_len);
1902 break;
1903 case OCFS2_IOC_UNRESVSP:
1904 case OCFS2_IOC_UNRESVSP64:
1905 ret = ocfs2_remove_inode_range(inode, di_bh, sr->l_start,
1906 sr->l_len);
1907 break;
1908 default:
1909 ret = -EINVAL;
1910 }
1911 up_write(&OCFS2_I(inode)->ip_alloc_sem);
1912 if (ret) {
1913 mlog_errno(ret);
Mark Fashehe63aecb62007-10-18 15:30:42 -07001914 goto out_inode_unlock;
Mark Fashehb2580102007-03-09 16:53:21 -08001915 }
1916
1917 /*
1918 * We update c/mtime for these changes
1919 */
1920 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
1921 if (IS_ERR(handle)) {
1922 ret = PTR_ERR(handle);
1923 mlog_errno(ret);
Mark Fashehe63aecb62007-10-18 15:30:42 -07001924 goto out_inode_unlock;
Mark Fashehb2580102007-03-09 16:53:21 -08001925 }
1926
Mark Fasheh385820a2007-07-19 00:14:38 -07001927 if (change_size && i_size_read(inode) < size)
1928 i_size_write(inode, size);
1929
Mark Fashehb2580102007-03-09 16:53:21 -08001930 inode->i_ctime = inode->i_mtime = CURRENT_TIME;
1931 ret = ocfs2_mark_inode_dirty(handle, inode, di_bh);
1932 if (ret < 0)
1933 mlog_errno(ret);
1934
1935 ocfs2_commit_trans(osb, handle);
1936
Mark Fashehe63aecb62007-10-18 15:30:42 -07001937out_inode_unlock:
Mark Fashehb2580102007-03-09 16:53:21 -08001938 brelse(di_bh);
Mark Fashehe63aecb62007-10-18 15:30:42 -07001939 ocfs2_inode_unlock(inode, 1);
Mark Fashehb2580102007-03-09 16:53:21 -08001940out_rw_unlock:
1941 ocfs2_rw_unlock(inode, 1);
1942
Mark Fashehb2580102007-03-09 16:53:21 -08001943out:
Julia Lawallc259ae52008-07-21 09:59:15 +02001944 mutex_unlock(&inode->i_mutex);
Mark Fashehb2580102007-03-09 16:53:21 -08001945 return ret;
1946}
1947
Mark Fasheh385820a2007-07-19 00:14:38 -07001948int ocfs2_change_file_space(struct file *file, unsigned int cmd,
1949 struct ocfs2_space_resv *sr)
1950{
1951 struct inode *inode = file->f_path.dentry->d_inode;
Fernando Carrijoc19a28e2009-01-07 18:09:08 -08001952 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
Mark Fasheh385820a2007-07-19 00:14:38 -07001953
1954 if ((cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64) &&
1955 !ocfs2_writes_unwritten_extents(osb))
1956 return -ENOTTY;
1957 else if ((cmd == OCFS2_IOC_UNRESVSP || cmd == OCFS2_IOC_UNRESVSP64) &&
1958 !ocfs2_sparse_alloc(osb))
1959 return -ENOTTY;
1960
1961 if (!S_ISREG(inode->i_mode))
1962 return -EINVAL;
1963
1964 if (!(file->f_mode & FMODE_WRITE))
1965 return -EBADF;
1966
1967 return __ocfs2_change_file_space(file, inode, file->f_pos, cmd, sr, 0);
1968}
1969
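/*
 * fallocate(2) entry point. Preallocation is done as unwritten
 * extents through the RESVSP path, FALLOC_FL_PUNCH_HOLE maps to the
 * UNRESVSP path, and FALLOC_FL_KEEP_SIZE suppresses the i_size
 * update.
 */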
Christoph Hellwig2fe17c12011-01-14 13:07:43 +01001970static long ocfs2_fallocate(struct file *file, int mode, loff_t offset,
Mark Fasheh385820a2007-07-19 00:14:38 -07001971 loff_t len)
1972{
Christoph Hellwig2fe17c12011-01-14 13:07:43 +01001973 struct inode *inode = file->f_path.dentry->d_inode;
Mark Fasheh385820a2007-07-19 00:14:38 -07001974 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1975 struct ocfs2_space_resv sr;
1976 int change_size = 1;
Josef Bacikdb47fef2010-11-17 20:46:17 -05001977 int cmd = OCFS2_IOC_RESVSP64;
Mark Fasheh385820a2007-07-19 00:14:38 -07001978
Christoph Hellwig64c23e82011-01-14 13:07:30 +01001979 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
1980 return -EOPNOTSUPP;
Mark Fasheh385820a2007-07-19 00:14:38 -07001981 if (!ocfs2_writes_unwritten_extents(osb))
1982 return -EOPNOTSUPP;
1983
Mark Fasheh385820a2007-07-19 00:14:38 -07001984 if (mode & FALLOC_FL_KEEP_SIZE)
1985 change_size = 0;
1986
Josef Bacikdb47fef2010-11-17 20:46:17 -05001987 if (mode & FALLOC_FL_PUNCH_HOLE)
1988 cmd = OCFS2_IOC_UNRESVSP64;
1989
Mark Fasheh385820a2007-07-19 00:14:38 -07001990 sr.l_whence = 0;
1991 sr.l_start = (s64)offset;
1992 sr.l_len = (s64)len;
1993
Josef Bacikdb47fef2010-11-17 20:46:17 -05001994 return __ocfs2_change_file_space(NULL, inode, offset, cmd, &sr,
1995 change_size);
Mark Fasheh385820a2007-07-19 00:14:38 -07001996}
1997
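/*
 * Return 1 if any cluster backing the byte range starting at pos for
 * count bytes is refcounted (shared via reflink), 0 if none are, or a
 * negative error code.
 */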
Tao Ma293b2f72009-08-25 08:02:48 +08001998int ocfs2_check_range_for_refcount(struct inode *inode, loff_t pos,
1999 size_t count)
2000{
2001 int ret = 0;
2002 unsigned int extent_flags;
2003 u32 cpos, clusters, extent_len, phys_cpos;
2004 struct super_block *sb = inode->i_sb;
2005
2006 if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)) ||
Tao Ma2f48d592009-10-15 11:10:49 +08002007 !(OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL) ||
2008 OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
Tao Ma293b2f72009-08-25 08:02:48 +08002009 return 0;
2010
2011 cpos = pos >> OCFS2_SB(sb)->s_clustersize_bits;
2012 clusters = ocfs2_clusters_for_bytes(sb, pos + count) - cpos;
2013
2014 while (clusters) {
2015 ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &extent_len,
2016 &extent_flags);
2017 if (ret < 0) {
2018 mlog_errno(ret);
2019 goto out;
2020 }
2021
2022 if (phys_cpos && (extent_flags & OCFS2_EXT_REFCOUNTED)) {
2023 ret = 1;
2024 break;
2025 }
2026
2027 if (extent_len > clusters)
2028 extent_len = clusters;
2029
2030 clusters -= extent_len;
2031 cpos += extent_len;
2032 }
2033out:
2034 return ret;
2035}
2036
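/*
 * Take the inode lock EX and CoW any refcounted clusters overlapping
 * the write range so that the write never lands on shared extents.
 * Once the lock is taken, *meta_level is set to 1 so the caller knows
 * to drop it.
 */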
2037static int ocfs2_prepare_inode_for_refcount(struct inode *inode,
Tao Mab8908232010-08-12 10:27:14 +08002038 struct file *file,
Tao Ma293b2f72009-08-25 08:02:48 +08002039 loff_t pos, size_t count,
2040 int *meta_level)
2041{
2042 int ret;
2043 struct buffer_head *di_bh = NULL;
2044 u32 cpos = pos >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
2045 u32 clusters =
2046 ocfs2_clusters_for_bytes(inode->i_sb, pos + count) - cpos;
2047
2048 ret = ocfs2_inode_lock(inode, &di_bh, 1);
2049 if (ret) {
2050 mlog_errno(ret);
2051 goto out;
2052 }
2053
2054 *meta_level = 1;
2055
Tao Ma15502712010-08-12 10:36:38 +08002056 ret = ocfs2_refcount_cow(inode, file, di_bh, cpos, clusters, UINT_MAX);
Tao Ma293b2f72009-08-25 08:02:48 +08002057 if (ret)
2058 mlog_errno(ret);
2059out:
2060 brelse(di_bh);
2061 return ret;
2062}
2063
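/*
 * Common setup for the write paths: take the inode meta lock
 * (upgrading to EX when suid/sgid bits must be cleared), resolve the
 * target offset for O_APPEND, CoW refcounted extents in the range,
 * and decide whether an O_DIRECT write has to fall back to buffered
 * I/O (inline data, writes past i_size, or holes in the range).
 */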
Tao Mab8908232010-08-12 10:27:14 +08002064static int ocfs2_prepare_inode_for_write(struct file *file,
Tiger Yang8659ac22006-10-17 18:29:52 -07002065 loff_t *ppos,
2066 size_t count,
Mark Fasheh9517bac2007-02-09 20:24:12 -08002067 int appending,
Tao Ma86470e92009-12-03 21:55:05 +08002068 int *direct_io,
2069 int *has_refcount)
Mark Fashehccd979b2005-12-15 14:31:24 -08002070{
Mark Fasheh65ed39d2007-08-28 17:13:23 -07002071 int ret = 0, meta_level = 0;
Tao Mab8908232010-08-12 10:27:14 +08002072 struct dentry *dentry = file->f_path.dentry;
Tiger Yang8659ac22006-10-17 18:29:52 -07002073 struct inode *inode = dentry->d_inode;
Mark Fasheh65ed39d2007-08-28 17:13:23 -07002074 loff_t saved_pos, end;
Mark Fashehccd979b2005-12-15 14:31:24 -08002075
Sunil Mushran2bd63212010-01-25 16:57:38 -08002076 /*
Mark Fasheh65ed39d2007-08-28 17:13:23 -07002077 * We start with a read level meta lock and only jump to an ex
2078 * if we need to make modifications here.
Mark Fashehccd979b2005-12-15 14:31:24 -08002079 */
Mark Fashehccd979b2005-12-15 14:31:24 -08002080 for(;;) {
Mark Fashehe63aecb62007-10-18 15:30:42 -07002081 ret = ocfs2_inode_lock(inode, NULL, meta_level);
Mark Fashehccd979b2005-12-15 14:31:24 -08002082 if (ret < 0) {
2083 meta_level = -1;
2084 mlog_errno(ret);
2085 goto out;
2086 }
2087
2088 /* Clear suid / sgid if necessary. We do this here
2089 * instead of later in the write path because
2090 * remove_suid() calls ->setattr without any hint that
2091 * we may have already done our cluster locking. Since
2092 * ocfs2_setattr() *must* take cluster locks to
 2093		 * proceed, this will lead us to recursively lock the
2094 * inode. There's also the dinode i_size state which
2095 * can be lost via setattr during extending writes (we
 2096		 * set inode->i_size at the end of a write). */
Tiger Yang8659ac22006-10-17 18:29:52 -07002097 if (should_remove_suid(dentry)) {
Mark Fashehccd979b2005-12-15 14:31:24 -08002098 if (meta_level == 0) {
Mark Fashehe63aecb62007-10-18 15:30:42 -07002099 ocfs2_inode_unlock(inode, meta_level);
Mark Fashehccd979b2005-12-15 14:31:24 -08002100 meta_level = 1;
2101 continue;
2102 }
2103
2104 ret = ocfs2_write_remove_suid(inode);
2105 if (ret < 0) {
2106 mlog_errno(ret);
Tiger Yang8659ac22006-10-17 18:29:52 -07002107 goto out_unlock;
Mark Fashehccd979b2005-12-15 14:31:24 -08002108 }
2109 }
2110
2111 /* work on a copy of ppos until we're sure that we won't have
2112 * to recalculate it due to relocking. */
Tiger Yang8659ac22006-10-17 18:29:52 -07002113 if (appending) {
Mark Fashehccd979b2005-12-15 14:31:24 -08002114 saved_pos = i_size_read(inode);
2115 mlog(0, "O_APPEND: inode->i_size=%llu\n", saved_pos);
2116 } else {
Tiger Yang8659ac22006-10-17 18:29:52 -07002117 saved_pos = *ppos;
Mark Fashehccd979b2005-12-15 14:31:24 -08002118 }
Mark Fasheh3a0782d2007-01-17 12:53:31 -08002119
Mark Fasheh65ed39d2007-08-28 17:13:23 -07002120 end = saved_pos + count;
Mark Fasheh9517bac2007-02-09 20:24:12 -08002121
Tao Ma293b2f72009-08-25 08:02:48 +08002122 ret = ocfs2_check_range_for_refcount(inode, saved_pos, count);
2123 if (ret == 1) {
2124 ocfs2_inode_unlock(inode, meta_level);
2125 meta_level = -1;
2126
2127 ret = ocfs2_prepare_inode_for_refcount(inode,
Tao Mab8908232010-08-12 10:27:14 +08002128 file,
Tao Ma293b2f72009-08-25 08:02:48 +08002129 saved_pos,
2130 count,
2131 &meta_level);
Tao Ma86470e92009-12-03 21:55:05 +08002132 if (has_refcount)
2133 *has_refcount = 1;
Wengang Wang96a1cc72010-02-09 14:57:45 +08002134 if (direct_io)
2135 *direct_io = 0;
Tao Ma293b2f72009-08-25 08:02:48 +08002136 }
2137
2138 if (ret < 0) {
2139 mlog_errno(ret);
2140 goto out_unlock;
2141 }
2142
Mark Fasheh65ed39d2007-08-28 17:13:23 -07002143 /*
2144 * Skip the O_DIRECT checks if we don't need
2145 * them.
2146 */
2147 if (!direct_io || !(*direct_io))
2148 break;
Mark Fasheh9517bac2007-02-09 20:24:12 -08002149
Mark Fasheh65ed39d2007-08-28 17:13:23 -07002150 /*
Mark Fasheh1afc32b2007-09-07 14:46:51 -07002151 * There's no sane way to do direct writes to an inode
2152 * with inline data.
2153 */
2154 if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
2155 *direct_io = 0;
2156 break;
2157 }
2158
2159 /*
Mark Fasheh65ed39d2007-08-28 17:13:23 -07002160 * Allowing concurrent direct writes means
2161 * i_size changes wouldn't be synchronized, so
2162 * one node could wind up truncating another
 2163		 * node's writes.
2164 */
2165 if (end > i_size_read(inode)) {
2166 *direct_io = 0;
Mark Fasheh9517bac2007-02-09 20:24:12 -08002167 break;
2168 }
2169
Mark Fasheh3a0782d2007-01-17 12:53:31 -08002170 /*
Mark Fasheh65ed39d2007-08-28 17:13:23 -07002171 * We don't fill holes during direct io, so
2172 * check for them here. If any are found, the
2173 * caller will have to retake some cluster
2174 * locks and initiate the io as buffered.
Mark Fasheh3a0782d2007-01-17 12:53:31 -08002175 */
Mark Fasheh65ed39d2007-08-28 17:13:23 -07002176 ret = ocfs2_check_range_for_holes(inode, saved_pos, count);
2177 if (ret == 1) {
2178 *direct_io = 0;
2179 ret = 0;
2180 } else if (ret < 0)
2181 mlog_errno(ret);
Mark Fashehccd979b2005-12-15 14:31:24 -08002182 break;
2183 }
2184
Tiger Yang8659ac22006-10-17 18:29:52 -07002185 if (appending)
2186 *ppos = saved_pos;
2187
2188out_unlock:
Tao Ma293b2f72009-08-25 08:02:48 +08002189 if (meta_level >= 0)
2190 ocfs2_inode_unlock(inode, meta_level);
Tiger Yang8659ac22006-10-17 18:29:52 -07002191
2192out:
2193 return ret;
2194}
2195
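/*
 * Write entry point for both buffered and O_DIRECT I/O. Cluster and
 * local locks are taken according to the coherency mount option, an
 * O_DIRECT write falls back to buffered I/O when the range has holes
 * or extends i_size, and O_DSYNC/O_SYNC writes (as well as O_DIRECT
 * writes that fell back) flush the data and force a journal commit
 * when the inode's size or cluster count changed.
 */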
2196static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
2197 const struct iovec *iov,
2198 unsigned long nr_segs,
2199 loff_t pos)
2200{
Mark Fasheh9517bac2007-02-09 20:24:12 -08002201 int ret, direct_io, appending, rw_level, have_alloc_sem = 0;
Tao Ma86470e92009-12-03 21:55:05 +08002202 int can_do_direct, has_refcount = 0;
Mark Fasheh9517bac2007-02-09 20:24:12 -08002203 ssize_t written = 0;
2204 size_t ocount; /* original count */
2205 size_t count; /* after file limit checks */
Mark Fasheh9ea2d322007-10-18 14:14:45 -07002206 loff_t old_size, *ppos = &iocb->ki_pos;
2207 u32 old_clusters;
Mark Fasheh9517bac2007-02-09 20:24:12 -08002208 struct file *file = iocb->ki_filp;
2209 struct inode *inode = file->f_path.dentry->d_inode;
Mark Fasheh9ea2d322007-10-18 14:14:45 -07002210 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
Tristan Ye7bdb0d12010-10-11 16:46:39 +08002211 int full_coherency = !(osb->s_mount_opt &
2212 OCFS2_MOUNT_COHERENCY_BUFFERED);
Tiger Yang8659ac22006-10-17 18:29:52 -07002213
Tao Maef6b6892011-02-21 11:10:44 +08002214 mlog(0, "(0x%p, %u, '%.*s')\n", file,
2215 (unsigned int)nr_segs,
2216 file->f_path.dentry->d_name.len,
2217 file->f_path.dentry->d_name.name);
Tiger Yang8659ac22006-10-17 18:29:52 -07002218
Tiger Yang8659ac22006-10-17 18:29:52 -07002219 if (iocb->ki_left == 0)
2220 return 0;
2221
Mark Fasheh9517bac2007-02-09 20:24:12 -08002222 vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
2223
2224 appending = file->f_flags & O_APPEND ? 1 : 0;
2225 direct_io = file->f_flags & O_DIRECT ? 1 : 0;
2226
Tiger Yang8659ac22006-10-17 18:29:52 -07002227 mutex_lock(&inode->i_mutex);
Mark Fasheh9517bac2007-02-09 20:24:12 -08002228
Tristan Ye39c99f12010-12-07 14:35:07 +08002229 ocfs2_iocb_clear_sem_locked(iocb);
2230
Mark Fasheh9517bac2007-02-09 20:24:12 -08002231relock:
Tiger Yang8659ac22006-10-17 18:29:52 -07002232 /* to match setattr's i_mutex -> i_alloc_sem -> rw_lock ordering */
Mark Fasheh9517bac2007-02-09 20:24:12 -08002233 if (direct_io) {
Tiger Yang8659ac22006-10-17 18:29:52 -07002234 down_read(&inode->i_alloc_sem);
Mark Fasheh9517bac2007-02-09 20:24:12 -08002235 have_alloc_sem = 1;
Tristan Ye39c99f12010-12-07 14:35:07 +08002236 /* communicate with ocfs2_dio_end_io */
2237 ocfs2_iocb_set_sem_locked(iocb);
Tiger Yang8659ac22006-10-17 18:29:52 -07002238 }
2239
Tristan Ye7bdb0d12010-10-11 16:46:39 +08002240 /*
2241 * Concurrent O_DIRECT writes are allowed with
 2242	 * the mount option "coherency=buffered".
2243 */
2244 rw_level = (!direct_io || full_coherency);
2245
Tiger Yang8659ac22006-10-17 18:29:52 -07002246 ret = ocfs2_rw_lock(inode, rw_level);
2247 if (ret < 0) {
Mark Fasheh9517bac2007-02-09 20:24:12 -08002248 mlog_errno(ret);
2249 goto out_sems;
2250 }
2251
Tristan Ye7bdb0d12010-10-11 16:46:39 +08002252 /*
2253 * O_DIRECT writes with "coherency=full" need to take EX cluster
2254 * inode_lock to guarantee coherency.
2255 */
2256 if (direct_io && full_coherency) {
2257 /*
2258 * We need to take and drop the inode lock to force
2259 * other nodes to drop their caches. Buffered I/O
2260 * already does this in write_begin().
2261 */
2262 ret = ocfs2_inode_lock(inode, NULL, 1);
2263 if (ret < 0) {
2264 mlog_errno(ret);
2265 goto out_sems;
2266 }
2267
2268 ocfs2_inode_unlock(inode, 1);
2269 }
2270
Mark Fasheh9517bac2007-02-09 20:24:12 -08002271 can_do_direct = direct_io;
Tao Mab8908232010-08-12 10:27:14 +08002272 ret = ocfs2_prepare_inode_for_write(file, ppos,
Mark Fasheh9517bac2007-02-09 20:24:12 -08002273 iocb->ki_left, appending,
Tao Ma86470e92009-12-03 21:55:05 +08002274 &can_do_direct, &has_refcount);
Mark Fasheh9517bac2007-02-09 20:24:12 -08002275 if (ret < 0) {
Tiger Yang8659ac22006-10-17 18:29:52 -07002276 mlog_errno(ret);
2277 goto out;
2278 }
2279
Mark Fasheh9517bac2007-02-09 20:24:12 -08002280 /*
 2281	 * We can't complete the direct I/O as requested, so fall back to
2282 * buffered I/O.
2283 */
2284 if (direct_io && !can_do_direct) {
2285 ocfs2_rw_unlock(inode, rw_level);
2286 up_read(&inode->i_alloc_sem);
2287
2288 have_alloc_sem = 0;
2289 rw_level = -1;
2290
2291 direct_io = 0;
Mark Fasheh9517bac2007-02-09 20:24:12 -08002292 goto relock;
Tiger Yang8659ac22006-10-17 18:29:52 -07002293 }
Mark Fashehccd979b2005-12-15 14:31:24 -08002294
Mark Fasheh9ea2d322007-10-18 14:14:45 -07002295 /*
2296 * To later detect whether a journal commit for sync writes is
 2297	 * necessary, we sample i_size and the cluster count here.
2298 */
2299 old_size = i_size_read(inode);
2300 old_clusters = OCFS2_I(inode)->ip_clusters;
2301
Mark Fashehccd979b2005-12-15 14:31:24 -08002302 /* communicate with ocfs2_dio_end_io */
Mark Fasheh7cdfc3a2007-04-16 17:28:51 -07002303 ocfs2_iocb_set_rw_locked(iocb, rw_level);
Mark Fashehccd979b2005-12-15 14:31:24 -08002304
Li Dongyang6b933c82010-04-17 17:49:10 +08002305 ret = generic_segment_checks(iov, &nr_segs, &ocount,
2306 VERIFY_READ);
2307 if (ret)
2308 goto out_dio;
2309
2310 count = ocount;
2311 ret = generic_write_checks(file, ppos, &count,
2312 S_ISBLK(inode->i_mode));
2313 if (ret)
2314 goto out_dio;
2315
Mark Fasheh9517bac2007-02-09 20:24:12 -08002316 if (direct_io) {
2317 written = generic_file_direct_write(iocb, iov, &nr_segs, *ppos,
2318 ppos, count, ocount);
2319 if (written < 0) {
2320 ret = written;
2321 goto out_dio;
2322 }
2323 } else {
Li Dongyang6b933c82010-04-17 17:49:10 +08002324 current->backing_dev_info = file->f_mapping->backing_dev_info;
2325 written = generic_file_buffered_write(iocb, iov, nr_segs, *ppos,
2326 ppos, count, 0);
2327 current->backing_dev_info = NULL;
Mark Fasheh9517bac2007-02-09 20:24:12 -08002328 }
Mark Fashehccd979b2005-12-15 14:31:24 -08002329
Mark Fasheh9517bac2007-02-09 20:24:12 -08002330out_dio:
Mark Fashehccd979b2005-12-15 14:31:24 -08002331 /* buffered aio wouldn't have proper lock coverage today */
Mark Fasheh9517bac2007-02-09 20:24:12 -08002332 BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT));
Mark Fashehccd979b2005-12-15 14:31:24 -08002333
Tao Ma60c48672010-02-03 09:56:04 +08002334 if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) ||
Tristan Ye81c8c822010-08-19 15:15:00 +08002335 ((file->f_flags & O_DIRECT) && !direct_io)) {
Jan Kara918941a2009-08-17 18:50:08 +02002336 ret = filemap_fdatawrite_range(file->f_mapping, pos,
2337 pos + count - 1);
2338 if (ret < 0)
2339 written = ret;
2340
Coly Lia03ab782010-03-26 05:15:12 +08002341 if (!ret && ((old_size != i_size_read(inode)) ||
2342 (old_clusters != OCFS2_I(inode)->ip_clusters) ||
2343 has_refcount)) {
Joel Becker2b4e30f2008-09-03 20:03:41 -07002344 ret = jbd2_journal_force_commit(osb->journal->j_journal);
Mark Fasheh9ea2d322007-10-18 14:14:45 -07002345 if (ret < 0)
2346 written = ret;
2347 }
Jan Kara918941a2009-08-17 18:50:08 +02002348
2349 if (!ret)
2350 ret = filemap_fdatawait_range(file->f_mapping, pos,
2351 pos + count - 1);
Mark Fasheh9ea2d322007-10-18 14:14:45 -07002352 }
2353
Sunil Mushran2bd63212010-01-25 16:57:38 -08002354 /*
Mark Fashehccd979b2005-12-15 14:31:24 -08002355	 * deep in g_f_a_w_n()->ocfs2_direct_IO we pass in an ocfs2_dio_end_io
2356 * function pointer which is called when o_direct io completes so that
2357 * it can unlock our rw lock. (it's the clustered equivalent of
2358 * i_alloc_sem; protects truncate from racing with pending ios).
2359 * Unfortunately there are error cases which call end_io and others
 2360	 * that don't, so we don't have to unlock the rw_lock if either an
2361 * async dio is going to do it in the future or an end_io after an
2362 * error has already done it.
2363 */
Coly Li66b116c2010-02-25 14:57:13 +08002364 if ((ret == -EIOCBQUEUED) || (!ocfs2_iocb_is_rw_locked(iocb))) {
Mark Fashehccd979b2005-12-15 14:31:24 -08002365 rw_level = -1;
2366 have_alloc_sem = 0;
2367 }
2368
2369out:
Mark Fasheh9517bac2007-02-09 20:24:12 -08002370 if (rw_level != -1)
2371 ocfs2_rw_unlock(inode, rw_level);
2372
2373out_sems:
Tristan Ye39c99f12010-12-07 14:35:07 +08002374 if (have_alloc_sem) {
Mark Fashehccd979b2005-12-15 14:31:24 -08002375 up_read(&inode->i_alloc_sem);
Tristan Ye39c99f12010-12-07 14:35:07 +08002376 ocfs2_iocb_clear_sem_locked(iocb);
2377 }
Mark Fasheh9517bac2007-02-09 20:24:12 -08002378
Jes Sorensen1b1dcc12006-01-09 15:59:24 -08002379 mutex_unlock(&inode->i_mutex);
Mark Fashehccd979b2005-12-15 14:31:24 -08002380
Wengang Wang812e7a62009-07-10 13:26:04 +08002381 if (written)
2382 ret = written;
Wengang Wang812e7a62009-07-10 13:26:04 +08002383 return ret;
Mark Fashehccd979b2005-12-15 14:31:24 -08002384}
2385
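/*
 * Splice actor: prepare the inode for a write of sd->total_len bytes
 * at sd->pos, then feed the pipe buffers to the generic pipe_to_file
 * actor.
 */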
Miklos Szeredi328eaab2009-04-14 19:48:39 +02002386static int ocfs2_splice_to_file(struct pipe_inode_info *pipe,
2387 struct file *out,
2388 struct splice_desc *sd)
2389{
2390 int ret;
2391
Tao Mab8908232010-08-12 10:27:14 +08002392 ret = ocfs2_prepare_inode_for_write(out, &sd->pos,
Tao Ma86470e92009-12-03 21:55:05 +08002393 sd->total_len, 0, NULL, NULL);
Miklos Szeredi328eaab2009-04-14 19:48:39 +02002394 if (ret < 0) {
2395 mlog_errno(ret);
2396 return ret;
2397 }
2398
2399 return splice_from_pipe_feed(pipe, sd, pipe_to_file);
2400}
2401
Tiger Yang8659ac22006-10-17 18:29:52 -07002402static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe,
2403 struct file *out,
2404 loff_t *ppos,
2405 size_t len,
2406 unsigned int flags)
2407{
2408 int ret;
Miklos Szeredi328eaab2009-04-14 19:48:39 +02002409 struct address_space *mapping = out->f_mapping;
2410 struct inode *inode = mapping->host;
2411 struct splice_desc sd = {
2412 .total_len = len,
2413 .flags = flags,
2414 .pos = *ppos,
2415 .u.file = out,
2416 };
Tiger Yang8659ac22006-10-17 18:29:52 -07002417
Tao Maef6b6892011-02-21 11:10:44 +08002418 mlog(0, "(0x%p, 0x%p, %u, '%.*s')\n", out, pipe,
2419 (unsigned int)len,
2420 out->f_path.dentry->d_name.len,
2421 out->f_path.dentry->d_name.name);
Tiger Yang8659ac22006-10-17 18:29:52 -07002422
Miklos Szeredi7bfac9e2009-04-06 17:41:00 +02002423 if (pipe->inode)
Miklos Szeredi328eaab2009-04-14 19:48:39 +02002424 mutex_lock_nested(&pipe->inode->i_mutex, I_MUTEX_PARENT);
2425
2426 splice_from_pipe_begin(&sd);
2427 do {
2428 ret = splice_from_pipe_next(pipe, &sd);
2429 if (ret <= 0)
2430 break;
2431
2432 mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD);
2433 ret = ocfs2_rw_lock(inode, 1);
2434 if (ret < 0)
2435 mlog_errno(ret);
2436 else {
2437 ret = ocfs2_splice_to_file(pipe, out, &sd);
2438 ocfs2_rw_unlock(inode, 1);
2439 }
2440 mutex_unlock(&inode->i_mutex);
2441 } while (ret > 0);
2442 splice_from_pipe_end(pipe, &sd);
2443
Miklos Szeredi7bfac9e2009-04-06 17:41:00 +02002444 if (pipe->inode)
2445 mutex_unlock(&pipe->inode->i_mutex);
Tiger Yang8659ac22006-10-17 18:29:52 -07002446
Miklos Szeredi328eaab2009-04-14 19:48:39 +02002447 if (sd.num_spliced)
2448 ret = sd.num_spliced;
2449
2450 if (ret > 0) {
2451 unsigned long nr_pages;
Jan Karad23c9372009-08-18 18:24:31 +02002452 int err;
Miklos Szeredi328eaab2009-04-14 19:48:39 +02002453
Miklos Szeredi328eaab2009-04-14 19:48:39 +02002454 nr_pages = (ret + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
2455
Jan Karad23c9372009-08-18 18:24:31 +02002456 err = generic_write_sync(out, *ppos, ret);
2457 if (err)
2458 ret = err;
2459 else
2460 *ppos += ret;
Miklos Szeredi328eaab2009-04-14 19:48:39 +02002461
Miklos Szeredi328eaab2009-04-14 19:48:39 +02002462 balance_dirty_pages_ratelimited_nr(mapping, nr_pages);
2463 }
Tiger Yang8659ac22006-10-17 18:29:52 -07002464
Tiger Yang8659ac22006-10-17 18:29:52 -07002465 return ret;
2466}
2467
2468static ssize_t ocfs2_file_splice_read(struct file *in,
2469 loff_t *ppos,
2470 struct pipe_inode_info *pipe,
2471 size_t len,
2472 unsigned int flags)
2473{
Tao Ma1962f392009-06-19 15:36:52 +08002474 int ret = 0, lock_level = 0;
Josef Sipekd28c9172006-12-08 02:37:25 -08002475 struct inode *inode = in->f_path.dentry->d_inode;
Tiger Yang8659ac22006-10-17 18:29:52 -07002476
Tao Maef6b6892011-02-21 11:10:44 +08002477 mlog(0, "(0x%p, 0x%p, %u, '%.*s')\n", in, pipe,
2478 (unsigned int)len,
2479 in->f_path.dentry->d_name.len,
2480 in->f_path.dentry->d_name.name);
Tiger Yang8659ac22006-10-17 18:29:52 -07002481
2482 /*
2483 * See the comment in ocfs2_file_aio_read()
2484 */
Tao Ma1962f392009-06-19 15:36:52 +08002485 ret = ocfs2_inode_lock_atime(inode, in->f_vfsmnt, &lock_level);
Tiger Yang8659ac22006-10-17 18:29:52 -07002486 if (ret < 0) {
2487 mlog_errno(ret);
2488 goto bail;
2489 }
Tao Ma1962f392009-06-19 15:36:52 +08002490 ocfs2_inode_unlock(inode, lock_level);
Tiger Yang8659ac22006-10-17 18:29:52 -07002491
2492 ret = generic_file_splice_read(in, ppos, pipe, len, flags);
2493
2494bail:
Tiger Yang8659ac22006-10-17 18:29:52 -07002495 return ret;
2496}
2497
Mark Fashehccd979b2005-12-15 14:31:24 -08002498static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
Badari Pulavarty027445c2006-09-30 23:28:46 -07002499 const struct iovec *iov,
2500 unsigned long nr_segs,
Mark Fashehccd979b2005-12-15 14:31:24 -08002501 loff_t pos)
2502{
Tiger Yang25899de2006-11-15 15:49:02 +08002503 int ret = 0, rw_level = -1, have_alloc_sem = 0, lock_level = 0;
Mark Fashehccd979b2005-12-15 14:31:24 -08002504 struct file *filp = iocb->ki_filp;
Josef Sipekd28c9172006-12-08 02:37:25 -08002505 struct inode *inode = filp->f_path.dentry->d_inode;
Mark Fashehccd979b2005-12-15 14:31:24 -08002506
Tao Maef6b6892011-02-21 11:10:44 +08002507 mlog(0, "(0x%p, %u, '%.*s')\n", filp,
2508 (unsigned int)nr_segs,
2509 filp->f_path.dentry->d_name.len,
2510 filp->f_path.dentry->d_name.name);
Mark Fashehccd979b2005-12-15 14:31:24 -08002511
2512 if (!inode) {
2513 ret = -EINVAL;
2514 mlog_errno(ret);
2515 goto bail;
2516 }
2517
Tristan Ye39c99f12010-12-07 14:35:07 +08002518 ocfs2_iocb_clear_sem_locked(iocb);
2519
Sunil Mushran2bd63212010-01-25 16:57:38 -08002520 /*
Mark Fashehccd979b2005-12-15 14:31:24 -08002521 * buffered reads protect themselves in ->readpage(). O_DIRECT reads
2522 * need locks to protect pending reads from racing with truncate.
2523 */
2524 if (filp->f_flags & O_DIRECT) {
2525 down_read(&inode->i_alloc_sem);
2526 have_alloc_sem = 1;
Tristan Ye39c99f12010-12-07 14:35:07 +08002527 ocfs2_iocb_set_sem_locked(iocb);
Mark Fashehccd979b2005-12-15 14:31:24 -08002528
2529 ret = ocfs2_rw_lock(inode, 0);
2530 if (ret < 0) {
2531 mlog_errno(ret);
2532 goto bail;
2533 }
2534 rw_level = 0;
2535 /* communicate with ocfs2_dio_end_io */
Mark Fasheh7cdfc3a2007-04-16 17:28:51 -07002536 ocfs2_iocb_set_rw_locked(iocb, rw_level);
Mark Fashehccd979b2005-12-15 14:31:24 -08002537 }
2538
Mark Fashehc4374f82006-05-05 19:04:35 -07002539 /*
2540 * We're fine letting folks race truncates and extending
2541 * writes with read across the cluster, just like they can
2542 * locally. Hence no rw_lock during read.
Sunil Mushran2bd63212010-01-25 16:57:38 -08002543 *
Mark Fashehc4374f82006-05-05 19:04:35 -07002544 * Take and drop the meta data lock to update inode fields
 2545	 * like i_size. This gives the checks down in
Sunil Mushran2bd63212010-01-25 16:57:38 -08002546 * generic_file_aio_read() a chance of actually working.
Mark Fashehc4374f82006-05-05 19:04:35 -07002547 */
Mark Fashehe63aecb62007-10-18 15:30:42 -07002548 ret = ocfs2_inode_lock_atime(inode, filp->f_vfsmnt, &lock_level);
Mark Fashehc4374f82006-05-05 19:04:35 -07002549 if (ret < 0) {
2550 mlog_errno(ret);
2551 goto bail;
2552 }
Mark Fashehe63aecb62007-10-18 15:30:42 -07002553 ocfs2_inode_unlock(inode, lock_level);
Mark Fashehc4374f82006-05-05 19:04:35 -07002554
Badari Pulavarty027445c2006-09-30 23:28:46 -07002555 ret = generic_file_aio_read(iocb, iov, nr_segs, iocb->ki_pos);
Mark Fashehccd979b2005-12-15 14:31:24 -08002556 if (ret == -EINVAL)
Sunil Mushran56753bd2008-06-09 11:24:41 -07002557 mlog(0, "generic_file_aio_read returned -EINVAL\n");
Mark Fashehccd979b2005-12-15 14:31:24 -08002558
2559 /* buffered aio wouldn't have proper lock coverage today */
2560 BUG_ON(ret == -EIOCBQUEUED && !(filp->f_flags & O_DIRECT));
2561
2562 /* see ocfs2_file_aio_write */
2563 if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) {
2564 rw_level = -1;
2565 have_alloc_sem = 0;
2566 }
2567
2568bail:
Tristan Ye39c99f12010-12-07 14:35:07 +08002569 if (have_alloc_sem) {
Mark Fashehccd979b2005-12-15 14:31:24 -08002570 up_read(&inode->i_alloc_sem);
Tristan Ye39c99f12010-12-07 14:35:07 +08002571 ocfs2_iocb_clear_sem_locked(iocb);
2572 }
Sunil Mushran2bd63212010-01-25 16:57:38 -08002573 if (rw_level != -1)
Mark Fashehccd979b2005-12-15 14:31:24 -08002574 ocfs2_rw_unlock(inode, rw_level);
Mark Fashehccd979b2005-12-15 14:31:24 -08002575
2576 return ret;
2577}
2578
Arjan van de Ven92e1d5b2007-02-12 00:55:39 -08002579const struct inode_operations ocfs2_file_iops = {
Mark Fashehccd979b2005-12-15 14:31:24 -08002580 .setattr = ocfs2_setattr,
2581 .getattr = ocfs2_getattr,
Tiger Yangd38eb8d2006-11-27 09:59:21 +08002582 .permission = ocfs2_permission,
Tiger Yangcf1d6c72008-08-18 17:11:00 +08002583 .setxattr = generic_setxattr,
2584 .getxattr = generic_getxattr,
2585 .listxattr = ocfs2_listxattr,
2586 .removexattr = generic_removexattr,
Mark Fasheh00dc4172008-10-03 17:32:11 -04002587 .fiemap = ocfs2_fiemap,
Mark Fashehccd979b2005-12-15 14:31:24 -08002588};
2589
Arjan van de Ven92e1d5b2007-02-12 00:55:39 -08002590const struct inode_operations ocfs2_special_file_iops = {
Mark Fashehccd979b2005-12-15 14:31:24 -08002591 .setattr = ocfs2_setattr,
2592 .getattr = ocfs2_getattr,
Tiger Yangd38eb8d2006-11-27 09:59:21 +08002593 .permission = ocfs2_permission,
Mark Fashehccd979b2005-12-15 14:31:24 -08002594};
2595
Mark Fasheh53da4932008-07-21 14:29:16 -07002596/*
2597 * Other than ->lock, keep ocfs2_fops and ocfs2_dops in sync with
2598 * ocfs2_fops_no_plocks and ocfs2_dops_no_plocks!
2599 */
Arjan van de Ven4b6f5d22006-03-28 01:56:42 -08002600const struct file_operations ocfs2_fops = {
Jan Kara32c3c0e2007-12-19 15:24:52 +01002601 .llseek = generic_file_llseek,
Mark Fashehccd979b2005-12-15 14:31:24 -08002602 .read = do_sync_read,
2603 .write = do_sync_write,
Mark Fashehccd979b2005-12-15 14:31:24 -08002604 .mmap = ocfs2_mmap,
2605 .fsync = ocfs2_sync_file,
2606 .release = ocfs2_file_release,
2607 .open = ocfs2_file_open,
2608 .aio_read = ocfs2_file_aio_read,
2609 .aio_write = ocfs2_file_aio_write,
Andi Kleenc9ec1482008-01-27 03:17:17 +01002610 .unlocked_ioctl = ocfs2_ioctl,
Mark Fasheh586d2322007-03-09 15:56:28 -08002611#ifdef CONFIG_COMPAT
2612 .compat_ioctl = ocfs2_compat_ioctl,
2613#endif
Mark Fasheh53da4932008-07-21 14:29:16 -07002614 .lock = ocfs2_lock,
2615 .flock = ocfs2_flock,
2616 .splice_read = ocfs2_file_splice_read,
2617 .splice_write = ocfs2_file_splice_write,
Christoph Hellwig2fe17c12011-01-14 13:07:43 +01002618 .fallocate = ocfs2_fallocate,
Mark Fasheh53da4932008-07-21 14:29:16 -07002619};
2620
2621const struct file_operations ocfs2_dops = {
2622 .llseek = generic_file_llseek,
2623 .read = generic_read_dir,
2624 .readdir = ocfs2_readdir,
2625 .fsync = ocfs2_sync_file,
2626 .release = ocfs2_dir_release,
2627 .open = ocfs2_dir_open,
2628 .unlocked_ioctl = ocfs2_ioctl,
2629#ifdef CONFIG_COMPAT
2630 .compat_ioctl = ocfs2_compat_ioctl,
2631#endif
2632 .lock = ocfs2_lock,
2633 .flock = ocfs2_flock,
2634};
2635
2636/*
2637 * POSIX-lockless variants of our file_operations.
2638 *
2639 * These will be used if the underlying cluster stack does not support
2640 * posix file locking, if the user passes the "localflocks" mount
2641 * option, or if we have a local-only fs.
2642 *
2643 * ocfs2_flock is in here because all stacks handle UNIX file locks,
2644 * so we still want it in the case of no stack support for
2645 * plocks. Internally, it will do the right thing when asked to ignore
2646 * the cluster.
2647 */
2648const struct file_operations ocfs2_fops_no_plocks = {
2649 .llseek = generic_file_llseek,
2650 .read = do_sync_read,
2651 .write = do_sync_write,
2652 .mmap = ocfs2_mmap,
2653 .fsync = ocfs2_sync_file,
2654 .release = ocfs2_file_release,
2655 .open = ocfs2_file_open,
2656 .aio_read = ocfs2_file_aio_read,
2657 .aio_write = ocfs2_file_aio_write,
2658 .unlocked_ioctl = ocfs2_ioctl,
2659#ifdef CONFIG_COMPAT
2660 .compat_ioctl = ocfs2_compat_ioctl,
2661#endif
Mark Fasheh53fc6222007-12-20 16:49:04 -08002662 .flock = ocfs2_flock,
Tiger Yang8659ac22006-10-17 18:29:52 -07002663 .splice_read = ocfs2_file_splice_read,
2664 .splice_write = ocfs2_file_splice_write,
Mark Fashehccd979b2005-12-15 14:31:24 -08002665};
2666
Mark Fasheh53da4932008-07-21 14:29:16 -07002667const struct file_operations ocfs2_dops_no_plocks = {
Jan Kara32c3c0e2007-12-19 15:24:52 +01002668 .llseek = generic_file_llseek,
Mark Fashehccd979b2005-12-15 14:31:24 -08002669 .read = generic_read_dir,
2670 .readdir = ocfs2_readdir,
2671 .fsync = ocfs2_sync_file,
Mark Fasheh53fc6222007-12-20 16:49:04 -08002672 .release = ocfs2_dir_release,
2673 .open = ocfs2_dir_open,
Andi Kleenc9ec1482008-01-27 03:17:17 +01002674 .unlocked_ioctl = ocfs2_ioctl,
Mark Fasheh586d2322007-03-09 15:56:28 -08002675#ifdef CONFIG_COMPAT
2676 .compat_ioctl = ocfs2_compat_ioctl,
2677#endif
Mark Fasheh53fc6222007-12-20 16:49:04 -08002678 .flock = ocfs2_flock,
Mark Fashehccd979b2005-12-15 14:31:24 -08002679};