/**
 * fs/f2fs/file.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/mount.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "acl.h"

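/*
 * Make a mmapped page writable: reserve an on-disk block for the faulting
 * page if it still sits in a hole, re-check the page against the mapping
 * and i_size, zero any part beyond EOF, and return with the page locked
 * and dirty (VM_FAULT_LOCKED via block_page_mkwrite_return()).
 */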
static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
						struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *node_page;
	block_t old_blk_addr;
	struct dnode_of_data dn;
	int err;

	f2fs_balance_fs(sbi);

	sb_start_pagefault(inode->i_sb);

	mutex_lock_op(sbi, DATA_NEW);

	/* block allocation */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, 0);
	if (err) {
		mutex_unlock_op(sbi, DATA_NEW);
		goto out;
	}

	old_blk_addr = dn.data_blkaddr;
	node_page = dn.node_page;

	if (old_blk_addr == NULL_ADDR) {
		err = reserve_new_block(&dn);
		if (err) {
			f2fs_put_dnode(&dn);
			mutex_unlock_op(sbi, DATA_NEW);
			goto out;
		}
	}
	f2fs_put_dnode(&dn);

	mutex_unlock_op(sbi, DATA_NEW);

	lock_page(page);
	if (page->mapping != inode->i_mapping ||
			page_offset(page) >= i_size_read(inode) ||
			!PageUptodate(page)) {
		unlock_page(page);
		err = -EFAULT;
		goto out;
	}

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto out;

	/* fill the page */
	wait_on_page_writeback(page);

	/* page is wholly or partially inside EOF */
	if (((page->index + 1) << PAGE_CACHE_SHIFT) > i_size_read(inode)) {
		unsigned offset;
		offset = i_size_read(inode) & ~PAGE_CACHE_MASK;
		zero_user_segment(page, offset, PAGE_CACHE_SIZE);
	}
	set_page_dirty(page);
	SetPageUptodate(page);

	file_update_time(vma->vm_file);
out:
	sb_end_pagefault(inode->i_sb);
	return block_page_mkwrite_return(err);
}

static const struct vm_operations_struct f2fs_file_vm_ops = {
	.fault = filemap_fault,
	.page_mkwrite = f2fs_vm_page_mkwrite,
};

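/*
 * Return true when the parent directory of this inode has not been
 * written by the last checkpoint, in which case fsync must fall back
 * to a full checkpoint so that recovery can find the dentry.
 */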
static int need_to_sync_dir(struct f2fs_sb_info *sbi, struct inode *inode)
{
	struct dentry *dentry;
	nid_t pino;

	inode = igrab(inode);
	dentry = d_find_any_alias(inode);
	if (!dentry) {
		iput(inode);
		return 0;
	}
	pino = dentry->d_parent->d_inode->i_ino;
	dput(dentry);
	iput(inode);
	return !is_checkpointed_node(sbi, pino);
}

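/*
 * fsync/fdatasync entry point: flush dirty data pages first, then either
 * trigger a full checkpoint (non-regular file, nlink != 1, FI_NEED_CP,
 * no room for roll-forward recovery, or an un-checkpointed parent dir)
 * or just write back this inode's dirty node pages.
 */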
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file->f_mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	unsigned long long cur_version;
	int ret = 0;
	bool need_cp = false;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};

	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (ret)
		return ret;

	mutex_lock(&inode->i_mutex);

	if (inode->i_sb->s_flags & MS_RDONLY)
		goto out;
	if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
		goto out;

	mutex_lock(&sbi->cp_mutex);
	cur_version = le64_to_cpu(F2FS_CKPT(sbi)->checkpoint_ver);
	mutex_unlock(&sbi->cp_mutex);

	if (F2FS_I(inode)->data_version != cur_version &&
					!(inode->i_state & I_DIRTY))
		goto out;
	F2FS_I(inode)->data_version--;

	if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1)
		need_cp = true;
	if (is_inode_flag_set(F2FS_I(inode), FI_NEED_CP))
		need_cp = true;
	if (!space_for_roll_forward(sbi))
		need_cp = true;
	if (need_to_sync_dir(sbi, inode))
		need_cp = true;

	f2fs_write_inode(inode, NULL);

	if (need_cp) {
		/* all the dirty node pages should be flushed for POR */
		ret = f2fs_sync_fs(inode->i_sb, 1);
		clear_inode_flag(F2FS_I(inode), FI_NEED_CP);
	} else {
		while (sync_node_pages(sbi, inode->i_ino, &wbc) == 0)
			f2fs_write_inode(inode, NULL);
		filemap_fdatawait_range(sbi->node_inode->i_mapping,
							0, LONG_MAX);
	}
out:
	mutex_unlock(&inode->i_mutex);
	return ret;
}

static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &f2fs_file_vm_ops;
	return 0;
}

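/*
 * Free up to @count block addresses in the node page of @dn, starting at
 * @dn->ofs_in_node: each allocated block is dropped from the extent cache,
 * invalidated, and subtracted from the inode's valid block count.
 * Returns the number of blocks actually freed.
 */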
static int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
	int nr_free = 0, ofs = dn->ofs_in_node;
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct f2fs_node *raw_node;
	__le32 *addr;

	raw_node = page_address(dn->node_page);
	addr = blkaddr_in_node(raw_node) + ofs;

	for ( ; count > 0; count--, addr++, dn->ofs_in_node++) {
		block_t blkaddr = le32_to_cpu(*addr);
		if (blkaddr == NULL_ADDR)
			continue;

		update_extent_cache(NULL_ADDR, dn);
		invalidate_blocks(sbi, blkaddr);
		dec_valid_block_count(sbi, dn->inode, 1);
		nr_free++;
	}
	if (nr_free) {
		set_page_dirty(dn->node_page);
		sync_inode_page(dn);
	}
	dn->ofs_in_node = ofs;
	return nr_free;
}

void truncate_data_blocks(struct dnode_of_data *dn)
{
	truncate_data_blocks_range(dn, ADDRS_PER_BLOCK);
}

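/*
 * Zero the tail of the page that contains the new EOF so no stale data
 * remains beyond @from within that page.
 */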
static void truncate_partial_data_page(struct inode *inode, u64 from)
{
	unsigned offset = from & (PAGE_CACHE_SIZE - 1);
	struct page *page;

	if (!offset)
		return;

	page = find_data_page(inode, from >> PAGE_CACHE_SHIFT);
	if (IS_ERR(page))
		return;

	lock_page(page);
	wait_on_page_writeback(page);
	zero_user(page, offset, PAGE_CACHE_SIZE - offset);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
}

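/*
 * Free all data blocks from @from (rounded up to a block boundary) to the
 * end of file: first the remaining addresses in the node block covering
 * @from, then everything past it via truncate_inode_blocks(), and finally
 * zero the partial last page in the page cache.
 */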
static int truncate_blocks(struct inode *inode, u64 from)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	unsigned int blocksize = inode->i_sb->s_blocksize;
	struct dnode_of_data dn;
	pgoff_t free_from;
	int count = 0;
	int err;

	free_from = (pgoff_t)
			((from + blocksize - 1) >> (sbi->log_blocksize));

	mutex_lock_op(sbi, DATA_TRUNC);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, free_from, RDONLY_NODE);
	if (err) {
		if (err == -ENOENT)
			goto free_next;
		mutex_unlock_op(sbi, DATA_TRUNC);
		return err;
	}

	if (IS_INODE(dn.node_page))
		count = ADDRS_PER_INODE;
	else
		count = ADDRS_PER_BLOCK;

	count -= dn.ofs_in_node;
	BUG_ON(count < 0);
	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		truncate_data_blocks_range(&dn, count);
		free_from += count;
	}

	f2fs_put_dnode(&dn);
free_next:
	err = truncate_inode_blocks(inode, free_from);
	mutex_unlock_op(sbi, DATA_TRUNC);

	/* lastly zero out the first data page */
	truncate_partial_data_page(inode, from);

	return err;
}

void f2fs_truncate(struct inode *inode)
{
	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))
		return;

	if (!truncate_blocks(inode, i_size_read(inode))) {
		inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		mark_inode_dirty(inode);
	}

	f2fs_balance_fs(F2FS_SB(inode->i_sb));
}

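/*
 * generic_fillattr() reports blocks in f2fs's 4KB units; shift by 3 to
 * convert them to the 512-byte sectors expected in struct kstat.
 */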
static int f2fs_getattr(struct vfsmount *mnt,
			struct dentry *dentry, struct kstat *stat)
{
	struct inode *inode = dentry->d_inode;
	generic_fillattr(inode, stat);
	stat->blocks <<= 3;
	return 0;
}

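/*
 * Like setattr_copy(), except that with POSIX ACLs enabled a mode change
 * is staged through set_acl_inode() (FI_ACL_MODE) so that f2fs_setattr()
 * can apply it after f2fs_acl_chmod().
 */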
#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct inode *inode, const struct iattr *attr)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int ia_valid = attr->ia_valid;

	if (ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (ia_valid & ATTR_ATIME)
		inode->i_atime = timespec_trunc(attr->ia_atime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MTIME)
		inode->i_mtime = timespec_trunc(attr->ia_mtime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_CTIME)
		inode->i_ctime = timespec_trunc(attr->ia_ctime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;

		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			mode &= ~S_ISGID;
		set_acl_inode(fi, mode);
	}
}
#else
#define __setattr_copy setattr_copy
#endif

int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	struct f2fs_inode_info *fi = F2FS_I(inode);
	int err;

	err = inode_change_ok(inode, attr);
	if (err)
		return err;

	if ((attr->ia_valid & ATTR_SIZE) &&
		attr->ia_size != i_size_read(inode)) {
		truncate_setsize(inode, attr->ia_size);
		f2fs_truncate(inode);
	}

	__setattr_copy(inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
		err = f2fs_acl_chmod(inode);
		if (err || is_inode_flag_set(fi, FI_ACL_MODE)) {
			inode->i_mode = fi->i_acl_mode;
			clear_inode_flag(fi, FI_ACL_MODE);
		}
	}

	mark_inode_dirty(inode);
	return err;
}

const struct inode_operations f2fs_file_inode_operations = {
	.getattr = f2fs_getattr,
	.setattr = f2fs_setattr,
	.get_acl = f2fs_get_acl,
#ifdef CONFIG_F2FS_FS_XATTR
	.setxattr = generic_setxattr,
	.getxattr = generic_getxattr,
	.listxattr = f2fs_listxattr,
	.removexattr = generic_removexattr,
#endif
};

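/*
 * Zero @len bytes at @start in the data page at @index, allocating the
 * page if needed, and mark it dirty.
 */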
static void fill_zero(struct inode *inode, pgoff_t index,
					loff_t start, loff_t len)
{
	struct page *page;

	if (!len)
		return;

	page = get_new_data_page(inode, index, false);

	if (!IS_ERR(page)) {
		wait_on_page_writeback(page);
		zero_user(page, start, len);
		set_page_dirty(page);
		f2fs_put_page(page, 1);
	}
}

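/*
 * Release the on-disk blocks backing pages [pg_start, pg_end); pages that
 * have no dnode yet (-ENOENT) are simply skipped.
 */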
int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
	pgoff_t index;
	int err;

	for (index = pg_start; index < pg_end; index++) {
		struct dnode_of_data dn;
		struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);

		mutex_lock_op(sbi, DATA_TRUNC);
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, index, RDONLY_NODE);
		if (err) {
			mutex_unlock_op(sbi, DATA_TRUNC);
			if (err == -ENOENT)
				continue;
			return err;
		}

		if (dn.data_blkaddr != NULL_ADDR)
			truncate_data_blocks_range(&dn, 1);
		f2fs_put_dnode(&dn);
		mutex_unlock_op(sbi, DATA_TRUNC);
	}
	return 0;
}

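/*
 * FALLOC_FL_PUNCH_HOLE: zero the partial pages at both ends of the range,
 * drop the fully covered pages from the page cache, and free their blocks
 * via truncate_hole().  Without FALLOC_FL_KEEP_SIZE, a hole reaching EOF
 * also shrinks i_size to @offset.
 */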
static int punch_hole(struct inode *inode, loff_t offset, loff_t len, int mode)
{
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;
	int ret = 0;

	pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;

	off_start = offset & (PAGE_CACHE_SIZE - 1);
	off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);

	if (pg_start == pg_end) {
		fill_zero(inode, pg_start, off_start,
						off_end - off_start);
	} else {
		if (off_start)
			fill_zero(inode, pg_start++, off_start,
					PAGE_CACHE_SIZE - off_start);
		if (off_end)
			fill_zero(inode, pg_end, 0, off_end);

		if (pg_start < pg_end) {
			struct address_space *mapping = inode->i_mapping;
			loff_t blk_start, blk_end;

			blk_start = pg_start << PAGE_CACHE_SHIFT;
			blk_end = pg_end << PAGE_CACHE_SHIFT;
			truncate_inode_pages_range(mapping, blk_start,
					blk_end - 1);
			ret = truncate_hole(inode, pg_start, pg_end);
		}
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		i_size_read(inode) <= (offset + len)) {
		i_size_write(inode, offset);
		mark_inode_dirty(inode);
	}

	return ret;
}

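/*
 * Preallocation path of fallocate(): reserve a new block for every page
 * in the range that is still a hole, then extend i_size to cover the
 * request unless FALLOC_FL_KEEP_SIZE was given.
 */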
static int expand_inode_data(struct inode *inode, loff_t offset,
					loff_t len, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;
	int ret = 0;

	ret = inode_newsize_ok(inode, (len + offset));
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;

	off_start = offset & (PAGE_CACHE_SIZE - 1);
	off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);

	for (index = pg_start; index <= pg_end; index++) {
		struct dnode_of_data dn;

		mutex_lock_op(sbi, DATA_NEW);

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = get_dnode_of_data(&dn, index, 0);
		if (ret) {
			mutex_unlock_op(sbi, DATA_NEW);
			break;
		}

		if (dn.data_blkaddr == NULL_ADDR) {
			ret = reserve_new_block(&dn);
			if (ret) {
				f2fs_put_dnode(&dn);
				mutex_unlock_op(sbi, DATA_NEW);
				break;
			}
		}
		f2fs_put_dnode(&dn);

		mutex_unlock_op(sbi, DATA_NEW);

		if (pg_start == pg_end)
			new_size = offset + len;
		else if (index == pg_start && off_start)
			new_size = (index + 1) << PAGE_CACHE_SHIFT;
		else if (index == pg_end)
			new_size = (index << PAGE_CACHE_SHIFT) + off_end;
		else
			new_size += PAGE_CACHE_SIZE;
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		i_size_read(inode) < new_size) {
		i_size_write(inode, new_size);
		mark_inode_dirty(inode);
	}

	return ret;
}

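/*
 * Only FALLOC_FL_KEEP_SIZE and FALLOC_FL_PUNCH_HOLE are supported; any
 * other mode is rejected with -EOPNOTSUPP.
 */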
static long f2fs_fallocate(struct file *file, int mode,
				loff_t offset, loff_t len)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	long ret;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		ret = punch_hole(inode, offset, len, mode);
	else
		ret = expand_inode_data(inode, offset, len, mode);

	f2fs_balance_fs(sbi);
	return ret;
}

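/*
 * Drop the FS_IOC_SETFLAGS bits that make no sense for the inode type:
 * directories keep all flags, regular files lose the directory-only ones,
 * and other inodes keep only NODUMP and NOATIME.
 */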
#define F2FS_REG_FLMASK		(~(FS_DIRSYNC_FL | FS_TOPDIR_FL))
#define F2FS_OTHER_FLMASK	(FS_NODUMP_FL | FS_NOATIME_FL)

static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags)
{
	if (S_ISDIR(mode))
		return flags;
	else if (S_ISREG(mode))
		return flags & F2FS_REG_FLMASK;
	else
		return flags & F2FS_OTHER_FLMASK;
}

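/*
 * FS_IOC_GETFLAGS/FS_IOC_SETFLAGS (chattr-style attribute flags).
 * Setting flags needs write access to the mount and ownership of the
 * inode; toggling APPEND/IMMUTABLE additionally needs CAP_LINUX_IMMUTABLE.
 * Only the user-modifiable bits are changed.
 */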
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct inode *inode = filp->f_dentry->d_inode;
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int flags;
	int ret;

	switch (cmd) {
	case FS_IOC_GETFLAGS:
		flags = fi->i_flags & FS_FL_USER_VISIBLE;
		return put_user(flags, (int __user *) arg);
	case FS_IOC_SETFLAGS:
	{
		unsigned int oldflags;

		ret = mnt_want_write(filp->f_path.mnt);
		if (ret)
			return ret;

		if (!inode_owner_or_capable(inode)) {
			ret = -EACCES;
			goto out;
		}

		if (get_user(flags, (int __user *) arg)) {
			ret = -EFAULT;
			goto out;
		}

		flags = f2fs_mask_flags(inode->i_mode, flags);

		mutex_lock(&inode->i_mutex);

		oldflags = fi->i_flags;

		if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) {
			if (!capable(CAP_LINUX_IMMUTABLE)) {
				mutex_unlock(&inode->i_mutex);
				ret = -EPERM;
				goto out;
			}
		}

		flags = flags & FS_FL_USER_MODIFIABLE;
		flags |= oldflags & ~FS_FL_USER_MODIFIABLE;
		fi->i_flags = flags;
		mutex_unlock(&inode->i_mutex);

		f2fs_set_inode_flags(inode);
		inode->i_ctime = CURRENT_TIME;
		mark_inode_dirty(inode);
out:
		mnt_drop_write(filp->f_path.mnt);
		return ret;
	}
	default:
		return -ENOTTY;
	}
}

const struct file_operations f2fs_file_operations = {
	.llseek = generic_file_llseek,
	.read = do_sync_read,
	.write = do_sync_write,
	.aio_read = generic_file_aio_read,
	.aio_write = generic_file_aio_write,
	.open = generic_file_open,
	.mmap = f2fs_file_mmap,
	.fsync = f2fs_sync_file,
	.fallocate = f2fs_fallocate,
	.unlocked_ioctl = f2fs_ioctl,
	.splice_read = generic_file_splice_read,
	.splice_write = generic_file_splice_write,
};