/*
 * fs/f2fs/file.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/mount.h>
#include <linux/pagevec.h>
#include <linux/random.h>
#include <linux/aio.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "acl.h"
#include "gc.h"
#include "trace.h"
#include <trace/events/f2fs.h>

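/*
 * Write-fault handler: reserve a block for the faulting page, then zero any
 * part of the page beyond i_size, mark the page dirty and wait for any
 * in-flight writeback before letting the write proceed.
 */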
static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
						struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int err;

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

	/* block allocation */
	f2fs_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_reserve_block(&dn, page->index);
	if (err) {
		f2fs_unlock_op(sbi);
		goto out;
	}
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

	f2fs_balance_fs(sbi, dn.node_changed);

	file_update_time(vma->vm_file);
	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page))) {
		unlock_page(page);
		err = -EFAULT;
		goto out;
	}

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto mapped;

	/* page is wholly or partially inside EOF */
	if (((loff_t)(page->index + 1) << PAGE_CACHE_SHIFT) >
						i_size_read(inode)) {
		unsigned offset;
		offset = i_size_read(inode) & ~PAGE_CACHE_MASK;
		zero_user_segment(page, offset, PAGE_CACHE_SIZE);
	}
	set_page_dirty(page);
	SetPageUptodate(page);

	trace_f2fs_vm_page_mkwrite(page, DATA);
mapped:
	/* fill the page */
	f2fs_wait_on_page_writeback(page, DATA, false);

	/* wait for GCed encrypted page writeback */
	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		f2fs_wait_on_encrypted_page_writeback(sbi, dn.data_blkaddr);

	/* if gced page is attached, don't write to cold segment */
	clear_cold_data(page);
out:
	f2fs_update_time(sbi, REQ_TIME);
	return block_page_mkwrite_return(err);
}

static const struct vm_operations_struct f2fs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= f2fs_vm_page_mkwrite,
};

static int get_parent_ino(struct inode *inode, nid_t *pino)
{
	struct dentry *dentry;

	inode = igrab(inode);
	dentry = d_find_any_alias(inode);
	iput(inode);
	if (!dentry)
		return 0;

	if (update_dent_inode(inode, inode, &dentry->d_name)) {
		dput(dentry);
		return 0;
	}

	*pino = parent_ino(dentry);
	dput(dentry);
	return 1;
}

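/*
 * Decide whether fsync must fall back to a full checkpoint instead of
 * relying on roll-forward recovery of the fsynced node blocks.
 */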
static inline bool need_do_checkpoint(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	bool need_cp = false;

	if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1)
		need_cp = true;
	else if (file_enc_name(inode) && need_dentry_mark(sbi, inode->i_ino))
		need_cp = true;
	else if (file_wrong_pino(inode))
		need_cp = true;
	else if (!space_for_roll_forward(sbi))
		need_cp = true;
	else if (!is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
		need_cp = true;
	else if (F2FS_I(inode)->xattr_ver == cur_cp_version(F2FS_CKPT(sbi)))
		need_cp = true;
	else if (test_opt(sbi, FASTBOOT))
		need_cp = true;
	else if (sbi->active_logs == 2)
		need_cp = true;

	return need_cp;
}

static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
	bool ret = false;
	/* But we need to avoid that there are some inode updates */
	if ((i && PageDirty(i)) || need_inode_block_update(sbi, ino))
		ret = true;
	f2fs_put_page(i, 0);
	return ret;
}

static void try_to_fix_pino(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	nid_t pino;

	down_write(&fi->i_sem);
	fi->xattr_ver = 0;
	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
			get_parent_ino(inode, &pino)) {
		fi->i_pino = pino;
		file_got_pino(inode);
		up_write(&fi->i_sem);

		mark_inode_dirty_sync(inode);
		f2fs_write_inode(inode, NULL);
	} else {
		up_write(&fi->i_sem);
	}
}

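/*
 * Core of fsync/fdatasync: flush dirty data pages, then either trigger a
 * full checkpoint (when roll-forward recovery cannot cover this inode) or
 * write the inode's node pages and record append/update markers so the
 * file can be recovered after a sudden power-off.
 */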
static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
						int datasync, bool atomic)
{
	struct inode *inode = file->f_mapping->host;
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t ino = inode->i_ino;
	int ret = 0;
	bool need_cp = false;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};

	if (unlikely(f2fs_readonly(inode->i_sb)))
		return 0;

	trace_f2fs_sync_file_enter(inode);

	/* if fdatasync is triggered, let's do in-place-update */
	if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
		set_inode_flag(fi, FI_NEED_IPU);
	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	clear_inode_flag(fi, FI_NEED_IPU);

	if (ret) {
		trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
		return ret;
	}

	/* if the inode is dirty, let's recover all the time */
	if (!datasync) {
		f2fs_write_inode(inode, NULL);
		goto go_write;
	}

	/*
	 * if there is no written data, don't waste time writing recovery info.
	 */
	if (!is_inode_flag_set(fi, FI_APPEND_WRITE) &&
			!exist_written_data(sbi, ino, APPEND_INO)) {

		/* it may call write_inode just prior to fsync */
		if (need_inode_page_update(sbi, ino))
			goto go_write;

		if (is_inode_flag_set(fi, FI_UPDATE_WRITE) ||
				exist_written_data(sbi, ino, UPDATE_INO))
			goto flush_out;
		goto out;
	}
go_write:
	/*
	 * Both fdatasync() and fsync() are able to be recovered from
	 * a sudden power-off.
	 */
	down_read(&fi->i_sem);
	need_cp = need_do_checkpoint(inode);
	up_read(&fi->i_sem);

	if (need_cp) {
		/* all the dirty node pages should be flushed for POR */
		ret = f2fs_sync_fs(inode->i_sb, 1);

		/*
		 * We've secured consistency through sync_fs. The following pino
		 * will be used only for fsynced inodes after checkpoint.
		 */
		try_to_fix_pino(inode);
		clear_inode_flag(fi, FI_APPEND_WRITE);
		clear_inode_flag(fi, FI_UPDATE_WRITE);
		goto out;
	}
sync_nodes:
	ret = fsync_node_pages(sbi, ino, &wbc, atomic);
	if (ret)
		goto out;

	/* if cp_error was enabled, we should avoid infinite loop */
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto out;
	}

	if (need_inode_block_update(sbi, ino)) {
		mark_inode_dirty_sync(inode);
		f2fs_write_inode(inode, NULL);
		goto sync_nodes;
	}

	ret = wait_on_node_pages_writeback(sbi, ino);
	if (ret)
		goto out;

	/* once recovery info is written, don't need to track this */
	remove_ino_entry(sbi, ino, APPEND_INO);
	clear_inode_flag(fi, FI_APPEND_WRITE);
flush_out:
	remove_ino_entry(sbi, ino, UPDATE_INO);
	clear_inode_flag(fi, FI_UPDATE_WRITE);
	ret = f2fs_issue_flush(sbi);
	f2fs_update_time(sbi, REQ_TIME);
out:
	trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
	f2fs_trace_ios(NULL, 1);
	return ret;
}

int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	return f2fs_do_sync_file(file, start, end, datasync, false);
}

static pgoff_t __get_first_dirty_index(struct address_space *mapping,
						pgoff_t pgofs, int whence)
{
	struct pagevec pvec;
	int nr_pages;

	if (whence != SEEK_DATA)
		return 0;

	/* find first dirty page index */
	pagevec_init(&pvec, 0);
	nr_pages = pagevec_lookup_tag(&pvec, mapping, &pgofs,
					PAGECACHE_TAG_DIRTY, 1);
	pgofs = nr_pages ? pvec.pages[0]->index : ULONG_MAX;
	pagevec_release(&pvec);
	return pgofs;
}

static bool __found_offset(block_t blkaddr, pgoff_t dirty, pgoff_t pgofs,
							int whence)
{
	switch (whence) {
	case SEEK_DATA:
		if ((blkaddr == NEW_ADDR && dirty == pgofs) ||
			(blkaddr != NEW_ADDR && blkaddr != NULL_ADDR))
			return true;
		break;
	case SEEK_HOLE:
		if (blkaddr == NULL_ADDR)
			return true;
		break;
	}
	return false;
}

static inline int unsigned_offsets(struct file *file)
{
	return file->f_mode & FMODE_UNSIGNED_OFFSET;
}

static loff_t vfs_setpos(struct file *file, loff_t offset, loff_t maxsize)
{
	if (offset < 0 && !unsigned_offsets(file))
		return -EINVAL;
	if (offset > maxsize)
		return -EINVAL;

	if (offset != file->f_pos) {
		file->f_pos = offset;
		file->f_version = 0;
	}
	return offset;
}

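/*
 * Walk direct node blocks to implement SEEK_DATA/SEEK_HOLE, treating a
 * reserved-but-unwritten block (NEW_ADDR) as data when a dirty page exists
 * at that index.
 */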
static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;
	struct dnode_of_data dn;
	pgoff_t pgofs, end_offset, dirty;
	loff_t data_ofs = offset;
	loff_t isize;
	int err = 0;

	mutex_lock(&inode->i_mutex);

	isize = i_size_read(inode);
	if (offset >= isize)
		goto fail;

	/* handle inline data case */
	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
		if (whence == SEEK_HOLE)
			data_ofs = isize;
		goto found;
	}

	pgofs = (pgoff_t)(offset >> PAGE_CACHE_SHIFT);

	dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);

	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_CACHE_SHIFT) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE_RA);
		if (err && err != -ENOENT) {
			goto fail;
		} else if (err == -ENOENT) {
			/* direct node does not exist */
			if (whence == SEEK_DATA) {
				pgofs = get_next_page_offset(&dn, pgofs);
				continue;
			} else {
				goto found;
			}
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

		/* find data/hole in dnode block */
		for (; dn.ofs_in_node < end_offset;
				dn.ofs_in_node++, pgofs++,
				data_ofs = (loff_t)pgofs << PAGE_CACHE_SHIFT) {
			block_t blkaddr;
			blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);

			if (__found_offset(blkaddr, dirty, pgofs, whence)) {
				f2fs_put_dnode(&dn);
				goto found;
			}
		}
		f2fs_put_dnode(&dn);
	}

	if (whence == SEEK_DATA)
		goto fail;
found:
	if (whence == SEEK_HOLE && data_ofs > isize)
		data_ofs = isize;
	mutex_unlock(&inode->i_mutex);
	return vfs_setpos(file, data_ofs, maxbytes);
fail:
	mutex_unlock(&inode->i_mutex);
	return -ENXIO;
}

static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes);
	case SEEK_DATA:
	case SEEK_HOLE:
		if (offset < 0)
			return -ENXIO;
		return f2fs_seek_block(file, offset, whence);
	}

	return -EINVAL;
}

static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	int err;

	if (f2fs_encrypted_inode(inode)) {
		err = fscrypt_get_encryption_info(inode);
		if (err)
			return 0;
		if (!f2fs_encrypted_inode(inode))
			return -ENOKEY;
	}

	/* we don't need to use inline_data strictly */
	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	file_accessed(file);
	vma->vm_ops = &f2fs_file_vm_ops;
	return 0;
}

static int f2fs_file_open(struct inode *inode, struct file *filp)
{
	int ret = generic_file_open(inode, filp);
	struct dentry *dir;

	if (!ret && f2fs_encrypted_inode(inode)) {
		ret = fscrypt_get_encryption_info(inode);
		if (ret)
			return -EACCES;
		if (!fscrypt_has_encryption_key(inode))
			return -ENOKEY;
	}
	dir = dget_parent(filp->f_path.dentry);
	if (f2fs_encrypted_inode(dir->d_inode) &&
			!fscrypt_has_permitted_context(dir->d_inode, inode)) {
		dput(dir);
		return -EPERM;
	}
	dput(dir);
	return ret;
}

int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_node *raw_node;
	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
	__le32 *addr;

	raw_node = F2FS_NODE(dn->node_page);
	addr = blkaddr_in_node(raw_node) + ofs;

	for (; count > 0; count--, addr++, dn->ofs_in_node++) {
		block_t blkaddr = le32_to_cpu(*addr);
		if (blkaddr == NULL_ADDR)
			continue;

		dn->data_blkaddr = NULL_ADDR;
		set_data_blkaddr(dn);
		invalidate_blocks(sbi, blkaddr);
		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
			clear_inode_flag(F2FS_I(dn->inode),
						FI_FIRST_BLOCK_WRITTEN);
		nr_free++;
	}

	if (nr_free) {
		pgoff_t fofs;
		/*
		 * once we invalidate valid blkaddr in range [ofs, ofs + count],
		 * we will invalidate all blkaddr in the whole range.
		 */
		fofs = start_bidx_of_node(ofs_of_node(dn->node_page),
							dn->inode) + ofs;
		f2fs_update_extent_cache_range(dn, fofs, 0, len);
		dec_valid_block_count(sbi, dn->inode, nr_free);
		sync_inode_page(dn);
	}
	dn->ofs_in_node = ofs;

	f2fs_update_time(sbi, REQ_TIME);
	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
					dn->ofs_in_node, nr_free);
	return nr_free;
}

void truncate_data_blocks(struct dnode_of_data *dn)
{
	truncate_data_blocks_range(dn, ADDRS_PER_BLOCK);
}

static int truncate_partial_data_page(struct inode *inode, u64 from,
								bool cache_only)
{
	unsigned offset = from & (PAGE_CACHE_SIZE - 1);
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	if (!offset && !cache_only)
		return 0;

	if (cache_only) {
		page = f2fs_grab_cache_page(mapping, index, false);
		if (page && PageUptodate(page))
			goto truncate_out;
		f2fs_put_page(page, 1);
		return 0;
	}

	page = get_lock_data_page(inode, index, true);
	if (IS_ERR(page))
		return 0;
truncate_out:
	f2fs_wait_on_page_writeback(page, DATA, true);
	zero_user(page, offset, PAGE_CACHE_SIZE - offset);
	if (!cache_only || !f2fs_encrypted_inode(inode) ||
				!S_ISREG(inode->i_mode))
		set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}

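/*
 * Free all data blocks at or beyond byte offset @from: drop block addresses
 * in the inode/direct node covering @from, truncate the remaining node
 * blocks, and zero the tail of the partial page at @from.
 */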
int truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int blocksize = inode->i_sb->s_blocksize;
	struct dnode_of_data dn;
	pgoff_t free_from;
	int count = 0, err = 0;
	struct page *ipage;
	bool truncate_page = false;

	trace_f2fs_truncate_blocks_enter(inode, from);

	free_from = (pgoff_t)F2FS_BYTES_TO_BLK(from + blocksize - 1);

	if (free_from >= sbi->max_file_blocks)
		goto free_partial;

	if (lock)
		f2fs_lock_op(sbi);

	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	if (f2fs_has_inline_data(inode)) {
		if (truncate_inline_inode(ipage, from))
			set_page_dirty(ipage);
		f2fs_put_page(ipage, 1);
		truncate_page = true;
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
	if (err) {
		if (err == -ENOENT)
			goto free_next;
		goto out;
	}

	count = ADDRS_PER_PAGE(dn.node_page, inode);

	count -= dn.ofs_in_node;
	f2fs_bug_on(sbi, count < 0);

	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		truncate_data_blocks_range(&dn, count);
		free_from += count;
	}

	f2fs_put_dnode(&dn);
free_next:
	err = truncate_inode_blocks(inode, free_from);
out:
	if (lock)
		f2fs_unlock_op(sbi);
free_partial:
	/* lastly zero out the first data page */
	if (!err)
		err = truncate_partial_data_page(inode, from, truncate_page);

	trace_f2fs_truncate_blocks_exit(inode, err);
	return err;
}

int f2fs_truncate(struct inode *inode, bool lock)
{
	int err;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))
		return 0;

	trace_f2fs_truncate(inode);

	/* we should check inline_data size */
	if (!f2fs_may_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	err = truncate_blocks(inode, i_size_read(inode), lock);
	if (err)
		return err;

	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	mark_inode_dirty(inode);
	return 0;
}

int f2fs_getattr(struct vfsmount *mnt,
			struct dentry *dentry, struct kstat *stat)
{
	struct inode *inode = dentry->d_inode;
	generic_fillattr(inode, stat);
	stat->blocks <<= 3;
	return 0;
}

#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct inode *inode, const struct iattr *attr)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int ia_valid = attr->ia_valid;

	if (ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (ia_valid & ATTR_ATIME)
		inode->i_atime = timespec_trunc(attr->ia_atime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MTIME)
		inode->i_mtime = timespec_trunc(attr->ia_mtime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_CTIME)
		inode->i_ctime = timespec_trunc(attr->ia_ctime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;

		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			mode &= ~S_ISGID;
		set_acl_inode(fi, mode);
	}
}
#else
#define __setattr_copy setattr_copy
#endif

int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	struct f2fs_inode_info *fi = F2FS_I(inode);
	int err;

	err = inode_change_ok(inode, attr);
	if (err)
		return err;

	if (attr->ia_valid & ATTR_SIZE) {
		if (f2fs_encrypted_inode(inode) &&
				fscrypt_get_encryption_info(inode))
			return -EACCES;

		if (attr->ia_size <= i_size_read(inode)) {
			truncate_setsize(inode, attr->ia_size);
			err = f2fs_truncate(inode, true);
			if (err)
				return err;
			f2fs_balance_fs(F2FS_I_SB(inode), true);
		} else {
			/*
			 * do not trim all blocks after i_size if target size is
			 * larger than i_size.
			 */
			truncate_setsize(inode, attr->ia_size);

			/* should convert inline inode here */
			if (!f2fs_may_inline_data(inode)) {
				err = f2fs_convert_inline_inode(inode);
				if (err)
					return err;
			}
			inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		}
	}

	__setattr_copy(inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
		err = f2fs_acl_chmod(inode);
		if (err || is_inode_flag_set(fi, FI_ACL_MODE)) {
			inode->i_mode = fi->i_acl_mode;
			clear_inode_flag(fi, FI_ACL_MODE);
		}
	}

	mark_inode_dirty(inode);
	return err;
}

const struct inode_operations f2fs_file_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
#ifdef CONFIG_F2FS_FS_XATTR
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= f2fs_listxattr,
	.removexattr	= generic_removexattr,
#endif
	.fiemap		= f2fs_fiemap,
};

static int fill_zero(struct inode *inode, pgoff_t index,
					loff_t start, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;

	if (!len)
		return 0;

	f2fs_balance_fs(sbi, true);

	f2fs_lock_op(sbi);
	page = get_new_data_page(inode, NULL, index, false);
	f2fs_unlock_op(sbi);

	if (IS_ERR(page))
		return PTR_ERR(page);

	f2fs_wait_on_page_writeback(page, DATA, true);
	zero_user(page, start, len);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}

int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
	int err;

	while (pg_start < pg_end) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
		if (err) {
			if (err == -ENOENT) {
				pg_start++;
				continue;
			}
			return err;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);

		f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);

		truncate_data_blocks_range(&dn, count);
		f2fs_put_dnode(&dn);

		pg_start += count;
	}
	return 0;
}

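/*
 * FALLOC_FL_PUNCH_HOLE helper: zero the partial pages at each end of the
 * range and free the whole blocks in between.
 */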
static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;
	int ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;

	off_start = offset & (PAGE_CACHE_SIZE - 1);
	off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_CACHE_SIZE - off_start);
			if (ret)
				return ret;
		}
		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				return ret;
		}

		if (pg_start < pg_end) {
			struct address_space *mapping = inode->i_mapping;
			loff_t blk_start, blk_end;
			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

			f2fs_balance_fs(sbi, true);

			blk_start = (loff_t)pg_start << PAGE_CACHE_SHIFT;
			blk_end = (loff_t)pg_end << PAGE_CACHE_SHIFT;
			truncate_inode_pages_range(mapping, blk_start,
					blk_end - 1);

			f2fs_lock_op(sbi);
			ret = truncate_hole(inode, pg_start, pg_end);
			f2fs_unlock_op(sbi);
		}
	}

	return ret;
}

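/*
 * Move the block at logical index @src to @dst. Blocks that are not yet
 * checkpointed can have their block address transplanted into the
 * destination dnode; checkpointed blocks are copied page by page instead.
 */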
static int __exchange_data_block(struct inode *inode, pgoff_t src,
					pgoff_t dst, bool full)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	block_t new_addr;
	bool do_replace = false;
	int ret;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = get_dnode_of_data(&dn, src, LOOKUP_NODE_RA);
	if (ret && ret != -ENOENT) {
		return ret;
	} else if (ret == -ENOENT) {
		new_addr = NULL_ADDR;
	} else {
		new_addr = dn.data_blkaddr;
		if (!is_checkpointed_data(sbi, new_addr)) {
			/* do not invalidate this block address */
			f2fs_update_data_blkaddr(&dn, NULL_ADDR);
			do_replace = true;
		}
		f2fs_put_dnode(&dn);
	}

	if (new_addr == NULL_ADDR)
		return full ? truncate_hole(inode, dst, dst + 1) : 0;

	if (do_replace) {
		struct page *ipage = get_node_page(sbi, inode->i_ino);
		struct node_info ni;

		if (IS_ERR(ipage)) {
			ret = PTR_ERR(ipage);
			goto err_out;
		}

		set_new_dnode(&dn, inode, ipage, NULL, 0);
		ret = f2fs_reserve_block(&dn, dst);
		if (ret)
			goto err_out;

		truncate_data_blocks_range(&dn, 1);

		get_node_info(sbi, dn.nid, &ni);
		f2fs_replace_block(sbi, &dn, dn.data_blkaddr, new_addr,
				ni.version, true, false);
		f2fs_put_dnode(&dn);
	} else {
		struct page *psrc, *pdst;

		psrc = get_lock_data_page(inode, src, true);
		if (IS_ERR(psrc))
			return PTR_ERR(psrc);
		pdst = get_new_data_page(inode, NULL, dst, true);
		if (IS_ERR(pdst)) {
			f2fs_put_page(psrc, 1);
			return PTR_ERR(pdst);
		}
		f2fs_copy_page(psrc, pdst);
		set_page_dirty(pdst);
		f2fs_put_page(pdst, 1);
		f2fs_put_page(psrc, 1);

		return truncate_hole(inode, src, src + 1);
	}
	return 0;

err_out:
	if (!get_dnode_of_data(&dn, src, LOOKUP_NODE)) {
		f2fs_update_data_blkaddr(&dn, new_addr);
		f2fs_put_dnode(&dn);
	}
	return ret;
}

static int f2fs_do_collapse(struct inode *inode, pgoff_t start, pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
	int ret = 0;

	for (; end < nrpages; start++, end++) {
		f2fs_balance_fs(sbi, true);
		f2fs_lock_op(sbi);
		ret = __exchange_data_block(inode, end, start, true);
		f2fs_unlock_op(sbi);
		if (ret)
			break;
	}
	return ret;
}

static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t new_size;
	int ret;

	if (offset + len >= i_size_read(inode))
		return -EINVAL;

	/* collapse range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	pg_start = offset >> PAGE_CACHE_SHIFT;
	pg_end = (offset + len) >> PAGE_CACHE_SHIFT;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	truncate_pagecache(inode, 0, offset);

	ret = f2fs_do_collapse(inode, pg_start, pg_end);
	if (ret)
		return ret;

	/* write out all moved pages, if possible */
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, 0, offset);

	new_size = i_size_read(inode) - len;
	truncate_pagecache(inode, 0, new_size);

	ret = truncate_blocks(inode, new_size, true);
	if (!ret)
		i_size_write(inode, new_size);

	return ret;
}

static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
								int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;
	int ret = 0;

	ret = inode_newsize_ok(inode, (len + offset));
	if (ret)
		return ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
	if (ret)
		return ret;

	truncate_pagecache_range(inode, offset, offset + len - 1);

	pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;

	off_start = offset & (PAGE_CACHE_SIZE - 1);
	off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;

		if (offset + len > new_size)
			new_size = offset + len;
		new_size = max_t(loff_t, new_size, offset + len);
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_CACHE_SIZE - off_start);
			if (ret)
				return ret;

			new_size = max_t(loff_t, new_size,
					(loff_t)pg_start << PAGE_CACHE_SHIFT);
		}

		for (index = pg_start; index < pg_end; index++) {
			struct dnode_of_data dn;
			struct page *ipage;

			f2fs_lock_op(sbi);

			ipage = get_node_page(sbi, inode->i_ino);
			if (IS_ERR(ipage)) {
				ret = PTR_ERR(ipage);
				f2fs_unlock_op(sbi);
				goto out;
			}

			set_new_dnode(&dn, inode, ipage, NULL, 0);
			ret = f2fs_reserve_block(&dn, index);
			if (ret) {
				f2fs_unlock_op(sbi);
				goto out;
			}

			if (dn.data_blkaddr != NEW_ADDR) {
				invalidate_blocks(sbi, dn.data_blkaddr);
				f2fs_update_data_blkaddr(&dn, NEW_ADDR);
			}
			f2fs_put_dnode(&dn);
			f2fs_unlock_op(sbi);

			new_size = max_t(loff_t, new_size,
				(loff_t)(index + 1) << PAGE_CACHE_SHIFT);
		}

		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				goto out;

			new_size = max_t(loff_t, new_size, offset + len);
		}
	}

out:
	if (!(mode & FALLOC_FL_KEEP_SIZE) && i_size_read(inode) < new_size) {
		i_size_write(inode, new_size);
		mark_inode_dirty(inode);
		update_inode_page(inode);
	}

	return ret;
}

static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t pg_start, pg_end, delta, nrpages, idx;
	loff_t new_size;
	int ret = 0;

	new_size = i_size_read(inode) + len;
	if (new_size > inode->i_sb->s_maxbytes)
		return -EFBIG;

	if (offset >= i_size_read(inode))
		return -EINVAL;

	/* insert range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	f2fs_balance_fs(sbi, true);

	ret = truncate_blocks(inode, i_size_read(inode), true);
	if (ret)
		return ret;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	truncate_pagecache(inode, 0, offset);

	pg_start = offset >> PAGE_CACHE_SHIFT;
	pg_end = (offset + len) >> PAGE_CACHE_SHIFT;
	delta = pg_end - pg_start;
	nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;

	for (idx = nrpages - 1; idx >= pg_start && idx != -1; idx--) {
		f2fs_lock_op(sbi);
		ret = __exchange_data_block(inode, idx, idx + delta, false);
		f2fs_unlock_op(sbi);
		if (ret)
			break;
	}

	/* write out all moved pages, if possible */
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, 0, offset);

	if (!ret)
		i_size_write(inode, new_size);
	return ret;
}

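/*
 * Default fallocate path: preallocate blocks for the requested range via
 * f2fs_map_blocks() and, unless FALLOC_FL_KEEP_SIZE is set, extend i_size
 * up to the last successfully allocated position.
 */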
static int expand_inode_data(struct inode *inode, loff_t offset,
					loff_t len, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_map_blocks map = { .m_next_pgofs = NULL };
	pgoff_t pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_end;
	int ret;

	ret = inode_newsize_ok(inode, (len + offset));
	if (ret)
		return ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	f2fs_balance_fs(sbi, true);

	pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
	off_end = (offset + len) & (PAGE_SIZE - 1);

	map.m_lblk = ((unsigned long long)offset) >> PAGE_SHIFT;
	map.m_len = pg_end - map.m_lblk;
	if (off_end)
		map.m_len++;

	ret = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
	if (ret) {
		pgoff_t last_off;

		if (!map.m_len)
			return ret;

		last_off = map.m_lblk + map.m_len - 1;

		/* update new size to the failed position */
		new_size = (last_off == pg_end) ? offset + len:
					(loff_t)(last_off + 1) << PAGE_SHIFT;
	} else {
		new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && i_size_read(inode) < new_size) {
		i_size_write(inode, new_size);
		mark_inode_dirty(inode);
		update_inode_page(inode);
	}

	return ret;
}

#ifndef FALLOC_FL_COLLAPSE_RANGE
#define FALLOC_FL_COLLAPSE_RANGE	0X08
#endif
#ifndef FALLOC_FL_ZERO_RANGE
#define FALLOC_FL_ZERO_RANGE		0X10
#endif
#ifndef FALLOC_FL_INSERT_RANGE
#define FALLOC_FL_INSERT_RANGE		0X20
#endif

static long f2fs_fallocate(struct file *file, int mode,
				loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	long ret = 0;

	/* f2fs only supports ->fallocate for regular files */
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (f2fs_encrypted_inode(inode) &&
		(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
		return -EOPNOTSUPP;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
			FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
			FALLOC_FL_INSERT_RANGE))
		return -EOPNOTSUPP;

	mutex_lock(&inode->i_mutex);

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (offset >= inode->i_size)
			goto out;

		ret = punch_hole(inode, offset, len);
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		ret = f2fs_collapse_range(inode, offset, len);
	} else if (mode & FALLOC_FL_ZERO_RANGE) {
		ret = f2fs_zero_range(inode, offset, len, mode);
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		ret = f2fs_insert_range(inode, offset, len);
	} else {
		ret = expand_inode_data(inode, offset, len, mode);
	}

	if (!ret) {
		inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		mark_inode_dirty(inode);
		f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	}

out:
	mutex_unlock(&inode->i_mutex);

	trace_f2fs_fallocate(inode, mode, offset, len, ret);
	return ret;
}

static int f2fs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * f2fs_release_file is called at every close. So we should not drop
	 * any in-memory pages because of a close issued by another process.
	 */
	if (!(filp->f_mode & FMODE_WRITE) ||
			atomic_read(&inode->i_writecount) != 1)
		return 0;

	/* some remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		clear_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE);
		set_inode_flag(F2FS_I(inode), FI_DROP_CACHE);
		filemap_fdatawrite(inode->i_mapping);
		clear_inode_flag(F2FS_I(inode), FI_DROP_CACHE);
	}
	return 0;
}

#define F2FS_REG_FLMASK		(~(FS_DIRSYNC_FL | FS_TOPDIR_FL))
#define F2FS_OTHER_FLMASK	(FS_NODUMP_FL | FS_NOATIME_FL)

static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags)
{
	if (S_ISDIR(mode))
		return flags;
	else if (S_ISREG(mode))
		return flags & F2FS_REG_FLMASK;
	else
		return flags & F2FS_OTHER_FLMASK;
}

static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int flags = fi->i_flags & FS_FL_USER_VISIBLE;
	return put_user(flags, (int __user *)arg);
}

static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int flags = fi->i_flags & FS_FL_USER_VISIBLE;
	unsigned int oldflags;
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (get_user(flags, (int __user *)arg))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	flags = f2fs_mask_flags(inode->i_mode, flags);

	mutex_lock(&inode->i_mutex);

	oldflags = fi->i_flags;

	if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) {
		if (!capable(CAP_LINUX_IMMUTABLE)) {
			mutex_unlock(&inode->i_mutex);
			ret = -EPERM;
			goto out;
		}
	}

	flags = flags & FS_FL_USER_MODIFIABLE;
	flags |= oldflags & ~FS_FL_USER_MODIFIABLE;
	fi->i_flags = flags;
	mutex_unlock(&inode->i_mutex);

	f2fs_set_inode_flags(inode);
	inode->i_ctime = CURRENT_TIME;
	mark_inode_dirty(inode);
out:
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	return put_user(inode->i_generation, (int __user *)arg);
}

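/*
 * F2FS_IOC_START_ATOMIC_WRITE: convert any inline data and mark the inode
 * atomic, so that subsequent writes are staged in memory until the matching
 * commit ioctl (or are dropped on abort/release).
 */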
static int f2fs_ioc_start_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (f2fs_is_atomic_file(inode))
		goto out;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	set_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	if (!get_dirty_pages(inode))
		goto out;

	f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING,
		"Unexpected flush for atomic writes: ino=%lu, npages=%u",
					inode->i_ino, get_dirty_pages(inode));
	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
	if (ret)
		clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
out:
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_commit_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (f2fs_is_volatile_file(inode))
		goto err_out;

	if (f2fs_is_atomic_file(inode)) {
		clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
		ret = commit_inmem_pages(inode);
		if (ret) {
			set_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
			goto err_out;
		}
	}

	ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
err_out:
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_start_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (f2fs_is_volatile_file(inode))
		goto out;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	set_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
out:
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_release_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (!f2fs_is_volatile_file(inode))
		goto out;

	if (!f2fs_is_first_block_written(inode)) {
		ret = truncate_partial_data_page(inode, 0, true);
		goto out;
	}

	ret = punch_hole(inode, 0, F2FS_BLKSIZE);
out:
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_abort_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (f2fs_is_atomic_file(inode))
		drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		clear_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE);
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
	}

	mnt_drop_write_file(filp);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return ret;
}

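/*
 * F2FS_IOC_SHUTDOWN: stop further checkpointing, optionally after freezing
 * the block device or forcing a final checkpoint/meta flush; typically used
 * to emulate a sudden power cut for testing.
 */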
static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	__u32 in;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(in, (__u32 __user *)arg))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	switch (in) {
	case FS_GOING_DOWN_FULLSYNC:
		sb = freeze_bdev(sb->s_bdev);
		if (sb && !IS_ERR(sb)) {
			f2fs_stop_checkpoint(sbi);
			thaw_bdev(sb->s_bdev, sb);
		}
		break;
	case FS_GOING_DOWN_METASYNC:
		/* do checkpoint only */
		f2fs_sync_fs(sb, 1);
		f2fs_stop_checkpoint(sbi);
		break;
	case FS_GOING_DOWN_NOSYNC:
		f2fs_stop_checkpoint(sbi);
		break;
	case FS_GOING_DOWN_METAFLUSH:
		sync_meta_pages(sbi, META, LONG_MAX);
		f2fs_stop_checkpoint(sbi);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}
	f2fs_update_time(sbi, REQ_TIME);
out:
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct super_block *sb = inode->i_sb;
	struct request_queue *q = bdev_get_queue(sb->s_bdev);
	struct fstrim_range range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	if (copy_from_user(&range, (struct fstrim_range __user *)arg,
				sizeof(range)))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	range.minlen = max((unsigned int)range.minlen,
				q->limits.discard_granularity);
	ret = f2fs_trim_fs(F2FS_SB(sb), &range);
	mnt_drop_write_file(filp);
	if (ret < 0)
		return ret;

	if (copy_to_user((struct fstrim_range __user *)arg, &range,
				sizeof(range)))
		return -EFAULT;
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return 0;
}

static bool uuid_is_nonzero(__u8 u[16])
{
	int i;

	for (i = 0; i < 16; i++)
		if (u[i])
			return true;
	return false;
}

static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
{
	struct fscrypt_policy policy;
	struct inode *inode = file_inode(filp);
	int ret;

	if (copy_from_user(&policy, (struct fscrypt_policy __user *)arg,
							sizeof(policy)))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	ret = fscrypt_process_policy(inode, &policy);

	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
{
	struct fscrypt_policy policy;
	struct inode *inode = file_inode(filp);
	int err;

	err = fscrypt_get_policy(inode, &policy);
	if (err)
		return err;

	if (copy_to_user((struct fscrypt_policy __user *)arg, &policy, sizeof(policy)))
		return -EFAULT;
	return 0;
}

static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err;

	if (!f2fs_sb_has_crypto(inode->i_sb))
		return -EOPNOTSUPP;

	if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
		goto got_it;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	/* update superblock with uuid */
	generate_random_uuid(sbi->raw_super->encrypt_pw_salt);

	err = f2fs_commit_super(sbi, false);
	if (err) {
		/* undo new data */
		memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
		mnt_drop_write_file(filp);
		return err;
	}
	mnt_drop_write_file(filp);
got_it:
	if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
									16))
		return -EFAULT;
	return 0;
}

static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	__u32 sync;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(sync, (__u32 __user *)arg))
		return -EFAULT;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (!sync) {
		if (!mutex_trylock(&sbi->gc_mutex)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		mutex_lock(&sbi->gc_mutex);
	}

	ret = f2fs_gc(sbi, sync);
out:
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	ret = f2fs_sync_fs(sbi->sb, 1);

	mnt_drop_write_file(filp);
	return ret;
}

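/*
 * F2FS_IOC_DEFRAGMENT helper: if the range's blocks are not physically
 * contiguous, redirty its pages in section-sized batches so that writeback
 * reallocates them contiguously.
 */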
static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
					struct file *filp,
					struct f2fs_defragment *range)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_map_blocks map = { .m_next_pgofs = NULL };
	struct extent_info ei;
	pgoff_t pg_start, pg_end;
	unsigned int blk_per_seg = sbi->blocks_per_seg;
	unsigned int total = 0, sec_num;
	unsigned int pages_per_sec = sbi->segs_per_sec * blk_per_seg;
	block_t blk_end = 0;
	bool fragmented = false;
	int err;

	/* if in-place-update policy is enabled, don't waste time here */
	if (need_inplace_update(inode))
		return -EINVAL;

	pg_start = range->start >> PAGE_CACHE_SHIFT;
	pg_end = (range->start + range->len) >> PAGE_CACHE_SHIFT;

	f2fs_balance_fs(sbi, true);

	mutex_lock(&inode->i_mutex);

	/* writeback all dirty pages in the range */
	err = filemap_write_and_wait_range(inode->i_mapping, range->start,
						range->start + range->len - 1);
	if (err)
		goto out;

	/*
	 * look up mapping info in the extent cache; skip defragmenting if
	 * the physical block addresses are already contiguous.
	 */
	if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
		if (ei.fofs + ei.len >= pg_end)
			goto out;
	}

	map.m_lblk = pg_start;

	/*
	 * look up mapping info in the dnode page cache; skip defragmenting
	 * if all physical block addresses are contiguous even if there are
	 * hole(s) in the logical blocks.
	 */
	while (map.m_lblk < pg_end) {
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_READ);
		if (err)
			goto out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk++;
			continue;
		}

		if (blk_end && blk_end != map.m_pblk) {
			fragmented = true;
			break;
		}
		blk_end = map.m_pblk + map.m_len;

		map.m_lblk += map.m_len;
	}

	if (!fragmented)
		goto out;

	map.m_lblk = pg_start;
	map.m_len = pg_end - pg_start;

	sec_num = (map.m_len + pages_per_sec - 1) / pages_per_sec;

	/*
	 * make sure there are enough free sections for LFS allocation; this
	 * avoids running the defragmentation in SSR mode when free sections
	 * are being consumed intensively.
	 */
	if (has_not_enough_free_secs(sbi, sec_num)) {
		err = -EAGAIN;
		goto out;
	}

	while (map.m_lblk < pg_end) {
		pgoff_t idx;
		int cnt = 0;

do_map:
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_READ);
		if (err)
			goto clear_out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk++;
			continue;
		}

		set_inode_flag(F2FS_I(inode), FI_DO_DEFRAG);

		idx = map.m_lblk;
		while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
			struct page *page;

			page = get_lock_data_page(inode, idx, true);
			if (IS_ERR(page)) {
				err = PTR_ERR(page);
				goto clear_out;
			}

			set_page_dirty(page);
			f2fs_put_page(page, 1);

			idx++;
			cnt++;
			total++;
		}

		map.m_lblk = idx;

		if (idx < pg_end && cnt < blk_per_seg)
			goto do_map;

		clear_inode_flag(F2FS_I(inode), FI_DO_DEFRAG);

		err = filemap_fdatawrite(inode->i_mapping);
		if (err)
			goto out;
	}
clear_out:
	clear_inode_flag(F2FS_I(inode), FI_DO_DEFRAG);
out:
	mutex_unlock(&inode->i_mutex);
	if (!err)
		range->len = (u64)total << PAGE_CACHE_SHIFT;
	return err;
}

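/*
 * F2FS_IOC_DEFRAGMENT: user-space entry point for defragmenting a regular
 * file.  The argument is a struct f2fs_defragment giving a block-aligned
 * byte range; on success its "len" field is rewritten with the number of
 * bytes that were actually re-dirtied for relocation.
 *
 * Illustrative user-space call (a sketch only; the structure layout and
 * ioctl number come from the exported f2fs headers):
 *
 *	struct f2fs_defragment df;
 *
 *	df.start = 0;			(byte offset, block aligned)
 *	df.len = 16 * 1024 * 1024;	(byte length, block aligned)
 *	if (ioctl(fd, F2FS_IOC_DEFRAGMENT, &df) < 0)
 *		perror("F2FS_IOC_DEFRAGMENT");
 */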
static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_defragment range;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	if (f2fs_readonly(sbi->sb)) {
		err = -EROFS;
		goto out;
	}

	if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
							sizeof(range))) {
		err = -EFAULT;
		goto out;
	}

	/* verify alignment of offset & size */
	if (range.start & (F2FS_BLKSIZE - 1) ||
		range.len & (F2FS_BLKSIZE - 1)) {
		err = -EINVAL;
		goto out;
	}

	err = f2fs_defragment_range(sbi, filp, &range);
	f2fs_update_time(sbi, REQ_TIME);
	if (err < 0)
		goto out;

	if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
							sizeof(range)))
		err = -EFAULT;
out:
	mnt_drop_write_file(filp);
	return err;
}

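/* dispatch table for the f2fs-specific ioctls handled above */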
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case F2FS_IOC_GETFLAGS:
		return f2fs_ioc_getflags(filp, arg);
	case F2FS_IOC_SETFLAGS:
		return f2fs_ioc_setflags(filp, arg);
	case F2FS_IOC_GETVERSION:
		return f2fs_ioc_getversion(filp, arg);
	case F2FS_IOC_START_ATOMIC_WRITE:
		return f2fs_ioc_start_atomic_write(filp);
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
		return f2fs_ioc_commit_atomic_write(filp);
	case F2FS_IOC_START_VOLATILE_WRITE:
		return f2fs_ioc_start_volatile_write(filp);
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
		return f2fs_ioc_release_volatile_write(filp);
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
		return f2fs_ioc_abort_volatile_write(filp);
	case FS_IOC_SHUTDOWN:
		return f2fs_ioc_shutdown(filp, arg);
	case FITRIM:
		return f2fs_ioc_fitrim(filp, arg);
	case F2FS_IOC_SET_ENCRYPTION_POLICY:
		return f2fs_ioc_set_encryption_policy(filp, arg);
	case F2FS_IOC_GET_ENCRYPTION_POLICY:
		return f2fs_ioc_get_encryption_policy(filp, arg);
	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
		return f2fs_ioc_get_encryption_pwsalt(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT:
		return f2fs_ioc_gc(filp, arg);
	case F2FS_IOC_WRITE_CHECKPOINT:
		return f2fs_ioc_write_checkpoint(filp, arg);
	case F2FS_IOC_DEFRAGMENT:
		return f2fs_ioc_defragment(filp, arg);
	default:
		return -ENOTTY;
	}
}

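/*
 * Write entry point: fail early if an encrypted inode has no usable key,
 * run the generic iovec and offset checks, then preallocate blocks for the
 * whole range (flagging the O_DIRECT case to the allocator) before handing
 * the write off to __generic_file_aio_write().
 */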
static ssize_t f2fs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
				unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	size_t count;
	ssize_t ret;

	if (f2fs_encrypted_inode(inode) &&
				!fscrypt_has_encryption_key(inode) &&
				fscrypt_get_encryption_info(inode))
		return -EACCES;

	ret = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ);
	if (ret)
		return ret;

	inode_lock(inode);
	ret = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (!ret) {
		ret = f2fs_preallocate_blocks(inode, pos, count,
					iocb->ki_filp->f_flags & O_DIRECT);
		if (!ret)
			ret = __generic_file_aio_write(iocb, iov, nr_segs,
							&iocb->ki_pos);
	}
	inode_unlock(inode);

	if (ret > 0 || ret == -EIOCBQUEUED) {
		ssize_t err;

		err = generic_write_sync(file, iocb->ki_pos - ret, ret);
		if (err < 0 && ret > 0)
			ret = err;
	}
	return ret;
}

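/*
 * 32-bit compat ioctl: translate the *32 flag/version commands to their
 * native equivalents, pass through the commands whose arguments are
 * compat-safe as-is, and reject everything else before reusing f2fs_ioctl().
 */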
#ifdef CONFIG_COMPAT
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case F2FS_IOC32_GETFLAGS:
		cmd = F2FS_IOC_GETFLAGS;
		break;
	case F2FS_IOC32_SETFLAGS:
		cmd = F2FS_IOC_SETFLAGS;
		break;
	case F2FS_IOC32_GETVERSION:
		cmd = F2FS_IOC_GETVERSION;
		break;
	case F2FS_IOC_START_ATOMIC_WRITE:
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
	case F2FS_IOC_START_VOLATILE_WRITE:
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
	case FS_IOC_SHUTDOWN:
	case F2FS_IOC_SET_ENCRYPTION_POLICY:
	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
	case F2FS_IOC_GET_ENCRYPTION_POLICY:
	case F2FS_IOC_GARBAGE_COLLECT:
	case F2FS_IOC_WRITE_CHECKPOINT:
	case F2FS_IOC_DEFRAGMENT:
		break;
	default:
		return -ENOIOCTLCMD;
	}
	return f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif

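/*
 * splice write path: mirror f2fs_file_aio_write() by running the generic
 * write checks and preallocating blocks for the spliced range before
 * falling back to generic_file_splice_write().
 */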
static ssize_t f2fs_file_splice_write(struct pipe_inode_info *pipe,
					struct file *out,
					loff_t *ppos, size_t len, unsigned int flags)
{
	struct address_space *mapping = out->f_mapping;
	struct inode *inode = mapping->host;
	int ret;

	ret = generic_write_checks(out, ppos, &len, S_ISBLK(inode->i_mode));
	if (ret)
		return ret;
	ret = f2fs_preallocate_blocks(inode, *ppos, len, false);
	if (ret)
		return ret;
	return generic_file_splice_write(pipe, out, ppos, len, flags);
}

const struct file_operations f2fs_file_operations = {
	.llseek		= f2fs_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= generic_file_aio_read,
	.aio_write	= f2fs_file_aio_write,
	.open		= f2fs_file_open,
	.release	= f2fs_release_file,
	.mmap		= f2fs_file_mmap,
	.fsync		= f2fs_sync_file,
	.fallocate	= f2fs_fallocate,
	.unlocked_ioctl	= f2fs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= f2fs_compat_ioctl,
#endif
	.splice_read	= generic_file_splice_read,
	.splice_write	= f2fs_file_splice_write,
};