/*
 * fs/f2fs/file.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/mount.h>
#include <linux/pagevec.h>
#include <linux/random.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "acl.h"
#include "gc.h"
#include "trace.h"
#include <trace/events/f2fs.h>

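/*
 * Handle a write fault on a shared mapping: reserve a block for the
 * faulting page under f2fs_lock_op(), re-validate the page against the
 * mapping and i_size, zero any part beyond EOF, and dirty the page so
 * the write can proceed.
 */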
static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
						struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int err;

	sb_start_pagefault(inode->i_sb);

	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

	/* block allocation */
	f2fs_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_reserve_block(&dn, page->index);
	if (err) {
		f2fs_unlock_op(sbi);
		goto out;
	}
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

	if (dn.node_changed)
		f2fs_balance_fs(sbi);

	file_update_time(vma->vm_file);
	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page))) {
		unlock_page(page);
		err = -EFAULT;
		goto out;
	}

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto mapped;

	/* page is wholly or partially inside EOF */
	if (((loff_t)(page->index + 1) << PAGE_CACHE_SHIFT) >
						i_size_read(inode)) {
		unsigned offset;
		offset = i_size_read(inode) & ~PAGE_CACHE_MASK;
		zero_user_segment(page, offset, PAGE_CACHE_SIZE);
	}
	set_page_dirty(page);
	SetPageUptodate(page);

	trace_f2fs_vm_page_mkwrite(page, DATA);
mapped:
	/* fill the page */
	f2fs_wait_on_page_writeback(page, DATA);

	/* wait for GCed encrypted page writeback */
	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		f2fs_wait_on_encrypted_page_writeback(sbi, dn.data_blkaddr);

	/* if gced page is attached, don't write to cold segment */
	clear_cold_data(page);
out:
	sb_end_pagefault(inode->i_sb);
	return block_page_mkwrite_return(err);
}

static const struct vm_operations_struct f2fs_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= f2fs_vm_page_mkwrite,
};

static int get_parent_ino(struct inode *inode, nid_t *pino)
{
	struct dentry *dentry;

	inode = igrab(inode);
	dentry = d_find_any_alias(inode);
	iput(inode);
	if (!dentry)
		return 0;

	if (update_dent_inode(inode, inode, &dentry->d_name)) {
		dput(dentry);
		return 0;
	}

	*pino = parent_ino(dentry);
	dput(dentry);
	return 1;
}

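/*
 * Decide whether fsync on this inode can rely on node-page writeback and
 * roll-forward recovery alone, or whether a full checkpoint is required
 * (hard links, encrypted dentry marks, wrong parent ino, no space left
 * for roll-forward, etc.).
 */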
static inline bool need_do_checkpoint(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	bool need_cp = false;

	if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1)
		need_cp = true;
	else if (file_enc_name(inode) && need_dentry_mark(sbi, inode->i_ino))
		need_cp = true;
	else if (file_wrong_pino(inode))
		need_cp = true;
	else if (!space_for_roll_forward(sbi))
		need_cp = true;
	else if (!is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
		need_cp = true;
	else if (F2FS_I(inode)->xattr_ver == cur_cp_version(F2FS_CKPT(sbi)))
		need_cp = true;
	else if (test_opt(sbi, FASTBOOT))
		need_cp = true;
	else if (sbi->active_logs == 2)
		need_cp = true;

	return need_cp;
}

static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
	bool ret = false;
	/* we still need to catch any pending inode page updates */
	if ((i && PageDirty(i)) || need_inode_block_update(sbi, ino))
		ret = true;
	f2fs_put_page(i, 0);
	return ret;
}

static void try_to_fix_pino(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	nid_t pino;

	down_write(&fi->i_sem);
	fi->xattr_ver = 0;
	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
			get_parent_ino(inode, &pino)) {
		fi->i_pino = pino;
		file_got_pino(inode);
		up_write(&fi->i_sem);

		mark_inode_dirty_sync(inode);
		f2fs_write_inode(inode, NULL);
	} else {
		up_write(&fi->i_sem);
	}
}

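/*
 * fsync/fdatasync entry point: write back data pages first (preferring
 * in-place updates for small fdatasync requests), then either trigger a
 * full checkpoint via f2fs_sync_fs() or sync only this inode's node
 * pages and issue a cache flush, depending on need_do_checkpoint().
 */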
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file->f_mapping->host;
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t ino = inode->i_ino;
	int ret = 0;
	bool need_cp = false;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};

	if (unlikely(f2fs_readonly(inode->i_sb)))
		return 0;

	trace_f2fs_sync_file_enter(inode);

	/* if fdatasync is triggered, let's do in-place-update */
	if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
		set_inode_flag(fi, FI_NEED_IPU);
	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	clear_inode_flag(fi, FI_NEED_IPU);

	if (ret) {
		trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
		return ret;
	}

	/* if the inode is dirty, let's recover all the time */
	if (!datasync) {
		f2fs_write_inode(inode, NULL);
		goto go_write;
	}

	/*
	 * if there is no written data, don't waste time writing recovery info.
	 */
	if (!is_inode_flag_set(fi, FI_APPEND_WRITE) &&
			!exist_written_data(sbi, ino, APPEND_INO)) {

		/* it may call write_inode just prior to fsync */
		if (need_inode_page_update(sbi, ino))
			goto go_write;

		if (is_inode_flag_set(fi, FI_UPDATE_WRITE) ||
				exist_written_data(sbi, ino, UPDATE_INO))
			goto flush_out;
		goto out;
	}
go_write:
	/*
	 * Both of fdatasync() and fsync() are able to be recovered from
	 * sudden-power-off.
	 */
	down_read(&fi->i_sem);
	need_cp = need_do_checkpoint(inode);
	up_read(&fi->i_sem);

	if (need_cp) {
		/* all the dirty node pages should be flushed for POR */
		ret = f2fs_sync_fs(inode->i_sb, 1);

		/*
		 * We've secured consistency through sync_fs. Following pino
		 * will be used only for fsynced inodes after checkpoint.
		 */
		try_to_fix_pino(inode);
		clear_inode_flag(fi, FI_APPEND_WRITE);
		clear_inode_flag(fi, FI_UPDATE_WRITE);
		goto out;
	}
sync_nodes:
	sync_node_pages(sbi, ino, &wbc);

	/* if cp_error was enabled, we should avoid infinite loop */
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto out;
	}

	if (need_inode_block_update(sbi, ino)) {
		mark_inode_dirty_sync(inode);
		f2fs_write_inode(inode, NULL);

		f2fs_balance_fs(sbi);
		goto sync_nodes;
	}

	ret = wait_on_node_pages_writeback(sbi, ino);
	if (ret)
		goto out;

	/* once recovery info is written, we don't need to track this */
	remove_ino_entry(sbi, ino, APPEND_INO);
	clear_inode_flag(fi, FI_APPEND_WRITE);
flush_out:
	remove_ino_entry(sbi, ino, UPDATE_INO);
	clear_inode_flag(fi, FI_UPDATE_WRITE);
	ret = f2fs_issue_flush(sbi);
out:
	trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
	f2fs_trace_ios(NULL, 1);
	return ret;
}

static pgoff_t __get_first_dirty_index(struct address_space *mapping,
						pgoff_t pgofs, int whence)
{
	struct pagevec pvec;
	int nr_pages;

	if (whence != SEEK_DATA)
		return 0;

	/* find first dirty page index */
	pagevec_init(&pvec, 0);
	nr_pages = pagevec_lookup_tag(&pvec, mapping, &pgofs,
					PAGECACHE_TAG_DIRTY, 1);
	pgofs = nr_pages ? pvec.pages[0]->index : LONG_MAX;
	pagevec_release(&pvec);
	return pgofs;
}

static bool __found_offset(block_t blkaddr, pgoff_t dirty, pgoff_t pgofs,
							int whence)
{
	switch (whence) {
	case SEEK_DATA:
		if ((blkaddr == NEW_ADDR && dirty == pgofs) ||
			(blkaddr != NEW_ADDR && blkaddr != NULL_ADDR))
			return true;
		break;
	case SEEK_HOLE:
		if (blkaddr == NULL_ADDR)
			return true;
		break;
	}
	return false;
}

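/*
 * Implement SEEK_DATA/SEEK_HOLE by walking the direct node blocks from
 * the requested offset and testing each block address (and the first
 * dirty page index) with __found_offset().
 */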
static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;
	struct dnode_of_data dn;
	pgoff_t pgofs, end_offset, dirty;
	loff_t data_ofs = offset;
	loff_t isize;
	int err = 0;

	mutex_lock(&inode->i_mutex);

	isize = i_size_read(inode);
	if (offset >= isize)
		goto fail;

	/* handle inline data case */
	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
		if (whence == SEEK_HOLE)
			data_ofs = isize;
		goto found;
	}

	pgofs = (pgoff_t)(offset >> PAGE_CACHE_SHIFT);

	dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);

	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_CACHE_SHIFT) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE_RA);
		if (err && err != -ENOENT) {
			goto fail;
		} else if (err == -ENOENT) {
			/* direct node does not exist */
			if (whence == SEEK_DATA) {
				pgofs = PGOFS_OF_NEXT_DNODE(pgofs,
							F2FS_I(inode));
				continue;
			} else {
				goto found;
			}
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));

		/* find data/hole in dnode block */
		for (; dn.ofs_in_node < end_offset;
				dn.ofs_in_node++, pgofs++,
				data_ofs = (loff_t)pgofs << PAGE_CACHE_SHIFT) {
			block_t blkaddr;
			blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);

			if (__found_offset(blkaddr, dirty, pgofs, whence)) {
				f2fs_put_dnode(&dn);
				goto found;
			}
		}
		f2fs_put_dnode(&dn);
	}

	if (whence == SEEK_DATA)
		goto fail;
found:
	if (whence == SEEK_HOLE && data_ofs > isize)
		data_ofs = isize;
	mutex_unlock(&inode->i_mutex);
	return vfs_setpos(file, data_ofs, maxbytes);
fail:
	mutex_unlock(&inode->i_mutex);
	return -ENXIO;
}

static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
	case SEEK_HOLE:
		if (offset < 0)
			return -ENXIO;
		return f2fs_seek_block(file, offset, whence);
	}

	return -EINVAL;
}

static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	int err;

	if (f2fs_encrypted_inode(inode)) {
		err = f2fs_get_encryption_info(inode);
		if (err)
			return 0;
	}

	/* we don't need to use inline_data strictly */
	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	file_accessed(file);
	vma->vm_ops = &f2fs_file_vm_ops;
	return 0;
}

static int f2fs_file_open(struct inode *inode, struct file *filp)
{
	int ret = generic_file_open(inode, filp);

	if (!ret && f2fs_encrypted_inode(inode)) {
		ret = f2fs_get_encryption_info(inode);
		if (ret)
			ret = -EACCES;
	}
	return ret;
}

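/*
 * Invalidate up to @count block addresses in the dnode referenced by @dn,
 * starting at dn->ofs_in_node, and drop the covered range from the extent
 * cache. Returns the number of blocks actually freed.
 */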
int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_node *raw_node;
	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
	__le32 *addr;

	raw_node = F2FS_NODE(dn->node_page);
	addr = blkaddr_in_node(raw_node) + ofs;

	for (; count > 0; count--, addr++, dn->ofs_in_node++) {
		block_t blkaddr = le32_to_cpu(*addr);
		if (blkaddr == NULL_ADDR)
			continue;

		dn->data_blkaddr = NULL_ADDR;
		set_data_blkaddr(dn);
		invalidate_blocks(sbi, blkaddr);
		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
			clear_inode_flag(F2FS_I(dn->inode),
						FI_FIRST_BLOCK_WRITTEN);
		nr_free++;
	}

	if (nr_free) {
		pgoff_t fofs;
		/*
		 * once we invalidate valid blkaddr in range [ofs, ofs + count],
		 * we will invalidate all blkaddr in the whole range.
		 */
		fofs = start_bidx_of_node(ofs_of_node(dn->node_page),
						F2FS_I(dn->inode)) + ofs;
		f2fs_update_extent_cache_range(dn, fofs, 0, len);
		dec_valid_block_count(sbi, dn->inode, nr_free);
		set_page_dirty(dn->node_page);
		sync_inode_page(dn);
	}
	dn->ofs_in_node = ofs;

	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
					dn->ofs_in_node, nr_free);
	return nr_free;
}

void truncate_data_blocks(struct dnode_of_data *dn)
{
	truncate_data_blocks_range(dn, ADDRS_PER_BLOCK);
}

static int truncate_partial_data_page(struct inode *inode, u64 from,
								bool cache_only)
{
	unsigned offset = from & (PAGE_CACHE_SIZE - 1);
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	if (!offset && !cache_only)
		return 0;

	if (cache_only) {
		page = f2fs_grab_cache_page(mapping, index, false);
		if (page && PageUptodate(page))
			goto truncate_out;
		f2fs_put_page(page, 1);
		return 0;
	}

	page = get_lock_data_page(inode, index, true);
	if (IS_ERR(page))
		return 0;
truncate_out:
	f2fs_wait_on_page_writeback(page, DATA);
	zero_user(page, offset, PAGE_CACHE_SIZE - offset);
	if (!cache_only || !f2fs_encrypted_inode(inode) || !S_ISREG(inode->i_mode))
		set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}

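/*
 * Truncate the inode's blocks down to byte offset @from: handle inline
 * data, free the tail of the dnode containing @from, drop all following
 * node blocks, and finally zero out the partial last page.
 */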
int truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int blocksize = inode->i_sb->s_blocksize;
	struct dnode_of_data dn;
	pgoff_t free_from;
	int count = 0, err = 0;
	struct page *ipage;
	bool truncate_page = false;

	trace_f2fs_truncate_blocks_enter(inode, from);

	free_from = (pgoff_t)F2FS_BYTES_TO_BLK(from + blocksize - 1);

	if (lock)
		f2fs_lock_op(sbi);

	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	if (f2fs_has_inline_data(inode)) {
		if (truncate_inline_inode(ipage, from))
			set_page_dirty(ipage);
		f2fs_put_page(ipage, 1);
		truncate_page = true;
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE);
	if (err) {
		if (err == -ENOENT)
			goto free_next;
		goto out;
	}

	count = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));

	count -= dn.ofs_in_node;
	f2fs_bug_on(sbi, count < 0);

	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		truncate_data_blocks_range(&dn, count);
		free_from += count;
	}

	f2fs_put_dnode(&dn);
free_next:
	err = truncate_inode_blocks(inode, free_from);
out:
	if (lock)
		f2fs_unlock_op(sbi);

	/* lastly zero out the first data page */
	if (!err)
		err = truncate_partial_data_page(inode, from, truncate_page);

	trace_f2fs_truncate_blocks_exit(inode, err);
	return err;
}

int f2fs_truncate(struct inode *inode, bool lock)
{
	int err;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))
		return 0;

	trace_f2fs_truncate(inode);

	/* we should check inline_data size */
	if (!f2fs_may_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	err = truncate_blocks(inode, i_size_read(inode), lock);
	if (err)
		return err;

	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	mark_inode_dirty(inode);
	return 0;
}

int f2fs_getattr(struct vfsmount *mnt,
			struct dentry *dentry, struct kstat *stat)
{
	struct inode *inode = d_inode(dentry);
	generic_fillattr(inode, stat);
	stat->blocks <<= 3;
	return 0;
}

#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct inode *inode, const struct iattr *attr)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int ia_valid = attr->ia_valid;

	if (ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (ia_valid & ATTR_ATIME)
		inode->i_atime = timespec_trunc(attr->ia_atime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MTIME)
		inode->i_mtime = timespec_trunc(attr->ia_mtime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_CTIME)
		inode->i_ctime = timespec_trunc(attr->ia_ctime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;

		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			mode &= ~S_ISGID;
		set_acl_inode(fi, mode);
	}
}
#else
#define __setattr_copy setattr_copy
#endif

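/*
 * setattr: a shrinking ATTR_SIZE truncates blocks immediately through
 * f2fs_truncate(), while a growing one only updates i_size here
 * (converting away from inline data first when necessary).
 */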
int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	int err;

	err = inode_change_ok(inode, attr);
	if (err)
		return err;

	if (attr->ia_valid & ATTR_SIZE) {
		if (f2fs_encrypted_inode(inode) &&
				f2fs_get_encryption_info(inode))
			return -EACCES;

		if (attr->ia_size <= i_size_read(inode)) {
			truncate_setsize(inode, attr->ia_size);
			err = f2fs_truncate(inode, true);
			if (err)
				return err;
			f2fs_balance_fs(F2FS_I_SB(inode));
		} else {
			/*
			 * do not trim all blocks after i_size if target size is
			 * larger than i_size.
			 */
			truncate_setsize(inode, attr->ia_size);

			/* should convert inline inode here */
			if (!f2fs_may_inline_data(inode)) {
				err = f2fs_convert_inline_inode(inode);
				if (err)
					return err;
			}
			inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		}
	}

	__setattr_copy(inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(inode, get_inode_mode(inode));
		if (err || is_inode_flag_set(fi, FI_ACL_MODE)) {
			inode->i_mode = fi->i_acl_mode;
			clear_inode_flag(fi, FI_ACL_MODE);
		}
	}

	mark_inode_dirty(inode);
	return err;
}

const struct inode_operations f2fs_file_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
	.set_acl	= f2fs_set_acl,
#ifdef CONFIG_F2FS_FS_XATTR
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= f2fs_listxattr,
	.removexattr	= generic_removexattr,
#endif
	.fiemap		= f2fs_fiemap,
};

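/*
 * fill_zero() zeroes @len bytes at @start within the data page at @index,
 * allocating the page if necessary; truncate_hole() frees every block in
 * the page range [pg_start, pg_end).
 */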
static int fill_zero(struct inode *inode, pgoff_t index,
					loff_t start, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;

	if (!len)
		return 0;

	f2fs_balance_fs(sbi);

	f2fs_lock_op(sbi);
	page = get_new_data_page(inode, NULL, index, false);
	f2fs_unlock_op(sbi);

	if (IS_ERR(page))
		return PTR_ERR(page);

	f2fs_wait_on_page_writeback(page, DATA);
	zero_user(page, start, len);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}

int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
	int err;

	while (pg_start < pg_end) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
		if (err) {
			if (err == -ENOENT) {
				pg_start++;
				continue;
			}
			return err;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
		count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);

		f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);

		truncate_data_blocks_range(&dn, count);
		f2fs_put_dnode(&dn);

		pg_start += count;
	}
	return 0;
}

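/*
 * FALLOC_FL_PUNCH_HOLE: zero the partial pages at each end of the range
 * and free all fully covered blocks via truncate_hole().
 */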
static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;
	int ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;

	off_start = offset & (PAGE_CACHE_SIZE - 1);
	off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_CACHE_SIZE - off_start);
			if (ret)
				return ret;
		}
		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				return ret;
		}

		if (pg_start < pg_end) {
			struct address_space *mapping = inode->i_mapping;
			loff_t blk_start, blk_end;
			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

			f2fs_balance_fs(sbi);

			blk_start = (loff_t)pg_start << PAGE_CACHE_SHIFT;
			blk_end = (loff_t)pg_end << PAGE_CACHE_SHIFT;
			truncate_inode_pages_range(mapping, blk_start,
					blk_end - 1);

			f2fs_lock_op(sbi);
			ret = truncate_hole(inode, pg_start, pg_end);
			f2fs_unlock_op(sbi);
		}
	}

	return ret;
}

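/*
 * Move the block at page index @src to @dst. If the source block is not
 * protected by the last checkpoint, its address is transplanted directly
 * with f2fs_replace_block(); otherwise the data is copied through the
 * page cache. On failure the original block address of @src is restored.
 */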
static int __exchange_data_block(struct inode *inode, pgoff_t src,
					pgoff_t dst, bool full)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	block_t new_addr;
	bool do_replace = false;
	int ret;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = get_dnode_of_data(&dn, src, LOOKUP_NODE_RA);
	if (ret && ret != -ENOENT) {
		return ret;
	} else if (ret == -ENOENT) {
		new_addr = NULL_ADDR;
	} else {
		new_addr = dn.data_blkaddr;
		if (!is_checkpointed_data(sbi, new_addr)) {
			dn.data_blkaddr = NULL_ADDR;
			/* do not invalidate this block address */
			set_data_blkaddr(&dn);
			f2fs_update_extent_cache(&dn);
			do_replace = true;
		}
		f2fs_put_dnode(&dn);
	}

	if (new_addr == NULL_ADDR)
		return full ? truncate_hole(inode, dst, dst + 1) : 0;

	if (do_replace) {
		struct page *ipage = get_node_page(sbi, inode->i_ino);
		struct node_info ni;

		if (IS_ERR(ipage)) {
			ret = PTR_ERR(ipage);
			goto err_out;
		}

		set_new_dnode(&dn, inode, ipage, NULL, 0);
		ret = f2fs_reserve_block(&dn, dst);
		if (ret)
			goto err_out;

		truncate_data_blocks_range(&dn, 1);

		get_node_info(sbi, dn.nid, &ni);
		f2fs_replace_block(sbi, &dn, dn.data_blkaddr, new_addr,
							ni.version, true);
		f2fs_put_dnode(&dn);
	} else {
		struct page *psrc, *pdst;

		psrc = get_lock_data_page(inode, src, true);
		if (IS_ERR(psrc))
			return PTR_ERR(psrc);
		pdst = get_new_data_page(inode, NULL, dst, false);
		if (IS_ERR(pdst)) {
			f2fs_put_page(psrc, 1);
			return PTR_ERR(pdst);
		}
		f2fs_copy_page(psrc, pdst);
		set_page_dirty(pdst);
		f2fs_put_page(pdst, 1);
		f2fs_put_page(psrc, 1);

		return truncate_hole(inode, src, src + 1);
	}
	return 0;

err_out:
	if (!get_dnode_of_data(&dn, src, LOOKUP_NODE)) {
		dn.data_blkaddr = new_addr;
		set_data_blkaddr(&dn);
		f2fs_update_extent_cache(&dn);
		f2fs_put_dnode(&dn);
	}
	return ret;
}

static int f2fs_do_collapse(struct inode *inode, pgoff_t start, pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
	int ret = 0;

	for (; end < nrpages; start++, end++) {
		f2fs_balance_fs(sbi);
		f2fs_lock_op(sbi);
		ret = __exchange_data_block(inode, end, start, true);
		f2fs_unlock_op(sbi);
		if (ret)
			break;
	}
	return ret;
}

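/*
 * FALLOC_FL_COLLAPSE_RANGE: remove the byte range and shift the blocks
 * behind it forward (one __exchange_data_block() per page), then shrink
 * i_size by @len.
 */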
static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t new_size;
	int ret;

	if (offset + len >= i_size_read(inode))
		return -EINVAL;

	/* collapse range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	pg_start = offset >> PAGE_CACHE_SHIFT;
	pg_end = (offset + len) >> PAGE_CACHE_SHIFT;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	truncate_pagecache(inode, offset);

	ret = f2fs_do_collapse(inode, pg_start, pg_end);
	if (ret)
		return ret;

	/* write out all moved pages, if possible */
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);

	new_size = i_size_read(inode) - len;
	truncate_pagecache(inode, new_size);

	ret = truncate_blocks(inode, new_size, true);
	if (!ret)
		i_size_write(inode, new_size);

	return ret;
}

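/*
 * FALLOC_FL_ZERO_RANGE: zero the partial head and tail pages and reset
 * every fully covered block address to NEW_ADDR so the range reads back
 * as zeroes, updating i_size unless FALLOC_FL_KEEP_SIZE is set.
 */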
static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
								int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;
	int ret = 0;

	ret = inode_newsize_ok(inode, (len + offset));
	if (ret)
		return ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
	if (ret)
		return ret;

	truncate_pagecache_range(inode, offset, offset + len - 1);

	pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;

	off_start = offset & (PAGE_CACHE_SIZE - 1);
	off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;

		if (offset + len > new_size)
			new_size = offset + len;
		new_size = max_t(loff_t, new_size, offset + len);
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_CACHE_SIZE - off_start);
			if (ret)
				return ret;

			new_size = max_t(loff_t, new_size,
					(loff_t)pg_start << PAGE_CACHE_SHIFT);
		}

		for (index = pg_start; index < pg_end; index++) {
			struct dnode_of_data dn;
			struct page *ipage;

			f2fs_lock_op(sbi);

			ipage = get_node_page(sbi, inode->i_ino);
			if (IS_ERR(ipage)) {
				ret = PTR_ERR(ipage);
				f2fs_unlock_op(sbi);
				goto out;
			}

			set_new_dnode(&dn, inode, ipage, NULL, 0);
			ret = f2fs_reserve_block(&dn, index);
			if (ret) {
				f2fs_unlock_op(sbi);
				goto out;
			}

			if (dn.data_blkaddr != NEW_ADDR) {
				invalidate_blocks(sbi, dn.data_blkaddr);

				dn.data_blkaddr = NEW_ADDR;
				set_data_blkaddr(&dn);

				dn.data_blkaddr = NULL_ADDR;
				f2fs_update_extent_cache(&dn);
			}
			f2fs_put_dnode(&dn);
			f2fs_unlock_op(sbi);

			new_size = max_t(loff_t, new_size,
				(loff_t)(index + 1) << PAGE_CACHE_SHIFT);
		}

		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				goto out;

			new_size = max_t(loff_t, new_size, offset + len);
		}
	}

out:
	if (!(mode & FALLOC_FL_KEEP_SIZE) && i_size_read(inode) < new_size) {
		i_size_write(inode, new_size);
		mark_inode_dirty(inode);
		update_inode_page(inode);
	}

	return ret;
}

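/*
 * FALLOC_FL_INSERT_RANGE: after truncating preallocated blocks beyond
 * i_size and flushing dirty pages, shift existing blocks backwards by
 * @len (walking from the end of the file) and grow i_size accordingly.
 */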
static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t pg_start, pg_end, delta, nrpages, idx;
	loff_t new_size;
	int ret = 0;

	new_size = i_size_read(inode) + len;
	if (new_size > inode->i_sb->s_maxbytes)
		return -EFBIG;

	if (offset >= i_size_read(inode))
		return -EINVAL;

	/* insert range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	f2fs_balance_fs(sbi);

	ret = truncate_blocks(inode, i_size_read(inode), true);
	if (ret)
		return ret;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	truncate_pagecache(inode, offset);

	pg_start = offset >> PAGE_CACHE_SHIFT;
	pg_end = (offset + len) >> PAGE_CACHE_SHIFT;
	delta = pg_end - pg_start;
	nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;

	for (idx = nrpages - 1; idx >= pg_start && idx != -1; idx--) {
		f2fs_lock_op(sbi);
		ret = __exchange_data_block(inode, idx, idx + delta, false);
		f2fs_unlock_op(sbi);
		if (ret)
			break;
	}

	/* write out all moved pages, if possible */
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);

	if (!ret)
		i_size_write(inode, new_size);
	return ret;
}

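/*
 * Default fallocate path: preallocate blocks for the requested range with
 * f2fs_reserve_block() and extend i_size unless FALLOC_FL_KEEP_SIZE is set.
 */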
static int expand_inode_data(struct inode *inode, loff_t offset,
					loff_t len, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;
	int ret = 0;

	ret = inode_newsize_ok(inode, (len + offset));
	if (ret)
		return ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	f2fs_balance_fs(sbi);

	pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;

	off_start = offset & (PAGE_CACHE_SIZE - 1);
	off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);

	f2fs_lock_op(sbi);

	for (index = pg_start; index <= pg_end; index++) {
		struct dnode_of_data dn;

		if (index == pg_end && !off_end)
			goto noalloc;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = f2fs_reserve_block(&dn, index);
		if (ret)
			break;
noalloc:
		if (pg_start == pg_end)
			new_size = offset + len;
		else if (index == pg_start && off_start)
			new_size = (loff_t)(index + 1) << PAGE_CACHE_SHIFT;
		else if (index == pg_end)
			new_size = ((loff_t)index << PAGE_CACHE_SHIFT) +
								off_end;
		else
			new_size += PAGE_CACHE_SIZE;
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		i_size_read(inode) < new_size) {
		i_size_write(inode, new_size);
		mark_inode_dirty(inode);
		update_inode_page(inode);
	}
	f2fs_unlock_op(sbi);

	return ret;
}

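/*
 * fallocate() dispatcher: supports punch-hole, collapse-range, zero-range,
 * insert-range and plain preallocation, on regular files only.
 */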
static long f2fs_fallocate(struct file *file, int mode,
				loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	long ret = 0;

	/* f2fs only supports ->fallocate for regular files */
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (f2fs_encrypted_inode(inode) &&
		(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
		return -EOPNOTSUPP;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
			FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
			FALLOC_FL_INSERT_RANGE))
		return -EOPNOTSUPP;

	mutex_lock(&inode->i_mutex);

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (offset >= inode->i_size)
			goto out;

		ret = punch_hole(inode, offset, len);
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		ret = f2fs_collapse_range(inode, offset, len);
	} else if (mode & FALLOC_FL_ZERO_RANGE) {
		ret = f2fs_zero_range(inode, offset, len, mode);
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		ret = f2fs_insert_range(inode, offset, len);
	} else {
		ret = expand_inode_data(inode, offset, len, mode);
	}

	if (!ret) {
		inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		mark_inode_dirty(inode);
	}

out:
	mutex_unlock(&inode->i_mutex);

	trace_f2fs_fallocate(inode, mode, offset, len, ret);
	return ret;
}

static int f2fs_release_file(struct inode *inode, struct file *filp)
{
	/* any remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		commit_inmem_pages(inode, true);
	if (f2fs_is_volatile_file(inode)) {
		set_inode_flag(F2FS_I(inode), FI_DROP_CACHE);
		filemap_fdatawrite(inode->i_mapping);
		clear_inode_flag(F2FS_I(inode), FI_DROP_CACHE);
	}
	return 0;
}

#define F2FS_REG_FLMASK		(~(FS_DIRSYNC_FL | FS_TOPDIR_FL))
#define F2FS_OTHER_FLMASK	(FS_NODUMP_FL | FS_NOATIME_FL)

static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags)
{
	if (S_ISDIR(mode))
		return flags;
	else if (S_ISREG(mode))
		return flags & F2FS_REG_FLMASK;
	else
		return flags & F2FS_OTHER_FLMASK;
}

static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int flags = fi->i_flags & FS_FL_USER_VISIBLE;
	return put_user(flags, (int __user *)arg);
}

static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int flags = fi->i_flags & FS_FL_USER_VISIBLE;
	unsigned int oldflags;
	int ret;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (!inode_owner_or_capable(inode)) {
		ret = -EACCES;
		goto out;
	}

	if (get_user(flags, (int __user *)arg)) {
		ret = -EFAULT;
		goto out;
	}

	flags = f2fs_mask_flags(inode->i_mode, flags);

	mutex_lock(&inode->i_mutex);

	oldflags = fi->i_flags;

	if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) {
		if (!capable(CAP_LINUX_IMMUTABLE)) {
			mutex_unlock(&inode->i_mutex);
			ret = -EPERM;
			goto out;
		}
	}

	flags = flags & FS_FL_USER_MODIFIABLE;
	flags |= oldflags & ~FS_FL_USER_MODIFIABLE;
	fi->i_flags = flags;
	mutex_unlock(&inode->i_mutex);

	f2fs_set_inode_flags(inode);
	inode->i_ctime = CURRENT_TIME;
	mark_inode_dirty(inode);
out:
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	return put_user(inode->i_generation, (int __user *)arg);
}

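/*
 * Atomic and volatile write ioctls: "start" marks the inode with
 * FI_ATOMIC_FILE or FI_VOLATILE_FILE (converting away from inline data
 * first), "commit" flushes the staged in-memory pages with
 * commit_inmem_pages() and fsyncs the file, and "abort" discards them.
 */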
Jaegeuk Kim88b88a62014-10-06 17:39:50 -07001342static int f2fs_ioc_start_atomic_write(struct file *filp)
1343{
1344 struct inode *inode = file_inode(filp);
Chao Yuf4c9c742015-07-17 18:06:35 +08001345 int ret;
Jaegeuk Kim88b88a62014-10-06 17:39:50 -07001346
1347 if (!inode_owner_or_capable(inode))
1348 return -EACCES;
1349
Jaegeuk Kim1e843712014-12-09 06:08:59 -08001350 if (f2fs_is_atomic_file(inode))
1351 return 0;
Jaegeuk Kim88b88a62014-10-06 17:39:50 -07001352
Chao Yuf4c9c742015-07-17 18:06:35 +08001353 ret = f2fs_convert_inline_inode(inode);
1354 if (ret)
1355 return ret;
Jaegeuk Kim88b88a62014-10-06 17:39:50 -07001356
Chao Yuf4c9c742015-07-17 18:06:35 +08001357 set_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
1358 return 0;
Jaegeuk Kim88b88a62014-10-06 17:39:50 -07001359}
1360
1361static int f2fs_ioc_commit_atomic_write(struct file *filp)
1362{
1363 struct inode *inode = file_inode(filp);
1364 int ret;
1365
1366 if (!inode_owner_or_capable(inode))
1367 return -EACCES;
1368
Jaegeuk Kim02a13352014-10-06 16:11:16 -07001369 if (f2fs_is_volatile_file(inode))
1370 return 0;
1371
Jaegeuk Kim88b88a62014-10-06 17:39:50 -07001372 ret = mnt_want_write_file(filp);
1373 if (ret)
1374 return ret;
1375
Jaegeuk Kim6282adb2015-07-25 00:29:17 -07001376 if (f2fs_is_atomic_file(inode)) {
1377 clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
Jaegeuk Kimedb27de2015-07-25 00:52:52 -07001378 ret = commit_inmem_pages(inode, false);
1379 if (ret)
1380 goto err_out;
Jaegeuk Kim6282adb2015-07-25 00:29:17 -07001381 }
Jaegeuk Kim88b88a62014-10-06 17:39:50 -07001382
Chao Yua5f64b62015-07-17 18:05:21 +08001383 ret = f2fs_sync_file(filp, 0, LLONG_MAX, 0);
Jaegeuk Kimedb27de2015-07-25 00:52:52 -07001384err_out:
Jaegeuk Kim88b88a62014-10-06 17:39:50 -07001385 mnt_drop_write_file(filp);
1386 return ret;
1387}
1388
Jaegeuk Kim02a13352014-10-06 16:11:16 -07001389static int f2fs_ioc_start_volatile_write(struct file *filp)
1390{
1391 struct inode *inode = file_inode(filp);
Chao Yuf4c9c742015-07-17 18:06:35 +08001392 int ret;
Jaegeuk Kim02a13352014-10-06 16:11:16 -07001393
1394 if (!inode_owner_or_capable(inode))
1395 return -EACCES;
1396
Jaegeuk Kim1e843712014-12-09 06:08:59 -08001397 if (f2fs_is_volatile_file(inode))
1398 return 0;
1399
Chao Yuf4c9c742015-07-17 18:06:35 +08001400 ret = f2fs_convert_inline_inode(inode);
1401 if (ret)
1402 return ret;
Jaegeuk Kimb3d208f2014-10-23 19:48:09 -07001403
Chao Yuf4c9c742015-07-17 18:06:35 +08001404 set_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE);
1405 return 0;
Jaegeuk Kim02a13352014-10-06 16:11:16 -07001406}
1407
Jaegeuk Kim1e843712014-12-09 06:08:59 -08001408static int f2fs_ioc_release_volatile_write(struct file *filp)
1409{
1410 struct inode *inode = file_inode(filp);
1411
1412 if (!inode_owner_or_capable(inode))
1413 return -EACCES;
1414
1415 if (!f2fs_is_volatile_file(inode))
1416 return 0;
1417
Jaegeuk Kim3c6c2be2015-03-17 17:16:35 -07001418 if (!f2fs_is_first_block_written(inode))
1419 return truncate_partial_data_page(inode, 0, true);
1420
Nicholas Krause538e17e2015-09-06 08:28:46 -04001421 return punch_hole(inode, 0, F2FS_BLKSIZE);
Jaegeuk Kim1e843712014-12-09 06:08:59 -08001422}
1423
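/*
 * F2FS_IOC_ABORT_VOLATILE_WRITE: back out of atomic/volatile mode.  Staged
 * atomic pages are dropped via commit_inmem_pages(inode, true); a volatile
 * file simply has its flag cleared and is synced out.
 */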
1424static int f2fs_ioc_abort_volatile_write(struct file *filp)
1425{
1426 struct inode *inode = file_inode(filp);
1427 int ret;
1428
1429 if (!inode_owner_or_capable(inode))
1430 return -EACCES;
1431
1432 ret = mnt_want_write_file(filp);
1433 if (ret)
1434 return ret;
1435
Jaegeuk Kim732d5642015-12-29 15:46:33 -08001436 if (f2fs_is_atomic_file(inode)) {
1437 clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
1438 commit_inmem_pages(inode, true);
1439 }
1440 if (f2fs_is_volatile_file(inode)) {
1441 clear_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE);
1442 ret = f2fs_sync_file(filp, 0, LLONG_MAX, 0);
1443 }
Jaegeuk Kimde6a8ec2015-06-08 17:51:10 -07001444
Jaegeuk Kim1e843712014-12-09 06:08:59 -08001445 mnt_drop_write_file(filp);
1446 return ret;
1447}
1448
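/*
 * F2FS_IOC_SHUTDOWN: emergency shutdown, CAP_SYS_ADMIN only.  The __u32
 * argument selects how much state is pushed out before checkpointing is
 * stopped: freeze/thaw the block device (FULLSYNC), write one checkpoint
 * (METASYNC), stop immediately (NOSYNC), or just flush dirty meta pages
 * (METAFLUSH).
 */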
Jaegeuk Kim1abff932015-01-08 19:15:53 -08001449static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
1450{
1451 struct inode *inode = file_inode(filp);
1452 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1453 struct super_block *sb = sbi->sb;
1454 __u32 in;
1455
1456 if (!capable(CAP_SYS_ADMIN))
1457 return -EPERM;
1458
1459 if (get_user(in, (__u32 __user *)arg))
1460 return -EFAULT;
1461
1462 switch (in) {
1463 case F2FS_GOING_DOWN_FULLSYNC:
1464 sb = freeze_bdev(sb->s_bdev);
1465 if (sb && !IS_ERR(sb)) {
1466 f2fs_stop_checkpoint(sbi);
1467 thaw_bdev(sb->s_bdev, sb);
1468 }
1469 break;
1470 case F2FS_GOING_DOWN_METASYNC:
1471 /* do checkpoint only */
1472 f2fs_sync_fs(sb, 1);
1473 f2fs_stop_checkpoint(sbi);
1474 break;
1475 case F2FS_GOING_DOWN_NOSYNC:
1476 f2fs_stop_checkpoint(sbi);
1477 break;
Jaegeuk Kimc912a822015-10-07 09:46:37 -07001478 case F2FS_GOING_DOWN_METAFLUSH:
1479 sync_meta_pages(sbi, META, LONG_MAX);
1480 f2fs_stop_checkpoint(sbi);
1481 break;
Jaegeuk Kim1abff932015-01-08 19:15:53 -08001482 default:
1483 return -EINVAL;
1484 }
1485 return 0;
1486}
1487
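/*
 * FITRIM: standard discard ioctl.  Requires CAP_SYS_ADMIN and a queue that
 * supports discard; the requested minimum extent length is raised to at
 * least the discard granularity before f2fs_trim_fs() runs, and the
 * possibly-updated range is copied back to userspace.
 */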
Jaegeuk Kim52656e62014-09-24 15:37:02 -07001488static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
1489{
1490 struct inode *inode = file_inode(filp);
1491 struct super_block *sb = inode->i_sb;
1492 struct request_queue *q = bdev_get_queue(sb->s_bdev);
1493 struct fstrim_range range;
1494 int ret;
1495
1496 if (!capable(CAP_SYS_ADMIN))
1497 return -EPERM;
1498
1499 if (!blk_queue_discard(q))
1500 return -EOPNOTSUPP;
1501
1502 if (copy_from_user(&range, (struct fstrim_range __user *)arg,
1503 sizeof(range)))
1504 return -EFAULT;
1505
1506 range.minlen = max((unsigned int)range.minlen,
1507 q->limits.discard_granularity);
1508 ret = f2fs_trim_fs(F2FS_SB(sb), &range);
1509 if (ret < 0)
1510 return ret;
1511
1512 if (copy_to_user((struct fstrim_range __user *)arg, &range,
1513 sizeof(range)))
1514 return -EFAULT;
1515 return 0;
1516}
1517
Jaegeuk Kimf424f662015-04-20 15:19:06 -07001518static bool uuid_is_nonzero(__u8 u[16])
1519{
1520 int i;
1521
1522 for (i = 0; i < 16; i++)
1523 if (u[i])
1524 return true;
1525 return false;
1526}
1527
1528static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
1529{
1530#ifdef CONFIG_F2FS_FS_ENCRYPTION
1531 struct f2fs_encryption_policy policy;
1532 struct inode *inode = file_inode(filp);
1533
1534 if (copy_from_user(&policy, (struct f2fs_encryption_policy __user *)arg,
1535 sizeof(policy)))
1536 return -EFAULT;
1537
Jaegeuk Kimf424f662015-04-20 15:19:06 -07001538 return f2fs_process_policy(&policy, inode);
1539#else
1540 return -EOPNOTSUPP;
1541#endif
1542}
1543
1544static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
1545{
1546#ifdef CONFIG_F2FS_FS_ENCRYPTION
1547 struct f2fs_encryption_policy policy;
1548 struct inode *inode = file_inode(filp);
1549 int err;
1550
1551 err = f2fs_get_policy(inode, &policy);
1552 if (err)
1553 return err;
1554
1555 if (copy_to_user((struct f2fs_encryption_policy __user *)arg, &policy,
1556 sizeof(policy)))
1557 return -EFAULT;
1558 return 0;
1559#else
1560 return -EOPNOTSUPP;
1561#endif
1562}
1563
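/*
 * F2FS_IOC_GET_ENCRYPTION_PWSALT: return the 16-byte password salt kept in
 * the raw superblock, generating a random one and committing the
 * superblock first if the salt has not been initialized yet.
 */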
1564static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
1565{
1566 struct inode *inode = file_inode(filp);
1567 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1568 int err;
1569
1570 if (!f2fs_sb_has_crypto(inode->i_sb))
1571 return -EOPNOTSUPP;
1572
1573 if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
1574 goto got_it;
1575
1576 err = mnt_want_write_file(filp);
1577 if (err)
1578 return err;
1579
1580 /* update superblock with uuid */
1581 generate_random_uuid(sbi->raw_super->encrypt_pw_salt);
1582
Chao Yuc5bda1c2015-06-08 13:28:03 +08001583 err = f2fs_commit_super(sbi, false);
Jaegeuk Kimf424f662015-04-20 15:19:06 -07001584 if (err) {
1585 /* undo new data */
1586 memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
Chao Yue8240f62015-12-15 17:19:26 +08001587 mnt_drop_write_file(filp);
Jaegeuk Kimf424f662015-04-20 15:19:06 -07001588 return err;
1589 }
Chao Yue8240f62015-12-15 17:19:26 +08001590 mnt_drop_write_file(filp);
Jaegeuk Kimf424f662015-04-20 15:19:06 -07001591got_it:
1592 if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
1593 16))
1594 return -EFAULT;
1595 return 0;
1596}
1597
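/*
 * F2FS_IOC_GARBAGE_COLLECT: let a privileged user kick garbage collection.
 * The __u32 argument chooses between a best-effort run (trylock on
 * gc_mutex, -EBUSY if GC is already in progress) and a synchronous one
 * that waits for the mutex before calling f2fs_gc().
 */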
Chao Yuc1c1b582015-07-10 18:08:10 +08001598static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
1599{
1600 struct inode *inode = file_inode(filp);
1601 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
Chao Yud530d4d2015-10-05 22:22:44 +08001602 __u32 sync;
Chao Yuc1c1b582015-07-10 18:08:10 +08001603
1604 if (!capable(CAP_SYS_ADMIN))
1605 return -EPERM;
1606
Chao Yud530d4d2015-10-05 22:22:44 +08001607 if (get_user(sync, (__u32 __user *)arg))
Chao Yuc1c1b582015-07-10 18:08:10 +08001608 return -EFAULT;
1609
Chao Yud530d4d2015-10-05 22:22:44 +08001610 if (f2fs_readonly(sbi->sb))
1611 return -EROFS;
Chao Yuc1c1b582015-07-10 18:08:10 +08001612
Chao Yud530d4d2015-10-05 22:22:44 +08001613 if (!sync) {
Chao Yuc1c1b582015-07-10 18:08:10 +08001614 if (!mutex_trylock(&sbi->gc_mutex))
Chao Yud530d4d2015-10-05 22:22:44 +08001615 return -EBUSY;
1616 } else {
1617 mutex_lock(&sbi->gc_mutex);
Chao Yuc1c1b582015-07-10 18:08:10 +08001618 }
1619
Chao Yud530d4d2015-10-05 22:22:44 +08001620 return f2fs_gc(sbi, sync);
Chao Yuc1c1b582015-07-10 18:08:10 +08001621}
1622
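/*
 * F2FS_IOC_WRITE_CHECKPOINT: a CAP_SYS_ADMIN- and read-only-checked
 * wrapper around f2fs_sync_fs(sb, 1), i.e. force a checkpoint now.
 */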
Chao Yu456b88e2015-10-05 22:24:19 +08001623static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
1624{
1625 struct inode *inode = file_inode(filp);
1626 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
Chao Yu456b88e2015-10-05 22:24:19 +08001627
1628 if (!capable(CAP_SYS_ADMIN))
1629 return -EPERM;
1630
1631 if (f2fs_readonly(sbi->sb))
1632 return -EROFS;
1633
Chao Yue96248b2015-12-24 18:11:32 +08001634 return f2fs_sync_fs(sbi->sb, 1);
Chao Yu456b88e2015-10-05 22:24:19 +08001635}
1636
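/*
 * Worker for F2FS_IOC_DEFRAGMENT.  After writing back the requested range
 * it walks the block mapping; if the range is already physically
 * contiguous, nothing is done.  Otherwise the data is redirtied one
 * segment's worth of pages at a time under FI_DO_DEFRAG so that writeback
 * reallocates the blocks sequentially, and range->len is updated to the
 * number of bytes actually redirtied.
 */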
Chao Yud323d002015-10-27 09:53:45 +08001637static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
1638 struct file *filp,
1639 struct f2fs_defragment *range)
1640{
1641 struct inode *inode = file_inode(filp);
1642 struct f2fs_map_blocks map;
1643 struct extent_info ei;
1644 pgoff_t pg_start, pg_end;
Chao Yu3519e3f2015-12-01 11:56:52 +08001645 unsigned int blk_per_seg = sbi->blocks_per_seg;
Chao Yud323d002015-10-27 09:53:45 +08001646 unsigned int total = 0, sec_num;
Chao Yu3519e3f2015-12-01 11:56:52 +08001647 unsigned int pages_per_sec = sbi->segs_per_sec * blk_per_seg;
Chao Yud323d002015-10-27 09:53:45 +08001648 block_t blk_end = 0;
1649 bool fragmented = false;
1650 int err;
1651
1652 /* if in-place-update policy is enabled, don't waste time here */
1653 if (need_inplace_update(inode))
1654 return -EINVAL;
1655
1656 pg_start = range->start >> PAGE_CACHE_SHIFT;
1657 pg_end = (range->start + range->len) >> PAGE_CACHE_SHIFT;
1658
1659 f2fs_balance_fs(sbi);
1660
1661 mutex_lock(&inode->i_mutex);
1662
1663 /* writeback all dirty pages in the range */
1664 err = filemap_write_and_wait_range(inode->i_mapping, range->start,
Fan Lid8fe4f02015-12-14 13:34:00 +08001665 range->start + range->len - 1);
Chao Yud323d002015-10-27 09:53:45 +08001666 if (err)
1667 goto out;
1668
1669 /*
1670	 * look up mapping info in the extent cache; skip defragmenting if the
1671	 * physical block addresses are already contiguous.
1672 */
1673 if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
1674 if (ei.fofs + ei.len >= pg_end)
1675 goto out;
1676 }
1677
1678 map.m_lblk = pg_start;
Chao Yud323d002015-10-27 09:53:45 +08001679
1680 /*
1681	 * look up mapping info in the dnode page cache; skip defragmenting if all
1682	 * physical block addresses are already contiguous, even if there are holes
1683	 * in the logical block range.
1684 */
1685 while (map.m_lblk < pg_end) {
Fan Lia1c1e9b2015-12-15 17:02:41 +08001686 map.m_len = pg_end - map.m_lblk;
Chao Yud323d002015-10-27 09:53:45 +08001687 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_READ);
1688 if (err)
1689 goto out;
1690
1691 if (!(map.m_flags & F2FS_MAP_FLAGS)) {
1692 map.m_lblk++;
Chao Yud323d002015-10-27 09:53:45 +08001693 continue;
1694 }
1695
1696 if (blk_end && blk_end != map.m_pblk) {
1697 fragmented = true;
1698 break;
1699 }
1700 blk_end = map.m_pblk + map.m_len;
1701
1702 map.m_lblk += map.m_len;
Chao Yud323d002015-10-27 09:53:45 +08001703 }
1704
1705 if (!fragmented)
1706 goto out;
1707
1708 map.m_lblk = pg_start;
1709 map.m_len = pg_end - pg_start;
1710
1711 sec_num = (map.m_len + pages_per_sec - 1) / pages_per_sec;
1712
1713 /*
1714	 * make sure there are enough free sections for LFS allocation; this can
1715	 * avoid defragmentation running in SSR mode when free sections are
1716	 * allocated intensively
1717 */
1718 if (has_not_enough_free_secs(sbi, sec_num)) {
1719 err = -EAGAIN;
1720 goto out;
1721 }
1722
1723 while (map.m_lblk < pg_end) {
1724 pgoff_t idx;
1725 int cnt = 0;
1726
1727do_map:
Fan Lia1c1e9b2015-12-15 17:02:41 +08001728 map.m_len = pg_end - map.m_lblk;
Chao Yud323d002015-10-27 09:53:45 +08001729 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_READ);
1730 if (err)
1731 goto clear_out;
1732
1733 if (!(map.m_flags & F2FS_MAP_FLAGS)) {
1734 map.m_lblk++;
1735 continue;
1736 }
1737
1738 set_inode_flag(F2FS_I(inode), FI_DO_DEFRAG);
1739
1740 idx = map.m_lblk;
1741 while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
1742 struct page *page;
1743
1744 page = get_lock_data_page(inode, idx, true);
1745 if (IS_ERR(page)) {
1746 err = PTR_ERR(page);
1747 goto clear_out;
1748 }
1749
1750 set_page_dirty(page);
1751 f2fs_put_page(page, 1);
1752
1753 idx++;
1754 cnt++;
1755 total++;
1756 }
1757
1758 map.m_lblk = idx;
Chao Yud323d002015-10-27 09:53:45 +08001759
1760 if (idx < pg_end && cnt < blk_per_seg)
1761 goto do_map;
1762
1763 clear_inode_flag(F2FS_I(inode), FI_DO_DEFRAG);
1764
1765 err = filemap_fdatawrite(inode->i_mapping);
1766 if (err)
1767 goto out;
1768 }
1769clear_out:
1770 clear_inode_flag(F2FS_I(inode), FI_DO_DEFRAG);
1771out:
1772 mutex_unlock(&inode->i_mutex);
1773 if (!err)
1774 range->len = (u64)total << PAGE_CACHE_SHIFT;
1775 return err;
1776}
1777
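/*
 * F2FS_IOC_DEFRAGMENT: validate the user-supplied f2fs_defragment range
 * (regular file, CAP_SYS_ADMIN, writable mount, block-aligned start and
 * length) before handing it to f2fs_defragment_range() and copying the
 * result back.
 */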
1778static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
1779{
1780 struct inode *inode = file_inode(filp);
1781 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1782 struct f2fs_defragment range;
1783 int err;
1784
1785 if (!capable(CAP_SYS_ADMIN))
1786 return -EPERM;
1787
1788 if (!S_ISREG(inode->i_mode))
1789 return -EINVAL;
1790
1791 err = mnt_want_write_file(filp);
1792 if (err)
1793 return err;
1794
1795 if (f2fs_readonly(sbi->sb)) {
1796 err = -EROFS;
1797 goto out;
1798 }
1799
1800 if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
1801 sizeof(range))) {
1802 err = -EFAULT;
1803 goto out;
1804 }
1805
1806 /* verify alignment of offset & size */
1807 if (range.start & (F2FS_BLKSIZE - 1) ||
1808 range.len & (F2FS_BLKSIZE - 1)) {
1809 err = -EINVAL;
1810 goto out;
1811 }
1812
1813 err = f2fs_defragment_range(sbi, filp, &range);
1814 if (err < 0)
1815 goto out;
1816
1817 if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
1818 sizeof(range)))
1819 err = -EFAULT;
1820out:
1821 mnt_drop_write_file(filp);
1822 return err;
1823}
1824
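/*
 * Top-level ioctl dispatcher for f2fs files: route each F2FS_IOC_* command
 * (plus FITRIM) to its handler above; unknown commands get -ENOTTY.
 */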
Jaegeuk Kim52656e62014-09-24 15:37:02 -07001825long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1826{
Jaegeuk Kimfbfa2cc2012-11-02 17:09:44 +09001827 switch (cmd) {
Namjae Jeon6a3e8ef2013-06-08 21:25:28 +09001828 case F2FS_IOC_GETFLAGS:
Jaegeuk Kim52656e62014-09-24 15:37:02 -07001829 return f2fs_ioc_getflags(filp, arg);
Namjae Jeon6a3e8ef2013-06-08 21:25:28 +09001830 case F2FS_IOC_SETFLAGS:
Jaegeuk Kim52656e62014-09-24 15:37:02 -07001831 return f2fs_ioc_setflags(filp, arg);
Chao Yud49f3e82015-01-23 20:36:04 +08001832 case F2FS_IOC_GETVERSION:
1833 return f2fs_ioc_getversion(filp, arg);
Jaegeuk Kim88b88a62014-10-06 17:39:50 -07001834 case F2FS_IOC_START_ATOMIC_WRITE:
1835 return f2fs_ioc_start_atomic_write(filp);
1836 case F2FS_IOC_COMMIT_ATOMIC_WRITE:
1837 return f2fs_ioc_commit_atomic_write(filp);
Jaegeuk Kim02a13352014-10-06 16:11:16 -07001838 case F2FS_IOC_START_VOLATILE_WRITE:
1839 return f2fs_ioc_start_volatile_write(filp);
Jaegeuk Kim1e843712014-12-09 06:08:59 -08001840 case F2FS_IOC_RELEASE_VOLATILE_WRITE:
1841 return f2fs_ioc_release_volatile_write(filp);
1842 case F2FS_IOC_ABORT_VOLATILE_WRITE:
1843 return f2fs_ioc_abort_volatile_write(filp);
Jaegeuk Kim1abff932015-01-08 19:15:53 -08001844 case F2FS_IOC_SHUTDOWN:
1845 return f2fs_ioc_shutdown(filp, arg);
Jaegeuk Kim4b2fecc2014-09-20 22:06:39 -07001846 case FITRIM:
Jaegeuk Kim52656e62014-09-24 15:37:02 -07001847 return f2fs_ioc_fitrim(filp, arg);
Jaegeuk Kimf424f662015-04-20 15:19:06 -07001848 case F2FS_IOC_SET_ENCRYPTION_POLICY:
1849 return f2fs_ioc_set_encryption_policy(filp, arg);
1850 case F2FS_IOC_GET_ENCRYPTION_POLICY:
1851 return f2fs_ioc_get_encryption_policy(filp, arg);
1852 case F2FS_IOC_GET_ENCRYPTION_PWSALT:
1853 return f2fs_ioc_get_encryption_pwsalt(filp, arg);
Chao Yuc1c1b582015-07-10 18:08:10 +08001854 case F2FS_IOC_GARBAGE_COLLECT:
1855 return f2fs_ioc_gc(filp, arg);
Chao Yu456b88e2015-10-05 22:24:19 +08001856 case F2FS_IOC_WRITE_CHECKPOINT:
1857 return f2fs_ioc_write_checkpoint(filp, arg);
Chao Yud323d002015-10-27 09:53:45 +08001858 case F2FS_IOC_DEFRAGMENT:
1859 return f2fs_ioc_defragment(filp, arg);
Jaegeuk Kimfbfa2cc2012-11-02 17:09:44 +09001860 default:
1861 return -ENOTTY;
1862 }
1863}
1864
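/*
 * Write path entry point: for encrypted inodes, make sure the key is
 * available before falling through to generic_file_write_iter().
 */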
Jaegeuk Kimfcc85a42015-04-21 20:39:58 -07001865static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
1866{
1867 struct inode *inode = file_inode(iocb->ki_filp);
1868
1869 if (f2fs_encrypted_inode(inode) &&
1870 !f2fs_has_encryption_key(inode) &&
1871 f2fs_get_encryption_info(inode))
1872 return -EACCES;
1873
1874 return generic_file_write_iter(iocb, from);
1875}
1876
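/*
 * 32-bit compat ioctl: translate the flag and version ioctls to their
 * native numbers, pass the commands that need no translation straight
 * through, and reject everything else with -ENOIOCTLCMD.
 */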
Namjae Jeone9750822013-02-04 23:41:41 +09001877#ifdef CONFIG_COMPAT
1878long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1879{
1880 switch (cmd) {
1881 case F2FS_IOC32_GETFLAGS:
1882 cmd = F2FS_IOC_GETFLAGS;
1883 break;
1884 case F2FS_IOC32_SETFLAGS:
1885 cmd = F2FS_IOC_SETFLAGS;
1886 break;
Chao Yu04ef4b62015-11-10 18:44:20 +08001887 case F2FS_IOC32_GETVERSION:
1888 cmd = F2FS_IOC_GETVERSION;
1889 break;
1890 case F2FS_IOC_START_ATOMIC_WRITE:
1891 case F2FS_IOC_COMMIT_ATOMIC_WRITE:
1892 case F2FS_IOC_START_VOLATILE_WRITE:
1893 case F2FS_IOC_RELEASE_VOLATILE_WRITE:
1894 case F2FS_IOC_ABORT_VOLATILE_WRITE:
1895 case F2FS_IOC_SHUTDOWN:
1896 case F2FS_IOC_SET_ENCRYPTION_POLICY:
1897 case F2FS_IOC_GET_ENCRYPTION_PWSALT:
1898 case F2FS_IOC_GET_ENCRYPTION_POLICY:
1899 case F2FS_IOC_GARBAGE_COLLECT:
1900 case F2FS_IOC_WRITE_CHECKPOINT:
1901 case F2FS_IOC_DEFRAGMENT:
1902 break;
Namjae Jeone9750822013-02-04 23:41:41 +09001903 default:
1904 return -ENOIOCTLCMD;
1905 }
1906 return f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
1907}
1908#endif
1909
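/* Entry points for regular f2fs files. */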
Jaegeuk Kimfbfa2cc2012-11-02 17:09:44 +09001910const struct file_operations f2fs_file_operations = {
Chao Yu267378d2014-04-23 14:10:24 +08001911 .llseek = f2fs_llseek,
Al Viroaad4f8b2014-04-02 14:33:16 -04001912 .read_iter = generic_file_read_iter,
Jaegeuk Kimfcc85a42015-04-21 20:39:58 -07001913 .write_iter = f2fs_file_write_iter,
1914 .open = f2fs_file_open,
Jaegeuk Kim12662232014-12-05 14:37:37 -08001915 .release = f2fs_release_file,
Jaegeuk Kimfbfa2cc2012-11-02 17:09:44 +09001916 .mmap = f2fs_file_mmap,
1917 .fsync = f2fs_sync_file,
1918 .fallocate = f2fs_fallocate,
1919 .unlocked_ioctl = f2fs_ioctl,
Namjae Jeone9750822013-02-04 23:41:41 +09001920#ifdef CONFIG_COMPAT
1921 .compat_ioctl = f2fs_compat_ioctl,
1922#endif
Jaegeuk Kimfbfa2cc2012-11-02 17:09:44 +09001923 .splice_read = generic_file_splice_read,
Al Viro8d020762014-04-05 04:27:08 -04001924 .splice_write = iter_file_splice_write,
Jaegeuk Kimfbfa2cc2012-11-02 17:09:44 +09001925};