blob: c56d04ec45dcfe369a54fd150d8b9b66d529daa4 [file] [log] [blame]
Jaegeuk Kim0a8165d2012-11-29 13:28:09 +09001/*
Jaegeuk Kim19f99ce2012-11-02 17:10:40 +09002 * fs/f2fs/inode.c
3 *
4 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com/
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11#include <linux/fs.h>
12#include <linux/f2fs_fs.h>
13#include <linux/buffer_head.h>
Jaegeuk Kime8ea9b32016-09-09 16:59:39 -070014#include <linux/backing-dev.h>
Jaegeuk Kim19f99ce2012-11-02 17:10:40 +090015#include <linux/writeback.h>
16
17#include "f2fs.h"
18#include "node.h"
19
Namjae Jeona2a4a7e2013-04-20 01:28:40 +090020#include <trace/events/f2fs.h>
21
/*
 * Propagate an inode-dirty event to the VFS, but only when
 * f2fs_inode_dirtied() reports the inode was not already dirtied.
 */
void f2fs_mark_inode_dirty_sync(struct inode *inode)
{
	if (!f2fs_inode_dirtied(inode))
		mark_inode_dirty_sync(inode);
}
28
Jaegeuk Kim19f99ce2012-11-02 17:10:40 +090029void f2fs_set_inode_flags(struct inode *inode)
30{
31 unsigned int flags = F2FS_I(inode)->i_flags;
Zhang Zhen8abfb362014-04-15 14:19:38 +080032 unsigned int new_fl = 0;
Jaegeuk Kim19f99ce2012-11-02 17:10:40 +090033
34 if (flags & FS_SYNC_FL)
Zhang Zhen8abfb362014-04-15 14:19:38 +080035 new_fl |= S_SYNC;
Jaegeuk Kim19f99ce2012-11-02 17:10:40 +090036 if (flags & FS_APPEND_FL)
Zhang Zhen8abfb362014-04-15 14:19:38 +080037 new_fl |= S_APPEND;
Jaegeuk Kim19f99ce2012-11-02 17:10:40 +090038 if (flags & FS_IMMUTABLE_FL)
Zhang Zhen8abfb362014-04-15 14:19:38 +080039 new_fl |= S_IMMUTABLE;
Jaegeuk Kim19f99ce2012-11-02 17:10:40 +090040 if (flags & FS_NOATIME_FL)
Zhang Zhen8abfb362014-04-15 14:19:38 +080041 new_fl |= S_NOATIME;
Jaegeuk Kim19f99ce2012-11-02 17:10:40 +090042 if (flags & FS_DIRSYNC_FL)
Zhang Zhen8abfb362014-04-15 14:19:38 +080043 new_fl |= S_DIRSYNC;
Zhang Zhen6a678852015-08-24 10:41:32 +080044 inode_set_flags(inode, new_fl,
45 S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
Jaegeuk Kimb56ab832016-06-30 19:09:37 -070046 f2fs_mark_inode_dirty_sync(inode);
Jaegeuk Kim19f99ce2012-11-02 17:10:40 +090047}
48
Jaegeuk Kim3d1e3802013-10-08 18:01:51 +090049static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
50{
51 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
52 S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
53 if (ri->i_addr[0])
Chris Fries6c311ec2014-01-17 14:44:39 -060054 inode->i_rdev =
55 old_decode_dev(le32_to_cpu(ri->i_addr[0]));
Jaegeuk Kim3d1e3802013-10-08 18:01:51 +090056 else
Chris Fries6c311ec2014-01-17 14:44:39 -060057 inode->i_rdev =
58 new_decode_dev(le32_to_cpu(ri->i_addr[1]));
Jaegeuk Kim3d1e3802013-10-08 18:01:51 +090059 }
60}
61
Chao Yub10a6ac2018-07-10 23:01:45 +080062static int __written_first_block(struct f2fs_sb_info *sbi,
Chao Yud4511882018-06-05 17:44:11 +080063 struct f2fs_inode *ri)
Jaegeuk Kim3c6c2be2015-03-17 17:16:35 -070064{
Jaegeuk Kimadad81e2015-03-24 12:04:20 -070065 block_t addr = le32_to_cpu(ri->i_addr[0]);
66
Chao Yub10a6ac2018-07-10 23:01:45 +080067 if (!__is_valid_data_blkaddr(addr))
68 return 1;
69 if (!f2fs_is_valid_blkaddr(sbi, addr, DATA_GENERIC))
70 return -EFAULT;
71 return 0;
Jaegeuk Kim3c6c2be2015-03-17 17:16:35 -070072}
73
Jaegeuk Kim3d1e3802013-10-08 18:01:51 +090074static void __set_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
75{
76 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
77 if (old_valid_dev(inode->i_rdev)) {
Chris Fries6c311ec2014-01-17 14:44:39 -060078 ri->i_addr[0] =
79 cpu_to_le32(old_encode_dev(inode->i_rdev));
Jaegeuk Kim3d1e3802013-10-08 18:01:51 +090080 ri->i_addr[1] = 0;
81 } else {
82 ri->i_addr[0] = 0;
Chris Fries6c311ec2014-01-17 14:44:39 -060083 ri->i_addr[1] =
84 cpu_to_le32(new_encode_dev(inode->i_rdev));
Jaegeuk Kim3d1e3802013-10-08 18:01:51 +090085 ri->i_addr[2] = 0;
86 }
87 }
88}
89
Chao Yu9e5ba772015-01-06 14:28:43 +080090static void __recover_inline_status(struct inode *inode, struct page *ipage)
Jaegeuk Kimb3d208f2014-10-23 19:48:09 -070091{
92 void *inline_data = inline_data_addr(ipage);
Chao Yu9e5ba772015-01-06 14:28:43 +080093 __le32 *start = inline_data;
94 __le32 *end = start + MAX_INLINE_DATA / sizeof(__le32);
Jaegeuk Kimb3d208f2014-10-23 19:48:09 -070095
Chao Yu9e5ba772015-01-06 14:28:43 +080096 while (start < end) {
97 if (*start++) {
Jaegeuk Kimfec1d652016-01-20 23:43:51 +080098 f2fs_wait_on_page_writeback(ipage, NODE, true);
Jaegeuk Kimb3d208f2014-10-23 19:48:09 -070099
Jaegeuk Kim91942322016-05-20 10:13:22 -0700100 set_inode_flag(inode, FI_DATA_EXIST);
101 set_raw_inline(inode, F2FS_INODE(ipage));
Chao Yu9e5ba772015-01-06 14:28:43 +0800102 set_page_dirty(ipage);
103 return;
104 }
Jaegeuk Kimb3d208f2014-10-23 19:48:09 -0700105 }
Chao Yu9e5ba772015-01-06 14:28:43 +0800106 return;
Jaegeuk Kimb3d208f2014-10-23 19:48:09 -0700107}
108
Chao Yu1c879802018-06-29 13:55:22 +0800109static bool sanity_check_inode(struct inode *inode, struct page *node_page)
Ben Hutchingsfd8c7d32018-11-29 19:17:34 +0000110{
111 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
Chao Yu1c879802018-06-29 13:55:22 +0800112 unsigned long long iblocks;
113
114 iblocks = le64_to_cpu(F2FS_INODE(node_page)->i_blocks);
115 if (!iblocks) {
116 set_sbi_flag(sbi, SBI_NEED_FSCK);
117 f2fs_msg(sbi->sb, KERN_WARNING,
118 "%s: corrupted inode i_blocks i_ino=%lx iblocks=%llu, "
119 "run fsck to fix.",
120 __func__, inode->i_ino, iblocks);
121 return false;
122 }
123
124 if (ino_of_node(node_page) != nid_of_node(node_page)) {
125 set_sbi_flag(sbi, SBI_NEED_FSCK);
126 f2fs_msg(sbi->sb, KERN_WARNING,
127 "%s: corrupted inode footer i_ino=%lx, ino,nid: "
128 "[%u, %u] run fsck to fix.",
129 __func__, inode->i_ino,
130 ino_of_node(node_page), nid_of_node(node_page));
131 return false;
132 }
Ben Hutchingsfd8c7d32018-11-29 19:17:34 +0000133
Chao Yuaafb3712018-08-01 19:13:44 +0800134 if (F2FS_I(inode)->extent_tree) {
135 struct extent_info *ei = &F2FS_I(inode)->extent_tree->largest;
136
137 if (ei->len &&
138 (!f2fs_is_valid_blkaddr(sbi, ei->blk, DATA_GENERIC) ||
139 !f2fs_is_valid_blkaddr(sbi, ei->blk + ei->len - 1,
140 DATA_GENERIC))) {
141 set_sbi_flag(sbi, SBI_NEED_FSCK);
142 f2fs_msg(sbi->sb, KERN_WARNING,
143 "%s: inode (ino=%lx) extent info [%u, %u, %u] "
144 "is incorrect, run fsck to fix",
145 __func__, inode->i_ino,
146 ei->blk, ei->fofs, ei->len);
147 return false;
148 }
149 }
Ben Hutchingsfd8c7d32018-11-29 19:17:34 +0000150 return true;
151}
152
/*
 * Read the on-disk inode block of @inode and initialize the in-memory
 * VFS inode and f2fs_inode_info from it.
 *
 * Returns 0 on success, or a negative errno when the inode number is
 * out of range, the node page cannot be read, the on-disk inode fails
 * sanity checking, or the recorded first data block address is invalid.
 */
static int do_read_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct page *node_page;
	struct f2fs_inode *ri;
	int err;

	/* Check if ino is within scope */
	if (check_nid_range(sbi, inode->i_ino)) {
		f2fs_msg(inode->i_sb, KERN_ERR, "bad inode number: %lu",
			(unsigned long) inode->i_ino);
		WARN_ON(1);
		return -EINVAL;
	}

	node_page = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	ri = F2FS_INODE(node_page);

	/* copy the on-disk (little-endian) fields into the VFS inode */
	inode->i_mode = le16_to_cpu(ri->i_mode);
	i_uid_write(inode, le32_to_cpu(ri->i_uid));
	i_gid_write(inode, le32_to_cpu(ri->i_gid));
	set_nlink(inode, le32_to_cpu(ri->i_links));
	inode->i_size = le64_to_cpu(ri->i_size);
	inode->i_blocks = le64_to_cpu(ri->i_blocks);

	inode->i_atime.tv_sec = le64_to_cpu(ri->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(ri->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(ri->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(ri->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(ri->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
	inode->i_generation = le32_to_cpu(ri->i_generation);

	/* f2fs-private state */
	fi->i_current_depth = le32_to_cpu(ri->i_current_depth);
	fi->i_xattr_nid = le32_to_cpu(ri->i_xattr_nid);
	fi->i_flags = le32_to_cpu(ri->i_flags);
	fi->flags = 0;
	fi->i_advise = ri->i_advise;
	fi->i_pino = le32_to_cpu(ri->i_pino);
	fi->i_dir_level = ri->i_dir_level;

	/* a truncated largest extent must be written back via the page */
	if (f2fs_init_extent_tree(inode, &ri->i_ext))
		set_page_dirty(node_page);

	get_inline_info(inode, ri);

	/* reject obviously corrupted on-disk inodes before using them */
	if (!sanity_check_inode(inode, node_page)) {
		f2fs_put_page(node_page, 1);
		return -EINVAL;
	}

	/* check data exist */
	if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
		__recover_inline_status(inode, node_page);

	/* get rdev by using inline_info */
	__get_inode_rdev(inode, ri);

	/* 0: first block written, 1: not written, <0: invalid address */
	err = __written_first_block(sbi, ri);
	if (err < 0) {
		f2fs_put_page(node_page, 1);
		return err;
	}
	if (!err)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);

	if (!need_inode_block_update(sbi, inode->i_ino))
		fi->last_disk_size = inode->i_size;

	f2fs_put_page(node_page, 1);

	stat_inc_inline_xattr(inode);
	stat_inc_inline_inode(inode);
	stat_inc_inline_dir(inode);

	return 0;
}
234
/*
 * Look up (or read from disk) the inode for @ino on @sb.
 *
 * Returns the inode on success or an ERR_PTR on failure.  The special
 * node/meta inode numbers have no on-disk inode block and skip
 * do_read_inode(); everything else is populated from disk and then gets
 * its operation vectors assigned according to its file type.
 */
struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;
	int ret = 0;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	/* cache hit: inode is already fully initialized */
	if (!(inode->i_state & I_NEW)) {
		trace_f2fs_iget(inode);
		return inode;
	}
	/* the internal node/meta inodes have no on-disk inode block */
	if (ino == F2FS_NODE_INO(sbi) || ino == F2FS_META_INO(sbi))
		goto make_now;

	ret = do_read_inode(inode);
	if (ret)
		goto bad_inode;
make_now:
	/* wire up per-type inode/file/address-space operations */
	if (ino == F2FS_NODE_INO(sbi)) {
		inode->i_mapping->a_ops = &f2fs_node_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO);
	} else if (ino == F2FS_META_INO(sbi)) {
		inode->i_mapping->a_ops = &f2fs_meta_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO);
	} else if (S_ISREG(inode->i_mode)) {
		inode->i_op = &f2fs_file_inode_operations;
		inode->i_fop = &f2fs_file_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &f2fs_dir_inode_operations;
		inode->i_fop = &f2fs_dir_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_HIGH_ZERO);
	} else if (S_ISLNK(inode->i_mode)) {
		if (f2fs_encrypted_inode(inode))
			inode->i_op = &f2fs_encrypted_symlink_inode_operations;
		else
			inode->i_op = &f2fs_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
			S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		inode->i_op = &f2fs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
	} else {
		/* unknown file type on disk */
		ret = -EIO;
		goto bad_inode;
	}
	unlock_new_inode(inode);
	trace_f2fs_iget(inode);
	return inode;

bad_inode:
	/* clear the dirty state before the inode is destroyed */
	f2fs_inode_synced(inode);
	iget_failed(inode);
	trace_f2fs_iget_exit(inode, ret);
	return ERR_PTR(ret);
}
296
Jaegeuk Kime8ea9b32016-09-09 16:59:39 -0700297struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino)
298{
299 struct inode *inode;
300retry:
301 inode = f2fs_iget(sb, ino);
302 if (IS_ERR(inode)) {
303 if (PTR_ERR(inode) == -ENOMEM) {
304 congestion_wait(BLK_RW_ASYNC, HZ/50);
305 goto retry;
306 }
307 }
308 return inode;
309}
310
/*
 * Serialize the in-memory inode state back into its on-disk inode block
 * in @node_page (which the caller holds locked).
 *
 * Returns the value of set_page_dirty(), i.e. whether the page newly
 * became dirty.
 */
int update_inode(struct inode *inode, struct page *node_page)
{
	struct f2fs_inode *ri;

	/* the inode is about to be written out: clear its dirty state */
	f2fs_inode_synced(inode);

	f2fs_wait_on_page_writeback(node_page, NODE, true);

	ri = F2FS_INODE(node_page);

	/* mirror the VFS inode fields into the raw (little-endian) inode */
	ri->i_mode = cpu_to_le16(inode->i_mode);
	ri->i_advise = F2FS_I(inode)->i_advise;
	ri->i_uid = cpu_to_le32(i_uid_read(inode));
	ri->i_gid = cpu_to_le32(i_gid_read(inode));
	ri->i_links = cpu_to_le32(inode->i_nlink);
	ri->i_size = cpu_to_le64(i_size_read(inode));
	ri->i_blocks = cpu_to_le64(inode->i_blocks);

	/* persist the cached largest extent, or clear it when absent */
	if (F2FS_I(inode)->extent_tree)
		set_raw_extent(&F2FS_I(inode)->extent_tree->largest,
							&ri->i_ext);
	else
		memset(&ri->i_ext, 0, sizeof(ri->i_ext));
	set_raw_inline(inode, ri);

	ri->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
	ri->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	ri->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	ri->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
	ri->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	ri->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	ri->i_current_depth = cpu_to_le32(F2FS_I(inode)->i_current_depth);
	ri->i_xattr_nid = cpu_to_le32(F2FS_I(inode)->i_xattr_nid);
	ri->i_flags = cpu_to_le32(F2FS_I(inode)->i_flags);
	ri->i_pino = cpu_to_le32(F2FS_I(inode)->i_pino);
	ri->i_generation = cpu_to_le32(inode->i_generation);
	ri->i_dir_level = F2FS_I(inode)->i_dir_level;

	__set_inode_rdev(inode, ri);
	set_cold_node(inode, node_page);

	/* deleted inode */
	if (inode->i_nlink == 0)
		clear_inline_node(node_page);

	return set_page_dirty(node_page);
}
358
Jaegeuk Kim12719ae2016-01-07 13:23:12 -0800359int update_inode_page(struct inode *inode)
Jaegeuk Kim19f99ce2012-11-02 17:10:40 +0900360{
Jaegeuk Kim40813632014-09-02 15:31:18 -0700361 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
Jaegeuk Kim19f99ce2012-11-02 17:10:40 +0900362 struct page *node_page;
Jaegeuk Kim12719ae2016-01-07 13:23:12 -0800363 int ret = 0;
Jaegeuk Kim744602c2014-01-24 09:42:16 +0900364retry:
Jaegeuk Kim39936832012-11-22 16:21:29 +0900365 node_page = get_node_page(sbi, inode->i_ino);
Jaegeuk Kim744602c2014-01-24 09:42:16 +0900366 if (IS_ERR(node_page)) {
367 int err = PTR_ERR(node_page);
368 if (err == -ENOMEM) {
369 cond_resched();
370 goto retry;
371 } else if (err != -ENOENT) {
Jaegeuk Kim38f91ca2016-05-18 14:07:56 -0700372 f2fs_stop_checkpoint(sbi, false);
Jaegeuk Kim744602c2014-01-24 09:42:16 +0900373 }
Jaegeuk Kim0f18b462016-05-20 11:10:10 -0700374 f2fs_inode_synced(inode);
Jaegeuk Kim12719ae2016-01-07 13:23:12 -0800375 return 0;
Jaegeuk Kim744602c2014-01-24 09:42:16 +0900376 }
Jaegeuk Kim12719ae2016-01-07 13:23:12 -0800377 ret = update_inode(inode, node_page);
Jaegeuk Kim39936832012-11-22 16:21:29 +0900378 f2fs_put_page(node_page, 1);
Jaegeuk Kim12719ae2016-01-07 13:23:12 -0800379 return ret;
Jaegeuk Kim39936832012-11-22 16:21:29 +0900380}
381
382int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
383{
Jaegeuk Kim40813632014-09-02 15:31:18 -0700384 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
Jaegeuk Kim19f99ce2012-11-02 17:10:40 +0900385
386 if (inode->i_ino == F2FS_NODE_INO(sbi) ||
387 inode->i_ino == F2FS_META_INO(sbi))
388 return 0;
389
Jaegeuk Kim91942322016-05-20 10:13:22 -0700390 if (!is_inode_flag_set(inode, FI_DIRTY_INODE))
Jaegeuk Kimb3783872013-06-10 09:17:01 +0900391 return 0;
392
Jaegeuk Kim39936832012-11-22 16:21:29 +0900393 /*
Jaegeuk Kimc5cd29d2015-09-12 11:25:30 -0700394 * We need to balance fs here to prevent from producing dirty node pages
Jaegeuk Kim39936832012-11-22 16:21:29 +0900395 * during the urgent cleaning time when runing out of free sections.
396 */
Jaegeuk Kim12719ae2016-01-07 13:23:12 -0800397 if (update_inode_page(inode))
Jaegeuk Kim2c4db1a2016-01-07 14:15:04 -0800398 f2fs_balance_fs(sbi, true);
Jaegeuk Kim744602c2014-01-24 09:42:16 +0900399 return 0;
Jaegeuk Kim19f99ce2012-11-02 17:10:40 +0900400}
401
/*
 * Called at the last iput() if i_nlink is zero
 */
void f2fs_evict_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
	int err = 0;

	/* some remained atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		drop_inmem_pages(inode);

	trace_f2fs_evict_inode(inode);
	truncate_inode_pages_final(&inode->i_data);

	/* the internal node/meta inodes carry no on-disk inode to delete */
	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		goto out_clear;

	f2fs_bug_on(sbi, get_dirty_pages(inode));
	remove_dirty_inode(inode);

	f2fs_destroy_extent_tree(inode);

	/* still linked or known-bad: skip the on-disk deletion path */
	if (inode->i_nlink || is_bad_inode(inode))
		goto no_delete;

#ifdef CONFIG_F2FS_FAULT_INJECTION
	/* simulate a failed eviction for testing */
	if (time_to_inject(sbi, FAULT_EVICT_INODE))
		goto no_delete;
#endif

	sb_start_intwrite(inode->i_sb);
	set_inode_flag(inode, FI_NO_ALLOC);
	i_size_write(inode, 0);
retry:
	if (F2FS_HAS_BLOCKS(inode))
		err = f2fs_truncate(inode);

	if (!err) {
		f2fs_lock_op(sbi);
		err = remove_inode_page(inode);
		f2fs_unlock_op(sbi);
	}

	/* give more chances, if ENOMEM case */
	if (err == -ENOMEM) {
		err = 0;
		goto retry;
	}

	/* deletion failed: make sure the inode block reflects reality */
	if (err)
		update_inode_page(inode);
	sb_end_intwrite(inode->i_sb);
no_delete:
	stat_dec_inline_xattr(inode);
	stat_dec_inline_dir(inode);
	stat_dec_inline_inode(inode);

	/* drop any cached node pages of this inode and its xattr node */
	invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino, inode->i_ino);
	if (xnid)
		invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid);
	if (is_inode_flag_set(inode, FI_APPEND_WRITE))
		add_ino_entry(sbi, inode->i_ino, APPEND_INO);
	if (is_inode_flag_set(inode, FI_UPDATE_WRITE))
		add_ino_entry(sbi, inode->i_ino, UPDATE_INO);
	if (is_inode_flag_set(inode, FI_FREE_NID)) {
		/* inode block was never written; just release the nid */
		alloc_nid_failed(sbi, inode->i_ino);
		clear_inode_flag(inode, FI_FREE_NID);
	}
	/* a failed deletion must have left the inode on the orphan list */
	f2fs_bug_on(sbi, err &&
		!exist_written_data(sbi, inode->i_ino, ORPHAN_INO));
out_clear:
	fscrypt_put_encryption_info(inode, NULL);
	clear_inode(inode);
}
Jaegeuk Kim44c16152014-09-25 11:55:53 -0700479
/* caller should call f2fs_lock_op() */
void handle_failed_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct node_info ni;

	/* don't make bad inode, since it becomes a regular file. */
	unlock_new_inode(inode);

	/*
	 * Note: we should add inode to orphan list before f2fs_unlock_op()
	 * so we can prevent losing this orphan when encountering checkpoint
	 * and following suddenly power-off.
	 */
	get_node_info(sbi, inode->i_ino, &ni);

	if (ni.blk_addr != NULL_ADDR) {
		/* the inode block hit disk: it must be tracked as an orphan */
		int err = acquire_orphan_inode(sbi);
		if (err) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_msg(sbi->sb, KERN_WARNING,
				"Too many orphan inodes, run fsck to fix.");
		} else {
			add_orphan_inode(inode);
		}
		alloc_nid_done(sbi, inode->i_ino);
	} else {
		/* never written: defer freeing the nid to eviction time */
		set_inode_flag(inode, FI_FREE_NID);
	}

	f2fs_unlock_op(sbi);

	/* iput will drop the inode object */
	iput(inode);
}