/*
 * fs/f2fs/super.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/statfs.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/parser.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/exportfs.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/f2fs_fs.h>
#include <linux/sysfs.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "gc.h"
#include "trace.h"

#define CREATE_TRACE_POINTS
#include <trace/events/f2fs.h>

static struct kmem_cache *f2fs_inode_cachep;

#ifdef CONFIG_F2FS_FAULT_INJECTION

char *fault_name[FAULT_MAX] = {
	[FAULT_KMALLOC]		= "kmalloc",
	[FAULT_PAGE_ALLOC]	= "page alloc",
	[FAULT_ALLOC_NID]	= "alloc nid",
	[FAULT_ORPHAN]		= "orphan",
	[FAULT_BLOCK]		= "no more block",
	[FAULT_DIR_DEPTH]	= "too big dir depth",
	[FAULT_EVICT_INODE]	= "evict_inode fail",
	[FAULT_TRUNCATE]	= "truncate fail",
	[FAULT_IO]		= "IO error",
	[FAULT_CHECKPOINT]	= "checkpoint error",
};

static void f2fs_build_fault_attr(struct f2fs_sb_info *sbi,
						unsigned int rate)
{
	struct f2fs_fault_info *ffi = &sbi->fault_info;

	if (rate) {
		atomic_set(&ffi->inject_ops, 0);
		ffi->inject_rate = rate;
		ffi->inject_type = (1 << FAULT_MAX) - 1;
	} else {
		memset(ffi, 0, sizeof(struct f2fs_fault_info));
	}
}
#endif

/* f2fs-wide shrinker description */
static struct shrinker f2fs_shrinker_info = {
	.scan_objects = f2fs_shrink_scan,
	.count_objects = f2fs_shrink_count,
	.seeks = DEFAULT_SEEKS,
};

enum {
	Opt_gc_background,
	Opt_disable_roll_forward,
	Opt_norecovery,
	Opt_discard,
	Opt_nodiscard,
	Opt_noheap,
	Opt_heap,
	Opt_user_xattr,
	Opt_nouser_xattr,
	Opt_acl,
	Opt_noacl,
	Opt_active_logs,
	Opt_disable_ext_identify,
	Opt_inline_xattr,
	Opt_noinline_xattr,
	Opt_inline_data,
	Opt_inline_dentry,
	Opt_noinline_dentry,
	Opt_flush_merge,
	Opt_noflush_merge,
	Opt_nobarrier,
	Opt_fastboot,
	Opt_extent_cache,
	Opt_noextent_cache,
	Opt_noinline_data,
	Opt_data_flush,
	Opt_mode,
	Opt_io_size_bits,
	Opt_fault_injection,
	Opt_lazytime,
	Opt_nolazytime,
	Opt_usrquota,
	Opt_grpquota,
	Opt_prjquota,
	Opt_err,
};

static match_table_t f2fs_tokens = {
	{Opt_gc_background, "background_gc=%s"},
	{Opt_disable_roll_forward, "disable_roll_forward"},
	{Opt_norecovery, "norecovery"},
	{Opt_discard, "discard"},
	{Opt_nodiscard, "nodiscard"},
	{Opt_noheap, "no_heap"},
	{Opt_heap, "heap"},
	{Opt_user_xattr, "user_xattr"},
	{Opt_nouser_xattr, "nouser_xattr"},
	{Opt_acl, "acl"},
	{Opt_noacl, "noacl"},
	{Opt_active_logs, "active_logs=%u"},
	{Opt_disable_ext_identify, "disable_ext_identify"},
	{Opt_inline_xattr, "inline_xattr"},
	{Opt_noinline_xattr, "noinline_xattr"},
	{Opt_inline_data, "inline_data"},
	{Opt_inline_dentry, "inline_dentry"},
	{Opt_noinline_dentry, "noinline_dentry"},
	{Opt_flush_merge, "flush_merge"},
	{Opt_noflush_merge, "noflush_merge"},
	{Opt_nobarrier, "nobarrier"},
	{Opt_fastboot, "fastboot"},
	{Opt_extent_cache, "extent_cache"},
	{Opt_noextent_cache, "noextent_cache"},
	{Opt_noinline_data, "noinline_data"},
	{Opt_data_flush, "data_flush"},
	{Opt_mode, "mode=%s"},
	{Opt_io_size_bits, "io_bits=%u"},
	{Opt_fault_injection, "fault_injection=%u"},
	{Opt_lazytime, "lazytime"},
	{Opt_nolazytime, "nolazytime"},
	{Opt_usrquota, "usrquota"},
	{Opt_grpquota, "grpquota"},
	{Opt_prjquota, "prjquota"},
	{Opt_err, NULL},
};

void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk_ratelimited("%sF2FS-fs (%s): %pV\n", level, sb->s_id, &vaf);
	va_end(args);
}

static void init_once(void *foo)
{
	struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;

	inode_init_once(&fi->vfs_inode);
}

static int parse_options(struct super_block *sb, char *options)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct request_queue *q;
	substring_t args[MAX_OPT_ARGS];
	char *p, *name;
	int arg = 0;

	if (!options)
		return 0;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
		if (!*p)
			continue;
		/*
		 * Initialize args struct so we know whether arg was
		 * found; some options take optional arguments.
		 */
		args[0].to = args[0].from = NULL;
		token = match_token(p, f2fs_tokens, args);

		switch (token) {
		case Opt_gc_background:
			name = match_strdup(&args[0]);

			if (!name)
				return -ENOMEM;
			if (strlen(name) == 2 && !strncmp(name, "on", 2)) {
				set_opt(sbi, BG_GC);
				clear_opt(sbi, FORCE_FG_GC);
			} else if (strlen(name) == 3 && !strncmp(name, "off", 3)) {
				clear_opt(sbi, BG_GC);
				clear_opt(sbi, FORCE_FG_GC);
			} else if (strlen(name) == 4 && !strncmp(name, "sync", 4)) {
				set_opt(sbi, BG_GC);
				set_opt(sbi, FORCE_FG_GC);
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_disable_roll_forward:
			set_opt(sbi, DISABLE_ROLL_FORWARD);
			break;
		case Opt_norecovery:
			/* this option mounts f2fs with ro */
			set_opt(sbi, DISABLE_ROLL_FORWARD);
			if (!f2fs_readonly(sb))
				return -EINVAL;
			break;
		case Opt_discard:
			q = bdev_get_queue(sb->s_bdev);
			if (blk_queue_discard(q)) {
				set_opt(sbi, DISCARD);
			} else if (!f2fs_sb_mounted_blkzoned(sb)) {
				f2fs_msg(sb, KERN_WARNING,
					"mounting with \"discard\" option, but "
					"the device does not support discard");
			}
			break;
		case Opt_nodiscard:
			if (f2fs_sb_mounted_blkzoned(sb)) {
				f2fs_msg(sb, KERN_WARNING,
					"discard is required for zoned block devices");
				return -EINVAL;
			}
			clear_opt(sbi, DISCARD);
			break;
		case Opt_noheap:
			set_opt(sbi, NOHEAP);
			break;
		case Opt_heap:
			clear_opt(sbi, NOHEAP);
			break;
#ifdef CONFIG_F2FS_FS_XATTR
		case Opt_user_xattr:
			set_opt(sbi, XATTR_USER);
			break;
		case Opt_nouser_xattr:
			clear_opt(sbi, XATTR_USER);
			break;
		case Opt_inline_xattr:
			set_opt(sbi, INLINE_XATTR);
			break;
		case Opt_noinline_xattr:
			clear_opt(sbi, INLINE_XATTR);
			break;
#else
		case Opt_user_xattr:
			f2fs_msg(sb, KERN_INFO,
				"user_xattr options not supported");
			break;
		case Opt_nouser_xattr:
			f2fs_msg(sb, KERN_INFO,
				"nouser_xattr options not supported");
			break;
		case Opt_inline_xattr:
			f2fs_msg(sb, KERN_INFO,
				"inline_xattr options not supported");
			break;
		case Opt_noinline_xattr:
			f2fs_msg(sb, KERN_INFO,
				"noinline_xattr options not supported");
			break;
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
		case Opt_acl:
			set_opt(sbi, POSIX_ACL);
			break;
		case Opt_noacl:
			clear_opt(sbi, POSIX_ACL);
			break;
#else
		case Opt_acl:
			f2fs_msg(sb, KERN_INFO, "acl options not supported");
			break;
		case Opt_noacl:
			f2fs_msg(sb, KERN_INFO, "noacl options not supported");
			break;
#endif
		case Opt_active_logs:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg != 2 && arg != 4 && arg != NR_CURSEG_TYPE)
				return -EINVAL;
			sbi->active_logs = arg;
			break;
		case Opt_disable_ext_identify:
			set_opt(sbi, DISABLE_EXT_IDENTIFY);
			break;
		case Opt_inline_data:
			set_opt(sbi, INLINE_DATA);
			break;
		case Opt_inline_dentry:
			set_opt(sbi, INLINE_DENTRY);
			break;
		case Opt_noinline_dentry:
			clear_opt(sbi, INLINE_DENTRY);
			break;
		case Opt_flush_merge:
			set_opt(sbi, FLUSH_MERGE);
			break;
		case Opt_noflush_merge:
			clear_opt(sbi, FLUSH_MERGE);
			break;
		case Opt_nobarrier:
			set_opt(sbi, NOBARRIER);
			break;
		case Opt_fastboot:
			set_opt(sbi, FASTBOOT);
			break;
		case Opt_extent_cache:
			set_opt(sbi, EXTENT_CACHE);
			break;
		case Opt_noextent_cache:
			clear_opt(sbi, EXTENT_CACHE);
			break;
		case Opt_noinline_data:
			clear_opt(sbi, INLINE_DATA);
			break;
		case Opt_data_flush:
			set_opt(sbi, DATA_FLUSH);
			break;
		case Opt_mode:
			name = match_strdup(&args[0]);

			if (!name)
				return -ENOMEM;
			if (strlen(name) == 8 &&
					!strncmp(name, "adaptive", 8)) {
				if (f2fs_sb_mounted_blkzoned(sb)) {
					f2fs_msg(sb, KERN_WARNING,
						"adaptive mode is not allowed with "
						"zoned block device feature");
					kfree(name);
					return -EINVAL;
				}
				set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
			} else if (strlen(name) == 3 &&
					!strncmp(name, "lfs", 3)) {
				set_opt_mode(sbi, F2FS_MOUNT_LFS);
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_io_size_bits:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg > __ilog2_u32(BIO_MAX_PAGES)) {
				f2fs_msg(sb, KERN_WARNING,
					"Not support %d, larger than %d",
					1 << arg, BIO_MAX_PAGES);
				return -EINVAL;
			}
			sbi->write_io_size_bits = arg;
			break;
		case Opt_fault_injection:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
#ifdef CONFIG_F2FS_FAULT_INJECTION
			f2fs_build_fault_attr(sbi, arg);
			set_opt(sbi, FAULT_INJECTION);
#else
			f2fs_msg(sb, KERN_INFO,
				"FAULT_INJECTION was not selected");
#endif
			break;
		case Opt_lazytime:
			sb->s_flags |= MS_LAZYTIME;
			break;
		case Opt_nolazytime:
			sb->s_flags &= ~MS_LAZYTIME;
			break;
#ifdef CONFIG_QUOTA
		case Opt_usrquota:
			set_opt(sbi, USRQUOTA);
			break;
		case Opt_grpquota:
			set_opt(sbi, GRPQUOTA);
			break;
		case Opt_prjquota:
			set_opt(sbi, PRJQUOTA);
			break;
#else
		case Opt_usrquota:
		case Opt_grpquota:
		case Opt_prjquota:
			f2fs_msg(sb, KERN_INFO,
				"quota operations not supported");
			break;
#endif
		default:
			f2fs_msg(sb, KERN_ERR,
				"Unrecognized mount option \"%s\" or missing value",
				p);
			return -EINVAL;
		}
	}

	if (F2FS_IO_SIZE_BITS(sbi) && !test_opt(sbi, LFS)) {
		f2fs_msg(sb, KERN_ERR,
				"Should set mode=lfs with %uKB-sized IO",
				F2FS_IO_SIZE_KB(sbi));
		return -EINVAL;
	}
	return 0;
}

static struct inode *f2fs_alloc_inode(struct super_block *sb)
{
	struct f2fs_inode_info *fi;

	fi = kmem_cache_alloc(f2fs_inode_cachep, GFP_F2FS_ZERO);
	if (!fi)
		return NULL;

	init_once((void *) fi);

	/* Initialize f2fs-specific inode info */
	fi->vfs_inode.i_version = 1;
	atomic_set(&fi->dirty_pages, 0);
	fi->i_current_depth = 1;
	fi->i_advise = 0;
	init_rwsem(&fi->i_sem);
	INIT_LIST_HEAD(&fi->dirty_list);
	INIT_LIST_HEAD(&fi->gdirty_list);
	INIT_LIST_HEAD(&fi->inmem_pages);
	mutex_init(&fi->inmem_lock);
	init_rwsem(&fi->dio_rwsem[READ]);
	init_rwsem(&fi->dio_rwsem[WRITE]);
	init_rwsem(&fi->i_mmap_sem);

#ifdef CONFIG_QUOTA
	memset(&fi->i_dquot, 0, sizeof(fi->i_dquot));
	fi->i_reserved_quota = 0;
#endif
	/* Will be used by directory only */
	fi->i_dir_level = F2FS_SB(sb)->dir_level;

	return &fi->vfs_inode;
}

static int f2fs_drop_inode(struct inode *inode)
{
	int ret;
	/*
	 * This is to avoid a deadlock condition like below.
	 * writeback_single_inode(inode)
	 *  - f2fs_write_data_page
	 *    - f2fs_gc -> iput -> evict
	 *       - inode_wait_for_writeback(inode)
	 */
	if ((!inode_unhashed(inode) && inode->i_state & I_SYNC)) {
		if (!inode->i_nlink && !is_bad_inode(inode)) {
			/* to avoid evict_inode call simultaneously */
			atomic_inc(&inode->i_count);
			spin_unlock(&inode->i_lock);

			/* some remaining atomic pages should be discarded */
			if (f2fs_is_atomic_file(inode))
				drop_inmem_pages(inode);

			/* should remain fi->extent_tree for writepage */
			f2fs_destroy_extent_node(inode);

			sb_start_intwrite(inode->i_sb);
			f2fs_i_size_write(inode, 0);

			if (F2FS_HAS_BLOCKS(inode))
				f2fs_truncate(inode);

			sb_end_intwrite(inode->i_sb);

			fscrypt_put_encryption_info(inode, NULL);
			spin_lock(&inode->i_lock);
			atomic_dec(&inode->i_count);
		}
		trace_f2fs_drop_inode(inode, 0);
		return 0;
	}
	ret = generic_drop_inode(inode);
	trace_f2fs_drop_inode(inode, ret);
	return ret;
}

int f2fs_inode_dirtied(struct inode *inode, bool sync)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret = 0;

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
		ret = 1;
	} else {
		set_inode_flag(inode, FI_DIRTY_INODE);
		stat_inc_dirty_inode(sbi, DIRTY_META);
	}
	if (sync && list_empty(&F2FS_I(inode)->gdirty_list)) {
		list_add_tail(&F2FS_I(inode)->gdirty_list,
				&sbi->inode_list[DIRTY_META]);
		inc_page_count(sbi, F2FS_DIRTY_IMETA);
	}
	spin_unlock(&sbi->inode_lock[DIRTY_META]);
	return ret;
}

void f2fs_inode_synced(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	if (!is_inode_flag_set(inode, FI_DIRTY_INODE)) {
		spin_unlock(&sbi->inode_lock[DIRTY_META]);
		return;
	}
	if (!list_empty(&F2FS_I(inode)->gdirty_list)) {
		list_del_init(&F2FS_I(inode)->gdirty_list);
		dec_page_count(sbi, F2FS_DIRTY_IMETA);
	}
	clear_inode_flag(inode, FI_DIRTY_INODE);
	clear_inode_flag(inode, FI_AUTO_RECOVER);
	stat_dec_dirty_inode(F2FS_I_SB(inode), DIRTY_META);
	spin_unlock(&sbi->inode_lock[DIRTY_META]);
}

/*
 * f2fs_dirty_inode() is called from __mark_inode_dirty()
 *
 * We should call set_dirty_inode to write the dirty inode through write_inode.
 */
static void f2fs_dirty_inode(struct inode *inode, int flags)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		return;

	if (flags == I_DIRTY_TIME)
		return;

	if (is_inode_flag_set(inode, FI_AUTO_RECOVER))
		clear_inode_flag(inode, FI_AUTO_RECOVER);

	f2fs_inode_dirtied(inode, false);
}

static void f2fs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode));
}

static void f2fs_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, f2fs_i_callback);
}

static void destroy_percpu_info(struct f2fs_sb_info *sbi)
{
	percpu_counter_destroy(&sbi->alloc_valid_block_count);
	percpu_counter_destroy(&sbi->total_valid_inode_count);
}

static void destroy_device_list(struct f2fs_sb_info *sbi)
{
	int i;

	for (i = 0; i < sbi->s_ndevs; i++) {
		blkdev_put(FDEV(i).bdev, FMODE_EXCL);
#ifdef CONFIG_BLK_DEV_ZONED
		kfree(FDEV(i).blkz_type);
#endif
	}
	kfree(sbi->devs);
}

static void f2fs_quota_off_umount(struct super_block *sb);
static void f2fs_put_super(struct super_block *sb)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int i;

	f2fs_quota_off_umount(sb);

	/* prevent remaining shrinker jobs */
	mutex_lock(&sbi->umount_mutex);

	/*
	 * We don't need to do checkpoint when superblock is clean.
	 * But if the previous checkpoint was not done by umount, we need to
	 * do a clean checkpoint again.
	 */
	if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
			!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
		struct cp_control cpc = {
			.reason = CP_UMOUNT,
		};
		write_checkpoint(sbi, &cpc);
	}

	/* be sure to wait for any on-going discard commands */
	f2fs_wait_discard_bios(sbi);

	if (f2fs_discard_en(sbi) && !sbi->discard_blks) {
		struct cp_control cpc = {
			.reason = CP_UMOUNT | CP_TRIMMED,
		};
		write_checkpoint(sbi, &cpc);
	}

	/* write_checkpoint can update stat information */
	f2fs_destroy_stats(sbi);

	/*
	 * normally the superblock is clean, so we need to release this.
	 * In addition, EIO will skip the checkpoint, so we need this as well.
	 */
	release_ino_entry(sbi, true);

	f2fs_leave_shrinker(sbi);
	mutex_unlock(&sbi->umount_mutex);

	/* our cp_error case, we can wait for any writeback page */
	f2fs_flush_merged_writes(sbi);

	iput(sbi->node_inode);
	iput(sbi->meta_inode);

	/* destroy f2fs internal modules */
	destroy_node_manager(sbi);
	destroy_segment_manager(sbi);

	kfree(sbi->ckpt);

	f2fs_unregister_sysfs(sbi);

	sb->s_fs_info = NULL;
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi->raw_super);

	destroy_device_list(sbi);
	mempool_destroy(sbi->write_io_dummy);
	destroy_percpu_info(sbi);
	for (i = 0; i < NR_PAGE_TYPE; i++)
		kfree(sbi->write_io[i]);
	kfree(sbi);
}

int f2fs_sync_fs(struct super_block *sb, int sync)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int err = 0;

	trace_f2fs_sync_fs(sb, sync);

	if (sync) {
		struct cp_control cpc;

		cpc.reason = __get_cp_reason(sbi);

		mutex_lock(&sbi->gc_mutex);
		err = write_checkpoint(sbi, &cpc);
		mutex_unlock(&sbi->gc_mutex);
	}
	f2fs_trace_ios(NULL, 1);

	return err;
}

static int f2fs_freeze(struct super_block *sb)
{
	if (f2fs_readonly(sb))
		return 0;

	/* IO error happened before */
	if (unlikely(f2fs_cp_error(F2FS_SB(sb))))
		return -EIO;

	/* must be clean, since sync_filesystem() was already called */
	if (is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY))
		return -EINVAL;
	return 0;
}

static int f2fs_unfreeze(struct super_block *sb)
{
	return 0;
}

#ifdef CONFIG_QUOTA
static int f2fs_statfs_project(struct super_block *sb,
				kprojid_t projid, struct kstatfs *buf)
{
	struct kqid qid;
	struct dquot *dquot;
	u64 limit;
	u64 curblock;

	qid = make_kqid_projid(projid);
	dquot = dqget(sb, qid);
	if (IS_ERR(dquot))
		return PTR_ERR(dquot);
	spin_lock(&dq_data_lock);

	limit = (dquot->dq_dqb.dqb_bsoftlimit ?
		 dquot->dq_dqb.dqb_bsoftlimit :
		 dquot->dq_dqb.dqb_bhardlimit) >> sb->s_blocksize_bits;
	if (limit && buf->f_blocks > limit) {
		curblock = dquot->dq_dqb.dqb_curspace >> sb->s_blocksize_bits;
		buf->f_blocks = limit;
		buf->f_bfree = buf->f_bavail =
			(buf->f_blocks > curblock) ?
			 (buf->f_blocks - curblock) : 0;
	}

	limit = dquot->dq_dqb.dqb_isoftlimit ?
		dquot->dq_dqb.dqb_isoftlimit :
		dquot->dq_dqb.dqb_ihardlimit;
	if (limit && buf->f_files > limit) {
		buf->f_files = limit;
		buf->f_ffree =
			(buf->f_files > dquot->dq_dqb.dqb_curinodes) ?
			 (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;
	}

	spin_unlock(&dq_data_lock);
	dqput(dquot);
	return 0;
}
#endif

static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
	block_t total_count, user_block_count, start_count, ovp_count;
	u64 avail_node_count;

	total_count = le64_to_cpu(sbi->raw_super->block_count);
	user_block_count = sbi->user_block_count;
	start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr);
	ovp_count = SM_I(sbi)->ovp_segments << sbi->log_blocks_per_seg;
	buf->f_type = F2FS_SUPER_MAGIC;
	buf->f_bsize = sbi->blocksize;

	buf->f_blocks = total_count - start_count;
	buf->f_bfree = user_block_count - valid_user_blocks(sbi) + ovp_count;
	buf->f_bavail = user_block_count - valid_user_blocks(sbi) -
						sbi->reserved_blocks;

	avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;

	if (avail_node_count > user_block_count) {
		buf->f_files = user_block_count;
		buf->f_ffree = buf->f_bavail;
	} else {
		buf->f_files = avail_node_count;
		buf->f_ffree = min(avail_node_count - valid_node_count(sbi),
					buf->f_bavail);
	}

	buf->f_namelen = F2FS_NAME_LEN;
	buf->f_fsid.val[0] = (u32)id;
	buf->f_fsid.val[1] = (u32)(id >> 32);

#ifdef CONFIG_QUOTA
	if (is_inode_flag_set(dentry->d_inode, FI_PROJ_INHERIT) &&
			sb_has_quota_limits_enabled(sb, PRJQUOTA)) {
		f2fs_statfs_project(sb, F2FS_I(dentry->d_inode)->i_projid, buf);
	}
#endif
	return 0;
}

static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
{
	struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);

	if (!f2fs_readonly(sbi->sb) && test_opt(sbi, BG_GC)) {
		if (test_opt(sbi, FORCE_FG_GC))
			seq_printf(seq, ",background_gc=%s", "sync");
		else
			seq_printf(seq, ",background_gc=%s", "on");
	} else {
		seq_printf(seq, ",background_gc=%s", "off");
	}
	if (test_opt(sbi, DISABLE_ROLL_FORWARD))
		seq_puts(seq, ",disable_roll_forward");
	if (test_opt(sbi, DISCARD))
		seq_puts(seq, ",discard");
	if (test_opt(sbi, NOHEAP))
		seq_puts(seq, ",no_heap");
	else
		seq_puts(seq, ",heap");
#ifdef CONFIG_F2FS_FS_XATTR
	if (test_opt(sbi, XATTR_USER))
		seq_puts(seq, ",user_xattr");
	else
		seq_puts(seq, ",nouser_xattr");
	if (test_opt(sbi, INLINE_XATTR))
		seq_puts(seq, ",inline_xattr");
	else
		seq_puts(seq, ",noinline_xattr");
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	if (test_opt(sbi, POSIX_ACL))
		seq_puts(seq, ",acl");
	else
		seq_puts(seq, ",noacl");
#endif
	if (test_opt(sbi, DISABLE_EXT_IDENTIFY))
		seq_puts(seq, ",disable_ext_identify");
	if (test_opt(sbi, INLINE_DATA))
		seq_puts(seq, ",inline_data");
	else
		seq_puts(seq, ",noinline_data");
	if (test_opt(sbi, INLINE_DENTRY))
		seq_puts(seq, ",inline_dentry");
	else
		seq_puts(seq, ",noinline_dentry");
	if (!f2fs_readonly(sbi->sb) && test_opt(sbi, FLUSH_MERGE))
		seq_puts(seq, ",flush_merge");
	if (test_opt(sbi, NOBARRIER))
		seq_puts(seq, ",nobarrier");
	if (test_opt(sbi, FASTBOOT))
		seq_puts(seq, ",fastboot");
	if (test_opt(sbi, EXTENT_CACHE))
		seq_puts(seq, ",extent_cache");
	else
		seq_puts(seq, ",noextent_cache");
	if (test_opt(sbi, DATA_FLUSH))
		seq_puts(seq, ",data_flush");

	seq_puts(seq, ",mode=");
	if (test_opt(sbi, ADAPTIVE))
		seq_puts(seq, "adaptive");
	else if (test_opt(sbi, LFS))
		seq_puts(seq, "lfs");
	seq_printf(seq, ",active_logs=%u", sbi->active_logs);
	if (F2FS_IO_SIZE_BITS(sbi))
		seq_printf(seq, ",io_size=%uKB", F2FS_IO_SIZE_KB(sbi));
#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (test_opt(sbi, FAULT_INJECTION))
		seq_printf(seq, ",fault_injection=%u",
				sbi->fault_info.inject_rate);
#endif
#ifdef CONFIG_QUOTA
	if (test_opt(sbi, USRQUOTA))
		seq_puts(seq, ",usrquota");
	if (test_opt(sbi, GRPQUOTA))
		seq_puts(seq, ",grpquota");
	if (test_opt(sbi, PRJQUOTA))
		seq_puts(seq, ",prjquota");
#endif

	return 0;
}

static void default_options(struct f2fs_sb_info *sbi)
{
	/* init some FS parameters */
	sbi->active_logs = NR_CURSEG_TYPE;

	set_opt(sbi, BG_GC);
	set_opt(sbi, INLINE_XATTR);
	set_opt(sbi, INLINE_DATA);
	set_opt(sbi, INLINE_DENTRY);
	set_opt(sbi, EXTENT_CACHE);
	set_opt(sbi, NOHEAP);
	sbi->sb->s_flags |= MS_LAZYTIME;
	set_opt(sbi, FLUSH_MERGE);
	if (f2fs_sb_mounted_blkzoned(sbi->sb)) {
		set_opt_mode(sbi, F2FS_MOUNT_LFS);
		set_opt(sbi, DISCARD);
	} else {
		set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
	}

#ifdef CONFIG_F2FS_FS_XATTR
	set_opt(sbi, XATTR_USER);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	set_opt(sbi, POSIX_ACL);
#endif

#ifdef CONFIG_F2FS_FAULT_INJECTION
	f2fs_build_fault_attr(sbi, 0);
#endif
}

static int f2fs_remount(struct super_block *sb, int *flags, char *data)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct f2fs_mount_info org_mount_opt;
	unsigned long old_sb_flags;
	int err, active_logs;
	bool need_restart_gc = false;
	bool need_stop_gc = false;
	bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE);
#ifdef CONFIG_F2FS_FAULT_INJECTION
	struct f2fs_fault_info ffi = sbi->fault_info;
#endif

	/*
	 * Save the old mount options in case we
	 * need to restore them.
	 */
	org_mount_opt = sbi->mount_opt;
	old_sb_flags = sb->s_flags;
	active_logs = sbi->active_logs;

	/* recover superblocks we couldn't write due to previous RO mount */
	if (!(*flags & MS_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
		err = f2fs_commit_super(sbi, false);
		f2fs_msg(sb, KERN_INFO,
			"Try to recover all the superblocks, ret: %d", err);
		if (!err)
			clear_sbi_flag(sbi, SBI_NEED_SB_WRITE);
	}

	default_options(sbi);

	/* parse mount options */
	err = parse_options(sb, data);
	if (err)
		goto restore_opts;

	/*
	 * Previous and new state of filesystem is RO,
	 * so skip checking GC and FLUSH_MERGE conditions.
	 */
	if (f2fs_readonly(sb) && (*flags & MS_RDONLY))
		goto skip;

	if (!f2fs_readonly(sb) && (*flags & MS_RDONLY)) {
		err = dquot_suspend(sb, -1);
		if (err < 0)
			goto restore_opts;
	} else {
		/* dquot_resume needs RW */
		sb->s_flags &= ~MS_RDONLY;
		dquot_resume(sb, -1);
	}

	/* disallow enable/disable extent_cache dynamically */
	if (no_extent_cache == !!test_opt(sbi, EXTENT_CACHE)) {
		err = -EINVAL;
		f2fs_msg(sbi->sb, KERN_WARNING,
				"switch extent_cache option is not allowed");
		goto restore_opts;
	}

	/*
	 * We stop the GC thread if FS is mounted as RO
	 * or if background_gc = off is passed in mount
	 * option. Also sync the filesystem.
	 */
	if ((*flags & MS_RDONLY) || !test_opt(sbi, BG_GC)) {
		if (sbi->gc_thread) {
			stop_gc_thread(sbi);
			need_restart_gc = true;
		}
	} else if (!sbi->gc_thread) {
		err = start_gc_thread(sbi);
		if (err)
			goto restore_opts;
		need_stop_gc = true;
	}

	if (*flags & MS_RDONLY) {
		writeback_inodes_sb(sb, WB_REASON_SYNC);
		sync_inodes_sb(sb);

		set_sbi_flag(sbi, SBI_IS_DIRTY);
		set_sbi_flag(sbi, SBI_IS_CLOSE);
		f2fs_sync_fs(sb, 1);
		clear_sbi_flag(sbi, SBI_IS_CLOSE);
	}

	/*
	 * We stop issue flush thread if FS is mounted as RO
	 * or if flush_merge is not passed in mount option.
	 */
	if ((*flags & MS_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
		clear_opt(sbi, FLUSH_MERGE);
		destroy_flush_cmd_control(sbi, false);
	} else {
		err = create_flush_cmd_control(sbi);
		if (err)
			goto restore_gc;
	}
skip:
	/* Update the POSIXACL Flag */
	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);

	return 0;
restore_gc:
	if (need_restart_gc) {
		if (start_gc_thread(sbi))
			f2fs_msg(sbi->sb, KERN_WARNING,
				"background gc thread has stopped");
	} else if (need_stop_gc) {
		stop_gc_thread(sbi);
	}
restore_opts:
	sbi->mount_opt = org_mount_opt;
	sbi->active_logs = active_logs;
	sb->s_flags = old_sb_flags;
#ifdef CONFIG_F2FS_FAULT_INJECTION
	sbi->fault_info = ffi;
#endif
	return err;
}

#ifdef CONFIG_QUOTA
/* Read data from quotafile */
static ssize_t f2fs_quota_read(struct super_block *sb, int type, char *data,
			       size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	struct address_space *mapping = inode->i_mapping;
	block_t blkidx = F2FS_BYTES_TO_BLK(off);
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t toread;
	loff_t i_size = i_size_read(inode);
	struct page *page;
	char *kaddr;

	if (off > i_size)
		return 0;

	if (off + len > i_size)
		len = i_size - off;
	toread = len;
	while (toread > 0) {
		tocopy = min_t(unsigned long, sb->s_blocksize - offset, toread);
repeat:
		page = read_mapping_page(mapping, blkidx, NULL);
		if (IS_ERR(page))
			return PTR_ERR(page);

		lock_page(page);

		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			return -EIO;
		}

		kaddr = kmap_atomic(page);
		memcpy(data, kaddr + offset, tocopy);
		kunmap_atomic(kaddr);
		f2fs_put_page(page, 1);

		offset = 0;
		toread -= tocopy;
		data += tocopy;
		blkidx++;
	}
	return len;
}

/* Write to quotafile */
static ssize_t f2fs_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	struct address_space *mapping = inode->i_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	int offset = off & (sb->s_blocksize - 1);
	size_t towrite = len;
	struct page *page;
	char *kaddr;
	int err = 0;
	int tocopy;

	while (towrite > 0) {
		tocopy = min_t(unsigned long, sb->s_blocksize - offset,
								towrite);

		err = a_ops->write_begin(NULL, mapping, off, tocopy, 0,
							&page, NULL);
		if (unlikely(err))
			break;

		kaddr = kmap_atomic(page);
		memcpy(kaddr + offset, data, tocopy);
		kunmap_atomic(kaddr);
		flush_dcache_page(page);

		a_ops->write_end(NULL, mapping, off, tocopy, tocopy,
						page, NULL);
		offset = 0;
		towrite -= tocopy;
		off += tocopy;
		data += tocopy;
		cond_resched();
	}

	if (len == towrite)
		return 0;
	inode->i_version++;
	inode->i_mtime = inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return len - towrite;
}

static struct dquot **f2fs_get_dquots(struct inode *inode)
{
	return F2FS_I(inode)->i_dquot;
}

static qsize_t *f2fs_get_reserved_space(struct inode *inode)
{
	return &F2FS_I(inode)->i_reserved_quota;
}

static int f2fs_quota_sync(struct super_block *sb, int type)
{
	struct quota_info *dqopt = sb_dqopt(sb);
	int cnt;
	int ret;

	ret = dquot_writeback_dquots(sb, type);
	if (ret)
		return ret;

	/*
	 * Now when everything is written we can discard the pagecache so
	 * that userspace sees the changes.
	 */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_active(sb, cnt))
			continue;

		ret = filemap_write_and_wait(dqopt->files[cnt]->i_mapping);
		if (ret)
			return ret;

		inode_lock(dqopt->files[cnt]);
		truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
		inode_unlock(dqopt->files[cnt]);
	}
	return 0;
}

static int f2fs_quota_on(struct super_block *sb, int type, int format_id,
							struct path *path)
{
	struct inode *inode;
	int err;

	err = f2fs_quota_sync(sb, type);
	if (err)
		return err;

	err = dquot_quota_on(sb, type, format_id, path);
	if (err)
		return err;

	inode = d_inode(path->dentry);

	inode_lock(inode);
	F2FS_I(inode)->i_flags |= FS_NOATIME_FL | FS_IMMUTABLE_FL;
	inode_set_flags(inode, S_NOATIME | S_IMMUTABLE,
					S_NOATIME | S_IMMUTABLE);
	inode_unlock(inode);
	f2fs_mark_inode_dirty_sync(inode, false);

	return 0;
}

static int f2fs_quota_off(struct super_block *sb, int type)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	int err;

	if (!inode || !igrab(inode))
		return dquot_quota_off(sb, type);

	f2fs_quota_sync(sb, type);

	err = dquot_quota_off(sb, type);
	if (err)
		goto out_put;

	inode_lock(inode);
	F2FS_I(inode)->i_flags &= ~(FS_NOATIME_FL | FS_IMMUTABLE_FL);
	inode_set_flags(inode, 0, S_NOATIME | S_IMMUTABLE);
	inode_unlock(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
out_put:
	iput(inode);
	return err;
}

static void f2fs_quota_off_umount(struct super_block *sb)
{
	int type;

	for (type = 0; type < MAXQUOTAS; type++)
		f2fs_quota_off(sb, type);
}

int f2fs_get_projid(struct inode *inode, kprojid_t *projid)
{
	*projid = F2FS_I(inode)->i_projid;
	return 0;
}

static const struct dquot_operations f2fs_quota_operations = {
	.get_reserved_space = f2fs_get_reserved_space,
	.write_dquot	= dquot_commit,
	.acquire_dquot	= dquot_acquire,
	.release_dquot	= dquot_release,
	.mark_dirty	= dquot_mark_dquot_dirty,
	.write_info	= dquot_commit_info,
	.alloc_dquot	= dquot_alloc,
	.destroy_dquot	= dquot_destroy,
	.get_projid	= f2fs_get_projid,
	.get_next_id	= dquot_get_next_id,
};

static const struct quotactl_ops f2fs_quotactl_ops = {
	.quota_on	= f2fs_quota_on,
	.quota_off	= f2fs_quota_off,
	.quota_sync	= f2fs_quota_sync,
	.get_state	= dquot_get_state,
	.set_info	= dquot_set_dqinfo,
	.get_dqblk	= dquot_get_dqblk,
	.set_dqblk	= dquot_set_dqblk,
	.get_nextdqblk	= dquot_get_next_dqblk,
};
#else
static inline void f2fs_quota_off_umount(struct super_block *sb)
{
}
#endif

static struct super_operations f2fs_sops = {
	.alloc_inode	= f2fs_alloc_inode,
	.drop_inode	= f2fs_drop_inode,
	.destroy_inode	= f2fs_destroy_inode,
	.write_inode	= f2fs_write_inode,
	.dirty_inode	= f2fs_dirty_inode,
	.show_options	= f2fs_show_options,
#ifdef CONFIG_QUOTA
	.quota_read	= f2fs_quota_read,
	.quota_write	= f2fs_quota_write,
	.get_dquots	= f2fs_get_dquots,
#endif
	.evict_inode	= f2fs_evict_inode,
	.put_super	= f2fs_put_super,
	.sync_fs	= f2fs_sync_fs,
	.freeze_fs	= f2fs_freeze,
	.unfreeze_fs	= f2fs_unfreeze,
	.statfs		= f2fs_statfs,
	.remount_fs	= f2fs_remount,
};

#ifdef CONFIG_F2FS_FS_ENCRYPTION
static int f2fs_get_context(struct inode *inode, void *ctx, size_t len)
{
	return f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
				F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
				ctx, len, NULL);
}

static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len,
							void *fs_data)
{
	return f2fs_setxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
				F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
				ctx, len, fs_data, XATTR_CREATE);
}

static unsigned f2fs_max_namelen(struct inode *inode)
{
	return S_ISLNK(inode->i_mode) ?
			inode->i_sb->s_blocksize : F2FS_NAME_LEN;
}

static const struct fscrypt_operations f2fs_cryptops = {
	.key_prefix	= "f2fs:",
	.get_context	= f2fs_get_context,
	.set_context	= f2fs_set_context,
	.is_encrypted	= f2fs_encrypted_inode,
	.empty_dir	= f2fs_empty_dir,
	.max_namelen	= f2fs_max_namelen,
};
#else
static const struct fscrypt_operations f2fs_cryptops = {
	.is_encrypted	= f2fs_encrypted_inode,
};
#endif

static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
		u64 ino, u32 generation)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;

	if (check_nid_range(sbi, ino))
		return ERR_PTR(-ESTALE);

	/*
	 * f2fs_iget isn't quite right if the inode is currently unallocated!
	 * However f2fs_iget currently does appropriate checks to handle stale
	 * inodes so everything is OK.
	 */
	inode = f2fs_iget(sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	if (unlikely(generation && inode->i_generation != generation)) {
		/* we didn't find the right inode.. */
		iput(inode);
		return ERR_PTR(-ESTALE);
	}
	return inode;
}

static struct dentry *f2fs_fh_to_dentry(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				    f2fs_nfs_get_inode);
}

static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
				    f2fs_nfs_get_inode);
}

static const struct export_operations f2fs_export_ops = {
	.fh_to_dentry = f2fs_fh_to_dentry,
	.fh_to_parent = f2fs_fh_to_parent,
	.get_parent = f2fs_get_parent,
};

static loff_t max_file_blocks(void)
{
	loff_t result = 0;
	loff_t leaf_count = ADDRS_PER_BLOCK;

	/*
	 * note: previously, result was equal to (DEF_ADDRS_PER_INODE -
	 * F2FS_INLINE_XATTR_ADDRS), but now f2fs tries to reserve more
	 * space in inode.i_addr, so it is safer to reassign
	 * result as zero.
	 */

	/* two direct node blocks */
	result += (leaf_count * 2);

	/* two indirect node blocks */
	leaf_count *= NIDS_PER_BLOCK;
	result += (leaf_count * 2);

	/* one double indirect node block */
	leaf_count *= NIDS_PER_BLOCK;
	result += leaf_count;

	return result;
}

static int __f2fs_commit_super(struct buffer_head *bh,
			struct f2fs_super_block *super)
{
	lock_buffer(bh);
	if (super)
		memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
	set_buffer_uptodate(bh);
	set_buffer_dirty(bh);
	unlock_buffer(bh);

	/* it's rare case, we can do fua all the time */
	return __sync_dirty_buffer(bh, REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
}

static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
					struct buffer_head *bh)
{
	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
					(bh->b_data + F2FS_SUPER_OFFSET);
	struct super_block *sb = sbi->sb;
	u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
	u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
	u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
	u32 nat_blkaddr = le32_to_cpu(raw_super->nat_blkaddr);
	u32 ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
	u32 main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
	u32 segment_count_ckpt = le32_to_cpu(raw_super->segment_count_ckpt);
	u32 segment_count_sit = le32_to_cpu(raw_super->segment_count_sit);
	u32 segment_count_nat = le32_to_cpu(raw_super->segment_count_nat);
	u32 segment_count_ssa = le32_to_cpu(raw_super->segment_count_ssa);
	u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
	u32 segment_count = le32_to_cpu(raw_super->segment_count);
	u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	u64 main_end_blkaddr = main_blkaddr +
				(segment_count_main << log_blocks_per_seg);
	u64 seg_end_blkaddr = segment0_blkaddr +
				(segment_count << log_blocks_per_seg);

	if (segment0_blkaddr != cp_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Mismatch start address, segment0(%u) cp_blkaddr(%u)",
			segment0_blkaddr, cp_blkaddr);
		return true;
	}

	if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) !=
							sit_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong CP boundary, start(%u) end(%u) blocks(%u)",
			cp_blkaddr, sit_blkaddr,
			segment_count_ckpt << log_blocks_per_seg);
		return true;
	}

	if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) !=
							nat_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong SIT boundary, start(%u) end(%u) blocks(%u)",
			sit_blkaddr, nat_blkaddr,
			segment_count_sit << log_blocks_per_seg);
		return true;
	}

	if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) !=
							ssa_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong NAT boundary, start(%u) end(%u) blocks(%u)",
			nat_blkaddr, ssa_blkaddr,
			segment_count_nat << log_blocks_per_seg);
		return true;
	}

	if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) !=
							main_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong SSA boundary, start(%u) end(%u) blocks(%u)",
			ssa_blkaddr, main_blkaddr,
			segment_count_ssa << log_blocks_per_seg);
		return true;
	}

	if (main_end_blkaddr > seg_end_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong MAIN_AREA boundary, start(%u) end(%u) block(%u)",
			main_blkaddr,
			segment0_blkaddr +
				(segment_count << log_blocks_per_seg),
			segment_count_main << log_blocks_per_seg);
		return true;
	} else if (main_end_blkaddr < seg_end_blkaddr) {
1484 int err = 0;
1485 char *res;
Chao Yu9a59b622015-12-15 09:58:18 +08001486
Jaegeuk Kimfd694732016-03-20 15:33:20 -07001487 /* fix in-memory information all the time */
1488 raw_super->segment_count = cpu_to_le32((main_end_blkaddr -
1489 segment0_blkaddr) >> log_blocks_per_seg);
1490
1491 if (f2fs_readonly(sb) || bdev_read_only(sb->s_bdev)) {
Jaegeuk Kimdf728b02016-03-23 17:05:27 -07001492 set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
Jaegeuk Kimfd694732016-03-20 15:33:20 -07001493 res = "internally";
1494 } else {
1495 err = __f2fs_commit_super(bh, NULL);
1496 res = err ? "failed" : "done";
1497 }
1498 f2fs_msg(sb, KERN_INFO,
1499 "Fix alignment : %s, start(%u) end(%u) block(%u)",
1500 res, main_blkaddr,
1501 segment0_blkaddr +
1502 (segment_count << log_blocks_per_seg),
1503 segment_count_main << log_blocks_per_seg);
1504 if (err)
1505 return true;
1506 }
Chao Yu9a59b622015-12-15 09:58:18 +08001507 return false;
1508}
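/*
 * The layout enforced above, in one picture (addresses increase to the
 * right, and each area must start exactly where the previous one ends):
 *
 *	segment0_blkaddr == cp_blkaddr
 *	[ CP ][ SIT ][ NAT ][ SSA ][           MAIN           ]
 *	                                                       ^
 *	             must not pass segment0 + segment_count blocks
 *
 * A MAIN area that ends early is tolerated: segment_count is shrunk in
 * memory, and rewritten on disk when the device is writable.
 */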
1509
Jaegeuk Kimdf728b02016-03-23 17:05:27 -07001510static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
Jaegeuk Kimfd694732016-03-20 15:33:20 -07001511 struct buffer_head *bh)
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09001512{
Jaegeuk Kimfd694732016-03-20 15:33:20 -07001513 struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
1514 (bh->b_data + F2FS_SUPER_OFFSET);
Jaegeuk Kimdf728b02016-03-23 17:05:27 -07001515 struct super_block *sb = sbi->sb;
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09001516 unsigned int blocksize;
1517
Namjae Jeona07ef782012-12-30 14:52:05 +09001518 if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
1519 f2fs_msg(sb, KERN_INFO,
1520 "Magic Mismatch, valid(0x%x) - read(0x%x)",
1521 F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09001522 return 1;
Namjae Jeona07ef782012-12-30 14:52:05 +09001523 }
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09001524
majianpeng5c9b4692013-02-01 19:07:57 +08001525 /* Currently, support only 4KB page cache size */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001526 if (F2FS_BLKSIZE != PAGE_SIZE) {
majianpeng5c9b4692013-02-01 19:07:57 +08001527 f2fs_msg(sb, KERN_INFO,
majianpeng14d7e9d2013-02-01 19:07:03 +08001528			"Invalid page_cache_size (%lu), supports only 4KB",
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001529 PAGE_SIZE);
majianpeng5c9b4692013-02-01 19:07:57 +08001530 return 1;
1531 }
1532
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09001533 /* Currently, support only 4KB block size */
1534 blocksize = 1 << le32_to_cpu(raw_super->log_blocksize);
majianpeng5c9b4692013-02-01 19:07:57 +08001535 if (blocksize != F2FS_BLKSIZE) {
Namjae Jeona07ef782012-12-30 14:52:05 +09001536 f2fs_msg(sb, KERN_INFO,
1537			"Invalid blocksize (%u), supports only 4KB",
1538 blocksize);
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09001539 return 1;
Namjae Jeona07ef782012-12-30 14:52:05 +09001540 }
majianpeng5c9b4692013-02-01 19:07:57 +08001541
Chao Yu9a59b622015-12-15 09:58:18 +08001542 /* check log blocks per segment */
1543 if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) {
1544 f2fs_msg(sb, KERN_INFO,
1545			"Invalid log blocks per segment (%u)",
1546 le32_to_cpu(raw_super->log_blocks_per_seg));
1547 return 1;
1548 }
1549
Chao Yu55cf9cb2014-09-15 18:01:10 +08001550 /* Currently, support 512/1024/2048/4096 bytes sector size */
1551 if (le32_to_cpu(raw_super->log_sectorsize) >
1552 F2FS_MAX_LOG_SECTOR_SIZE ||
1553 le32_to_cpu(raw_super->log_sectorsize) <
1554 F2FS_MIN_LOG_SECTOR_SIZE) {
1555 f2fs_msg(sb, KERN_INFO, "Invalid log sectorsize (%u)",
1556 le32_to_cpu(raw_super->log_sectorsize));
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09001557 return 1;
Namjae Jeona07ef782012-12-30 14:52:05 +09001558 }
Chao Yu55cf9cb2014-09-15 18:01:10 +08001559 if (le32_to_cpu(raw_super->log_sectors_per_block) +
1560 le32_to_cpu(raw_super->log_sectorsize) !=
1561 F2FS_MAX_LOG_SECTOR_SIZE) {
1562 f2fs_msg(sb, KERN_INFO,
1563 "Invalid log sectors per block(%u) log sectorsize(%u)",
1564 le32_to_cpu(raw_super->log_sectors_per_block),
1565 le32_to_cpu(raw_super->log_sectorsize));
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09001566 return 1;
Namjae Jeona07ef782012-12-30 14:52:05 +09001567 }
Chao Yu9a59b622015-12-15 09:58:18 +08001568
1569 /* check reserved ino info */
1570 if (le32_to_cpu(raw_super->node_ino) != 1 ||
1571 le32_to_cpu(raw_super->meta_ino) != 2 ||
1572 le32_to_cpu(raw_super->root_ino) != 3) {
1573 f2fs_msg(sb, KERN_INFO,
1574 "Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)",
1575 le32_to_cpu(raw_super->node_ino),
1576 le32_to_cpu(raw_super->meta_ino),
1577 le32_to_cpu(raw_super->root_ino));
1578 return 1;
1579 }
1580
Jin Qian93862952017-04-25 16:28:48 -07001581 if (le32_to_cpu(raw_super->segment_count) > F2FS_MAX_SEGMENT) {
1582 f2fs_msg(sb, KERN_INFO,
1583 "Invalid segment count (%u)",
1584 le32_to_cpu(raw_super->segment_count));
1585 return 1;
1586 }
1587
Chao Yu9a59b622015-12-15 09:58:18 +08001588 /* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
Jaegeuk Kimdf728b02016-03-23 17:05:27 -07001589 if (sanity_check_area_boundary(sbi, bh))
Chao Yu9a59b622015-12-15 09:58:18 +08001590 return 1;
1591
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09001592 return 0;
1593}
1594
Shawn Lin984ec632016-02-17 11:26:32 +08001595int sanity_check_ckpt(struct f2fs_sb_info *sbi)
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09001596{
1597 unsigned int total, fsmeta;
Jaegeuk Kim577e3492013-01-24 19:56:11 +09001598 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
1599 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07001600 unsigned int ovp_segments, reserved_segments;
Jin Qiand90659f2017-05-15 10:45:08 -07001601 unsigned int main_segs, blocks_per_seg;
1602 int i;
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09001603
1604 total = le32_to_cpu(raw_super->segment_count);
1605 fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
1606 fsmeta += le32_to_cpu(raw_super->segment_count_sit);
1607 fsmeta += le32_to_cpu(raw_super->segment_count_nat);
1608 fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
1609 fsmeta += le32_to_cpu(raw_super->segment_count_ssa);
1610
Jaegeuk Kim6bacf522013-12-06 15:00:58 +09001611 if (unlikely(fsmeta >= total))
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09001612 return 1;
Jaegeuk Kim577e3492013-01-24 19:56:11 +09001613
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07001614 ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
1615 reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
1616
1617 if (unlikely(fsmeta < F2FS_MIN_SEGMENTS ||
1618 ovp_segments == 0 || reserved_segments == 0)) {
1619 f2fs_msg(sbi->sb, KERN_ERR,
1620 "Wrong layout: check mkfs.f2fs version");
1621 return 1;
1622 }
1623
Jin Qiand90659f2017-05-15 10:45:08 -07001624 main_segs = le32_to_cpu(raw_super->segment_count_main);
1625 blocks_per_seg = sbi->blocks_per_seg;
1626
1627 for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
1628 if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
1629 le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg)
1630 return 1;
1631 }
1632 for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
1633 if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs ||
1634 le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg)
1635 return 1;
1636 }
1637
Jaegeuk Kim1e968fd2014-08-11 16:49:25 -07001638 if (unlikely(f2fs_cp_error(sbi))) {
Jaegeuk Kim577e3492013-01-24 19:56:11 +09001639 f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
1640 return 1;
1641 }
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09001642 return 0;
1643}
1644
1645static void init_sb_info(struct f2fs_sb_info *sbi)
1646{
1647 struct f2fs_super_block *raw_super = sbi->raw_super;
Chao Yu06504682017-05-19 23:37:00 +08001648 int i, j;
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09001649
1650 sbi->log_sectors_per_block =
1651 le32_to_cpu(raw_super->log_sectors_per_block);
1652 sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize);
1653 sbi->blocksize = 1 << sbi->log_blocksize;
1654 sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
1655 sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg;
1656 sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
1657 sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
1658 sbi->total_sections = le32_to_cpu(raw_super->section_count);
1659 sbi->total_node_count =
1660 (le32_to_cpu(raw_super->segment_count_nat) / 2)
1661 * sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
1662 sbi->root_ino_num = le32_to_cpu(raw_super->root_ino);
1663 sbi->node_ino_num = le32_to_cpu(raw_super->node_ino);
1664 sbi->meta_ino_num = le32_to_cpu(raw_super->meta_ino);
Jaegeuk Kim5ec4e492013-03-31 13:26:03 +09001665 sbi->cur_victim_sec = NULL_SECNO;
Jaegeuk Kimb1c57c12014-01-08 13:45:08 +09001666 sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09001667
Jaegeuk Kimab9fa662014-02-27 20:09:05 +09001668 sbi->dir_level = DEF_DIR_LEVEL;
Jaegeuk Kim6beceb52016-01-08 15:51:50 -08001669 sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL;
Jaegeuk Kimd0239e12016-01-08 16:57:48 -08001670 sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL;
Chao Yucaf00472015-01-28 17:48:42 +08001671 clear_sbi_flag(sbi, SBI_NEED_FSCK);
Jaegeuk Kim2658e502015-06-19 12:01:21 -07001672
Jaegeuk Kim725ba1a2016-10-20 19:09:57 -07001673 for (i = 0; i < NR_COUNT_TYPE; i++)
1674 atomic_set(&sbi->nr_pages[i], 0);
1675
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07001676 atomic_set(&sbi->wb_sync_req, 0);
1677
Jaegeuk Kim2658e502015-06-19 12:01:21 -07001678 INIT_LIST_HEAD(&sbi->s_list);
1679 mutex_init(&sbi->umount_mutex);
Chao Yu06504682017-05-19 23:37:00 +08001680 for (i = 0; i < NR_PAGE_TYPE - 1; i++)
1681 for (j = HOT; j < NR_TEMP_TYPE; j++)
1682 mutex_init(&sbi->wio_mutex[i][j]);
Chao Yuaaec2b12016-09-20 11:04:18 +08001683 spin_lock_init(&sbi->cp_lock);
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09001684}
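/*
 * Worked example for the total_node_count computation above (assuming
 * 4KB blocks, 512 blocks per segment, and the 9-byte f2fs_nat_entry,
 * i.e. NAT_ENTRY_PER_BLOCK == 455): the NAT area holds two copies of
 * every NAT block, so each pair of NAT segments maps
 * 512 * 455 = 232,960 node IDs, and total_node_count grows linearly
 * with segment_count_nat / 2.
 */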
1685
Jaegeuk Kim523be8a2016-05-13 12:36:58 -07001686static int init_percpu_info(struct f2fs_sb_info *sbi)
1687{
Jaegeuk Kim725ba1a2016-10-20 19:09:57 -07001688 int err;
Jaegeuk Kim41382ec2016-05-16 11:06:50 -07001689
Jaegeuk Kim513c5f32016-05-16 11:42:32 -07001690 err = percpu_counter_init(&sbi->alloc_valid_block_count, 0, GFP_KERNEL);
1691 if (err)
1692 return err;
1693
1694 return percpu_counter_init(&sbi->total_valid_inode_count, 0,
Jaegeuk Kim41382ec2016-05-16 11:06:50 -07001695 GFP_KERNEL);
Jaegeuk Kim523be8a2016-05-13 12:36:58 -07001696}
1697
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07001698#ifdef CONFIG_BLK_DEV_ZONED
1699static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
1700{
1701 struct block_device *bdev = FDEV(devi).bdev;
1702 sector_t nr_sectors = bdev->bd_part->nr_sects;
1703 sector_t sector = 0;
1704 struct blk_zone *zones;
1705 unsigned int i, nr_zones;
1706 unsigned int n = 0;
1707 int err = -EIO;
1708
1709 if (!f2fs_sb_mounted_blkzoned(sbi->sb))
1710 return 0;
1711
1712 if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
1713 SECTOR_TO_BLOCK(bdev_zone_sectors(bdev)))
1714 return -EINVAL;
1715 sbi->blocks_per_blkz = SECTOR_TO_BLOCK(bdev_zone_sectors(bdev));
1716 if (sbi->log_blocks_per_blkz && sbi->log_blocks_per_blkz !=
1717 __ilog2_u32(sbi->blocks_per_blkz))
1718 return -EINVAL;
1719 sbi->log_blocks_per_blkz = __ilog2_u32(sbi->blocks_per_blkz);
1720 FDEV(devi).nr_blkz = SECTOR_TO_BLOCK(nr_sectors) >>
1721 sbi->log_blocks_per_blkz;
1722 if (nr_sectors & (bdev_zone_sectors(bdev) - 1))
1723 FDEV(devi).nr_blkz++;
1724
1725 FDEV(devi).blkz_type = kmalloc(FDEV(devi).nr_blkz, GFP_KERNEL);
1726 if (!FDEV(devi).blkz_type)
1727 return -ENOMEM;
1728
1729#define F2FS_REPORT_NR_ZONES 4096
1730
1731 zones = kcalloc(F2FS_REPORT_NR_ZONES, sizeof(struct blk_zone),
1732 GFP_KERNEL);
1733 if (!zones)
1734 return -ENOMEM;
1735
1736 /* Get block zones type */
1737 while (zones && sector < nr_sectors) {
1738
1739 nr_zones = F2FS_REPORT_NR_ZONES;
1740 err = blkdev_report_zones(bdev, sector,
1741 zones, &nr_zones,
1742 GFP_KERNEL);
1743 if (err)
1744 break;
1745 if (!nr_zones) {
1746 err = -EIO;
1747 break;
1748 }
1749
1750 for (i = 0; i < nr_zones; i++) {
1751 FDEV(devi).blkz_type[n] = zones[i].type;
1752 sector += zones[i].len;
1753 n++;
1754 }
1755 }
1756
1757 kfree(zones);
1758
1759 return err;
1760}
1761#endif
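/*
 * Example with a hypothetical host-managed drive: 256 MiB zones and 4KB
 * blocks give blocks_per_blkz = 65536 and log_blocks_per_blkz = 16, so
 * nr_blkz is the device size in blocks shifted right by 16, and
 * blkz_type ends up holding one byte of zone type (e.g. conventional or
 * sequential-write-required) per zone, filled in 4096-zone batches by
 * blkdev_report_zones().
 */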
1762
Gu Zheng9076a752013-10-14 18:47:11 +08001763/*
1764 * Read the f2fs raw super block.
Shawn Lin2b39e902016-02-17 08:59:01 +08001765 * Because we keep two copies of the super block, read both of them
1766 * to get the first valid one. If either copy is broken, we pass a
1767 * recovery flag back to the caller.
Gu Zheng9076a752013-10-14 18:47:11 +08001768 */
Jaegeuk Kimdf728b02016-03-23 17:05:27 -07001769static int read_raw_super_block(struct f2fs_sb_info *sbi,
Gu Zheng9076a752013-10-14 18:47:11 +08001770 struct f2fs_super_block **raw_super,
Chao Yue8240f62015-12-15 17:19:26 +08001771 int *valid_super_block, int *recovery)
majianpeng14d7e9d2013-02-01 19:07:03 +08001772{
Jaegeuk Kimdf728b02016-03-23 17:05:27 -07001773 struct super_block *sb = sbi->sb;
Shawn Lin2b39e902016-02-17 08:59:01 +08001774 int block;
Chao Yue8240f62015-12-15 17:19:26 +08001775 struct buffer_head *bh;
Jaegeuk Kimfd694732016-03-20 15:33:20 -07001776 struct f2fs_super_block *super;
hujianyangda554e42015-05-21 14:42:53 +08001777 int err = 0;
majianpeng14d7e9d2013-02-01 19:07:03 +08001778
Yunlei Heb39f0de2015-12-15 17:17:20 +08001779 super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL);
1780 if (!super)
1781 return -ENOMEM;
Shawn Lin2b39e902016-02-17 08:59:01 +08001782
1783 for (block = 0; block < 2; block++) {
1784 bh = sb_bread(sb, block);
1785 if (!bh) {
1786 f2fs_msg(sb, KERN_ERR, "Unable to read %dth superblock",
Gu Zheng9076a752013-10-14 18:47:11 +08001787 block + 1);
Shawn Lin2b39e902016-02-17 08:59:01 +08001788 err = -EIO;
1789 continue;
1790 }
majianpeng14d7e9d2013-02-01 19:07:03 +08001791
Shawn Lin2b39e902016-02-17 08:59:01 +08001792 /* sanity checking of raw super */
Jaegeuk Kimdf728b02016-03-23 17:05:27 -07001793 if (sanity_check_raw_super(sbi, bh)) {
Shawn Lin2b39e902016-02-17 08:59:01 +08001794 f2fs_msg(sb, KERN_ERR,
1795 "Can't find valid F2FS filesystem in %dth superblock",
1796 block + 1);
1797 err = -EINVAL;
1798 brelse(bh);
1799 continue;
1800 }
1801
1802 if (!*raw_super) {
Jaegeuk Kimfd694732016-03-20 15:33:20 -07001803 memcpy(super, bh->b_data + F2FS_SUPER_OFFSET,
1804 sizeof(*super));
Shawn Lin2b39e902016-02-17 08:59:01 +08001805 *valid_super_block = block;
1806 *raw_super = super;
1807 }
Chao Yue8240f62015-12-15 17:19:26 +08001808 brelse(bh);
Shawn Lin2b39e902016-02-17 08:59:01 +08001809 }
1810
1811	/* Failed to read or validate at least one of the superblocks */
1812 if (err < 0)
hujianyangda554e42015-05-21 14:42:53 +08001813 *recovery = 1;
hujianyangda554e42015-05-21 14:42:53 +08001814
hujianyangda554e42015-05-21 14:42:53 +08001815 /* No valid superblock */
Shawn Lin2b39e902016-02-17 08:59:01 +08001816 if (!*raw_super)
Yunlei Heb39f0de2015-12-15 17:17:20 +08001817 kfree(super);
Shawn Lin2b39e902016-02-17 08:59:01 +08001818 else
1819 err = 0;
hujianyangda554e42015-05-21 14:42:53 +08001820
Shawn Lin2b39e902016-02-17 08:59:01 +08001821 return err;
majianpeng14d7e9d2013-02-01 19:07:03 +08001822}
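/*
 * Layout note (assuming the usual F2FS_SUPER_OFFSET of 1024 bytes): the
 * two superblock copies live in device blocks 0 and 1, each stored 1KB
 * into its 4KB block, which is why the loop above just reads blocks 0
 * and 1 with sb_bread() and copies from b_data + F2FS_SUPER_OFFSET.
 */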
1823
Jaegeuk Kimfd694732016-03-20 15:33:20 -07001824int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
Jaegeuk Kim26d815ad2015-04-20 18:49:51 -07001825{
Jaegeuk Kim5d909cd2015-12-07 10:16:58 -08001826 struct buffer_head *bh;
Jaegeuk Kim26d815ad2015-04-20 18:49:51 -07001827 int err;
1828
Jaegeuk Kimdf728b02016-03-23 17:05:27 -07001829 if ((recover && f2fs_readonly(sbi->sb)) ||
1830 bdev_read_only(sbi->sb->s_bdev)) {
1831 set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
Jaegeuk Kimf2353d72016-03-23 10:42:01 -07001832 return -EROFS;
Jaegeuk Kimdf728b02016-03-23 17:05:27 -07001833 }
Jaegeuk Kimf2353d72016-03-23 10:42:01 -07001834
Jaegeuk Kimfd694732016-03-20 15:33:20 -07001835 /* write back-up superblock first */
1836 bh = sb_getblk(sbi->sb, sbi->valid_super_block ? 0: 1);
Jaegeuk Kim5d909cd2015-12-07 10:16:58 -08001837 if (!bh)
1838 return -EIO;
Jaegeuk Kimfd694732016-03-20 15:33:20 -07001839 err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
Jaegeuk Kim5d909cd2015-12-07 10:16:58 -08001840 brelse(bh);
Chao Yuc5bda1c2015-06-08 13:28:03 +08001841
1842 /* if we are in recovery path, skip writing valid superblock */
1843 if (recover || err)
Jaegeuk Kim5d909cd2015-12-07 10:16:58 -08001844 return err;
Jaegeuk Kim26d815ad2015-04-20 18:49:51 -07001845
Chao Yue8240f62015-12-15 17:19:26 +08001846 /* write current valid superblock */
Jaegeuk Kimfd694732016-03-20 15:33:20 -07001847 bh = sb_getblk(sbi->sb, sbi->valid_super_block);
1848 if (!bh)
1849 return -EIO;
1850 err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
1851 brelse(bh);
1852 return err;
Jaegeuk Kim26d815ad2015-04-20 18:49:51 -07001853}
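/*
 * Usage sketch, mirroring the recovery path in f2fs_fill_super() later
 * in this file (not additional upstream code): after a mount that had to
 * fall back to the second superblock copy,
 *
 *	err = f2fs_commit_super(sbi, true);
 *
 * rewrites only the broken copy from the valid in-memory superblock and
 * leaves the copy that was read successfully untouched.
 */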
1854
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07001855static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
1856{
1857 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
1858 unsigned int max_devices = MAX_DEVICES;
1859 int i;
1860
1861 /* Initialize single device information */
1862 if (!RDEV(0).path[0]) {
1863#ifdef CONFIG_BLK_DEV_ZONED
Jaegeuk Kimdbf05322017-07-10 19:16:28 -07001864 if (!bdev_is_zoned(sbi->sb->s_bdev))
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07001865 return 0;
1866 max_devices = 1;
1867#else
1868 return 0;
1869#endif
1870 }
1871
1872 /*
1873	 * Initialize information for multiple devices, or for a
1874	 * single zoned block device.
1875 */
1876 sbi->devs = kcalloc(max_devices, sizeof(struct f2fs_dev_info),
1877 GFP_KERNEL);
1878 if (!sbi->devs)
1879 return -ENOMEM;
1880
1881 for (i = 0; i < max_devices; i++) {
1882
1883 if (i > 0 && !RDEV(i).path[0])
1884 break;
1885
1886 if (max_devices == 1) {
1887 /* Single zoned block device mount */
1888 FDEV(0).bdev =
1889 blkdev_get_by_dev(sbi->sb->s_bdev->bd_dev,
1890 sbi->sb->s_mode, sbi->sb->s_type);
1891 } else {
1892 /* Multi-device mount */
1893 memcpy(FDEV(i).path, RDEV(i).path, MAX_PATH_LEN);
1894 FDEV(i).total_segments =
1895 le32_to_cpu(RDEV(i).total_segments);
1896 if (i == 0) {
1897 FDEV(i).start_blk = 0;
1898 FDEV(i).end_blk = FDEV(i).start_blk +
1899 (FDEV(i).total_segments <<
1900 sbi->log_blocks_per_seg) - 1 +
1901 le32_to_cpu(raw_super->segment0_blkaddr);
1902 } else {
1903 FDEV(i).start_blk = FDEV(i - 1).end_blk + 1;
1904 FDEV(i).end_blk = FDEV(i).start_blk +
1905 (FDEV(i).total_segments <<
1906 sbi->log_blocks_per_seg) - 1;
1907 }
1908 FDEV(i).bdev = blkdev_get_by_path(FDEV(i).path,
1909 sbi->sb->s_mode, sbi->sb->s_type);
1910 }
1911 if (IS_ERR(FDEV(i).bdev))
1912 return PTR_ERR(FDEV(i).bdev);
1913
1914 /* to release errored devices */
1915 sbi->s_ndevs = i + 1;
1916
1917#ifdef CONFIG_BLK_DEV_ZONED
1918 if (bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HM &&
1919 !f2fs_sb_mounted_blkzoned(sbi->sb)) {
1920 f2fs_msg(sbi->sb, KERN_ERR,
1921				"Zoned block device feature not enabled");
1922 return -EINVAL;
1923 }
1924 if (bdev_zoned_model(FDEV(i).bdev) != BLK_ZONED_NONE) {
1925 if (init_blkz_info(sbi, i)) {
1926 f2fs_msg(sbi->sb, KERN_ERR,
1927 "Failed to initialize F2FS blkzone information");
1928 return -EINVAL;
1929 }
1930 if (max_devices == 1)
1931 break;
1932 f2fs_msg(sbi->sb, KERN_INFO,
1933 "Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: %s)",
1934 i, FDEV(i).path,
1935 FDEV(i).total_segments,
1936 FDEV(i).start_blk, FDEV(i).end_blk,
1937 bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HA ?
1938 "Host-aware" : "Host-managed");
1939 continue;
1940 }
1941#endif
1942 f2fs_msg(sbi->sb, KERN_INFO,
1943 "Mount Device [%2d]: %20s, %8u, %8x - %8x",
1944 i, FDEV(i).path,
1945 FDEV(i).total_segments,
1946 FDEV(i).start_blk, FDEV(i).end_blk);
1947 }
1948 f2fs_msg(sbi->sb, KERN_INFO,
1949 "IO Block Size: %8d KB", F2FS_IO_SIZE_KB(sbi));
1950 return 0;
1951}
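/*
 * Worked example with made-up numbers: with segment0_blkaddr = 512,
 * log_blocks_per_seg = 9 (512 blocks per segment) and two devices of
 * 1024 segments each listed in the superblock, the loop above yields
 *
 *	FDEV(0): start_blk = 0,      end_blk = 512 + 1024*512 - 1 = 524799
 *	FDEV(1): start_blk = 524800, end_blk = 524800 + 1024*512 - 1
 *
 * i.e. the first device also covers the metadata in front of segment 0,
 * and every later device starts right where the previous one ended.
 */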
1952
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09001953static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
1954{
1955 struct f2fs_sb_info *sbi;
hujianyangda554e42015-05-21 14:42:53 +08001956 struct f2fs_super_block *raw_super;
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09001957 struct inode *root;
Sheng Yong99e3e852016-05-11 17:08:14 +08001958 int err;
Chao Yu2adc3502015-03-16 21:08:44 +08001959 bool retry = true, need_fsck = false;
Jaegeuk Kimdabc4a52015-01-23 17:41:39 -08001960 char *options = NULL;
Chao Yue8240f62015-12-15 17:19:26 +08001961 int recovery, i, valid_super_block;
Shuoran Liu8f1dbbb2016-01-27 09:57:30 +08001962 struct curseg_info *seg_i;
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09001963
Jaegeuk Kimed2e6212014-08-08 15:37:41 -07001964try_onemore:
hujianyangda554e42015-05-21 14:42:53 +08001965 err = -EINVAL;
1966 raw_super = NULL;
Chao Yue8240f62015-12-15 17:19:26 +08001967 valid_super_block = -1;
hujianyangda554e42015-05-21 14:42:53 +08001968 recovery = 0;
1969
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09001970 /* allocate memory for f2fs-specific super block info */
1971 sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
1972 if (!sbi)
1973 return -ENOMEM;
1974
Jaegeuk Kimdf728b02016-03-23 17:05:27 -07001975 sbi->sb = sb;
1976
Keith Mok43b65732016-03-02 12:04:24 -08001977 /* Load the checksum driver */
1978 sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0);
1979 if (IS_ERR(sbi->s_chksum_driver)) {
1980 f2fs_msg(sb, KERN_ERR, "Cannot load crc32 driver.");
1981 err = PTR_ERR(sbi->s_chksum_driver);
1982 sbi->s_chksum_driver = NULL;
1983 goto free_sbi;
1984 }
1985
Namjae Jeonff9234a2013-01-12 14:41:13 +09001986 /* set a block size */
Jaegeuk Kim6bacf522013-12-06 15:00:58 +09001987 if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
Namjae Jeona07ef782012-12-30 14:52:05 +09001988 f2fs_msg(sb, KERN_ERR, "unable to set blocksize");
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09001989 goto free_sbi;
Namjae Jeona07ef782012-12-30 14:52:05 +09001990 }
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09001991
Jaegeuk Kimdf728b02016-03-23 17:05:27 -07001992 err = read_raw_super_block(sbi, &raw_super, &valid_super_block,
Chao Yue8240f62015-12-15 17:19:26 +08001993 &recovery);
Gu Zheng9076a752013-10-14 18:47:11 +08001994 if (err)
1995 goto free_sbi;
1996
Gu Zheng5fb08372013-06-07 14:16:53 +08001997 sb->s_fs_info = sbi;
Jaegeuk Kim52763a42016-06-13 09:47:48 -07001998 sbi->raw_super = raw_super;
1999
Chao Yu43101252017-07-31 20:19:09 +08002000 /* precompute checksum seed for metadata */
2001 if (f2fs_sb_has_inode_chksum(sb))
2002 sbi->s_chksum_seed = f2fs_chksum(sbi, ~0, raw_super->uuid,
2003 sizeof(raw_super->uuid));
2004
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07002005 /*
2006 * The BLKZONED feature indicates that the drive was formatted with
2007 * zone alignment optimization. This is optional for host-aware
2008 * devices, but mandatory for host-managed zoned block devices.
2009 */
2010#ifndef CONFIG_BLK_DEV_ZONED
2011 if (f2fs_sb_mounted_blkzoned(sb)) {
2012 f2fs_msg(sb, KERN_ERR,
2013			"Zoned block device support is not enabled");
Chao Yu3b0de562017-06-12 09:44:27 +08002014 err = -EOPNOTSUPP;
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07002015 goto free_sb_buf;
2016 }
2017#endif
Yunlei He498c5e92015-05-07 18:11:37 +08002018 default_options(sbi);
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002019 /* parse mount options */
Jaegeuk Kimdabc4a52015-01-23 17:41:39 -08002020 options = kstrdup((const char *)data, GFP_KERNEL);
2021 if (data && !options) {
2022 err = -ENOMEM;
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002023 goto free_sb_buf;
Jaegeuk Kimdabc4a52015-01-23 17:41:39 -08002024 }
2025
2026 err = parse_options(sb, options);
2027 if (err)
2028 goto free_options;
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002029
Chao Yue0afc4d2015-12-31 14:35:37 +08002030 sbi->max_file_blocks = max_file_blocks();
2031 sb->s_maxbytes = sbi->max_file_blocks <<
2032 le32_to_cpu(raw_super->log_blocksize);
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002033 sb->s_max_links = F2FS_LINK_MAX;
2034 get_random_bytes(&sbi->s_next_generation, sizeof(u32));
2035
Chao Yu09c3a722017-07-09 00:13:07 +08002036#ifdef CONFIG_QUOTA
2037 sb->dq_op = &f2fs_quota_operations;
2038 sb->s_qcop = &f2fs_quotactl_ops;
Chao Yu5647b302017-07-26 00:01:41 +08002039 sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
Chao Yu09c3a722017-07-09 00:13:07 +08002040#endif
2041
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002042 sb->s_op = &f2fs_sops;
Jaegeuk Kim0b81d072015-05-15 16:26:10 -07002043 sb->s_cop = &f2fs_cryptops;
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002044 sb->s_xattr = f2fs_xattr_handlers;
2045 sb->s_export_op = &f2fs_export_ops;
2046 sb->s_magic = F2FS_SUPER_MAGIC;
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002047 sb->s_time_gran = 1;
2048 sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
2049 (test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07002050 memcpy(&sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002051
2052 /* init f2fs-specific super block info */
Chao Yue8240f62015-12-15 17:19:26 +08002053 sbi->valid_super_block = valid_super_block;
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002054 mutex_init(&sbi->gc_mutex);
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002055 mutex_init(&sbi->cp_mutex);
Chao Yub3582c62014-07-03 18:58:39 +08002056 init_rwsem(&sbi->node_write);
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07002057 init_rwsem(&sbi->node_change);
Jaegeuk Kim315df832015-08-11 12:45:39 -07002058
2059 /* disallow all the data/node/meta page writes */
2060 set_sbi_flag(sbi, SBI_POR_DOING);
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002061 spin_lock_init(&sbi->stat_lock);
Jaegeuk Kim971767c2013-11-18 17:16:17 +09002062
Chao Yuc0fe4882017-08-02 23:21:48 +08002063 /* init iostat info */
2064 spin_lock_init(&sbi->iostat_lock);
2065 sbi->iostat_enable = false;
2066
Jaegeuk Kim458e6192013-12-11 13:54:01 +09002067 for (i = 0; i < NR_PAGE_TYPE; i++) {
Jaegeuk Kimc4127262017-05-10 11:18:25 -07002068 int n = (i == META) ? 1: NR_TEMP_TYPE;
2069 int j;
2070
2071 sbi->write_io[i] = kmalloc(n * sizeof(struct f2fs_bio_info),
2072 GFP_KERNEL);
Christophe JAILLETd0917a42017-06-11 09:21:11 +02002073 if (!sbi->write_io[i]) {
2074 err = -ENOMEM;
Jaegeuk Kimc4127262017-05-10 11:18:25 -07002075 goto free_options;
Christophe JAILLETd0917a42017-06-11 09:21:11 +02002076 }
Jaegeuk Kimc4127262017-05-10 11:18:25 -07002077
2078 for (j = HOT; j < n; j++) {
2079 init_rwsem(&sbi->write_io[i][j].io_rwsem);
2080 sbi->write_io[i][j].sbi = sbi;
2081 sbi->write_io[i][j].bio = NULL;
Chao Yuc52dc0f2017-05-19 23:37:01 +08002082 spin_lock_init(&sbi->write_io[i][j].io_lock);
2083 INIT_LIST_HEAD(&sbi->write_io[i][j].io_list);
Jaegeuk Kimc4127262017-05-10 11:18:25 -07002084 }
Jaegeuk Kim458e6192013-12-11 13:54:01 +09002085 }
Jaegeuk Kim971767c2013-11-18 17:16:17 +09002086
Jaegeuk Kimb873b792016-08-04 11:38:25 -07002087 init_rwsem(&sbi->cp_rwsem);
Changman Leefb51b5e2013-11-07 12:48:25 +09002088 init_waitqueue_head(&sbi->cp_wait);
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002089 init_sb_info(sbi);
2090
Jaegeuk Kim523be8a2016-05-13 12:36:58 -07002091 err = init_percpu_info(sbi);
2092 if (err)
2093 goto free_options;
2094
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07002095 if (F2FS_IO_SIZE(sbi) > 1) {
2096 sbi->write_io_dummy =
2097 mempool_create_page_pool(2 * (F2FS_IO_SIZE(sbi) - 1), 0);
Chao Yu3b0de562017-06-12 09:44:27 +08002098 if (!sbi->write_io_dummy) {
2099 err = -ENOMEM;
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07002100 goto free_options;
Chao Yu3b0de562017-06-12 09:44:27 +08002101 }
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07002102 }
2103
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002104 /* get an inode for meta space */
2105 sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
2106 if (IS_ERR(sbi->meta_inode)) {
Namjae Jeona07ef782012-12-30 14:52:05 +09002107 f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002108 err = PTR_ERR(sbi->meta_inode);
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07002109 goto free_io_dummy;
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002110 }
2111
2112 err = get_valid_checkpoint(sbi);
Namjae Jeona07ef782012-12-30 14:52:05 +09002113 if (err) {
2114 f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint");
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002115 goto free_meta_inode;
Namjae Jeona07ef782012-12-30 14:52:05 +09002116 }
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002117
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07002118 /* Initialize device list */
2119 err = f2fs_scan_devices(sbi);
2120 if (err) {
2121 f2fs_msg(sb, KERN_ERR, "Failed to find devices");
2122 goto free_devices;
2123 }
2124
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002125 sbi->total_valid_node_count =
2126 le32_to_cpu(sbi->ckpt->valid_node_count);
Jaegeuk Kim513c5f32016-05-16 11:42:32 -07002127 percpu_counter_set(&sbi->total_valid_inode_count,
2128 le32_to_cpu(sbi->ckpt->valid_inode_count));
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002129 sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
2130 sbi->total_valid_block_count =
2131 le64_to_cpu(sbi->ckpt->valid_block_count);
2132 sbi->last_valid_block_count = sbi->total_valid_block_count;
Chao Yu026bd9d2017-06-26 16:24:41 +08002133 sbi->reserved_blocks = 0;
Jaegeuk Kim41382ec2016-05-16 11:06:50 -07002134
Chao Yuc227f912015-12-16 13:09:20 +08002135 for (i = 0; i < NR_INODE_TYPE; i++) {
2136 INIT_LIST_HEAD(&sbi->inode_list[i]);
2137 spin_lock_init(&sbi->inode_lock[i]);
2138 }
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002139
Chao Yu1dcc3362015-02-05 17:57:31 +08002140 init_extent_cache_info(sbi);
2141
Jaegeuk Kim6451e042014-07-25 15:47:17 -07002142 init_ino_entry_info(sbi);
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002143
2144 /* setup f2fs internal modules */
2145 err = build_segment_manager(sbi);
Namjae Jeona07ef782012-12-30 14:52:05 +09002146 if (err) {
2147 f2fs_msg(sb, KERN_ERR,
2148 "Failed to initialize F2FS segment manager");
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002149 goto free_sm;
Namjae Jeona07ef782012-12-30 14:52:05 +09002150 }
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002151 err = build_node_manager(sbi);
Namjae Jeona07ef782012-12-30 14:52:05 +09002152 if (err) {
2153 f2fs_msg(sb, KERN_ERR,
2154 "Failed to initialize F2FS node manager");
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002155 goto free_nm;
Namjae Jeona07ef782012-12-30 14:52:05 +09002156 }
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002157
Shuoran Liu8f1dbbb2016-01-27 09:57:30 +08002158 /* For write statistics */
2159 if (sb->s_bdev->bd_part)
2160 sbi->sectors_written_start =
2161 (u64)part_stat_read(sb->s_bdev->bd_part, sectors[1]);
2162
2163	/* Read accumulated write IO statistics if they exist */
2164 seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
2165 if (__exist_node_summaries(sbi))
2166 sbi->kbytes_written =
Shuoran Liub2dde6f2016-03-29 18:00:15 +08002167 le64_to_cpu(seg_i->journal->info.kbytes_written);
Shuoran Liu8f1dbbb2016-01-27 09:57:30 +08002168
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002169 build_gc_manager(sbi);
2170
2171 /* get an inode for node space */
2172 sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
2173 if (IS_ERR(sbi->node_inode)) {
Namjae Jeona07ef782012-12-30 14:52:05 +09002174 f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002175 err = PTR_ERR(sbi->node_inode);
2176 goto free_nm;
2177 }
2178
Jaegeuk Kim2658e502015-06-19 12:01:21 -07002179 f2fs_join_shrinker(sbi);
2180
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07002181 err = f2fs_build_stats(sbi);
2182 if (err)
2183 goto free_nm;
2184
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002185	/* if there are any orphan nodes, free them */
Chao Yu8c14bfa2015-08-07 17:58:43 +08002186 err = recover_orphan_inodes(sbi);
2187 if (err)
2188 goto free_node_inode;
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002189
2190 /* read root inode and dentry */
2191 root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
2192 if (IS_ERR(root)) {
Namjae Jeona07ef782012-12-30 14:52:05 +09002193 f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002194 err = PTR_ERR(root);
2195 goto free_node_inode;
2196 }
Chao Yu8f99a942013-11-28 15:43:43 +08002197 if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
Chao Yu9d847952014-07-25 12:55:09 +08002198 iput(root);
Chao Yu8f99a942013-11-28 15:43:43 +08002199 err = -EINVAL;
Chao Yu9d847952014-07-25 12:55:09 +08002200 goto free_node_inode;
Chao Yu8f99a942013-11-28 15:43:43 +08002201 }
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002202
2203 sb->s_root = d_make_root(root); /* allocate root dentry */
2204 if (!sb->s_root) {
2205 err = -ENOMEM;
2206 goto free_root_inode;
2207 }
2208
Jaegeuk Kim883d5532017-07-26 11:24:13 -07002209 err = f2fs_register_sysfs(sbi);
Namjae Jeonb59d0ba2013-08-04 23:09:40 +09002210 if (err)
Chao Yu17a3fb52017-06-14 17:39:46 +08002211 goto free_root_inode;
Namjae Jeonb59d0ba2013-08-04 23:09:40 +09002212
Jaegeuk Kim6437d1b2014-02-19 18:23:32 +09002213 /* recover fsynced data */
2214 if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
Jaegeuk Kim081d78c2015-01-23 19:16:59 -08002215 /*
2216		 * the mount should fail when the device is read-only and the
2217		 * previous checkpoint was not written by a clean system shutdown.
2218 */
2219 if (bdev_read_only(sb->s_bdev) &&
Chao Yuaaec2b12016-09-20 11:04:18 +08002220 !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
Jaegeuk Kim081d78c2015-01-23 19:16:59 -08002221 err = -EROFS;
Chao Yu17a3fb52017-06-14 17:39:46 +08002222 goto free_sysfs;
Jaegeuk Kim081d78c2015-01-23 19:16:59 -08002223 }
Chao Yu2adc3502015-03-16 21:08:44 +08002224
2225 if (need_fsck)
2226 set_sbi_flag(sbi, SBI_NEED_FSCK);
2227
Jaegeuk Kima468f0e2016-09-19 17:55:10 -07002228 if (!retry)
2229 goto skip_recovery;
2230
Jaegeuk Kim6781eab2016-03-23 16:12:58 -07002231 err = recover_fsync_data(sbi, false);
2232 if (err < 0) {
Chao Yu2adc3502015-03-16 21:08:44 +08002233 need_fsck = true;
Jaegeuk Kim6437d1b2014-02-19 18:23:32 +09002234 f2fs_msg(sb, KERN_ERR,
Sheng Yong99e3e852016-05-11 17:08:14 +08002235 "Cannot recover all fsync data errno=%d", err);
Chao Yu17a3fb52017-06-14 17:39:46 +08002236 goto free_sysfs;
Jaegeuk Kimed2e6212014-08-08 15:37:41 -07002237 }
Jaegeuk Kim6781eab2016-03-23 16:12:58 -07002238 } else {
2239 err = recover_fsync_data(sbi, true);
2240
2241 if (!f2fs_readonly(sb) && err > 0) {
2242 err = -EINVAL;
2243 f2fs_msg(sb, KERN_ERR,
2244 "Need to recover fsync data");
Chao Yu17a3fb52017-06-14 17:39:46 +08002245 goto free_sysfs;
Jaegeuk Kim6781eab2016-03-23 16:12:58 -07002246 }
Jaegeuk Kim6437d1b2014-02-19 18:23:32 +09002247 }
Jaegeuk Kima468f0e2016-09-19 17:55:10 -07002248skip_recovery:
Jaegeuk Kim315df832015-08-11 12:45:39 -07002249 /* recover_fsync_data() cleared this already */
2250 clear_sbi_flag(sbi, SBI_POR_DOING);
Jaegeuk Kim6437d1b2014-02-19 18:23:32 +09002251
2252 /*
2253	 * If the filesystem is not mounted read-only, then
2254	 * start the gc_thread.
2255 */
Chao Yu6c029932014-11-18 11:16:01 +08002256 if (test_opt(sbi, BG_GC) && !f2fs_readonly(sb)) {
Jaegeuk Kim6437d1b2014-02-19 18:23:32 +09002257		/* After POR, we can run the background GC thread. */
2258 err = start_gc_thread(sbi);
2259 if (err)
Chao Yu17a3fb52017-06-14 17:39:46 +08002260 goto free_sysfs;
Jaegeuk Kim6437d1b2014-02-19 18:23:32 +09002261 }
Jaegeuk Kimdabc4a52015-01-23 17:41:39 -08002262 kfree(options);
hujianyangda554e42015-05-21 14:42:53 +08002263
2264 /* recover broken superblock */
Jaegeuk Kimf2353d72016-03-23 10:42:01 -07002265 if (recovery) {
Chao Yu41214b32016-02-22 18:33:20 +08002266 err = f2fs_commit_super(sbi, true);
2267 f2fs_msg(sb, KERN_INFO,
Sheng Yong99e3e852016-05-11 17:08:14 +08002268 "Try to recover %dth superblock, ret: %d",
Chao Yu41214b32016-02-22 18:33:20 +08002269 sbi->valid_super_block ? 1 : 2, err);
hujianyangda554e42015-05-21 14:42:53 +08002270 }
2271
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07002272 f2fs_msg(sbi->sb, KERN_NOTICE, "Mounted with checkpoint version = %llx",
2273 cur_cp_version(F2FS_CKPT(sbi)));
Jaegeuk Kim6beceb52016-01-08 15:51:50 -08002274 f2fs_update_time(sbi, CP_TIME);
Jaegeuk Kimd0239e12016-01-08 16:57:48 -08002275 f2fs_update_time(sbi, REQ_TIME);
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002276 return 0;
Jaegeuk Kim6437d1b2014-02-19 18:23:32 +09002277
Chao Yu17a3fb52017-06-14 17:39:46 +08002278free_sysfs:
Jaegeuk Kim0f18b462016-05-20 11:10:10 -07002279 f2fs_sync_inode_meta(sbi);
Jaegeuk Kim883d5532017-07-26 11:24:13 -07002280 f2fs_unregister_sysfs(sbi);
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002281free_root_inode:
2282 dput(sb->s_root);
2283 sb->s_root = NULL;
2284free_node_inode:
Jaegeuk Kimbb5dada2016-09-23 11:29:00 -07002285 truncate_inode_pages_final(NODE_MAPPING(sbi));
Jaegeuk Kim2658e502015-06-19 12:01:21 -07002286 mutex_lock(&sbi->umount_mutex);
Jaegeuk Kimd41065e2016-09-21 11:39:42 -07002287 release_ino_entry(sbi, true);
Jaegeuk Kim2658e502015-06-19 12:01:21 -07002288 f2fs_leave_shrinker(sbi);
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07002289 /*
2290	 * Some dirty meta pages can be left behind when recover_orphan_inodes()
2291	 * fails with EIO. Then, iput(node_inode) can trigger balance_fs_bg()
2292 * followed by write_checkpoint() through f2fs_write_node_pages(), which
2293 * falls into an infinite loop in sync_meta_pages().
2294 */
2295 truncate_inode_pages_final(META_MAPPING(sbi));
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002296 iput(sbi->node_inode);
Jaegeuk Kim2658e502015-06-19 12:01:21 -07002297 mutex_unlock(&sbi->umount_mutex);
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07002298 f2fs_destroy_stats(sbi);
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002299free_nm:
2300 destroy_node_manager(sbi);
2301free_sm:
2302 destroy_segment_manager(sbi);
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07002303free_devices:
2304 destroy_device_list(sbi);
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002305 kfree(sbi->ckpt);
2306free_meta_inode:
2307 make_bad_inode(sbi->meta_inode);
2308 iput(sbi->meta_inode);
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07002309free_io_dummy:
2310 mempool_destroy(sbi->write_io_dummy);
Jaegeuk Kimdabc4a52015-01-23 17:41:39 -08002311free_options:
Jaegeuk Kimc4127262017-05-10 11:18:25 -07002312 for (i = 0; i < NR_PAGE_TYPE; i++)
2313 kfree(sbi->write_io[i]);
Jaegeuk Kim523be8a2016-05-13 12:36:58 -07002314 destroy_percpu_info(sbi);
Jaegeuk Kimdabc4a52015-01-23 17:41:39 -08002315 kfree(options);
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002316free_sb_buf:
Yunlei Heb39f0de2015-12-15 17:17:20 +08002317 kfree(raw_super);
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002318free_sbi:
Keith Mok43b65732016-03-02 12:04:24 -08002319 if (sbi->s_chksum_driver)
2320 crypto_free_shash(sbi->s_chksum_driver);
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002321 kfree(sbi);
Jaegeuk Kimed2e6212014-08-08 15:37:41 -07002322
2323	/* give only one more chance */
2324 if (retry) {
Taehee Yoo9df47ba2015-04-13 21:48:06 +09002325 retry = false;
Jaegeuk Kimed2e6212014-08-08 15:37:41 -07002326 shrink_dcache_sb(sb);
2327 goto try_onemore;
2328 }
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002329 return err;
2330}
2331
2332static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,
2333 const char *dev_name, void *data)
2334{
2335 return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super);
2336}
2337
Jaegeuk Kim30a55372015-01-14 16:34:24 -08002338static void kill_f2fs_super(struct super_block *sb)
2339{
Chao Yud9d85cc2017-06-29 23:17:45 +08002340 if (sb->s_root) {
Chao Yucaf00472015-01-28 17:48:42 +08002341 set_sbi_flag(F2FS_SB(sb), SBI_IS_CLOSE);
Chao Yud9d85cc2017-06-29 23:17:45 +08002342 stop_gc_thread(F2FS_SB(sb));
2343 stop_discard_thread(F2FS_SB(sb));
2344 }
Jaegeuk Kim30a55372015-01-14 16:34:24 -08002345 kill_block_super(sb);
2346}
2347
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002348static struct file_system_type f2fs_fs_type = {
2349 .owner = THIS_MODULE,
2350 .name = "f2fs",
2351 .mount = f2fs_mount,
Jaegeuk Kim30a55372015-01-14 16:34:24 -08002352 .kill_sb = kill_f2fs_super,
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002353 .fs_flags = FS_REQUIRES_DEV,
2354};
Eric W. Biederman7f78e032013-03-02 19:39:14 -08002355MODULE_ALIAS_FS("f2fs");
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002356
Namjae Jeon6e6093a2013-01-17 00:08:30 +09002357static int __init init_inodecache(void)
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002358{
Vladimir Davydov5d097052016-01-14 15:18:21 -08002359 f2fs_inode_cachep = kmem_cache_create("f2fs_inode_cache",
2360 sizeof(struct f2fs_inode_info), 0,
2361 SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT, NULL);
Jaegeuk Kim6bacf522013-12-06 15:00:58 +09002362 if (!f2fs_inode_cachep)
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002363 return -ENOMEM;
2364 return 0;
2365}
2366
2367static void destroy_inodecache(void)
2368{
2369 /*
2370 * Make sure all delayed rcu free inodes are flushed before we
2371 * destroy cache.
2372 */
2373 rcu_barrier();
2374 kmem_cache_destroy(f2fs_inode_cachep);
2375}
2376
2377static int __init init_f2fs_fs(void)
2378{
2379 int err;
2380
Jaegeuk Kimc0508652015-01-07 14:07:36 -08002381 f2fs_build_trace_ios();
2382
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002383 err = init_inodecache();
2384 if (err)
2385 goto fail;
2386 err = create_node_manager_caches();
2387 if (err)
Zhao Hongjiang9890ff32013-08-20 16:49:51 +08002388 goto free_inodecache;
Jaegeuk Kim7fd9e542013-11-15 13:55:58 +09002389 err = create_segment_manager_caches();
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002390 if (err)
Zhao Hongjiang9890ff32013-08-20 16:49:51 +08002391 goto free_node_manager_caches;
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002392 err = create_checkpoint_caches();
2393 if (err)
Chao Yu06292072014-12-29 15:56:18 +08002394 goto free_segment_manager_caches;
Chao Yu1dcc3362015-02-05 17:57:31 +08002395 err = create_extent_cache();
2396 if (err)
2397 goto free_checkpoint_caches;
Jaegeuk Kim883d5532017-07-26 11:24:13 -07002398 err = f2fs_init_sysfs();
Chao Yu17a3fb52017-06-14 17:39:46 +08002399 if (err)
Chao Yu1dcc3362015-02-05 17:57:31 +08002400 goto free_extent_cache;
Jaegeuk Kim2658e502015-06-19 12:01:21 -07002401 err = register_shrinker(&f2fs_shrinker_info);
Jaegeuk Kimcfc4d972015-05-15 15:37:24 -07002402 if (err)
Chao Yu17a3fb52017-06-14 17:39:46 +08002403 goto free_sysfs;
Jaegeuk Kim2658e502015-06-19 12:01:21 -07002404 err = register_filesystem(&f2fs_fs_type);
2405 if (err)
2406 goto free_shrinker;
Chao Yu787c7b8c2015-10-29 09:13:04 +08002407 err = f2fs_create_root_stats();
2408 if (err)
2409 goto free_filesystem;
Zhao Hongjiang9890ff32013-08-20 16:49:51 +08002410 return 0;
2411
Chao Yu787c7b8c2015-10-29 09:13:04 +08002412free_filesystem:
2413 unregister_filesystem(&f2fs_fs_type);
Jaegeuk Kim2658e502015-06-19 12:01:21 -07002414free_shrinker:
2415 unregister_shrinker(&f2fs_shrinker_info);
Chao Yu17a3fb52017-06-14 17:39:46 +08002416free_sysfs:
Jaegeuk Kim883d5532017-07-26 11:24:13 -07002417 f2fs_exit_sysfs();
Chao Yu1dcc3362015-02-05 17:57:31 +08002418free_extent_cache:
2419 destroy_extent_cache();
Zhao Hongjiang9890ff32013-08-20 16:49:51 +08002420free_checkpoint_caches:
2421 destroy_checkpoint_caches();
Jaegeuk Kim7fd9e542013-11-15 13:55:58 +09002422free_segment_manager_caches:
2423 destroy_segment_manager_caches();
Zhao Hongjiang9890ff32013-08-20 16:49:51 +08002424free_node_manager_caches:
2425 destroy_node_manager_caches();
2426free_inodecache:
2427 destroy_inodecache();
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002428fail:
2429 return err;
2430}
2431
2432static void __exit exit_f2fs_fs(void)
2433{
Namjae Jeon4589d252013-01-15 19:58:47 +09002434 f2fs_destroy_root_stats();
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002435 unregister_filesystem(&f2fs_fs_type);
Tiezhu Yangb8bef792016-05-18 08:02:25 +08002436 unregister_shrinker(&f2fs_shrinker_info);
Jaegeuk Kim883d5532017-07-26 11:24:13 -07002437 f2fs_exit_sysfs();
Wanpeng Lifdf6c8b2015-03-06 15:00:54 +08002438 destroy_extent_cache();
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002439 destroy_checkpoint_caches();
Changman Lee5dcd8a72013-12-11 14:32:13 +09002440 destroy_segment_manager_caches();
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002441 destroy_node_manager_caches();
2442 destroy_inodecache();
Jaegeuk Kim351f4fb2015-01-07 14:09:48 -08002443 f2fs_destroy_trace_ios();
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002444}
2445
2446module_init(init_f2fs_fs)
2447module_exit(exit_f2fs_fs)
2448
2449MODULE_AUTHOR("Samsung Electronics's Praesto Team");
2450MODULE_DESCRIPTION("Flash Friendly File System");
2451MODULE_LICENSE("GPL");
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07002452