/*
 * fs/f2fs/super.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/statfs.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/parser.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/exportfs.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/f2fs_fs.h>
#include <linux/sysfs.h>
#include <linux/quota.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "gc.h"
#include "trace.h"

#define CREATE_TRACE_POINTS
#include <trace/events/f2fs.h>

static struct kmem_cache *f2fs_inode_cachep;

#ifdef CONFIG_F2FS_FAULT_INJECTION

char *fault_name[FAULT_MAX] = {
	[FAULT_KMALLOC]		= "kmalloc",
	[FAULT_PAGE_ALLOC]	= "page alloc",
	[FAULT_ALLOC_NID]	= "alloc nid",
	[FAULT_ORPHAN]		= "orphan",
	[FAULT_BLOCK]		= "no more block",
	[FAULT_DIR_DEPTH]	= "too big dir depth",
	[FAULT_EVICT_INODE]	= "evict_inode fail",
	[FAULT_TRUNCATE]	= "truncate fail",
	[FAULT_IO]		= "IO error",
	[FAULT_CHECKPOINT]	= "checkpoint error",
};

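/*
 * Set up the per-sb fault injection attributes: a non-zero @rate enables
 * every fault type at that injection rate, while zero clears fault_info
 * and disables injection entirely.
 */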
static void f2fs_build_fault_attr(struct f2fs_sb_info *sbi,
						unsigned int rate)
{
	struct f2fs_fault_info *ffi = &sbi->fault_info;

	if (rate) {
		atomic_set(&ffi->inject_ops, 0);
		ffi->inject_rate = rate;
		ffi->inject_type = (1 << FAULT_MAX) - 1;
	} else {
		memset(ffi, 0, sizeof(struct f2fs_fault_info));
	}
}
#endif

/* f2fs-wide shrinker description */
static struct shrinker f2fs_shrinker_info = {
	.scan_objects = f2fs_shrink_scan,
	.count_objects = f2fs_shrink_count,
	.seeks = DEFAULT_SEEKS,
};

enum {
	Opt_gc_background,
	Opt_disable_roll_forward,
	Opt_norecovery,
	Opt_discard,
	Opt_nodiscard,
	Opt_noheap,
	Opt_heap,
	Opt_user_xattr,
	Opt_nouser_xattr,
	Opt_acl,
	Opt_noacl,
	Opt_active_logs,
	Opt_disable_ext_identify,
	Opt_inline_xattr,
	Opt_noinline_xattr,
	Opt_inline_data,
	Opt_inline_dentry,
	Opt_noinline_dentry,
	Opt_flush_merge,
	Opt_noflush_merge,
	Opt_nobarrier,
	Opt_fastboot,
	Opt_extent_cache,
	Opt_noextent_cache,
	Opt_noinline_data,
	Opt_data_flush,
	Opt_mode,
	Opt_io_size_bits,
	Opt_fault_injection,
	Opt_lazytime,
	Opt_nolazytime,
	Opt_quota,
	Opt_noquota,
	Opt_usrquota,
	Opt_grpquota,
	Opt_prjquota,
	Opt_usrjquota,
	Opt_grpjquota,
	Opt_prjjquota,
	Opt_offusrjquota,
	Opt_offgrpjquota,
	Opt_offprjjquota,
	Opt_jqfmt_vfsold,
	Opt_jqfmt_vfsv0,
	Opt_jqfmt_vfsv1,
	Opt_err,
};

static match_table_t f2fs_tokens = {
	{Opt_gc_background, "background_gc=%s"},
	{Opt_disable_roll_forward, "disable_roll_forward"},
	{Opt_norecovery, "norecovery"},
	{Opt_discard, "discard"},
	{Opt_nodiscard, "nodiscard"},
	{Opt_noheap, "no_heap"},
	{Opt_heap, "heap"},
	{Opt_user_xattr, "user_xattr"},
	{Opt_nouser_xattr, "nouser_xattr"},
	{Opt_acl, "acl"},
	{Opt_noacl, "noacl"},
	{Opt_active_logs, "active_logs=%u"},
	{Opt_disable_ext_identify, "disable_ext_identify"},
	{Opt_inline_xattr, "inline_xattr"},
	{Opt_noinline_xattr, "noinline_xattr"},
	{Opt_inline_data, "inline_data"},
	{Opt_inline_dentry, "inline_dentry"},
	{Opt_noinline_dentry, "noinline_dentry"},
	{Opt_flush_merge, "flush_merge"},
	{Opt_noflush_merge, "noflush_merge"},
	{Opt_nobarrier, "nobarrier"},
	{Opt_fastboot, "fastboot"},
	{Opt_extent_cache, "extent_cache"},
	{Opt_noextent_cache, "noextent_cache"},
	{Opt_noinline_data, "noinline_data"},
	{Opt_data_flush, "data_flush"},
	{Opt_mode, "mode=%s"},
	{Opt_io_size_bits, "io_bits=%u"},
	{Opt_fault_injection, "fault_injection=%u"},
	{Opt_lazytime, "lazytime"},
	{Opt_nolazytime, "nolazytime"},
	{Opt_quota, "quota"},
	{Opt_noquota, "noquota"},
	{Opt_usrquota, "usrquota"},
	{Opt_grpquota, "grpquota"},
	{Opt_prjquota, "prjquota"},
	{Opt_usrjquota, "usrjquota=%s"},
	{Opt_grpjquota, "grpjquota=%s"},
	{Opt_prjjquota, "prjjquota=%s"},
	{Opt_offusrjquota, "usrjquota="},
	{Opt_offgrpjquota, "grpjquota="},
	{Opt_offprjjquota, "prjjquota="},
	{Opt_jqfmt_vfsold, "jqfmt=vfsold"},
	{Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
	{Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
	{Opt_err, NULL},
};

void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk_ratelimited("%sF2FS-fs (%s): %pV\n", level, sb->s_id, &vaf);
	va_end(args);
}

static void init_once(void *foo)
{
	struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;

	inode_init_once(&fi->vfs_inode);
}

#ifdef CONFIG_QUOTA
static const char * const quotatypes[] = INITQFNAMES;
#define QTYPE2NAME(t) (quotatypes[t])
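/*
 * Remember the journaled quota file name passed in the mount options.
 * The name cannot be changed while any quota is already turned on.
 */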
static int f2fs_set_qf_name(struct super_block *sb, int qtype,
							substring_t *args)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	char *qname;
	int ret = -EINVAL;

	if (sb_any_quota_loaded(sb) && !sbi->s_qf_names[qtype]) {
		f2fs_msg(sb, KERN_ERR,
			"Cannot change journaled "
			"quota options when quota turned on");
		return -EINVAL;
	}
	qname = match_strdup(args);
	if (!qname) {
		f2fs_msg(sb, KERN_ERR,
			"Not enough memory for storing quotafile name");
		return -EINVAL;
	}
	if (sbi->s_qf_names[qtype]) {
		if (strcmp(sbi->s_qf_names[qtype], qname) == 0)
			ret = 0;
		else
			f2fs_msg(sb, KERN_ERR,
				"%s quota file already specified",
				QTYPE2NAME(qtype));
		goto errout;
	}
	if (strchr(qname, '/')) {
		f2fs_msg(sb, KERN_ERR,
			"quotafile must be on filesystem root");
		goto errout;
	}
	sbi->s_qf_names[qtype] = qname;
	set_opt(sbi, QUOTA);
	return 0;
errout:
	kfree(qname);
	return ret;
}

static int f2fs_clear_qf_name(struct super_block *sb, int qtype)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (sb_any_quota_loaded(sb) && sbi->s_qf_names[qtype]) {
		f2fs_msg(sb, KERN_ERR, "Cannot change journaled quota options"
			" when quota turned on");
		return -EINVAL;
	}
	kfree(sbi->s_qf_names[qtype]);
	sbi->s_qf_names[qtype] = NULL;
	return 0;
}

static int f2fs_check_quota_options(struct f2fs_sb_info *sbi)
{
	/*
	 * We do the test below only for project quotas. 'usrquota' and
	 * 'grpquota' mount options are allowed even without quota feature
	 * to support legacy quotas in quota files.
	 */
	if (test_opt(sbi, PRJQUOTA) && !f2fs_sb_has_project_quota(sbi->sb)) {
		f2fs_msg(sbi->sb, KERN_ERR, "Project quota feature not enabled. "
			 "Cannot enable project quota enforcement.");
		return -1;
	}
	if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA] ||
			sbi->s_qf_names[PRJQUOTA]) {
		if (test_opt(sbi, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
			clear_opt(sbi, USRQUOTA);

		if (test_opt(sbi, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA])
			clear_opt(sbi, GRPQUOTA);

		if (test_opt(sbi, PRJQUOTA) && sbi->s_qf_names[PRJQUOTA])
			clear_opt(sbi, PRJQUOTA);

		if (test_opt(sbi, GRPQUOTA) || test_opt(sbi, USRQUOTA) ||
				test_opt(sbi, PRJQUOTA)) {
			f2fs_msg(sbi->sb, KERN_ERR, "old and new quota "
					"format mixing");
			return -1;
		}

		if (!sbi->s_jquota_fmt) {
			f2fs_msg(sbi->sb, KERN_ERR, "journaled quota format "
					"not specified");
			return -1;
		}
	}
	return 0;
}
#endif

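/* Parse the comma-separated mount option string and apply it to the sb info. */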
static int parse_options(struct super_block *sb, char *options)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct request_queue *q;
	substring_t args[MAX_OPT_ARGS];
	char *p, *name;
	int arg = 0;
#ifdef CONFIG_QUOTA
	int ret;
#endif

	if (!options)
		return 0;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
		if (!*p)
			continue;
		/*
		 * Initialize args struct so we know whether arg was
		 * found; some options take optional arguments.
		 */
		args[0].to = args[0].from = NULL;
		token = match_token(p, f2fs_tokens, args);

		switch (token) {
		case Opt_gc_background:
			name = match_strdup(&args[0]);

			if (!name)
				return -ENOMEM;
			if (strlen(name) == 2 && !strncmp(name, "on", 2)) {
				set_opt(sbi, BG_GC);
				clear_opt(sbi, FORCE_FG_GC);
			} else if (strlen(name) == 3 && !strncmp(name, "off", 3)) {
				clear_opt(sbi, BG_GC);
				clear_opt(sbi, FORCE_FG_GC);
			} else if (strlen(name) == 4 && !strncmp(name, "sync", 4)) {
				set_opt(sbi, BG_GC);
				set_opt(sbi, FORCE_FG_GC);
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_disable_roll_forward:
			set_opt(sbi, DISABLE_ROLL_FORWARD);
			break;
		case Opt_norecovery:
			/* this option mounts f2fs with ro */
			set_opt(sbi, DISABLE_ROLL_FORWARD);
			if (!f2fs_readonly(sb))
				return -EINVAL;
			break;
		case Opt_discard:
			q = bdev_get_queue(sb->s_bdev);
			if (blk_queue_discard(q)) {
				set_opt(sbi, DISCARD);
			} else if (!f2fs_sb_mounted_blkzoned(sb)) {
				f2fs_msg(sb, KERN_WARNING,
					"mounting with \"discard\" option, but "
					"the device does not support discard");
			}
			break;
		case Opt_nodiscard:
			if (f2fs_sb_mounted_blkzoned(sb)) {
				f2fs_msg(sb, KERN_WARNING,
					"discard is required for zoned block devices");
				return -EINVAL;
			}
			clear_opt(sbi, DISCARD);
			break;
		case Opt_noheap:
			set_opt(sbi, NOHEAP);
			break;
		case Opt_heap:
			clear_opt(sbi, NOHEAP);
			break;
#ifdef CONFIG_F2FS_FS_XATTR
		case Opt_user_xattr:
			set_opt(sbi, XATTR_USER);
			break;
		case Opt_nouser_xattr:
			clear_opt(sbi, XATTR_USER);
			break;
		case Opt_inline_xattr:
			set_opt(sbi, INLINE_XATTR);
			break;
		case Opt_noinline_xattr:
			clear_opt(sbi, INLINE_XATTR);
			break;
#else
		case Opt_user_xattr:
			f2fs_msg(sb, KERN_INFO,
				"user_xattr options not supported");
			break;
		case Opt_nouser_xattr:
			f2fs_msg(sb, KERN_INFO,
				"nouser_xattr options not supported");
			break;
		case Opt_inline_xattr:
			f2fs_msg(sb, KERN_INFO,
				"inline_xattr options not supported");
			break;
		case Opt_noinline_xattr:
			f2fs_msg(sb, KERN_INFO,
				"noinline_xattr options not supported");
			break;
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
		case Opt_acl:
			set_opt(sbi, POSIX_ACL);
			break;
		case Opt_noacl:
			clear_opt(sbi, POSIX_ACL);
			break;
#else
		case Opt_acl:
			f2fs_msg(sb, KERN_INFO, "acl options not supported");
			break;
		case Opt_noacl:
			f2fs_msg(sb, KERN_INFO, "noacl options not supported");
			break;
#endif
		case Opt_active_logs:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg != 2 && arg != 4 && arg != NR_CURSEG_TYPE)
				return -EINVAL;
			sbi->active_logs = arg;
			break;
		case Opt_disable_ext_identify:
			set_opt(sbi, DISABLE_EXT_IDENTIFY);
			break;
		case Opt_inline_data:
			set_opt(sbi, INLINE_DATA);
			break;
		case Opt_inline_dentry:
			set_opt(sbi, INLINE_DENTRY);
			break;
		case Opt_noinline_dentry:
			clear_opt(sbi, INLINE_DENTRY);
			break;
		case Opt_flush_merge:
			set_opt(sbi, FLUSH_MERGE);
			break;
		case Opt_noflush_merge:
			clear_opt(sbi, FLUSH_MERGE);
			break;
		case Opt_nobarrier:
			set_opt(sbi, NOBARRIER);
			break;
		case Opt_fastboot:
			set_opt(sbi, FASTBOOT);
			break;
		case Opt_extent_cache:
			set_opt(sbi, EXTENT_CACHE);
			break;
		case Opt_noextent_cache:
			clear_opt(sbi, EXTENT_CACHE);
			break;
		case Opt_noinline_data:
			clear_opt(sbi, INLINE_DATA);
			break;
		case Opt_data_flush:
			set_opt(sbi, DATA_FLUSH);
			break;
		case Opt_mode:
			name = match_strdup(&args[0]);

			if (!name)
				return -ENOMEM;
			if (strlen(name) == 8 &&
					!strncmp(name, "adaptive", 8)) {
				if (f2fs_sb_mounted_blkzoned(sb)) {
					f2fs_msg(sb, KERN_WARNING,
						 "adaptive mode is not allowed with "
						 "zoned block device feature");
					kfree(name);
					return -EINVAL;
				}
				set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
			} else if (strlen(name) == 3 &&
					!strncmp(name, "lfs", 3)) {
				set_opt_mode(sbi, F2FS_MOUNT_LFS);
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_io_size_bits:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg > __ilog2_u32(BIO_MAX_PAGES)) {
				f2fs_msg(sb, KERN_WARNING,
					"Not support %d, larger than %d",
					1 << arg, BIO_MAX_PAGES);
				return -EINVAL;
			}
			sbi->write_io_size_bits = arg;
			break;
		case Opt_fault_injection:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
#ifdef CONFIG_F2FS_FAULT_INJECTION
			f2fs_build_fault_attr(sbi, arg);
			set_opt(sbi, FAULT_INJECTION);
#else
			f2fs_msg(sb, KERN_INFO,
				"FAULT_INJECTION was not selected");
#endif
			break;
		case Opt_lazytime:
			sb->s_flags |= MS_LAZYTIME;
			break;
		case Opt_nolazytime:
			sb->s_flags &= ~MS_LAZYTIME;
			break;
#ifdef CONFIG_QUOTA
		case Opt_quota:
		case Opt_usrquota:
			set_opt(sbi, USRQUOTA);
			break;
		case Opt_grpquota:
			set_opt(sbi, GRPQUOTA);
			break;
		case Opt_prjquota:
			set_opt(sbi, PRJQUOTA);
			break;
		case Opt_usrjquota:
			ret = f2fs_set_qf_name(sb, USRQUOTA, &args[0]);
			if (ret)
				return ret;
			break;
		case Opt_grpjquota:
			ret = f2fs_set_qf_name(sb, GRPQUOTA, &args[0]);
			if (ret)
				return ret;
			break;
		case Opt_prjjquota:
			ret = f2fs_set_qf_name(sb, PRJQUOTA, &args[0]);
			if (ret)
				return ret;
			break;
		case Opt_offusrjquota:
			ret = f2fs_clear_qf_name(sb, USRQUOTA);
			if (ret)
				return ret;
			break;
		case Opt_offgrpjquota:
			ret = f2fs_clear_qf_name(sb, GRPQUOTA);
			if (ret)
				return ret;
			break;
		case Opt_offprjjquota:
			ret = f2fs_clear_qf_name(sb, PRJQUOTA);
			if (ret)
				return ret;
			break;
		case Opt_jqfmt_vfsold:
			sbi->s_jquota_fmt = QFMT_VFS_OLD;
			break;
		case Opt_jqfmt_vfsv0:
			sbi->s_jquota_fmt = QFMT_VFS_V0;
			break;
		case Opt_jqfmt_vfsv1:
			sbi->s_jquota_fmt = QFMT_VFS_V1;
			break;
		case Opt_noquota:
			clear_opt(sbi, QUOTA);
			clear_opt(sbi, USRQUOTA);
			clear_opt(sbi, GRPQUOTA);
			clear_opt(sbi, PRJQUOTA);
			break;
#else
		case Opt_quota:
		case Opt_usrquota:
		case Opt_grpquota:
		case Opt_prjquota:
		case Opt_usrjquota:
		case Opt_grpjquota:
		case Opt_prjjquota:
		case Opt_offusrjquota:
		case Opt_offgrpjquota:
		case Opt_offprjjquota:
		case Opt_jqfmt_vfsold:
		case Opt_jqfmt_vfsv0:
		case Opt_jqfmt_vfsv1:
		case Opt_noquota:
			f2fs_msg(sb, KERN_INFO,
					"quota operations not supported");
			break;
#endif
		default:
			f2fs_msg(sb, KERN_ERR,
				"Unrecognized mount option \"%s\" or missing value",
				p);
			return -EINVAL;
		}
	}
#ifdef CONFIG_QUOTA
	if (f2fs_check_quota_options(sbi))
		return -EINVAL;
#endif

	if (F2FS_IO_SIZE_BITS(sbi) && !test_opt(sbi, LFS)) {
		f2fs_msg(sb, KERN_ERR,
				"Should set mode=lfs with %uKB-sized IO",
				F2FS_IO_SIZE_KB(sbi));
		return -EINVAL;
	}
	return 0;
}

static struct inode *f2fs_alloc_inode(struct super_block *sb)
{
	struct f2fs_inode_info *fi;

	fi = kmem_cache_alloc(f2fs_inode_cachep, GFP_F2FS_ZERO);
	if (!fi)
		return NULL;

	init_once((void *) fi);

	/* Initialize f2fs-specific inode info */
	fi->vfs_inode.i_version = 1;
	atomic_set(&fi->dirty_pages, 0);
	fi->i_current_depth = 1;
	fi->i_advise = 0;
	init_rwsem(&fi->i_sem);
	INIT_LIST_HEAD(&fi->dirty_list);
	INIT_LIST_HEAD(&fi->gdirty_list);
	INIT_LIST_HEAD(&fi->inmem_pages);
	mutex_init(&fi->inmem_lock);
	init_rwsem(&fi->dio_rwsem[READ]);
	init_rwsem(&fi->dio_rwsem[WRITE]);
	init_rwsem(&fi->i_mmap_sem);

#ifdef CONFIG_QUOTA
	memset(&fi->i_dquot, 0, sizeof(fi->i_dquot));
	fi->i_reserved_quota = 0;
#endif
	/* Will be used by directory only */
	fi->i_dir_level = F2FS_SB(sb)->dir_level;

	return &fi->vfs_inode;
}

static int f2fs_drop_inode(struct inode *inode)
{
	int ret;
	/*
	 * This is to avoid a deadlock condition like below.
	 * writeback_single_inode(inode)
	 *  - f2fs_write_data_page
	 *    - f2fs_gc -> iput -> evict
	 *       - inode_wait_for_writeback(inode)
	 */
	if ((!inode_unhashed(inode) && inode->i_state & I_SYNC)) {
		if (!inode->i_nlink && !is_bad_inode(inode)) {
			/* to avoid evict_inode call simultaneously */
			atomic_inc(&inode->i_count);
			spin_unlock(&inode->i_lock);

			/* some remaining atomic pages should be discarded */
			if (f2fs_is_atomic_file(inode))
				drop_inmem_pages(inode);

			/* should remain fi->extent_tree for writepage */
			f2fs_destroy_extent_node(inode);

			sb_start_intwrite(inode->i_sb);
			f2fs_i_size_write(inode, 0);

			if (F2FS_HAS_BLOCKS(inode))
				f2fs_truncate(inode);

			sb_end_intwrite(inode->i_sb);

			fscrypt_put_encryption_info(inode, NULL);
			spin_lock(&inode->i_lock);
			atomic_dec(&inode->i_count);
		}
		trace_f2fs_drop_inode(inode, 0);
		return 0;
	}
	ret = generic_drop_inode(inode);
	trace_f2fs_drop_inode(inode, ret);
	return ret;
}

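/*
 * Mark an inode dirty for checkpoint; with @sync set, the inode is also
 * queued on the global dirty list and counted as F2FS_DIRTY_IMETA.
 * Returns 1 if the inode was already dirty, 0 otherwise.
 */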
int f2fs_inode_dirtied(struct inode *inode, bool sync)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret = 0;

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
		ret = 1;
	} else {
		set_inode_flag(inode, FI_DIRTY_INODE);
		stat_inc_dirty_inode(sbi, DIRTY_META);
	}
	if (sync && list_empty(&F2FS_I(inode)->gdirty_list)) {
		list_add_tail(&F2FS_I(inode)->gdirty_list,
				&sbi->inode_list[DIRTY_META]);
		inc_page_count(sbi, F2FS_DIRTY_IMETA);
	}
	spin_unlock(&sbi->inode_lock[DIRTY_META]);
	return ret;
}

void f2fs_inode_synced(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	if (!is_inode_flag_set(inode, FI_DIRTY_INODE)) {
		spin_unlock(&sbi->inode_lock[DIRTY_META]);
		return;
	}
	if (!list_empty(&F2FS_I(inode)->gdirty_list)) {
		list_del_init(&F2FS_I(inode)->gdirty_list);
		dec_page_count(sbi, F2FS_DIRTY_IMETA);
	}
	clear_inode_flag(inode, FI_DIRTY_INODE);
	clear_inode_flag(inode, FI_AUTO_RECOVER);
	stat_dec_dirty_inode(F2FS_I_SB(inode), DIRTY_META);
	spin_unlock(&sbi->inode_lock[DIRTY_META]);
}

/*
 * f2fs_dirty_inode() is called from __mark_inode_dirty()
 *
 * We should call set_dirty_inode to write the dirty inode through write_inode.
 */
static void f2fs_dirty_inode(struct inode *inode, int flags)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		return;

	if (flags == I_DIRTY_TIME)
		return;

	if (is_inode_flag_set(inode, FI_AUTO_RECOVER))
		clear_inode_flag(inode, FI_AUTO_RECOVER);

	f2fs_inode_dirtied(inode, false);
}

static void f2fs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode));
}

static void f2fs_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, f2fs_i_callback);
}

static void destroy_percpu_info(struct f2fs_sb_info *sbi)
{
	percpu_counter_destroy(&sbi->alloc_valid_block_count);
	percpu_counter_destroy(&sbi->total_valid_inode_count);
}

static void destroy_device_list(struct f2fs_sb_info *sbi)
{
	int i;

	for (i = 0; i < sbi->s_ndevs; i++) {
		blkdev_put(FDEV(i).bdev, FMODE_EXCL);
#ifdef CONFIG_BLK_DEV_ZONED
		kfree(FDEV(i).blkz_type);
#endif
	}
	kfree(sbi->devs);
}

static void f2fs_put_super(struct super_block *sb)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int i;

	f2fs_quota_off_umount(sb);

	/* prevent remaining shrinker jobs */
	mutex_lock(&sbi->umount_mutex);

	/*
	 * We don't need to do checkpoint when superblock is clean.
	 * But if the previous checkpoint was not done by umount, it needs to
	 * do a clean checkpoint again.
	 */
	if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
			!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
		struct cp_control cpc = {
			.reason = CP_UMOUNT,
		};
		write_checkpoint(sbi, &cpc);
	}

	/* be sure to wait for any on-going discard commands */
	f2fs_wait_discard_bios(sbi);

	if (f2fs_discard_en(sbi) && !sbi->discard_blks) {
		struct cp_control cpc = {
			.reason = CP_UMOUNT | CP_TRIMMED,
		};
		write_checkpoint(sbi, &cpc);
	}

	/* write_checkpoint can update stat information */
	f2fs_destroy_stats(sbi);

	/*
	 * normally superblock is clean, so we need to release this.
	 * In addition, EIO will skip doing the checkpoint, so we need this
	 * as well.
	 */
	release_ino_entry(sbi, true);

	f2fs_leave_shrinker(sbi);
	mutex_unlock(&sbi->umount_mutex);

	/* in our cp_error case, we can wait for any writeback page */
	f2fs_flush_merged_writes(sbi);

	iput(sbi->node_inode);
	iput(sbi->meta_inode);

	/* destroy f2fs internal modules */
	destroy_node_manager(sbi);
	destroy_segment_manager(sbi);

	kfree(sbi->ckpt);

	f2fs_unregister_sysfs(sbi);

	sb->s_fs_info = NULL;
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi->raw_super);

	destroy_device_list(sbi);
	mempool_destroy(sbi->write_io_dummy);
#ifdef CONFIG_QUOTA
	for (i = 0; i < MAXQUOTAS; i++)
		kfree(sbi->s_qf_names[i]);
#endif
	destroy_percpu_info(sbi);
	for (i = 0; i < NR_PAGE_TYPE; i++)
		kfree(sbi->write_io[i]);
	kfree(sbi);
}

int f2fs_sync_fs(struct super_block *sb, int sync)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int err = 0;

	trace_f2fs_sync_fs(sb, sync);

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		return -EAGAIN;

	if (sync) {
		struct cp_control cpc;

		cpc.reason = __get_cp_reason(sbi);

		mutex_lock(&sbi->gc_mutex);
		err = write_checkpoint(sbi, &cpc);
		mutex_unlock(&sbi->gc_mutex);
	}
	f2fs_trace_ios(NULL, 1);

	return err;
}

static int f2fs_freeze(struct super_block *sb)
{
	if (f2fs_readonly(sb))
		return 0;

	/* IO error happened before */
	if (unlikely(f2fs_cp_error(F2FS_SB(sb))))
		return -EIO;

	/* must be clean, since sync_filesystem() was already called */
	if (is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY))
		return -EINVAL;
	return 0;
}

static int f2fs_unfreeze(struct super_block *sb)
{
	return 0;
}

#ifdef CONFIG_QUOTA
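/*
 * Clamp the statfs block and inode counters to the project quota limits
 * so statfs() under a project-quota directory reports quota-limited
 * free space and inode counts.
 */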
static int f2fs_statfs_project(struct super_block *sb,
				kprojid_t projid, struct kstatfs *buf)
{
	struct kqid qid;
	struct dquot *dquot;
	u64 limit;
	u64 curblock;

	qid = make_kqid_projid(projid);
	dquot = dqget(sb, qid);
	if (IS_ERR(dquot))
		return PTR_ERR(dquot);
	spin_lock(&dq_data_lock);

	limit = (dquot->dq_dqb.dqb_bsoftlimit ?
		 dquot->dq_dqb.dqb_bsoftlimit :
		 dquot->dq_dqb.dqb_bhardlimit) >> sb->s_blocksize_bits;
	if (limit && buf->f_blocks > limit) {
		curblock = dquot->dq_dqb.dqb_curspace >> sb->s_blocksize_bits;
		buf->f_blocks = limit;
		buf->f_bfree = buf->f_bavail =
			(buf->f_blocks > curblock) ?
			 (buf->f_blocks - curblock) : 0;
	}

	limit = dquot->dq_dqb.dqb_isoftlimit ?
		dquot->dq_dqb.dqb_isoftlimit :
		dquot->dq_dqb.dqb_ihardlimit;
	if (limit && buf->f_files > limit) {
		buf->f_files = limit;
		buf->f_ffree =
			(buf->f_files > dquot->dq_dqb.dqb_curinodes) ?
			 (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;
	}

	spin_unlock(&dq_data_lock);
	dqput(dquot);
	return 0;
}
#endif

static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
	block_t total_count, user_block_count, start_count, ovp_count;
	u64 avail_node_count;

	total_count = le64_to_cpu(sbi->raw_super->block_count);
	user_block_count = sbi->user_block_count;
	start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr);
	ovp_count = SM_I(sbi)->ovp_segments << sbi->log_blocks_per_seg;
	buf->f_type = F2FS_SUPER_MAGIC;
	buf->f_bsize = sbi->blocksize;

	buf->f_blocks = total_count - start_count;
	buf->f_bfree = user_block_count - valid_user_blocks(sbi) + ovp_count;
	buf->f_bavail = user_block_count - valid_user_blocks(sbi) -
						sbi->reserved_blocks;

	avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;

	if (avail_node_count > user_block_count) {
		buf->f_files = user_block_count;
		buf->f_ffree = buf->f_bavail;
	} else {
		buf->f_files = avail_node_count;
		buf->f_ffree = min(avail_node_count - valid_node_count(sbi),
					buf->f_bavail);
	}

	buf->f_namelen = F2FS_NAME_LEN;
	buf->f_fsid.val[0] = (u32)id;
	buf->f_fsid.val[1] = (u32)(id >> 32);

#ifdef CONFIG_QUOTA
	if (is_inode_flag_set(dentry->d_inode, FI_PROJ_INHERIT) &&
			sb_has_quota_limits_enabled(sb, PRJQUOTA)) {
		f2fs_statfs_project(sb, F2FS_I(dentry->d_inode)->i_projid, buf);
	}
#endif
	return 0;
}

static inline void f2fs_show_quota_options(struct seq_file *seq,
					   struct super_block *sb)
{
#ifdef CONFIG_QUOTA
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (sbi->s_jquota_fmt) {
		char *fmtname = "";

		switch (sbi->s_jquota_fmt) {
		case QFMT_VFS_OLD:
			fmtname = "vfsold";
			break;
		case QFMT_VFS_V0:
			fmtname = "vfsv0";
			break;
		case QFMT_VFS_V1:
			fmtname = "vfsv1";
			break;
		}
		seq_printf(seq, ",jqfmt=%s", fmtname);
	}

	if (sbi->s_qf_names[USRQUOTA])
		seq_show_option(seq, "usrjquota", sbi->s_qf_names[USRQUOTA]);

	if (sbi->s_qf_names[GRPQUOTA])
		seq_show_option(seq, "grpjquota", sbi->s_qf_names[GRPQUOTA]);

	if (sbi->s_qf_names[PRJQUOTA])
		seq_show_option(seq, "prjjquota", sbi->s_qf_names[PRJQUOTA]);
#endif
}

static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
{
	struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);

	if (!f2fs_readonly(sbi->sb) && test_opt(sbi, BG_GC)) {
		if (test_opt(sbi, FORCE_FG_GC))
			seq_printf(seq, ",background_gc=%s", "sync");
		else
			seq_printf(seq, ",background_gc=%s", "on");
	} else {
		seq_printf(seq, ",background_gc=%s", "off");
	}
	if (test_opt(sbi, DISABLE_ROLL_FORWARD))
		seq_puts(seq, ",disable_roll_forward");
	if (test_opt(sbi, DISCARD))
		seq_puts(seq, ",discard");
	if (test_opt(sbi, NOHEAP))
		seq_puts(seq, ",no_heap");
	else
		seq_puts(seq, ",heap");
#ifdef CONFIG_F2FS_FS_XATTR
	if (test_opt(sbi, XATTR_USER))
		seq_puts(seq, ",user_xattr");
	else
		seq_puts(seq, ",nouser_xattr");
	if (test_opt(sbi, INLINE_XATTR))
		seq_puts(seq, ",inline_xattr");
	else
		seq_puts(seq, ",noinline_xattr");
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	if (test_opt(sbi, POSIX_ACL))
		seq_puts(seq, ",acl");
	else
		seq_puts(seq, ",noacl");
#endif
	if (test_opt(sbi, DISABLE_EXT_IDENTIFY))
		seq_puts(seq, ",disable_ext_identify");
	if (test_opt(sbi, INLINE_DATA))
		seq_puts(seq, ",inline_data");
	else
		seq_puts(seq, ",noinline_data");
	if (test_opt(sbi, INLINE_DENTRY))
		seq_puts(seq, ",inline_dentry");
	else
		seq_puts(seq, ",noinline_dentry");
	if (!f2fs_readonly(sbi->sb) && test_opt(sbi, FLUSH_MERGE))
		seq_puts(seq, ",flush_merge");
	if (test_opt(sbi, NOBARRIER))
		seq_puts(seq, ",nobarrier");
	if (test_opt(sbi, FASTBOOT))
		seq_puts(seq, ",fastboot");
	if (test_opt(sbi, EXTENT_CACHE))
		seq_puts(seq, ",extent_cache");
	else
		seq_puts(seq, ",noextent_cache");
	if (test_opt(sbi, DATA_FLUSH))
		seq_puts(seq, ",data_flush");

	seq_puts(seq, ",mode=");
	if (test_opt(sbi, ADAPTIVE))
		seq_puts(seq, "adaptive");
	else if (test_opt(sbi, LFS))
		seq_puts(seq, "lfs");
	seq_printf(seq, ",active_logs=%u", sbi->active_logs);
	if (F2FS_IO_SIZE_BITS(sbi))
		seq_printf(seq, ",io_size=%uKB", F2FS_IO_SIZE_KB(sbi));
#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (test_opt(sbi, FAULT_INJECTION))
		seq_printf(seq, ",fault_injection=%u",
				sbi->fault_info.inject_rate);
#endif
#ifdef CONFIG_QUOTA
	if (test_opt(sbi, QUOTA))
		seq_puts(seq, ",quota");
	if (test_opt(sbi, USRQUOTA))
		seq_puts(seq, ",usrquota");
	if (test_opt(sbi, GRPQUOTA))
		seq_puts(seq, ",grpquota");
	if (test_opt(sbi, PRJQUOTA))
		seq_puts(seq, ",prjquota");
#endif
	f2fs_show_quota_options(seq, sbi->sb);

	return 0;
}

static void default_options(struct f2fs_sb_info *sbi)
{
	/* init some FS parameters */
	sbi->active_logs = NR_CURSEG_TYPE;

	set_opt(sbi, BG_GC);
	set_opt(sbi, INLINE_XATTR);
	set_opt(sbi, INLINE_DATA);
	set_opt(sbi, INLINE_DENTRY);
	set_opt(sbi, EXTENT_CACHE);
	set_opt(sbi, NOHEAP);
	sbi->sb->s_flags |= MS_LAZYTIME;
	set_opt(sbi, FLUSH_MERGE);
	if (f2fs_sb_mounted_blkzoned(sbi->sb)) {
		set_opt_mode(sbi, F2FS_MOUNT_LFS);
		set_opt(sbi, DISCARD);
	} else {
		set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
	}

#ifdef CONFIG_F2FS_FS_XATTR
	set_opt(sbi, XATTR_USER);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	set_opt(sbi, POSIX_ACL);
#endif

#ifdef CONFIG_F2FS_FAULT_INJECTION
	f2fs_build_fault_attr(sbi, 0);
#endif
}

static int f2fs_remount(struct super_block *sb, int *flags, char *data)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct f2fs_mount_info org_mount_opt;
	unsigned long old_sb_flags;
	int err, active_logs;
	bool need_restart_gc = false;
	bool need_stop_gc = false;
	bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE);
#ifdef CONFIG_F2FS_FAULT_INJECTION
	struct f2fs_fault_info ffi = sbi->fault_info;
#endif
#ifdef CONFIG_QUOTA
	int s_jquota_fmt;
	char *s_qf_names[MAXQUOTAS];
	int i, j;
#endif

	/*
	 * Save the old mount options in case we
	 * need to restore them.
	 */
	org_mount_opt = sbi->mount_opt;
	old_sb_flags = sb->s_flags;
	active_logs = sbi->active_logs;

#ifdef CONFIG_QUOTA
	s_jquota_fmt = sbi->s_jquota_fmt;
	for (i = 0; i < MAXQUOTAS; i++) {
		if (sbi->s_qf_names[i]) {
			s_qf_names[i] = kstrdup(sbi->s_qf_names[i],
							 GFP_KERNEL);
			if (!s_qf_names[i]) {
				for (j = 0; j < i; j++)
					kfree(s_qf_names[j]);
				return -ENOMEM;
			}
		} else {
			s_qf_names[i] = NULL;
		}
	}
#endif

	/* recover superblocks we couldn't write due to previous RO mount */
	if (!(*flags & MS_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
		err = f2fs_commit_super(sbi, false);
		f2fs_msg(sb, KERN_INFO,
			"Try to recover all the superblocks, ret: %d", err);
		if (!err)
			clear_sbi_flag(sbi, SBI_NEED_SB_WRITE);
	}

	default_options(sbi);

	/* parse mount options */
	err = parse_options(sb, data);
	if (err)
		goto restore_opts;

	/*
	 * Previous and new state of filesystem is RO,
	 * so skip checking GC and FLUSH_MERGE conditions.
	 */
	if (f2fs_readonly(sb) && (*flags & MS_RDONLY))
		goto skip;

	if (!f2fs_readonly(sb) && (*flags & MS_RDONLY)) {
		err = dquot_suspend(sb, -1);
		if (err < 0)
			goto restore_opts;
	} else {
		/* dquot_resume needs RW */
		sb->s_flags &= ~MS_RDONLY;
		dquot_resume(sb, -1);
	}

	/* disallow enable/disable extent_cache dynamically */
	if (no_extent_cache == !!test_opt(sbi, EXTENT_CACHE)) {
		err = -EINVAL;
		f2fs_msg(sbi->sb, KERN_WARNING,
				"switch extent_cache option is not allowed");
		goto restore_opts;
	}

	/*
	 * We stop the GC thread if FS is mounted as RO
	 * or if background_gc = off is passed in mount
	 * option. Also sync the filesystem.
	 */
	if ((*flags & MS_RDONLY) || !test_opt(sbi, BG_GC)) {
		if (sbi->gc_thread) {
			stop_gc_thread(sbi);
			need_restart_gc = true;
		}
	} else if (!sbi->gc_thread) {
		err = start_gc_thread(sbi);
		if (err)
			goto restore_opts;
		need_stop_gc = true;
	}

	if (*flags & MS_RDONLY) {
		writeback_inodes_sb(sb, WB_REASON_SYNC);
		sync_inodes_sb(sb);

		set_sbi_flag(sbi, SBI_IS_DIRTY);
		set_sbi_flag(sbi, SBI_IS_CLOSE);
		f2fs_sync_fs(sb, 1);
		clear_sbi_flag(sbi, SBI_IS_CLOSE);
	}

	/*
	 * We stop the issue flush thread if FS is mounted as RO
	 * or if flush_merge is not passed in mount option.
	 */
	if ((*flags & MS_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
		clear_opt(sbi, FLUSH_MERGE);
		destroy_flush_cmd_control(sbi, false);
	} else {
		err = create_flush_cmd_control(sbi);
		if (err)
			goto restore_gc;
	}
skip:
#ifdef CONFIG_QUOTA
	/* Release old quota file names */
	for (i = 0; i < MAXQUOTAS; i++)
		kfree(s_qf_names[i]);
#endif
	/* Update the POSIXACL Flag */
	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);

	return 0;
restore_gc:
	if (need_restart_gc) {
		if (start_gc_thread(sbi))
			f2fs_msg(sbi->sb, KERN_WARNING,
				"background gc thread has stopped");
	} else if (need_stop_gc) {
		stop_gc_thread(sbi);
	}
restore_opts:
#ifdef CONFIG_QUOTA
	sbi->s_jquota_fmt = s_jquota_fmt;
	for (i = 0; i < MAXQUOTAS; i++) {
		kfree(sbi->s_qf_names[i]);
		sbi->s_qf_names[i] = s_qf_names[i];
	}
#endif
	sbi->mount_opt = org_mount_opt;
	sbi->active_logs = active_logs;
	sb->s_flags = old_sb_flags;
#ifdef CONFIG_F2FS_FAULT_INJECTION
	sbi->fault_info = ffi;
#endif
	return err;
}

#ifdef CONFIG_QUOTA
/* Read data from quotafile */
static ssize_t f2fs_quota_read(struct super_block *sb, int type, char *data,
			       size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	struct address_space *mapping = inode->i_mapping;
	block_t blkidx = F2FS_BYTES_TO_BLK(off);
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t toread;
	loff_t i_size = i_size_read(inode);
	struct page *page;
	char *kaddr;

	if (off > i_size)
		return 0;

	if (off + len > i_size)
		len = i_size - off;
	toread = len;
	while (toread > 0) {
		tocopy = min_t(unsigned long, sb->s_blocksize - offset, toread);
repeat:
		page = read_mapping_page(mapping, blkidx, NULL);
		if (IS_ERR(page))
			return PTR_ERR(page);

		lock_page(page);

		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			return -EIO;
		}

		kaddr = kmap_atomic(page);
		memcpy(data, kaddr + offset, tocopy);
		kunmap_atomic(kaddr);
		f2fs_put_page(page, 1);

		offset = 0;
		toread -= tocopy;
		data += tocopy;
		blkidx++;
	}
	return len;
}

/* Write to quotafile */
static ssize_t f2fs_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	struct address_space *mapping = inode->i_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	int offset = off & (sb->s_blocksize - 1);
	size_t towrite = len;
	struct page *page;
	char *kaddr;
	int err = 0;
	int tocopy;

	while (towrite > 0) {
		tocopy = min_t(unsigned long, sb->s_blocksize - offset,
								towrite);

		err = a_ops->write_begin(NULL, mapping, off, tocopy, 0,
							&page, NULL);
		if (unlikely(err))
			break;

		kaddr = kmap_atomic(page);
		memcpy(kaddr + offset, data, tocopy);
		kunmap_atomic(kaddr);
		flush_dcache_page(page);

		a_ops->write_end(NULL, mapping, off, tocopy, tocopy,
						page, NULL);
		offset = 0;
		towrite -= tocopy;
		off += tocopy;
		data += tocopy;
		cond_resched();
	}

	if (len == towrite)
		return 0;
	inode->i_version++;
	inode->i_mtime = inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return len - towrite;
}

static struct dquot **f2fs_get_dquots(struct inode *inode)
{
	return F2FS_I(inode)->i_dquot;
}

static qsize_t *f2fs_get_reserved_space(struct inode *inode)
{
	return &F2FS_I(inode)->i_reserved_quota;
}

static int f2fs_quota_on_mount(struct f2fs_sb_info *sbi, int type)
{
	return dquot_quota_on_mount(sbi->sb, sbi->s_qf_names[type],
					sbi->s_jquota_fmt, type);
}

void f2fs_enable_quota_files(struct f2fs_sb_info *sbi)
{
	int i, ret;

	for (i = 0; i < MAXQUOTAS; i++) {
		if (sbi->s_qf_names[i]) {
			ret = f2fs_quota_on_mount(sbi, i);
			if (ret < 0)
				f2fs_msg(sbi->sb, KERN_ERR,
					"Cannot turn on journaled "
					"quota: error %d", ret);
		}
	}
}

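/*
 * Write back all dirty dquots of the given type and drop the quota
 * file's page cache so userspace sees the updated on-disk state.
 */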
Chao Yu09c3a722017-07-09 00:13:07 +08001425static int f2fs_quota_sync(struct super_block *sb, int type)
1426{
1427 struct quota_info *dqopt = sb_dqopt(sb);
1428 int cnt;
1429 int ret;
1430
1431 ret = dquot_writeback_dquots(sb, type);
1432 if (ret)
1433 return ret;
1434
1435 /*
1436 * Now when everything is written we can discard the pagecache so
1437 * that userspace sees the changes.
1438 */
1439 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1440 if (type != -1 && cnt != type)
1441 continue;
1442 if (!sb_has_quota_active(sb, cnt))
1443 continue;
1444
1445 ret = filemap_write_and_wait(dqopt->files[cnt]->i_mapping);
1446 if (ret)
1447 return ret;
1448
1449 inode_lock(dqopt->files[cnt]);
1450 truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
1451 inode_unlock(dqopt->files[cnt]);
1452 }
1453 return 0;
1454}
1455
1456static int f2fs_quota_on(struct super_block *sb, int type, int format_id,
1457 struct path *path)
1458{
1459 struct inode *inode;
1460 int err;
1461
Chao Yuf701f1c2017-08-07 16:37:59 +08001462 err = f2fs_quota_sync(sb, type);
Chao Yu09c3a722017-07-09 00:13:07 +08001463 if (err)
1464 return err;
1465
1466 err = dquot_quota_on(sb, type, format_id, path);
1467 if (err)
1468 return err;
1469
1470 inode = d_inode(path->dentry);
1471
1472 inode_lock(inode);
1473 F2FS_I(inode)->i_flags |= FS_NOATIME_FL | FS_IMMUTABLE_FL;
1474 inode_set_flags(inode, S_NOATIME | S_IMMUTABLE,
1475 S_NOATIME | S_IMMUTABLE);
1476 inode_unlock(inode);
1477 f2fs_mark_inode_dirty_sync(inode, false);
1478
1479 return 0;
1480}
1481
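/*
 * Turn quotas off for one type and clear the NOATIME/IMMUTABLE flags
 * that f2fs_quota_on() set on the quota file.
 */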
1482static int f2fs_quota_off(struct super_block *sb, int type)
1483{
1484 struct inode *inode = sb_dqopt(sb)->files[type];
1485 int err;
1486
1487 if (!inode || !igrab(inode))
1488 return dquot_quota_off(sb, type);
1489
Chao Yuf701f1c2017-08-07 16:37:59 +08001490 f2fs_quota_sync(sb, type);
Chao Yu09c3a722017-07-09 00:13:07 +08001491
1492 err = dquot_quota_off(sb, type);
1493 if (err)
1494 goto out_put;
1495
1496 inode_lock(inode);
1497 F2FS_I(inode)->i_flags &= ~(FS_NOATIME_FL | FS_IMMUTABLE_FL);
1498 inode_set_flags(inode, 0, S_NOATIME | S_IMMUTABLE);
1499 inode_unlock(inode);
1500 f2fs_mark_inode_dirty_sync(inode, false);
1501out_put:
1502 iput(inode);
1503 return err;
1504}
1505
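/* Turn off quotas for every type, e.g. when the filesystem is unmounted. */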
Chao Yu41ad73f2017-08-08 10:54:31 +08001506void f2fs_quota_off_umount(struct super_block *sb)
Chao Yu09c3a722017-07-09 00:13:07 +08001507{
1508 int type;
1509
1510 for (type = 0; type < MAXQUOTAS; type++)
1511 f2fs_quota_off(sb, type);
1512}
1513
Chao Yu5647b302017-07-26 00:01:41 +08001514int f2fs_get_projid(struct inode *inode, kprojid_t *projid)
1515{
1516 *projid = F2FS_I(inode)->i_projid;
1517 return 0;
1518}
1519
Chao Yu09c3a722017-07-09 00:13:07 +08001520static const struct dquot_operations f2fs_quota_operations = {
1521 .get_reserved_space = f2fs_get_reserved_space,
1522 .write_dquot = dquot_commit,
1523 .acquire_dquot = dquot_acquire,
1524 .release_dquot = dquot_release,
1525 .mark_dirty = dquot_mark_dquot_dirty,
1526 .write_info = dquot_commit_info,
1527 .alloc_dquot = dquot_alloc,
1528 .destroy_dquot = dquot_destroy,
Chao Yu5647b302017-07-26 00:01:41 +08001529 .get_projid = f2fs_get_projid,
Chao Yu09c3a722017-07-09 00:13:07 +08001530 .get_next_id = dquot_get_next_id,
1531};
1532
1533static const struct quotactl_ops f2fs_quotactl_ops = {
1534 .quota_on = f2fs_quota_on,
1535 .quota_off = f2fs_quota_off,
1536 .quota_sync = f2fs_quota_sync,
1537 .get_state = dquot_get_state,
1538 .set_info = dquot_set_dqinfo,
1539 .get_dqblk = dquot_get_dqblk,
1540 .set_dqblk = dquot_set_dqblk,
1541 .get_nextdqblk = dquot_get_next_dqblk,
1542};
1543#else
Chao Yu41ad73f2017-08-08 10:54:31 +08001544void f2fs_quota_off_umount(struct super_block *sb)
Chao Yu09c3a722017-07-09 00:13:07 +08001545{
1546}
1547#endif
1548
Arvind Yadav4c1db1a2017-08-31 15:06:24 +05301549static const struct super_operations f2fs_sops = {
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09001550 .alloc_inode = f2fs_alloc_inode,
Jaegeuk Kim531ad7d2013-04-30 11:33:27 +09001551 .drop_inode = f2fs_drop_inode,
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09001552 .destroy_inode = f2fs_destroy_inode,
1553 .write_inode = f2fs_write_inode,
Jaegeuk Kimb3783872013-06-10 09:17:01 +09001554 .dirty_inode = f2fs_dirty_inode,
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09001555 .show_options = f2fs_show_options,
Chao Yu09c3a722017-07-09 00:13:07 +08001556#ifdef CONFIG_QUOTA
1557 .quota_read = f2fs_quota_read,
1558 .quota_write = f2fs_quota_write,
1559 .get_dquots = f2fs_get_dquots,
1560#endif
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09001561 .evict_inode = f2fs_evict_inode,
1562 .put_super = f2fs_put_super,
1563 .sync_fs = f2fs_sync_fs,
Changman Leed6212a52013-01-29 18:30:07 +09001564 .freeze_fs = f2fs_freeze,
1565 .unfreeze_fs = f2fs_unfreeze,
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09001566 .statfs = f2fs_statfs,
Namjae Jeon696c0182013-06-16 09:48:48 +09001567 .remount_fs = f2fs_remount,
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09001568};
1569
Jaegeuk Kim0b81d072015-05-15 16:26:10 -07001570#ifdef CONFIG_F2FS_FS_ENCRYPTION
1571static int f2fs_get_context(struct inode *inode, void *ctx, size_t len)
1572{
1573 return f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
1574 F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
1575 ctx, len, NULL);
1576}
1577
1578static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len,
1579 void *fs_data)
1580{
1581 return f2fs_setxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
1582 F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
1583 ctx, len, fs_data, XATTR_CREATE);
1584}
1585
1586static unsigned f2fs_max_namelen(struct inode *inode)
1587{
1588 return S_ISLNK(inode->i_mode) ?
1589 inode->i_sb->s_blocksize : F2FS_NAME_LEN;
1590}
1591
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07001592static const struct fscrypt_operations f2fs_cryptops = {
1593 .key_prefix = "f2fs:",
Jaegeuk Kim0b81d072015-05-15 16:26:10 -07001594 .get_context = f2fs_get_context,
1595 .set_context = f2fs_set_context,
1596 .is_encrypted = f2fs_encrypted_inode,
1597 .empty_dir = f2fs_empty_dir,
1598 .max_namelen = f2fs_max_namelen,
1599};
1600#else
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07001601static const struct fscrypt_operations f2fs_cryptops = {
Jaegeuk Kim0b81d072015-05-15 16:26:10 -07001602 .is_encrypted = f2fs_encrypted_inode,
1603};
1604#endif
1605
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09001606static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
1607 u64 ino, u32 generation)
1608{
1609 struct f2fs_sb_info *sbi = F2FS_SB(sb);
1610 struct inode *inode;
1611
Chao Yud6b7d4b2014-06-12 13:23:41 +08001612 if (check_nid_range(sbi, ino))
Chao Yu910bb122014-03-12 17:08:36 +08001613 return ERR_PTR(-ESTALE);
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09001614
1615 /*
1616 * f2fs_iget isn't quite right if the inode is currently unallocated!
1617 * However f2fs_iget currently does appropriate checks to handle stale
1618 * inodes so everything is OK.
1619 */
1620 inode = f2fs_iget(sb, ino);
1621 if (IS_ERR(inode))
1622 return ERR_CAST(inode);
Jaegeuk Kim6bacf522013-12-06 15:00:58 +09001623 if (unlikely(generation && inode->i_generation != generation)) {
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09001624 /* we didn't find the right inode.. */
1625 iput(inode);
1626 return ERR_PTR(-ESTALE);
1627 }
1628 return inode;
1629}
1630
1631static struct dentry *f2fs_fh_to_dentry(struct super_block *sb, struct fid *fid,
1632 int fh_len, int fh_type)
1633{
1634 return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
1635 f2fs_nfs_get_inode);
1636}
1637
1638static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid,
1639 int fh_len, int fh_type)
1640{
1641 return generic_fh_to_parent(sb, fid, fh_len, fh_type,
1642 f2fs_nfs_get_inode);
1643}
1644
1645static const struct export_operations f2fs_export_ops = {
1646 .fh_to_dentry = f2fs_fh_to_dentry,
1647 .fh_to_parent = f2fs_fh_to_parent,
1648 .get_parent = f2fs_get_parent,
1649};
1650
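/* Maximum file size, in blocks, reachable through the inode's node tree. */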
Chao Yue0afc4d2015-12-31 14:35:37 +08001651static loff_t max_file_blocks(void)
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09001652{
Chao Yufbcf9312017-07-19 00:19:06 +08001653 loff_t result = 0;
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09001654 loff_t leaf_count = ADDRS_PER_BLOCK;
1655
Chao Yufbcf9312017-07-19 00:19:06 +08001656 /*
1657	 * note: previously, result was equal to (DEF_ADDRS_PER_INODE -
1658	 * F2FS_INLINE_XATTR_ADDRS), but now f2fs tries to reserve more
1659	 * space in inode.i_addr, so it is safer to initialize result
1660	 * to zero.
1661 */
1662
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09001663 /* two direct node blocks */
1664 result += (leaf_count * 2);
1665
1666 /* two indirect node blocks */
1667 leaf_count *= NIDS_PER_BLOCK;
1668 result += (leaf_count * 2);
1669
1670 /* one double indirect node block */
1671 leaf_count *= NIDS_PER_BLOCK;
1672 result += leaf_count;
1673
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09001674 return result;
1675}
1676
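/*
 * Copy the in-memory superblock (if given) into the buffer_head and
 * write it out synchronously with preflush + FUA.
 */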
Jaegeuk Kimfd694732016-03-20 15:33:20 -07001677static int __f2fs_commit_super(struct buffer_head *bh,
1678 struct f2fs_super_block *super)
Chao Yu9a59b622015-12-15 09:58:18 +08001679{
Jaegeuk Kimfd694732016-03-20 15:33:20 -07001680 lock_buffer(bh);
1681 if (super)
1682 memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
1683 set_buffer_uptodate(bh);
1684 set_buffer_dirty(bh);
1685 unlock_buffer(bh);
1686
1687	/* it's a rare case, so we can do FUA all the time */
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07001688 return __sync_dirty_buffer(bh, REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
Jaegeuk Kimfd694732016-03-20 15:33:20 -07001689}
1690
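/*
 * Check that the CP/SIT/NAT/SSA/MAIN areas described by the raw
 * superblock are contiguous and fit within the device. Returns true on
 * inconsistency; an oversized segment count is fixed up in memory (and
 * on disk when the device is writable).
 */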
Jaegeuk Kimdf728b02016-03-23 17:05:27 -07001691static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
Jaegeuk Kimfd694732016-03-20 15:33:20 -07001692 struct buffer_head *bh)
1693{
1694 struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
1695 (bh->b_data + F2FS_SUPER_OFFSET);
Jaegeuk Kimdf728b02016-03-23 17:05:27 -07001696 struct super_block *sb = sbi->sb;
Chao Yu9a59b622015-12-15 09:58:18 +08001697 u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
1698 u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
1699 u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
1700 u32 nat_blkaddr = le32_to_cpu(raw_super->nat_blkaddr);
1701 u32 ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
1702 u32 main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
1703 u32 segment_count_ckpt = le32_to_cpu(raw_super->segment_count_ckpt);
1704 u32 segment_count_sit = le32_to_cpu(raw_super->segment_count_sit);
1705 u32 segment_count_nat = le32_to_cpu(raw_super->segment_count_nat);
1706 u32 segment_count_ssa = le32_to_cpu(raw_super->segment_count_ssa);
1707 u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
1708 u32 segment_count = le32_to_cpu(raw_super->segment_count);
1709 u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
Jaegeuk Kimfd694732016-03-20 15:33:20 -07001710 u64 main_end_blkaddr = main_blkaddr +
1711 (segment_count_main << log_blocks_per_seg);
1712 u64 seg_end_blkaddr = segment0_blkaddr +
1713 (segment_count << log_blocks_per_seg);
Chao Yu9a59b622015-12-15 09:58:18 +08001714
1715 if (segment0_blkaddr != cp_blkaddr) {
1716 f2fs_msg(sb, KERN_INFO,
1717 "Mismatch start address, segment0(%u) cp_blkaddr(%u)",
1718 segment0_blkaddr, cp_blkaddr);
1719 return true;
1720 }
1721
1722 if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) !=
1723 sit_blkaddr) {
1724 f2fs_msg(sb, KERN_INFO,
1725 "Wrong CP boundary, start(%u) end(%u) blocks(%u)",
1726 cp_blkaddr, sit_blkaddr,
1727 segment_count_ckpt << log_blocks_per_seg);
1728 return true;
1729 }
1730
1731 if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) !=
1732 nat_blkaddr) {
1733 f2fs_msg(sb, KERN_INFO,
1734 "Wrong SIT boundary, start(%u) end(%u) blocks(%u)",
1735 sit_blkaddr, nat_blkaddr,
1736 segment_count_sit << log_blocks_per_seg);
1737 return true;
1738 }
1739
1740 if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) !=
1741 ssa_blkaddr) {
1742 f2fs_msg(sb, KERN_INFO,
1743 "Wrong NAT boundary, start(%u) end(%u) blocks(%u)",
1744 nat_blkaddr, ssa_blkaddr,
1745 segment_count_nat << log_blocks_per_seg);
1746 return true;
1747 }
1748
1749 if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) !=
1750 main_blkaddr) {
1751 f2fs_msg(sb, KERN_INFO,
1752 "Wrong SSA boundary, start(%u) end(%u) blocks(%u)",
1753 ssa_blkaddr, main_blkaddr,
1754 segment_count_ssa << log_blocks_per_seg);
1755 return true;
1756 }
1757
Jaegeuk Kimfd694732016-03-20 15:33:20 -07001758 if (main_end_blkaddr > seg_end_blkaddr) {
Chao Yu9a59b622015-12-15 09:58:18 +08001759 f2fs_msg(sb, KERN_INFO,
Jaegeuk Kimfd694732016-03-20 15:33:20 -07001760 "Wrong MAIN_AREA boundary, start(%u) end(%u) block(%u)",
Chao Yu9a59b622015-12-15 09:58:18 +08001761 main_blkaddr,
Jaegeuk Kimfd694732016-03-20 15:33:20 -07001762 segment0_blkaddr +
1763 (segment_count << log_blocks_per_seg),
Chao Yu9a59b622015-12-15 09:58:18 +08001764 segment_count_main << log_blocks_per_seg);
1765 return true;
Jaegeuk Kimfd694732016-03-20 15:33:20 -07001766 } else if (main_end_blkaddr < seg_end_blkaddr) {
1767 int err = 0;
1768 char *res;
Chao Yu9a59b622015-12-15 09:58:18 +08001769
Jaegeuk Kimfd694732016-03-20 15:33:20 -07001770 /* fix in-memory information all the time */
1771 raw_super->segment_count = cpu_to_le32((main_end_blkaddr -
1772 segment0_blkaddr) >> log_blocks_per_seg);
1773
1774 if (f2fs_readonly(sb) || bdev_read_only(sb->s_bdev)) {
Jaegeuk Kimdf728b02016-03-23 17:05:27 -07001775 set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
Jaegeuk Kimfd694732016-03-20 15:33:20 -07001776 res = "internally";
1777 } else {
1778 err = __f2fs_commit_super(bh, NULL);
1779 res = err ? "failed" : "done";
1780 }
1781 f2fs_msg(sb, KERN_INFO,
1782 "Fix alignment : %s, start(%u) end(%u) block(%u)",
1783 res, main_blkaddr,
1784 segment0_blkaddr +
1785 (segment_count << log_blocks_per_seg),
1786 segment_count_main << log_blocks_per_seg);
1787 if (err)
1788 return true;
1789 }
Chao Yu9a59b622015-12-15 09:58:18 +08001790 return false;
1791}
1792
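/*
 * Validate an on-disk superblock candidate: magic number, block and
 * sector sizes, reserved inode numbers, segment count and the area
 * boundaries. Returns non-zero if the superblock is invalid.
 */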
Jaegeuk Kimdf728b02016-03-23 17:05:27 -07001793static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
Jaegeuk Kimfd694732016-03-20 15:33:20 -07001794 struct buffer_head *bh)
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09001795{
Jaegeuk Kimfd694732016-03-20 15:33:20 -07001796 struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
1797 (bh->b_data + F2FS_SUPER_OFFSET);
Jaegeuk Kimdf728b02016-03-23 17:05:27 -07001798 struct super_block *sb = sbi->sb;
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09001799 unsigned int blocksize;
1800
Namjae Jeona07ef782012-12-30 14:52:05 +09001801 if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
1802 f2fs_msg(sb, KERN_INFO,
1803 "Magic Mismatch, valid(0x%x) - read(0x%x)",
1804 F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09001805 return 1;
Namjae Jeona07ef782012-12-30 14:52:05 +09001806 }
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09001807
majianpeng5c9b4692013-02-01 19:07:57 +08001808 /* Currently, support only 4KB page cache size */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001809 if (F2FS_BLKSIZE != PAGE_SIZE) {
majianpeng5c9b4692013-02-01 19:07:57 +08001810 f2fs_msg(sb, KERN_INFO,
majianpeng14d7e9d2013-02-01 19:07:03 +08001811 "Invalid page_cache_size (%lu), supports only 4KB\n",
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001812 PAGE_SIZE);
majianpeng5c9b4692013-02-01 19:07:57 +08001813 return 1;
1814 }
1815
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09001816 /* Currently, support only 4KB block size */
1817 blocksize = 1 << le32_to_cpu(raw_super->log_blocksize);
majianpeng5c9b4692013-02-01 19:07:57 +08001818 if (blocksize != F2FS_BLKSIZE) {
Namjae Jeona07ef782012-12-30 14:52:05 +09001819 f2fs_msg(sb, KERN_INFO,
1820 "Invalid blocksize (%u), supports only 4KB\n",
1821 blocksize);
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09001822 return 1;
Namjae Jeona07ef782012-12-30 14:52:05 +09001823 }
majianpeng5c9b4692013-02-01 19:07:57 +08001824
Chao Yu9a59b622015-12-15 09:58:18 +08001825 /* check log blocks per segment */
1826 if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) {
1827 f2fs_msg(sb, KERN_INFO,
1828 "Invalid log blocks per segment (%u)\n",
1829 le32_to_cpu(raw_super->log_blocks_per_seg));
1830 return 1;
1831 }
1832
Chao Yu55cf9cb2014-09-15 18:01:10 +08001833 /* Currently, support 512/1024/2048/4096 bytes sector size */
1834 if (le32_to_cpu(raw_super->log_sectorsize) >
1835 F2FS_MAX_LOG_SECTOR_SIZE ||
1836 le32_to_cpu(raw_super->log_sectorsize) <
1837 F2FS_MIN_LOG_SECTOR_SIZE) {
1838 f2fs_msg(sb, KERN_INFO, "Invalid log sectorsize (%u)",
1839 le32_to_cpu(raw_super->log_sectorsize));
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09001840 return 1;
Namjae Jeona07ef782012-12-30 14:52:05 +09001841 }
Chao Yu55cf9cb2014-09-15 18:01:10 +08001842 if (le32_to_cpu(raw_super->log_sectors_per_block) +
1843 le32_to_cpu(raw_super->log_sectorsize) !=
1844 F2FS_MAX_LOG_SECTOR_SIZE) {
1845 f2fs_msg(sb, KERN_INFO,
1846 "Invalid log sectors per block(%u) log sectorsize(%u)",
1847 le32_to_cpu(raw_super->log_sectors_per_block),
1848 le32_to_cpu(raw_super->log_sectorsize));
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09001849 return 1;
Namjae Jeona07ef782012-12-30 14:52:05 +09001850 }
Chao Yu9a59b622015-12-15 09:58:18 +08001851
1852 /* check reserved ino info */
1853 if (le32_to_cpu(raw_super->node_ino) != 1 ||
1854 le32_to_cpu(raw_super->meta_ino) != 2 ||
1855 le32_to_cpu(raw_super->root_ino) != 3) {
1856 f2fs_msg(sb, KERN_INFO,
1857 "Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)",
1858 le32_to_cpu(raw_super->node_ino),
1859 le32_to_cpu(raw_super->meta_ino),
1860 le32_to_cpu(raw_super->root_ino));
1861 return 1;
1862 }
1863
Jin Qian93862952017-04-25 16:28:48 -07001864 if (le32_to_cpu(raw_super->segment_count) > F2FS_MAX_SEGMENT) {
1865 f2fs_msg(sb, KERN_INFO,
1866 "Invalid segment count (%u)",
1867 le32_to_cpu(raw_super->segment_count));
1868 return 1;
1869 }
1870
Chao Yu9a59b622015-12-15 09:58:18 +08001871 /* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
Jaegeuk Kimdf728b02016-03-23 17:05:27 -07001872 if (sanity_check_area_boundary(sbi, bh))
Chao Yu9a59b622015-12-15 09:58:18 +08001873 return 1;
1874
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09001875 return 0;
1876}
1877
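/*
 * Cross-check checkpoint fields against the superblock: meta segment
 * counts, overprovision/reserved segments and the current segment
 * numbers/offsets must all be in range before the checkpoint is used.
 */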
Shawn Lin984ec632016-02-17 11:26:32 +08001878int sanity_check_ckpt(struct f2fs_sb_info *sbi)
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09001879{
1880 unsigned int total, fsmeta;
Jaegeuk Kim577e3492013-01-24 19:56:11 +09001881 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
1882 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07001883 unsigned int ovp_segments, reserved_segments;
Jin Qiand90659f2017-05-15 10:45:08 -07001884 unsigned int main_segs, blocks_per_seg;
1885 int i;
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09001886
1887 total = le32_to_cpu(raw_super->segment_count);
1888 fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
1889 fsmeta += le32_to_cpu(raw_super->segment_count_sit);
1890 fsmeta += le32_to_cpu(raw_super->segment_count_nat);
1891 fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
1892 fsmeta += le32_to_cpu(raw_super->segment_count_ssa);
1893
Jaegeuk Kim6bacf522013-12-06 15:00:58 +09001894 if (unlikely(fsmeta >= total))
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09001895 return 1;
Jaegeuk Kim577e3492013-01-24 19:56:11 +09001896
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07001897 ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
1898 reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
1899
1900 if (unlikely(fsmeta < F2FS_MIN_SEGMENTS ||
1901 ovp_segments == 0 || reserved_segments == 0)) {
1902 f2fs_msg(sbi->sb, KERN_ERR,
1903 "Wrong layout: check mkfs.f2fs version");
1904 return 1;
1905 }
1906
Jin Qiand90659f2017-05-15 10:45:08 -07001907 main_segs = le32_to_cpu(raw_super->segment_count_main);
1908 blocks_per_seg = sbi->blocks_per_seg;
1909
1910 for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
1911 if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
1912 le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg)
1913 return 1;
1914 }
1915 for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
1916 if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs ||
1917 le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg)
1918 return 1;
1919 }
1920
Jaegeuk Kim1e968fd2014-08-11 16:49:25 -07001921 if (unlikely(f2fs_cp_error(sbi))) {
Jaegeuk Kim577e3492013-01-24 19:56:11 +09001922 f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
1923 return 1;
1924 }
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09001925 return 0;
1926}
1927
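/* Cache frequently used superblock fields and reset in-memory state. */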
1928static void init_sb_info(struct f2fs_sb_info *sbi)
1929{
1930 struct f2fs_super_block *raw_super = sbi->raw_super;
Chao Yu06504682017-05-19 23:37:00 +08001931 int i, j;
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09001932
1933 sbi->log_sectors_per_block =
1934 le32_to_cpu(raw_super->log_sectors_per_block);
1935 sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize);
1936 sbi->blocksize = 1 << sbi->log_blocksize;
1937 sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
1938 sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg;
1939 sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
1940 sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
1941 sbi->total_sections = le32_to_cpu(raw_super->section_count);
1942 sbi->total_node_count =
1943 (le32_to_cpu(raw_super->segment_count_nat) / 2)
1944 * sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
1945 sbi->root_ino_num = le32_to_cpu(raw_super->root_ino);
1946 sbi->node_ino_num = le32_to_cpu(raw_super->node_ino);
1947 sbi->meta_ino_num = le32_to_cpu(raw_super->meta_ino);
Jaegeuk Kim5ec4e492013-03-31 13:26:03 +09001948 sbi->cur_victim_sec = NULL_SECNO;
Jaegeuk Kimb1c57c12014-01-08 13:45:08 +09001949 sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09001950
Jaegeuk Kimab9fa662014-02-27 20:09:05 +09001951 sbi->dir_level = DEF_DIR_LEVEL;
Jaegeuk Kim6beceb52016-01-08 15:51:50 -08001952 sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL;
Jaegeuk Kimd0239e12016-01-08 16:57:48 -08001953 sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL;
Chao Yucaf00472015-01-28 17:48:42 +08001954 clear_sbi_flag(sbi, SBI_NEED_FSCK);
Jaegeuk Kim2658e502015-06-19 12:01:21 -07001955
Jaegeuk Kim725ba1a2016-10-20 19:09:57 -07001956 for (i = 0; i < NR_COUNT_TYPE; i++)
1957 atomic_set(&sbi->nr_pages[i], 0);
1958
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07001959 atomic_set(&sbi->wb_sync_req, 0);
1960
Jaegeuk Kim2658e502015-06-19 12:01:21 -07001961 INIT_LIST_HEAD(&sbi->s_list);
1962 mutex_init(&sbi->umount_mutex);
Chao Yu06504682017-05-19 23:37:00 +08001963 for (i = 0; i < NR_PAGE_TYPE - 1; i++)
1964 for (j = HOT; j < NR_TEMP_TYPE; j++)
1965 mutex_init(&sbi->wio_mutex[i][j]);
Chao Yuaaec2b12016-09-20 11:04:18 +08001966 spin_lock_init(&sbi->cp_lock);
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09001967}
1968
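/* Set up the per-CPU counters used for block and inode accounting. */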
Jaegeuk Kim523be8a2016-05-13 12:36:58 -07001969static int init_percpu_info(struct f2fs_sb_info *sbi)
1970{
Jaegeuk Kim725ba1a2016-10-20 19:09:57 -07001971 int err;
Jaegeuk Kim41382ec2016-05-16 11:06:50 -07001972
Jaegeuk Kim513c5f32016-05-16 11:42:32 -07001973 err = percpu_counter_init(&sbi->alloc_valid_block_count, 0, GFP_KERNEL);
1974 if (err)
1975 return err;
1976
1977 return percpu_counter_init(&sbi->total_valid_inode_count, 0,
Jaegeuk Kim41382ec2016-05-16 11:06:50 -07001978 GFP_KERNEL);
Jaegeuk Kim523be8a2016-05-13 12:36:58 -07001979}
1980
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07001981#ifdef CONFIG_BLK_DEV_ZONED
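/*
 * Read the zone layout of a zoned block device and record each zone's
 * type in FDEV(devi).blkz_type.
 */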
1982static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
1983{
1984 struct block_device *bdev = FDEV(devi).bdev;
1985 sector_t nr_sectors = bdev->bd_part->nr_sects;
1986 sector_t sector = 0;
1987 struct blk_zone *zones;
1988 unsigned int i, nr_zones;
1989 unsigned int n = 0;
1990 int err = -EIO;
1991
1992 if (!f2fs_sb_mounted_blkzoned(sbi->sb))
1993 return 0;
1994
1995 if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
1996 SECTOR_TO_BLOCK(bdev_zone_sectors(bdev)))
1997 return -EINVAL;
1998 sbi->blocks_per_blkz = SECTOR_TO_BLOCK(bdev_zone_sectors(bdev));
1999 if (sbi->log_blocks_per_blkz && sbi->log_blocks_per_blkz !=
2000 __ilog2_u32(sbi->blocks_per_blkz))
2001 return -EINVAL;
2002 sbi->log_blocks_per_blkz = __ilog2_u32(sbi->blocks_per_blkz);
2003 FDEV(devi).nr_blkz = SECTOR_TO_BLOCK(nr_sectors) >>
2004 sbi->log_blocks_per_blkz;
2005 if (nr_sectors & (bdev_zone_sectors(bdev) - 1))
2006 FDEV(devi).nr_blkz++;
2007
2008 FDEV(devi).blkz_type = kmalloc(FDEV(devi).nr_blkz, GFP_KERNEL);
2009 if (!FDEV(devi).blkz_type)
2010 return -ENOMEM;
2011
2012#define F2FS_REPORT_NR_ZONES 4096
2013
2014 zones = kcalloc(F2FS_REPORT_NR_ZONES, sizeof(struct blk_zone),
2015 GFP_KERNEL);
2016 if (!zones)
2017 return -ENOMEM;
2018
2019 /* Get block zones type */
2020 while (zones && sector < nr_sectors) {
2021
2022 nr_zones = F2FS_REPORT_NR_ZONES;
2023 err = blkdev_report_zones(bdev, sector,
2024 zones, &nr_zones,
2025 GFP_KERNEL);
2026 if (err)
2027 break;
2028 if (!nr_zones) {
2029 err = -EIO;
2030 break;
2031 }
2032
2033 for (i = 0; i < nr_zones; i++) {
2034 FDEV(devi).blkz_type[n] = zones[i].type;
2035 sector += zones[i].len;
2036 n++;
2037 }
2038 }
2039
2040 kfree(zones);
2041
2042 return err;
2043}
2044#endif
2045
Gu Zheng9076a752013-10-14 18:47:11 +08002046/*
2047 * Read f2fs raw super block.
Shawn Lin2b39e902016-02-17 08:59:01 +08002048 * Since we have two copies of the super block, read both of them
2049 * to get the first valid one. If either one is broken, we pass the
2050 * recovery flag back to the caller.
Gu Zheng9076a752013-10-14 18:47:11 +08002051 */
Jaegeuk Kimdf728b02016-03-23 17:05:27 -07002052static int read_raw_super_block(struct f2fs_sb_info *sbi,
Gu Zheng9076a752013-10-14 18:47:11 +08002053 struct f2fs_super_block **raw_super,
Chao Yue8240f62015-12-15 17:19:26 +08002054 int *valid_super_block, int *recovery)
majianpeng14d7e9d2013-02-01 19:07:03 +08002055{
Jaegeuk Kimdf728b02016-03-23 17:05:27 -07002056 struct super_block *sb = sbi->sb;
Shawn Lin2b39e902016-02-17 08:59:01 +08002057 int block;
Chao Yue8240f62015-12-15 17:19:26 +08002058 struct buffer_head *bh;
Jaegeuk Kimfd694732016-03-20 15:33:20 -07002059 struct f2fs_super_block *super;
hujianyangda554e42015-05-21 14:42:53 +08002060 int err = 0;
majianpeng14d7e9d2013-02-01 19:07:03 +08002061
Yunlei Heb39f0de2015-12-15 17:17:20 +08002062 super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL);
2063 if (!super)
2064 return -ENOMEM;
Shawn Lin2b39e902016-02-17 08:59:01 +08002065
2066 for (block = 0; block < 2; block++) {
2067 bh = sb_bread(sb, block);
2068 if (!bh) {
2069 f2fs_msg(sb, KERN_ERR, "Unable to read %dth superblock",
Gu Zheng9076a752013-10-14 18:47:11 +08002070 block + 1);
Shawn Lin2b39e902016-02-17 08:59:01 +08002071 err = -EIO;
2072 continue;
2073 }
majianpeng14d7e9d2013-02-01 19:07:03 +08002074
Shawn Lin2b39e902016-02-17 08:59:01 +08002075 /* sanity checking of raw super */
Jaegeuk Kimdf728b02016-03-23 17:05:27 -07002076 if (sanity_check_raw_super(sbi, bh)) {
Shawn Lin2b39e902016-02-17 08:59:01 +08002077 f2fs_msg(sb, KERN_ERR,
2078 "Can't find valid F2FS filesystem in %dth superblock",
2079 block + 1);
2080 err = -EINVAL;
2081 brelse(bh);
2082 continue;
2083 }
2084
2085 if (!*raw_super) {
Jaegeuk Kimfd694732016-03-20 15:33:20 -07002086 memcpy(super, bh->b_data + F2FS_SUPER_OFFSET,
2087 sizeof(*super));
Shawn Lin2b39e902016-02-17 08:59:01 +08002088 *valid_super_block = block;
2089 *raw_super = super;
2090 }
Chao Yue8240f62015-12-15 17:19:26 +08002091 brelse(bh);
Shawn Lin2b39e902016-02-17 08:59:01 +08002092 }
2093
2094	/* Fail to read any one of the superblocks */
2095 if (err < 0)
hujianyangda554e42015-05-21 14:42:53 +08002096 *recovery = 1;
hujianyangda554e42015-05-21 14:42:53 +08002097
hujianyangda554e42015-05-21 14:42:53 +08002098 /* No valid superblock */
Shawn Lin2b39e902016-02-17 08:59:01 +08002099 if (!*raw_super)
Yunlei Heb39f0de2015-12-15 17:17:20 +08002100 kfree(super);
Shawn Lin2b39e902016-02-17 08:59:01 +08002101 else
2102 err = 0;
hujianyangda554e42015-05-21 14:42:53 +08002103
Shawn Lin2b39e902016-02-17 08:59:01 +08002104 return err;
majianpeng14d7e9d2013-02-01 19:07:03 +08002105}
2106
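/*
 * Write the superblock copies to disk: the backup copy first, then the
 * currently valid one unless we are only recovering the damaged copy.
 */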
Jaegeuk Kimfd694732016-03-20 15:33:20 -07002107int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
Jaegeuk Kim26d815ad2015-04-20 18:49:51 -07002108{
Jaegeuk Kim5d909cd2015-12-07 10:16:58 -08002109 struct buffer_head *bh;
Jaegeuk Kim26d815ad2015-04-20 18:49:51 -07002110 int err;
2111
Jaegeuk Kimdf728b02016-03-23 17:05:27 -07002112 if ((recover && f2fs_readonly(sbi->sb)) ||
2113 bdev_read_only(sbi->sb->s_bdev)) {
2114 set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
Jaegeuk Kimf2353d72016-03-23 10:42:01 -07002115 return -EROFS;
Jaegeuk Kimdf728b02016-03-23 17:05:27 -07002116 }
Jaegeuk Kimf2353d72016-03-23 10:42:01 -07002117
Jaegeuk Kimfd694732016-03-20 15:33:20 -07002118 /* write back-up superblock first */
2119 bh = sb_getblk(sbi->sb, sbi->valid_super_block ? 0: 1);
Jaegeuk Kim5d909cd2015-12-07 10:16:58 -08002120 if (!bh)
2121 return -EIO;
Jaegeuk Kimfd694732016-03-20 15:33:20 -07002122 err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
Jaegeuk Kim5d909cd2015-12-07 10:16:58 -08002123 brelse(bh);
Chao Yuc5bda1c2015-06-08 13:28:03 +08002124
2125 /* if we are in recovery path, skip writing valid superblock */
2126 if (recover || err)
Jaegeuk Kim5d909cd2015-12-07 10:16:58 -08002127 return err;
Jaegeuk Kim26d815ad2015-04-20 18:49:51 -07002128
Chao Yue8240f62015-12-15 17:19:26 +08002129 /* write current valid superblock */
Jaegeuk Kimfd694732016-03-20 15:33:20 -07002130 bh = sb_getblk(sbi->sb, sbi->valid_super_block);
2131 if (!bh)
2132 return -EIO;
2133 err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
2134 brelse(bh);
2135 return err;
Jaegeuk Kim26d815ad2015-04-20 18:49:51 -07002136}
2137
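/*
 * Open the block device(s) backing this filesystem: either the single
 * device holding the superblock, or every entry of the multi-device
 * table, gathering zone information where applicable.
 */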
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07002138static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
2139{
2140 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
2141 unsigned int max_devices = MAX_DEVICES;
2142 int i;
2143
2144 /* Initialize single device information */
2145 if (!RDEV(0).path[0]) {
2146#ifdef CONFIG_BLK_DEV_ZONED
Jaegeuk Kimdbf05322017-07-10 19:16:28 -07002147 if (!bdev_is_zoned(sbi->sb->s_bdev))
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07002148 return 0;
2149 max_devices = 1;
2150#else
2151 return 0;
2152#endif
2153 }
2154
2155 /*
2156 * Initialize multiple devices information, or single
2157 * zoned block device information.
2158 */
2159 sbi->devs = kcalloc(max_devices, sizeof(struct f2fs_dev_info),
2160 GFP_KERNEL);
2161 if (!sbi->devs)
2162 return -ENOMEM;
2163
2164 for (i = 0; i < max_devices; i++) {
2165
2166 if (i > 0 && !RDEV(i).path[0])
2167 break;
2168
2169 if (max_devices == 1) {
2170 /* Single zoned block device mount */
2171 FDEV(0).bdev =
2172 blkdev_get_by_dev(sbi->sb->s_bdev->bd_dev,
2173 sbi->sb->s_mode, sbi->sb->s_type);
2174 } else {
2175 /* Multi-device mount */
2176 memcpy(FDEV(i).path, RDEV(i).path, MAX_PATH_LEN);
2177 FDEV(i).total_segments =
2178 le32_to_cpu(RDEV(i).total_segments);
2179 if (i == 0) {
2180 FDEV(i).start_blk = 0;
2181 FDEV(i).end_blk = FDEV(i).start_blk +
2182 (FDEV(i).total_segments <<
2183 sbi->log_blocks_per_seg) - 1 +
2184 le32_to_cpu(raw_super->segment0_blkaddr);
2185 } else {
2186 FDEV(i).start_blk = FDEV(i - 1).end_blk + 1;
2187 FDEV(i).end_blk = FDEV(i).start_blk +
2188 (FDEV(i).total_segments <<
2189 sbi->log_blocks_per_seg) - 1;
2190 }
2191 FDEV(i).bdev = blkdev_get_by_path(FDEV(i).path,
2192 sbi->sb->s_mode, sbi->sb->s_type);
2193 }
2194 if (IS_ERR(FDEV(i).bdev))
2195 return PTR_ERR(FDEV(i).bdev);
2196
2197 /* to release errored devices */
2198 sbi->s_ndevs = i + 1;
2199
2200#ifdef CONFIG_BLK_DEV_ZONED
2201 if (bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HM &&
2202 !f2fs_sb_mounted_blkzoned(sbi->sb)) {
2203 f2fs_msg(sbi->sb, KERN_ERR,
2204 "Zoned block device feature not enabled\n");
2205 return -EINVAL;
2206 }
2207 if (bdev_zoned_model(FDEV(i).bdev) != BLK_ZONED_NONE) {
2208 if (init_blkz_info(sbi, i)) {
2209 f2fs_msg(sbi->sb, KERN_ERR,
2210 "Failed to initialize F2FS blkzone information");
2211 return -EINVAL;
2212 }
2213 if (max_devices == 1)
2214 break;
2215 f2fs_msg(sbi->sb, KERN_INFO,
2216 "Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: %s)",
2217 i, FDEV(i).path,
2218 FDEV(i).total_segments,
2219 FDEV(i).start_blk, FDEV(i).end_blk,
2220 bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HA ?
2221 "Host-aware" : "Host-managed");
2222 continue;
2223 }
2224#endif
2225 f2fs_msg(sbi->sb, KERN_INFO,
2226 "Mount Device [%2d]: %20s, %8u, %8x - %8x",
2227 i, FDEV(i).path,
2228 FDEV(i).total_segments,
2229 FDEV(i).start_blk, FDEV(i).end_blk);
2230 }
2231 f2fs_msg(sbi->sb, KERN_INFO,
2232 "IO Block Size: %8d KB", F2FS_IO_SIZE_KB(sbi));
2233 return 0;
2234}
2235
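/*
 * Mount-time entry point: read and validate the superblock, build the
 * segment/node managers and other in-memory structures, recover fsync'ed
 * data if needed, and finally set up the root dentry.
 */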
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002236static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
2237{
2238 struct f2fs_sb_info *sbi;
hujianyangda554e42015-05-21 14:42:53 +08002239 struct f2fs_super_block *raw_super;
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002240 struct inode *root;
Sheng Yong99e3e852016-05-11 17:08:14 +08002241 int err;
Chao Yu2adc3502015-03-16 21:08:44 +08002242 bool retry = true, need_fsck = false;
Jaegeuk Kimdabc4a52015-01-23 17:41:39 -08002243 char *options = NULL;
Chao Yue8240f62015-12-15 17:19:26 +08002244 int recovery, i, valid_super_block;
Shuoran Liu8f1dbbb2016-01-27 09:57:30 +08002245 struct curseg_info *seg_i;
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002246
Jaegeuk Kimed2e6212014-08-08 15:37:41 -07002247try_onemore:
hujianyangda554e42015-05-21 14:42:53 +08002248 err = -EINVAL;
2249 raw_super = NULL;
Chao Yue8240f62015-12-15 17:19:26 +08002250 valid_super_block = -1;
hujianyangda554e42015-05-21 14:42:53 +08002251 recovery = 0;
2252
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002253 /* allocate memory for f2fs-specific super block info */
2254 sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
2255 if (!sbi)
2256 return -ENOMEM;
2257
Jaegeuk Kimdf728b02016-03-23 17:05:27 -07002258 sbi->sb = sb;
2259
Keith Mok43b65732016-03-02 12:04:24 -08002260 /* Load the checksum driver */
2261 sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0);
2262 if (IS_ERR(sbi->s_chksum_driver)) {
2263 f2fs_msg(sb, KERN_ERR, "Cannot load crc32 driver.");
2264 err = PTR_ERR(sbi->s_chksum_driver);
2265 sbi->s_chksum_driver = NULL;
2266 goto free_sbi;
2267 }
2268
Namjae Jeonff9234a2013-01-12 14:41:13 +09002269 /* set a block size */
Jaegeuk Kim6bacf522013-12-06 15:00:58 +09002270 if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
Namjae Jeona07ef782012-12-30 14:52:05 +09002271 f2fs_msg(sb, KERN_ERR, "unable to set blocksize");
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002272 goto free_sbi;
Namjae Jeona07ef782012-12-30 14:52:05 +09002273 }
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002274
Jaegeuk Kimdf728b02016-03-23 17:05:27 -07002275 err = read_raw_super_block(sbi, &raw_super, &valid_super_block,
Chao Yue8240f62015-12-15 17:19:26 +08002276 &recovery);
Gu Zheng9076a752013-10-14 18:47:11 +08002277 if (err)
2278 goto free_sbi;
2279
Gu Zheng5fb08372013-06-07 14:16:53 +08002280 sb->s_fs_info = sbi;
Jaegeuk Kim52763a42016-06-13 09:47:48 -07002281 sbi->raw_super = raw_super;
2282
Chao Yu43101252017-07-31 20:19:09 +08002283 /* precompute checksum seed for metadata */
2284 if (f2fs_sb_has_inode_chksum(sb))
2285 sbi->s_chksum_seed = f2fs_chksum(sbi, ~0, raw_super->uuid,
2286 sizeof(raw_super->uuid));
2287
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07002288 /*
2289 * The BLKZONED feature indicates that the drive was formatted with
2290 * zone alignment optimization. This is optional for host-aware
2291 * devices, but mandatory for host-managed zoned block devices.
2292 */
2293#ifndef CONFIG_BLK_DEV_ZONED
2294 if (f2fs_sb_mounted_blkzoned(sb)) {
2295 f2fs_msg(sb, KERN_ERR,
2296 "Zoned block device support is not enabled\n");
Chao Yu3b0de562017-06-12 09:44:27 +08002297 err = -EOPNOTSUPP;
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07002298 goto free_sb_buf;
2299 }
2300#endif
Yunlei He498c5e92015-05-07 18:11:37 +08002301 default_options(sbi);
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002302 /* parse mount options */
Jaegeuk Kimdabc4a52015-01-23 17:41:39 -08002303 options = kstrdup((const char *)data, GFP_KERNEL);
2304 if (data && !options) {
2305 err = -ENOMEM;
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002306 goto free_sb_buf;
Jaegeuk Kimdabc4a52015-01-23 17:41:39 -08002307 }
2308
2309 err = parse_options(sb, options);
2310 if (err)
2311 goto free_options;
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002312
Chao Yue0afc4d2015-12-31 14:35:37 +08002313 sbi->max_file_blocks = max_file_blocks();
2314 sb->s_maxbytes = sbi->max_file_blocks <<
2315 le32_to_cpu(raw_super->log_blocksize);
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002316 sb->s_max_links = F2FS_LINK_MAX;
2317 get_random_bytes(&sbi->s_next_generation, sizeof(u32));
2318
Chao Yu09c3a722017-07-09 00:13:07 +08002319#ifdef CONFIG_QUOTA
2320 sb->dq_op = &f2fs_quota_operations;
2321 sb->s_qcop = &f2fs_quotactl_ops;
Chao Yu5647b302017-07-26 00:01:41 +08002322 sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
Chao Yu09c3a722017-07-09 00:13:07 +08002323#endif
2324
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002325 sb->s_op = &f2fs_sops;
Jaegeuk Kim0b81d072015-05-15 16:26:10 -07002326 sb->s_cop = &f2fs_cryptops;
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002327 sb->s_xattr = f2fs_xattr_handlers;
2328 sb->s_export_op = &f2fs_export_ops;
2329 sb->s_magic = F2FS_SUPER_MAGIC;
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002330 sb->s_time_gran = 1;
2331 sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
2332 (test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07002333 memcpy(&sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002334
2335 /* init f2fs-specific super block info */
Chao Yue8240f62015-12-15 17:19:26 +08002336 sbi->valid_super_block = valid_super_block;
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002337 mutex_init(&sbi->gc_mutex);
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002338 mutex_init(&sbi->cp_mutex);
Chao Yub3582c62014-07-03 18:58:39 +08002339 init_rwsem(&sbi->node_write);
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07002340 init_rwsem(&sbi->node_change);
Jaegeuk Kim315df832015-08-11 12:45:39 -07002341
2342 /* disallow all the data/node/meta page writes */
2343 set_sbi_flag(sbi, SBI_POR_DOING);
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002344 spin_lock_init(&sbi->stat_lock);
Jaegeuk Kim971767c2013-11-18 17:16:17 +09002345
Chao Yuc0fe4882017-08-02 23:21:48 +08002346 /* init iostat info */
2347 spin_lock_init(&sbi->iostat_lock);
2348 sbi->iostat_enable = false;
2349
Jaegeuk Kim458e6192013-12-11 13:54:01 +09002350 for (i = 0; i < NR_PAGE_TYPE; i++) {
Jaegeuk Kimc4127262017-05-10 11:18:25 -07002351 int n = (i == META) ? 1: NR_TEMP_TYPE;
2352 int j;
2353
2354 sbi->write_io[i] = kmalloc(n * sizeof(struct f2fs_bio_info),
2355 GFP_KERNEL);
Christophe JAILLETd0917a42017-06-11 09:21:11 +02002356 if (!sbi->write_io[i]) {
2357 err = -ENOMEM;
Jaegeuk Kimc4127262017-05-10 11:18:25 -07002358 goto free_options;
Christophe JAILLETd0917a42017-06-11 09:21:11 +02002359 }
Jaegeuk Kimc4127262017-05-10 11:18:25 -07002360
2361 for (j = HOT; j < n; j++) {
2362 init_rwsem(&sbi->write_io[i][j].io_rwsem);
2363 sbi->write_io[i][j].sbi = sbi;
2364 sbi->write_io[i][j].bio = NULL;
Chao Yuc52dc0f2017-05-19 23:37:01 +08002365 spin_lock_init(&sbi->write_io[i][j].io_lock);
2366 INIT_LIST_HEAD(&sbi->write_io[i][j].io_list);
Jaegeuk Kimc4127262017-05-10 11:18:25 -07002367 }
Jaegeuk Kim458e6192013-12-11 13:54:01 +09002368 }
Jaegeuk Kim971767c2013-11-18 17:16:17 +09002369
Jaegeuk Kimb873b792016-08-04 11:38:25 -07002370 init_rwsem(&sbi->cp_rwsem);
Changman Leefb51b5e2013-11-07 12:48:25 +09002371 init_waitqueue_head(&sbi->cp_wait);
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002372 init_sb_info(sbi);
2373
Jaegeuk Kim523be8a2016-05-13 12:36:58 -07002374 err = init_percpu_info(sbi);
2375 if (err)
2376 goto free_options;
2377
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07002378 if (F2FS_IO_SIZE(sbi) > 1) {
2379 sbi->write_io_dummy =
2380 mempool_create_page_pool(2 * (F2FS_IO_SIZE(sbi) - 1), 0);
Chao Yu3b0de562017-06-12 09:44:27 +08002381 if (!sbi->write_io_dummy) {
2382 err = -ENOMEM;
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07002383 goto free_options;
Chao Yu3b0de562017-06-12 09:44:27 +08002384 }
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07002385 }
2386
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002387 /* get an inode for meta space */
2388 sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
2389 if (IS_ERR(sbi->meta_inode)) {
Namjae Jeona07ef782012-12-30 14:52:05 +09002390 f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002391 err = PTR_ERR(sbi->meta_inode);
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07002392 goto free_io_dummy;
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002393 }
2394
2395 err = get_valid_checkpoint(sbi);
Namjae Jeona07ef782012-12-30 14:52:05 +09002396 if (err) {
2397 f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint");
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002398 goto free_meta_inode;
Namjae Jeona07ef782012-12-30 14:52:05 +09002399 }
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002400
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07002401 /* Initialize device list */
2402 err = f2fs_scan_devices(sbi);
2403 if (err) {
2404 f2fs_msg(sb, KERN_ERR, "Failed to find devices");
2405 goto free_devices;
2406 }
2407
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002408 sbi->total_valid_node_count =
2409 le32_to_cpu(sbi->ckpt->valid_node_count);
Jaegeuk Kim513c5f32016-05-16 11:42:32 -07002410 percpu_counter_set(&sbi->total_valid_inode_count,
2411 le32_to_cpu(sbi->ckpt->valid_inode_count));
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002412 sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
2413 sbi->total_valid_block_count =
2414 le64_to_cpu(sbi->ckpt->valid_block_count);
2415 sbi->last_valid_block_count = sbi->total_valid_block_count;
Chao Yu026bd9d2017-06-26 16:24:41 +08002416 sbi->reserved_blocks = 0;
Jaegeuk Kim41382ec2016-05-16 11:06:50 -07002417
Chao Yuc227f912015-12-16 13:09:20 +08002418 for (i = 0; i < NR_INODE_TYPE; i++) {
2419 INIT_LIST_HEAD(&sbi->inode_list[i]);
2420 spin_lock_init(&sbi->inode_lock[i]);
2421 }
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002422
Chao Yu1dcc3362015-02-05 17:57:31 +08002423 init_extent_cache_info(sbi);
2424
Jaegeuk Kim6451e042014-07-25 15:47:17 -07002425 init_ino_entry_info(sbi);
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002426
2427 /* setup f2fs internal modules */
2428 err = build_segment_manager(sbi);
Namjae Jeona07ef782012-12-30 14:52:05 +09002429 if (err) {
2430 f2fs_msg(sb, KERN_ERR,
2431 "Failed to initialize F2FS segment manager");
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002432 goto free_sm;
Namjae Jeona07ef782012-12-30 14:52:05 +09002433 }
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002434 err = build_node_manager(sbi);
Namjae Jeona07ef782012-12-30 14:52:05 +09002435 if (err) {
2436 f2fs_msg(sb, KERN_ERR,
2437 "Failed to initialize F2FS node manager");
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002438 goto free_nm;
Namjae Jeona07ef782012-12-30 14:52:05 +09002439 }
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002440
Shuoran Liu8f1dbbb2016-01-27 09:57:30 +08002441 /* For write statistics */
2442 if (sb->s_bdev->bd_part)
2443 sbi->sectors_written_start =
2444 (u64)part_stat_read(sb->s_bdev->bd_part, sectors[1]);
2445
2446 /* Read accumulated write IO statistics if exists */
2447 seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
2448 if (__exist_node_summaries(sbi))
2449 sbi->kbytes_written =
Shuoran Liub2dde6f2016-03-29 18:00:15 +08002450 le64_to_cpu(seg_i->journal->info.kbytes_written);
Shuoran Liu8f1dbbb2016-01-27 09:57:30 +08002451
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002452 build_gc_manager(sbi);
2453
2454 /* get an inode for node space */
2455 sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
2456 if (IS_ERR(sbi->node_inode)) {
Namjae Jeona07ef782012-12-30 14:52:05 +09002457 f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002458 err = PTR_ERR(sbi->node_inode);
2459 goto free_nm;
2460 }
2461
Jaegeuk Kim2658e502015-06-19 12:01:21 -07002462 f2fs_join_shrinker(sbi);
2463
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07002464 err = f2fs_build_stats(sbi);
2465 if (err)
2466 goto free_nm;
2467
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002468 /* read root inode and dentry */
2469 root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
2470 if (IS_ERR(root)) {
Namjae Jeona07ef782012-12-30 14:52:05 +09002471 f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002472 err = PTR_ERR(root);
2473 goto free_node_inode;
2474 }
Chao Yu8f99a942013-11-28 15:43:43 +08002475 if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
Chao Yu9d847952014-07-25 12:55:09 +08002476 iput(root);
Chao Yu8f99a942013-11-28 15:43:43 +08002477 err = -EINVAL;
Chao Yu9d847952014-07-25 12:55:09 +08002478 goto free_node_inode;
Chao Yu8f99a942013-11-28 15:43:43 +08002479 }
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002480
2481 sb->s_root = d_make_root(root); /* allocate root dentry */
2482 if (!sb->s_root) {
2483 err = -ENOMEM;
2484 goto free_root_inode;
2485 }
2486
Jaegeuk Kim883d5532017-07-26 11:24:13 -07002487 err = f2fs_register_sysfs(sbi);
Namjae Jeonb59d0ba2013-08-04 23:09:40 +09002488 if (err)
Chao Yu17a3fb52017-06-14 17:39:46 +08002489 goto free_root_inode;
Namjae Jeonb59d0ba2013-08-04 23:09:40 +09002490
Chao Yu41ad73f2017-08-08 10:54:31 +08002491	/* if there are any orphan nodes, free them */
2492 err = recover_orphan_inodes(sbi);
2493 if (err)
2494 goto free_sysfs;
2495
Jaegeuk Kim6437d1b2014-02-19 18:23:32 +09002496 /* recover fsynced data */
2497 if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
Jaegeuk Kim081d78c2015-01-23 19:16:59 -08002498 /*
2499 * mount should be failed, when device has readonly mode, and
2500 * previous checkpoint was not done by clean system shutdown.
2501 */
2502 if (bdev_read_only(sb->s_bdev) &&
Chao Yuaaec2b12016-09-20 11:04:18 +08002503 !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
Jaegeuk Kim081d78c2015-01-23 19:16:59 -08002504 err = -EROFS;
Chao Yu41ad73f2017-08-08 10:54:31 +08002505 goto free_meta;
Jaegeuk Kim081d78c2015-01-23 19:16:59 -08002506 }
Chao Yu2adc3502015-03-16 21:08:44 +08002507
2508 if (need_fsck)
2509 set_sbi_flag(sbi, SBI_NEED_FSCK);
2510
Jaegeuk Kima468f0e2016-09-19 17:55:10 -07002511 if (!retry)
2512 goto skip_recovery;
2513
Jaegeuk Kim6781eab2016-03-23 16:12:58 -07002514 err = recover_fsync_data(sbi, false);
2515 if (err < 0) {
Chao Yu2adc3502015-03-16 21:08:44 +08002516 need_fsck = true;
Jaegeuk Kim6437d1b2014-02-19 18:23:32 +09002517 f2fs_msg(sb, KERN_ERR,
Sheng Yong99e3e852016-05-11 17:08:14 +08002518 "Cannot recover all fsync data errno=%d", err);
Chao Yu41ad73f2017-08-08 10:54:31 +08002519 goto free_meta;
Jaegeuk Kimed2e6212014-08-08 15:37:41 -07002520 }
Jaegeuk Kim6781eab2016-03-23 16:12:58 -07002521 } else {
2522 err = recover_fsync_data(sbi, true);
2523
2524 if (!f2fs_readonly(sb) && err > 0) {
2525 err = -EINVAL;
2526 f2fs_msg(sb, KERN_ERR,
2527 "Need to recover fsync data");
Chao Yu17a3fb52017-06-14 17:39:46 +08002528 goto free_sysfs;
Jaegeuk Kim6781eab2016-03-23 16:12:58 -07002529 }
Jaegeuk Kim6437d1b2014-02-19 18:23:32 +09002530 }
Jaegeuk Kima468f0e2016-09-19 17:55:10 -07002531skip_recovery:
Jaegeuk Kim315df832015-08-11 12:45:39 -07002532 /* recover_fsync_data() cleared this already */
2533 clear_sbi_flag(sbi, SBI_POR_DOING);
Jaegeuk Kim6437d1b2014-02-19 18:23:32 +09002534
2535 /*
2536 * If filesystem is not mounted as read-only then
2537 * do start the gc_thread.
2538 */
Chao Yu6c029932014-11-18 11:16:01 +08002539 if (test_opt(sbi, BG_GC) && !f2fs_readonly(sb)) {
Jaegeuk Kim6437d1b2014-02-19 18:23:32 +09002540 /* After POR, we can run background GC thread.*/
2541 err = start_gc_thread(sbi);
2542 if (err)
Chao Yu41ad73f2017-08-08 10:54:31 +08002543 goto free_meta;
Jaegeuk Kim6437d1b2014-02-19 18:23:32 +09002544 }
Jaegeuk Kimdabc4a52015-01-23 17:41:39 -08002545 kfree(options);
hujianyangda554e42015-05-21 14:42:53 +08002546
2547 /* recover broken superblock */
Jaegeuk Kimf2353d72016-03-23 10:42:01 -07002548 if (recovery) {
Chao Yu41214b32016-02-22 18:33:20 +08002549 err = f2fs_commit_super(sbi, true);
2550 f2fs_msg(sb, KERN_INFO,
Sheng Yong99e3e852016-05-11 17:08:14 +08002551 "Try to recover %dth superblock, ret: %d",
Chao Yu41214b32016-02-22 18:33:20 +08002552 sbi->valid_super_block ? 1 : 2, err);
hujianyangda554e42015-05-21 14:42:53 +08002553 }
2554
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07002555 f2fs_msg(sbi->sb, KERN_NOTICE, "Mounted with checkpoint version = %llx",
2556 cur_cp_version(F2FS_CKPT(sbi)));
Jaegeuk Kim6beceb52016-01-08 15:51:50 -08002557 f2fs_update_time(sbi, CP_TIME);
Jaegeuk Kimd0239e12016-01-08 16:57:48 -08002558 f2fs_update_time(sbi, REQ_TIME);
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002559 return 0;
Jaegeuk Kim6437d1b2014-02-19 18:23:32 +09002560
Chao Yu41ad73f2017-08-08 10:54:31 +08002561free_meta:
Jaegeuk Kim0f18b462016-05-20 11:10:10 -07002562 f2fs_sync_inode_meta(sbi);
Chao Yu41ad73f2017-08-08 10:54:31 +08002563 /*
2564	 * Some dirty meta pages can be produced when recover_orphan_inodes()
2565	 * fails with EIO. Then, iput(node_inode) can trigger balance_fs_bg()
2566 * followed by write_checkpoint() through f2fs_write_node_pages(), which
2567 * falls into an infinite loop in sync_meta_pages().
2568 */
2569 truncate_inode_pages_final(META_MAPPING(sbi));
2570free_sysfs:
Jaegeuk Kim883d5532017-07-26 11:24:13 -07002571 f2fs_unregister_sysfs(sbi);
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002572free_root_inode:
2573 dput(sb->s_root);
2574 sb->s_root = NULL;
2575free_node_inode:
Jaegeuk Kimbb5dada2016-09-23 11:29:00 -07002576 truncate_inode_pages_final(NODE_MAPPING(sbi));
Jaegeuk Kim2658e502015-06-19 12:01:21 -07002577 mutex_lock(&sbi->umount_mutex);
Jaegeuk Kimd41065e2016-09-21 11:39:42 -07002578 release_ino_entry(sbi, true);
Jaegeuk Kim2658e502015-06-19 12:01:21 -07002579 f2fs_leave_shrinker(sbi);
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002580 iput(sbi->node_inode);
Jaegeuk Kim2658e502015-06-19 12:01:21 -07002581 mutex_unlock(&sbi->umount_mutex);
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07002582 f2fs_destroy_stats(sbi);
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002583free_nm:
2584 destroy_node_manager(sbi);
2585free_sm:
2586 destroy_segment_manager(sbi);
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07002587free_devices:
2588 destroy_device_list(sbi);
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002589 kfree(sbi->ckpt);
2590free_meta_inode:
2591 make_bad_inode(sbi->meta_inode);
2592 iput(sbi->meta_inode);
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07002593free_io_dummy:
2594 mempool_destroy(sbi->write_io_dummy);
Jaegeuk Kimdabc4a52015-01-23 17:41:39 -08002595free_options:
Jaegeuk Kimc4127262017-05-10 11:18:25 -07002596 for (i = 0; i < NR_PAGE_TYPE; i++)
2597 kfree(sbi->write_io[i]);
Jaegeuk Kim523be8a2016-05-13 12:36:58 -07002598 destroy_percpu_info(sbi);
Chao Yu41ad73f2017-08-08 10:54:31 +08002599#ifdef CONFIG_QUOTA
2600 for (i = 0; i < MAXQUOTAS; i++)
2601 kfree(sbi->s_qf_names[i]);
2602#endif
Jaegeuk Kimdabc4a52015-01-23 17:41:39 -08002603 kfree(options);
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002604free_sb_buf:
Yunlei Heb39f0de2015-12-15 17:17:20 +08002605 kfree(raw_super);
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002606free_sbi:
Keith Mok43b65732016-03-02 12:04:24 -08002607 if (sbi->s_chksum_driver)
2608 crypto_free_shash(sbi->s_chksum_driver);
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002609 kfree(sbi);
Jaegeuk Kimed2e6212014-08-08 15:37:41 -07002610
2611 /* give only one another chance */
2612 if (retry) {
Taehee Yoo9df47ba2015-04-13 21:48:06 +09002613 retry = false;
Jaegeuk Kimed2e6212014-08-08 15:37:41 -07002614 shrink_dcache_sb(sb);
2615 goto try_onemore;
2616 }
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002617 return err;
2618}
2619
2620static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,
2621 const char *dev_name, void *data)
2622{
2623 return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super);
2624}
2625
Jaegeuk Kim30a55372015-01-14 16:34:24 -08002626static void kill_f2fs_super(struct super_block *sb)
2627{
Chao Yud9d85cc2017-06-29 23:17:45 +08002628 if (sb->s_root) {
Chao Yucaf00472015-01-28 17:48:42 +08002629 set_sbi_flag(F2FS_SB(sb), SBI_IS_CLOSE);
Chao Yud9d85cc2017-06-29 23:17:45 +08002630 stop_gc_thread(F2FS_SB(sb));
2631 stop_discard_thread(F2FS_SB(sb));
2632 }
Jaegeuk Kim30a55372015-01-14 16:34:24 -08002633 kill_block_super(sb);
2634}
2635
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002636static struct file_system_type f2fs_fs_type = {
2637 .owner = THIS_MODULE,
2638 .name = "f2fs",
2639 .mount = f2fs_mount,
Jaegeuk Kim30a55372015-01-14 16:34:24 -08002640 .kill_sb = kill_f2fs_super,
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002641 .fs_flags = FS_REQUIRES_DEV,
2642};
Eric W. Biederman7f78e032013-03-02 19:39:14 -08002643MODULE_ALIAS_FS("f2fs");
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002644
Namjae Jeon6e6093a2013-01-17 00:08:30 +09002645static int __init init_inodecache(void)
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002646{
Vladimir Davydov5d097052016-01-14 15:18:21 -08002647 f2fs_inode_cachep = kmem_cache_create("f2fs_inode_cache",
2648 sizeof(struct f2fs_inode_info), 0,
2649 SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT, NULL);
Jaegeuk Kim6bacf522013-12-06 15:00:58 +09002650 if (!f2fs_inode_cachep)
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002651 return -ENOMEM;
2652 return 0;
2653}
2654
2655static void destroy_inodecache(void)
2656{
2657 /*
2658 * Make sure all delayed rcu free inodes are flushed before we
2659 * destroy cache.
2660 */
2661 rcu_barrier();
2662 kmem_cache_destroy(f2fs_inode_cachep);
2663}
2664
2665static int __init init_f2fs_fs(void)
2666{
2667 int err;
2668
Jaegeuk Kimc0508652015-01-07 14:07:36 -08002669 f2fs_build_trace_ios();
2670
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002671 err = init_inodecache();
2672 if (err)
2673 goto fail;
2674 err = create_node_manager_caches();
2675 if (err)
Zhao Hongjiang9890ff32013-08-20 16:49:51 +08002676 goto free_inodecache;
Jaegeuk Kim7fd9e542013-11-15 13:55:58 +09002677 err = create_segment_manager_caches();
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002678 if (err)
Zhao Hongjiang9890ff32013-08-20 16:49:51 +08002679 goto free_node_manager_caches;
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002680 err = create_checkpoint_caches();
2681 if (err)
Chao Yu06292072014-12-29 15:56:18 +08002682 goto free_segment_manager_caches;
Chao Yu1dcc3362015-02-05 17:57:31 +08002683 err = create_extent_cache();
2684 if (err)
2685 goto free_checkpoint_caches;
Jaegeuk Kim883d5532017-07-26 11:24:13 -07002686 err = f2fs_init_sysfs();
Chao Yu17a3fb52017-06-14 17:39:46 +08002687 if (err)
Chao Yu1dcc3362015-02-05 17:57:31 +08002688 goto free_extent_cache;
Jaegeuk Kim2658e502015-06-19 12:01:21 -07002689 err = register_shrinker(&f2fs_shrinker_info);
Jaegeuk Kimcfc4d972015-05-15 15:37:24 -07002690 if (err)
Chao Yu17a3fb52017-06-14 17:39:46 +08002691 goto free_sysfs;
Jaegeuk Kim2658e502015-06-19 12:01:21 -07002692 err = register_filesystem(&f2fs_fs_type);
2693 if (err)
2694 goto free_shrinker;
Chao Yu787c7b8c2015-10-29 09:13:04 +08002695 err = f2fs_create_root_stats();
2696 if (err)
2697 goto free_filesystem;
Zhao Hongjiang9890ff32013-08-20 16:49:51 +08002698 return 0;
2699
Chao Yu787c7b8c2015-10-29 09:13:04 +08002700free_filesystem:
2701 unregister_filesystem(&f2fs_fs_type);
Jaegeuk Kim2658e502015-06-19 12:01:21 -07002702free_shrinker:
2703 unregister_shrinker(&f2fs_shrinker_info);
Chao Yu17a3fb52017-06-14 17:39:46 +08002704free_sysfs:
Jaegeuk Kim883d5532017-07-26 11:24:13 -07002705 f2fs_exit_sysfs();
Chao Yu1dcc3362015-02-05 17:57:31 +08002706free_extent_cache:
2707 destroy_extent_cache();
Zhao Hongjiang9890ff32013-08-20 16:49:51 +08002708free_checkpoint_caches:
2709 destroy_checkpoint_caches();
Jaegeuk Kim7fd9e542013-11-15 13:55:58 +09002710free_segment_manager_caches:
2711 destroy_segment_manager_caches();
Zhao Hongjiang9890ff32013-08-20 16:49:51 +08002712free_node_manager_caches:
2713 destroy_node_manager_caches();
2714free_inodecache:
2715 destroy_inodecache();
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002716fail:
2717 return err;
2718}
2719
2720static void __exit exit_f2fs_fs(void)
2721{
Namjae Jeon4589d252013-01-15 19:58:47 +09002722 f2fs_destroy_root_stats();
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002723 unregister_filesystem(&f2fs_fs_type);
Tiezhu Yangb8bef792016-05-18 08:02:25 +08002724 unregister_shrinker(&f2fs_shrinker_info);
Jaegeuk Kim883d5532017-07-26 11:24:13 -07002725 f2fs_exit_sysfs();
Wanpeng Lifdf6c8b2015-03-06 15:00:54 +08002726 destroy_extent_cache();
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002727 destroy_checkpoint_caches();
Changman Lee5dcd8a72013-12-11 14:32:13 +09002728 destroy_segment_manager_caches();
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002729 destroy_node_manager_caches();
2730 destroy_inodecache();
Jaegeuk Kim351f4fb2015-01-07 14:09:48 -08002731 f2fs_destroy_trace_ios();
Jaegeuk Kimaff063e2012-11-02 17:07:47 +09002732}
2733
2734module_init(init_f2fs_fs)
2735module_exit(exit_f2fs_fs)
2736
2737MODULE_AUTHOR("Samsung Electronics's Praesto Team");
2738MODULE_DESCRIPTION("Flash Friendly File System");
2739MODULE_LICENSE("GPL");
Jaegeuk Kime6b120d2017-07-10 12:55:09 -07002740