/*
 * fs/f2fs/recovery.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"

/*
 * Roll forward recovery scenarios.
 *
 * [Term] F: fsync_mark, D: dentry_mark
 *
 * 1. inode(x) | CP | inode(x) | dnode(F)
 * -> Update the latest inode(x).
 *
 * 2. inode(x) | CP | inode(F) | dnode(F)
 * -> No problem.
 *
 * 3. inode(x) | CP | dnode(F) | inode(x)
 * -> Recover to the latest dnode(F), and drop the last inode(x)
 *
 * 4. inode(x) | CP | dnode(F) | inode(F)
 * -> No problem.
 *
 * 5. CP | inode(x) | dnode(F)
 * -> The inode(DF) was missing. Should drop this dnode(F).
 *
 * 6. CP | inode(DF) | dnode(F)
 * -> No problem.
 *
 * 7. CP | dnode(F) | inode(DF)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *
 * 8. CP | dnode(F) | inode(x)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *    But it will fail due to no inode(DF).
 */

static struct kmem_cache *fsync_entry_slab;
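
/*
 * Check whether the blocks allocated since the last checkpoint still fit
 * within the user block count, i.e. whether there is room to roll forward
 * fsynced data without triggering a checkpoint.
 */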
bool space_for_roll_forward(struct f2fs_sb_info *sbi)
{
	if (sbi->last_valid_block_count + sbi->alloc_valid_block_count
			> sbi->user_block_count)
		return false;
	return true;
}
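
/* Find the fsync inode entry collected for @ino, if any. */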
static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
								nid_t ino)
{
	struct fsync_inode_entry *entry;

	list_for_each_entry(entry, head, list)
		if (entry->inode->i_ino == ino)
			return entry;

	return NULL;
}
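
/*
 * Re-link the recovered inode into its parent directory (i_pino) under the
 * name stored in the inode block.  If another inode already occupies that
 * name, its entry is deleted through the orphan path and the lookup is
 * retried before the new link is added.  Encrypted names are skipped.
 */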
static int recover_dentry(struct inode *inode, struct page *ipage)
{
	struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
	nid_t pino = le32_to_cpu(raw_inode->i_pino);
	struct f2fs_dir_entry *de;
	struct qstr name;
	struct page *page;
	struct inode *dir, *einode;
	int err = 0;

	dir = f2fs_iget(inode->i_sb, pino);
	if (IS_ERR(dir)) {
		err = PTR_ERR(dir);
		goto out;
	}

	if (file_enc_name(inode)) {
		iput(dir);
		return 0;
	}

	name.len = le32_to_cpu(raw_inode->i_namelen);
	name.name = raw_inode->i_name;

	if (unlikely(name.len > F2FS_NAME_LEN)) {
		WARN_ON(1);
		err = -ENAMETOOLONG;
		goto out_err;
	}
retry:
	de = f2fs_find_entry(dir, &name, &page);
	if (de && inode->i_ino == le32_to_cpu(de->ino))
		goto out_unmap_put;

	if (de) {
		einode = f2fs_iget(inode->i_sb, le32_to_cpu(de->ino));
		if (IS_ERR(einode)) {
			WARN_ON(1);
			err = PTR_ERR(einode);
			if (err == -ENOENT)
				err = -EEXIST;
			goto out_unmap_put;
		}
		err = acquire_orphan_inode(F2FS_I_SB(inode));
		if (err) {
			iput(einode);
			goto out_unmap_put;
		}
		f2fs_delete_entry(de, page, dir, einode);
		iput(einode);
		goto retry;
	}
	err = __f2fs_add_link(dir, &name, inode, inode->i_ino, inode->i_mode);
	if (err)
		goto out_err;

	if (is_inode_flag_set(F2FS_I(dir), FI_DELAY_IPUT)) {
		iput(dir);
	} else {
		add_dirty_dir_inode(dir);
		set_inode_flag(F2FS_I(dir), FI_DELAY_IPUT);
	}

	goto out;

out_unmap_put:
	f2fs_dentry_kunmap(dir, page);
	f2fs_put_page(page, 0);
out_err:
	iput(dir);
out:
	f2fs_msg(inode->i_sb, KERN_NOTICE,
			"%s: ino = %x, name = %s, dir = %lx, err = %d",
			__func__, ino_of_node(ipage), raw_inode->i_name,
			IS_ERR(dir) ? 0 : dir->i_ino, err);
	return err;
}
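
/*
 * Copy the mode, size and timestamps recorded in the fsynced node page back
 * into the in-memory inode.
 */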
static void recover_inode(struct inode *inode, struct page *page)
{
	struct f2fs_inode *raw = F2FS_INODE(page);
	char *name;

	inode->i_mode = le16_to_cpu(raw->i_mode);
	i_size_write(inode, le64_to_cpu(raw->i_size));
	inode->i_atime.tv_sec = le64_to_cpu(raw->i_mtime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);

	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = F2FS_INODE(page)->i_name;

	f2fs_msg(inode->i_sb, KERN_NOTICE, "recover_inode: ino = %x, name = %s",
			ino_of_node(page), name);
}
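
/*
 * Step #1 of roll-forward recovery: walk the warm node chain written after
 * the last checkpoint and collect every inode that carries an fsync mark,
 * remembering the last node, inode and dentry block seen for each one.
 */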
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
{
	unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
	struct curseg_info *curseg;
	struct page *page = NULL;
	block_t blkaddr;
	int err = 0;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	ra_meta_pages(sbi, blkaddr, 1, META_POR, true);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!is_valid_blkaddr(sbi, blkaddr, META_POR))
			return 0;

		page = get_tmp_page(sbi, blkaddr);

		if (cp_ver != cpver_of_node(page))
			break;

		if (!is_fsync_dnode(page))
			goto next;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (!entry) {
			if (IS_INODE(page) && is_dent_dnode(page)) {
				err = recover_inode_page(sbi, page);
				if (err)
					break;
			}

			/* add this fsync inode to the list */
			entry = kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
			if (!entry) {
				err = -ENOMEM;
				break;
			}
			/*
			 * CP | dnode(F) | inode(DF)
			 * For this case, we should not give up now.
			 */
			entry->inode = f2fs_iget(sbi->sb, ino_of_node(page));
			if (IS_ERR(entry->inode)) {
				err = PTR_ERR(entry->inode);
				kmem_cache_free(fsync_entry_slab, entry);
				if (err == -ENOENT) {
					err = 0;
					goto next;
				}
				break;
			}
			list_add_tail(&entry->list, head);
		}
		entry->blkaddr = blkaddr;

		if (IS_INODE(page)) {
			entry->last_inode = blkaddr;
			if (is_dent_dnode(page))
				entry->last_dentry = blkaddr;
		}
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);

		ra_meta_pages_cond(sbi, blkaddr);
	}
	f2fs_put_page(page, 1);
	return err;
}
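
/* Release every collected fsync entry along with its inode reference. */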
static void destroy_fsync_dnodes(struct list_head *head)
{
	struct fsync_inode_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, list) {
		iput(entry->inode);
		list_del(&entry->list);
		kmem_cache_free(fsync_entry_slab, entry);
	}
}
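
/*
 * @blkaddr is about to be reused for recovered data, but an older node may
 * still map it.  Locate that node through the segment summary and truncate
 * the stale index so the block address is not referenced twice.
 */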
static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
			block_t blkaddr, struct dnode_of_data *dn)
{
	struct seg_entry *sentry;
	unsigned int segno = GET_SEGNO(sbi, blkaddr);
	unsigned short blkoff = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
	struct f2fs_summary_block *sum_node;
	struct f2fs_summary sum;
	struct page *sum_page, *node_page;
	struct dnode_of_data tdn = *dn;
	nid_t ino, nid;
	struct inode *inode;
	unsigned int offset;
	block_t bidx;
	int i;

	sentry = get_seg_entry(sbi, segno);
	if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
		return 0;

	/* Get the previous summary */
	for (i = CURSEG_WARM_DATA; i <= CURSEG_COLD_DATA; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);
		if (curseg->segno == segno) {
			sum = curseg->sum_blk->entries[blkoff];
			goto got_it;
		}
	}

	sum_page = get_sum_page(sbi, segno);
	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
	sum = sum_node->entries[blkoff];
	f2fs_put_page(sum_page, 1);
got_it:
	/* Use the locked dnode page and inode */
	nid = le32_to_cpu(sum.nid);
	if (dn->inode->i_ino == nid) {
		tdn.nid = nid;
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		tdn.node_page = dn->inode_page;
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	} else if (dn->nid == nid) {
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	}

	/* Get the node page */
	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	offset = ofs_of_node(node_page);
	ino = ino_of_node(node_page);
	f2fs_put_page(node_page, 1);

	if (ino != dn->inode->i_ino) {
		/* Deallocate previous index in the node page */
		inode = f2fs_iget(sbi->sb, ino);
		if (IS_ERR(inode))
			return PTR_ERR(inode);
	} else {
		inode = dn->inode;
	}

	bidx = start_bidx_of_node(offset, F2FS_I(inode)) +
			le16_to_cpu(sum.ofs_in_node);

	/*
	 * If the inode page is locked, unlock it temporarily; its reference
	 * count keeps it alive.
	 */
	if (ino == dn->inode->i_ino && dn->inode_page_locked)
		unlock_page(dn->inode_page);

	set_new_dnode(&tdn, inode, NULL, NULL, 0);
	if (get_dnode_of_data(&tdn, bidx, LOOKUP_NODE))
		goto out;

	if (tdn.data_blkaddr == blkaddr)
		truncate_data_blocks_range(&tdn, 1);

	f2fs_put_dnode(&tdn);
out:
	if (ino != dn->inode->i_ino)
		iput(inode);
	else if (dn->inode_page_locked)
		lock_page(dn->inode_page);
	return 0;

truncate_out:
	if (datablock_addr(tdn.node_page, tdn.ofs_in_node) == blkaddr)
		truncate_data_blocks_range(&tdn, 1);
	if (dn->inode->i_ino == nid && !dn->inode_page_locked)
		unlock_page(dn->inode_page);
	return 0;
}
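
/*
 * Replay one fsynced node page: restore any xattr or inline data it carries,
 * then walk its block addresses and make the current dnode point at the same
 * destinations, reserving new blocks or truncating stale ones as needed.
 */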
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
					struct page *page, block_t blkaddr)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int start, end;
	struct dnode_of_data dn;
	struct node_info ni;
	int err = 0, recovered = 0;

	/* step 1: recover xattr */
	if (IS_INODE(page)) {
		recover_inline_xattr(inode, page);
	} else if (f2fs_has_xattr_block(ofs_of_node(page))) {
		/*
		 * Deprecated; xattr blocks should be found in the cold log.
		 * But we should keep this path for backward compatibility.
		 */
		recover_xattr_data(inode, page, blkaddr);
		goto out;
	}

	/* step 2: recover inline data */
	if (recover_inline_data(inode, page))
		goto out;

	/* step 3: recover data indices */
	start = start_bidx_of_node(ofs_of_node(page), fi);
	end = start + ADDRS_PER_PAGE(page, fi);

	set_new_dnode(&dn, inode, NULL, NULL, 0);

	err = get_dnode_of_data(&dn, start, ALLOC_NODE);
	if (err)
		goto out;

	f2fs_wait_on_page_writeback(dn.node_page, NODE);

	get_node_info(sbi, dn.nid, &ni);
	f2fs_bug_on(sbi, ni.ino != ino_of_node(page));
	f2fs_bug_on(sbi, ofs_of_node(dn.node_page) != ofs_of_node(page));

	for (; start < end; start++, dn.ofs_in_node++) {
		block_t src, dest;

		src = datablock_addr(dn.node_page, dn.ofs_in_node);
		dest = datablock_addr(page, dn.ofs_in_node);

		/* skip recovering if dest is the same as src */
		if (src == dest)
			continue;

		/* dest is invalid, just invalidate src block */
		if (dest == NULL_ADDR) {
			truncate_data_blocks_range(&dn, 1);
			continue;
		}

		/*
		 * dest is reserved block, invalidate src block
		 * and then reserve one new block in dnode page.
		 */
		if (dest == NEW_ADDR) {
			truncate_data_blocks_range(&dn, 1);
			err = reserve_new_block(&dn);
			f2fs_bug_on(sbi, err);
			continue;
		}

		/* dest is valid block, try to recover from src to dest */
		if (is_valid_blkaddr(sbi, dest, META_POR)) {
			if (src == NULL_ADDR) {
				err = reserve_new_block(&dn);
				/* We should not get -ENOSPC */
				f2fs_bug_on(sbi, err);
			}

			/* check if a previous node page still maps this index */
			err = check_index_in_prev_nodes(sbi, dest, &dn);
			if (err)
				goto err;

			/* write dummy data page */
			f2fs_replace_block(sbi, &dn, src, dest,
						ni.version, false);
			recovered++;
		}
	}

	if (IS_INODE(dn.node_page))
		sync_inode_page(&dn);

	copy_node_footer(dn.node_page, page);
	fill_node_footer(dn.node_page, dn.nid, ni.ino,
					ofs_of_node(page), false);
	set_page_dirty(dn.node_page);
err:
	f2fs_put_dnode(&dn);
out:
	f2fs_msg(sbi->sb, KERN_NOTICE,
		"recover_data: ino = %lx, recovered = %d blocks, err = %d",
		inode->i_ino, recovered, err);
	return err;
}
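
/*
 * Step #2 of roll-forward recovery: walk the node chain again and, for every
 * block belonging to a collected fsync inode, recover the inode metadata,
 * the dentry and the data block addresses it describes.
 */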
static int recover_data(struct f2fs_sb_info *sbi,
				struct list_head *head, int type)
{
	unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
	struct curseg_info *curseg;
	struct page *page = NULL;
	int err = 0;
	block_t blkaddr;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, type);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!is_valid_blkaddr(sbi, blkaddr, META_POR))
			break;

		ra_meta_pages_cond(sbi, blkaddr);

		page = get_tmp_page(sbi, blkaddr);

		if (cp_ver != cpver_of_node(page)) {
			f2fs_put_page(page, 1);
			break;
		}

		entry = get_fsync_inode(head, ino_of_node(page));
		if (!entry)
			goto next;
		/*
		 * inode(x) | CP | inode(x) | dnode(F)
		 * In this case, we can lose the latest inode(x).
		 * So, call recover_inode for the inode update.
		 */
		if (entry->last_inode == blkaddr)
			recover_inode(entry->inode, page);
		if (entry->last_dentry == blkaddr) {
			err = recover_dentry(entry->inode, page);
			if (err) {
				f2fs_put_page(page, 1);
				break;
			}
		}
		err = do_recover_data(sbi, entry->inode, page, blkaddr);
		if (err) {
			f2fs_put_page(page, 1);
			break;
		}

		if (entry->blkaddr == blkaddr) {
			iput(entry->inode);
			list_del(&entry->list);
			kmem_cache_free(fsync_entry_slab, entry);
		}
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);
	}
	if (!err)
		allocate_new_segments(sbi);
	return err;
}
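
/*
 * Roll-forward recovery entry point: collect fsynced inodes (step #1), replay
 * their node chains (step #2), and finish with either a CP_RECOVERY
 * checkpoint or, on error, CP_ERROR_FLAG in the current checkpoint.
 */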
int recover_fsync_data(struct f2fs_sb_info *sbi)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	struct list_head inode_list;
	block_t blkaddr;
	int err;
	bool need_writecp = false;

	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
			sizeof(struct fsync_inode_entry));
	if (!fsync_entry_slab)
		return -ENOMEM;

	INIT_LIST_HEAD(&inode_list);

	/* prevent checkpoint */
	mutex_lock(&sbi->cp_mutex);

	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	/* step #1: find fsynced inode numbers */
	err = find_fsync_dnodes(sbi, &inode_list);
	if (err)
		goto out;

	if (list_empty(&inode_list))
		goto out;

	need_writecp = true;

	/* step #2: recover data */
	err = recover_data(sbi, &inode_list, CURSEG_WARM_NODE);
	if (!err)
		f2fs_bug_on(sbi, !list_empty(&inode_list));
out:
	destroy_fsync_dnodes(&inode_list);
	kmem_cache_destroy(fsync_entry_slab);

	/* truncate meta pages which were used by the recovery */
	truncate_inode_pages_range(META_MAPPING(sbi),
			(loff_t)MAIN_BLKADDR(sbi) << PAGE_CACHE_SHIFT, -1);

	if (err) {
		truncate_inode_pages_final(NODE_MAPPING(sbi));
		truncate_inode_pages_final(META_MAPPING(sbi));
	}

	clear_sbi_flag(sbi, SBI_POR_DOING);
	if (err) {
		bool invalidate = false;

		if (discard_next_dnode(sbi, blkaddr))
			invalidate = true;

		/* Flush all the NAT/SIT pages */
		while (get_pages(sbi, F2FS_DIRTY_META))
			sync_meta_pages(sbi, META, LONG_MAX);

		/* invalidate temporary meta page */
		if (invalidate)
			invalidate_mapping_pages(META_MAPPING(sbi),
							blkaddr, blkaddr);

		set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
		mutex_unlock(&sbi->cp_mutex);
	} else if (need_writecp) {
		struct cp_control cpc = {
			.reason = CP_RECOVERY,
		};
		mutex_unlock(&sbi->cp_mutex);
		write_checkpoint(sbi, &cpc);
	} else {
		mutex_unlock(&sbi->cp_mutex);
	}
	return err;
}