/*
 * fs/f2fs/recovery.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"

/*
 * Roll forward recovery scenarios.
 *
 * [Term] F: fsync_mark, D: dentry_mark
 *
 * 1. inode(x) | CP | inode(x) | dnode(F)
 * -> Update the latest inode(x).
 *
 * 2. inode(x) | CP | inode(F) | dnode(F)
 * -> No problem.
 *
 * 3. inode(x) | CP | dnode(F) | inode(x)
 * -> Recover to the latest dnode(F), and drop the last inode(x)
 *
 * 4. inode(x) | CP | dnode(F) | inode(F)
 * -> No problem.
 *
 * 5. CP | inode(x) | dnode(F)
 * -> The inode(DF) was missing. Should drop this dnode(F).
 *
 * 6. CP | inode(DF) | dnode(F)
 * -> No problem.
 *
 * 7. CP | dnode(F) | inode(DF)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *
 * 8. CP | dnode(F) | inode(x)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *    But it will fail due to no inode(DF).
 */

static struct kmem_cache *fsync_entry_slab;

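/*
 * Check whether there is room left for roll forward: the blocks valid at
 * the last checkpoint plus the blocks allocated since then must still fit
 * in the user block count.
 */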
bool space_for_roll_forward(struct f2fs_sb_info *sbi)
{
	if (sbi->last_valid_block_count + sbi->alloc_valid_block_count
			> sbi->user_block_count)
		return false;
	return true;
}

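/* Find the entry for @ino in the list built by find_fsync_dnodes(). */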
static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
								nid_t ino)
{
	struct fsync_inode_entry *entry;

	list_for_each_entry(entry, head, list)
		if (entry->inode->i_ino == ino)
			return entry;

	return NULL;
}

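/*
 * Re-link a recovered inode into its parent directory, using the parent ino
 * and the name recorded in the on-disk inode. If a stale entry with the same
 * name points to a different inode, that inode is made an orphan, the entry
 * is deleted, and the lookup is retried before adding the new link.
 */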
static int recover_dentry(struct inode *inode, struct page *ipage)
{
	struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
	nid_t pino = le32_to_cpu(raw_inode->i_pino);
	struct f2fs_dir_entry *de;
	struct qstr name;
	struct page *page;
	struct inode *dir, *einode;
	int err = 0;

	dir = f2fs_iget(inode->i_sb, pino);
	if (IS_ERR(dir)) {
		err = PTR_ERR(dir);
		goto out;
	}

	name.len = le32_to_cpu(raw_inode->i_namelen);
	name.name = raw_inode->i_name;

	if (unlikely(name.len > F2FS_NAME_LEN)) {
		WARN_ON(1);
		err = -ENAMETOOLONG;
		goto out_err;
	}
retry:
	de = f2fs_find_entry(dir, &name, &page);
	if (de && inode->i_ino == le32_to_cpu(de->ino)) {
		clear_inode_flag(F2FS_I(inode), FI_INC_LINK);
		goto out_unmap_put;
	}
	if (de) {
		einode = f2fs_iget(inode->i_sb, le32_to_cpu(de->ino));
		if (IS_ERR(einode)) {
			WARN_ON(1);
			err = PTR_ERR(einode);
			if (err == -ENOENT)
				err = -EEXIST;
			goto out_unmap_put;
		}
		err = acquire_orphan_inode(F2FS_I_SB(inode));
		if (err) {
			iput(einode);
			goto out_unmap_put;
		}
		f2fs_delete_entry(de, page, dir, einode);
		iput(einode);
		goto retry;
	}
	err = __f2fs_add_link(dir, &name, inode, inode->i_ino, inode->i_mode);
	if (err)
		goto out_err;

	if (is_inode_flag_set(F2FS_I(dir), FI_DELAY_IPUT)) {
		iput(dir);
	} else {
		add_dirty_dir_inode(dir);
		set_inode_flag(F2FS_I(dir), FI_DELAY_IPUT);
	}

	goto out;

out_unmap_put:
	f2fs_dentry_kunmap(dir, page);
	f2fs_put_page(page, 0);
out_err:
	iput(dir);
out:
	f2fs_msg(inode->i_sb, KERN_NOTICE,
			"%s: ino = %x, name = %s, dir = %lx, err = %d",
			__func__, ino_of_node(ipage), raw_inode->i_name,
			IS_ERR(dir) ? 0 : dir->i_ino, err);
	return err;
}

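/*
 * Restore the in-memory inode's mode, size and timestamps from the raw
 * inode found in the node chain. Note that atime is restored from the
 * on-disk mtime fields here.
 */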
static void recover_inode(struct inode *inode, struct page *page)
{
	struct f2fs_inode *raw = F2FS_INODE(page);

	inode->i_mode = le16_to_cpu(raw->i_mode);
	i_size_write(inode, le64_to_cpu(raw->i_size));
	inode->i_atime.tv_sec = le64_to_cpu(raw->i_mtime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);

	f2fs_msg(inode->i_sb, KERN_NOTICE, "recover_inode: ino = %x, name = %s",
			ino_of_node(page), F2FS_INODE(page)->i_name);
}

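/*
 * Step #1 of recovery: starting from the next free block of the warm node
 * log, follow the chain of node blocks written after the last checkpoint
 * and collect every inode that owns an fsynced dnode. The walk stops when
 * a node block carries a different checkpoint version.
 */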
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
{
	unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
	struct curseg_info *curseg;
	struct page *page = NULL;
	block_t blkaddr;
	int err = 0;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	ra_meta_pages(sbi, blkaddr, 1, META_POR);

	while (1) {
		struct fsync_inode_entry *entry;

		if (blkaddr < MAIN_BLKADDR(sbi) || blkaddr >= MAX_BLKADDR(sbi))
			return 0;

		page = get_meta_page(sbi, blkaddr);

		if (cp_ver != cpver_of_node(page))
			break;

		if (!is_fsync_dnode(page))
			goto next;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (entry) {
			if (IS_INODE(page) && is_dent_dnode(page))
				set_inode_flag(F2FS_I(entry->inode),
							FI_INC_LINK);
		} else {
			if (IS_INODE(page) && is_dent_dnode(page)) {
				err = recover_inode_page(sbi, page);
				if (err)
					break;
			}

			/* add this fsync inode to the list */
			entry = kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
			if (!entry) {
				err = -ENOMEM;
				break;
			}
			/*
			 * CP | dnode(F) | inode(DF)
			 * For this case, we should not give up now.
			 */
			entry->inode = f2fs_iget(sbi->sb, ino_of_node(page));
			if (IS_ERR(entry->inode)) {
				err = PTR_ERR(entry->inode);
				kmem_cache_free(fsync_entry_slab, entry);
				if (err == -ENOENT) {
					err = 0;
					goto next;
				}
				break;
			}
			list_add_tail(&entry->list, head);
		}
		entry->blkaddr = blkaddr;

		if (IS_INODE(page)) {
			entry->last_inode = blkaddr;
			if (is_dent_dnode(page))
				entry->last_dentry = blkaddr;
		}
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);

		ra_meta_pages_cond(sbi, blkaddr);
	}
	f2fs_put_page(page, 1);
	return err;
}

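/* Drop every fsync_inode_entry still on the list and release its inode. */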
static void destroy_fsync_dnodes(struct list_head *head)
{
	struct fsync_inode_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, list) {
		iput(entry->inode);
		list_del(&entry->list);
		kmem_cache_free(fsync_entry_slab, entry);
	}
}

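/*
 * The block at @blkaddr may still be referenced by an older node written
 * before the crash. Use the segment summary to find the node and file
 * offset that own the block, and truncate that stale reference so the
 * block is not mapped twice after replaying the new dnode.
 */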
static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
			block_t blkaddr, struct dnode_of_data *dn)
{
	struct seg_entry *sentry;
	unsigned int segno = GET_SEGNO(sbi, blkaddr);
	unsigned short blkoff = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
	struct f2fs_summary_block *sum_node;
	struct f2fs_summary sum;
	struct page *sum_page, *node_page;
	struct dnode_of_data tdn = *dn;
	nid_t ino, nid;
	struct inode *inode;
	unsigned int offset;
	block_t bidx;
	int i;

	sentry = get_seg_entry(sbi, segno);
	if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
		return 0;

	/* Get the previous summary */
	for (i = CURSEG_WARM_DATA; i <= CURSEG_COLD_DATA; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);
		if (curseg->segno == segno) {
			sum = curseg->sum_blk->entries[blkoff];
			goto got_it;
		}
	}

	sum_page = get_sum_page(sbi, segno);
	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
	sum = sum_node->entries[blkoff];
	f2fs_put_page(sum_page, 1);
got_it:
	/* Use the locked dnode page and inode */
	nid = le32_to_cpu(sum.nid);
	if (dn->inode->i_ino == nid) {
		tdn.nid = nid;
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		tdn.node_page = dn->inode_page;
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	} else if (dn->nid == nid) {
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	}

	/* Get the node page */
	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	offset = ofs_of_node(node_page);
	ino = ino_of_node(node_page);
	f2fs_put_page(node_page, 1);

	if (ino != dn->inode->i_ino) {
		/* Deallocate previous index in the node page */
		inode = f2fs_iget(sbi->sb, ino);
		if (IS_ERR(inode))
			return PTR_ERR(inode);
	} else {
		inode = dn->inode;
	}

	bidx = start_bidx_of_node(offset, F2FS_I(inode)) +
			le16_to_cpu(sum.ofs_in_node);

	/*
	 * If the inode page is locked, unlock it temporarily; the reference
	 * we hold keeps it alive.
	 */
	if (ino == dn->inode->i_ino && dn->inode_page_locked)
		unlock_page(dn->inode_page);

	set_new_dnode(&tdn, inode, NULL, NULL, 0);
	if (get_dnode_of_data(&tdn, bidx, LOOKUP_NODE))
		goto out;

	if (tdn.data_blkaddr == blkaddr)
		truncate_data_blocks_range(&tdn, 1);

	f2fs_put_dnode(&tdn);
out:
	if (ino != dn->inode->i_ino)
		iput(inode);
	else if (dn->inode_page_locked)
		lock_page(dn->inode_page);
	return 0;

truncate_out:
	if (datablock_addr(tdn.node_page, tdn.ofs_in_node) == blkaddr)
		truncate_data_blocks_range(&tdn, 1);
	if (dn->inode->i_ino == nid && !dn->inode_page_locked)
		unlock_page(dn->inode_page);
	return 0;
}

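/*
 * Replay a single fsynced dnode for @inode: recover xattr and inline data
 * first, then walk the block addresses stored in the logged node page and
 * copy each one that differs into the corresponding live dnode.
 */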
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
					struct page *page, block_t blkaddr)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int start, end;
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	int err = 0, recovered = 0;

	/* step 1: recover xattr */
	if (IS_INODE(page)) {
		recover_inline_xattr(inode, page);
	} else if (f2fs_has_xattr_block(ofs_of_node(page))) {
		/*
		 * Deprecated; xattr blocks should be found in the cold log.
		 * We keep this path for backward compatibility.
		 */
		recover_xattr_data(inode, page, blkaddr);
		goto out;
	}

	/* step 2: recover inline data */
	if (recover_inline_data(inode, page))
		goto out;

	/* step 3: recover data indices */
	start = start_bidx_of_node(ofs_of_node(page), fi);
	end = start + ADDRS_PER_PAGE(page, fi);

	f2fs_lock_op(sbi);

	set_new_dnode(&dn, inode, NULL, NULL, 0);

	err = get_dnode_of_data(&dn, start, ALLOC_NODE);
	if (err) {
		f2fs_unlock_op(sbi);
		goto out;
	}

	f2fs_wait_on_page_writeback(dn.node_page, NODE);

	get_node_info(sbi, dn.nid, &ni);
	f2fs_bug_on(sbi, ni.ino != ino_of_node(page));
	f2fs_bug_on(sbi, ofs_of_node(dn.node_page) != ofs_of_node(page));

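	/*
	 * Walk every index covered by this node page and replay the block
	 * addresses that changed since the checkpoint. A stale reference to
	 * the destination block in an older node is truncated first.
	 */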
	for (; start < end; start++) {
		block_t src, dest;

		src = datablock_addr(dn.node_page, dn.ofs_in_node);
		dest = datablock_addr(page, dn.ofs_in_node);

		if (src != dest && dest != NEW_ADDR && dest != NULL_ADDR) {
			if (src == NULL_ADDR) {
				err = reserve_new_block(&dn);
				/* We should not get -ENOSPC */
				f2fs_bug_on(sbi, err);
			}

			/* Check whether a previous node page still holds this index */
			err = check_index_in_prev_nodes(sbi, dest, &dn);
			if (err)
				goto err;

			set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

			/* write dummy data page */
			recover_data_page(sbi, NULL, &sum, src, dest);
			dn.data_blkaddr = dest;
			set_data_blkaddr(&dn);
			f2fs_update_extent_cache(&dn);
			recovered++;
		}
		dn.ofs_in_node++;
	}

	if (IS_INODE(dn.node_page))
		sync_inode_page(&dn);

	copy_node_footer(dn.node_page, page);
	fill_node_footer(dn.node_page, dn.nid, ni.ino,
					ofs_of_node(page), false);
	set_page_dirty(dn.node_page);
err:
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);
out:
	f2fs_msg(sbi->sb, KERN_NOTICE,
		"recover_data: ino = %lx, recovered = %d blocks, err = %d",
		inode->i_ino, recovered, err);
	return err;
}

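/*
 * Step #2 of recovery: walk the warm node chain again and, for every node
 * block that belongs to an inode collected in step #1, replay its data
 * indices. The inode itself and its dentry are recovered when the block
 * recorded as last_inode/last_dentry is reached.
 */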
static int recover_data(struct f2fs_sb_info *sbi,
				struct list_head *head, int type)
{
	unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
	struct curseg_info *curseg;
	struct page *page = NULL;
	int err = 0;
	block_t blkaddr;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, type);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (blkaddr < MAIN_BLKADDR(sbi) || blkaddr >= MAX_BLKADDR(sbi))
			break;

		ra_meta_pages_cond(sbi, blkaddr);

		page = get_meta_page(sbi, blkaddr);

		if (cp_ver != cpver_of_node(page)) {
			f2fs_put_page(page, 1);
			break;
		}

		entry = get_fsync_inode(head, ino_of_node(page));
		if (!entry)
			goto next;
		/*
		 * inode(x) | CP | inode(x) | dnode(F)
		 * In this case, we can lose the latest inode(x).
		 * So, call recover_inode for the inode update.
		 */
		if (entry->last_inode == blkaddr)
			recover_inode(entry->inode, page);
		if (entry->last_dentry == blkaddr) {
			err = recover_dentry(entry->inode, page);
			if (err) {
				f2fs_put_page(page, 1);
				break;
			}
		}
		err = do_recover_data(sbi, entry->inode, page, blkaddr);
		if (err) {
			f2fs_put_page(page, 1);
			break;
		}

		if (entry->blkaddr == blkaddr) {
			iput(entry->inode);
			list_del(&entry->list);
			kmem_cache_free(fsync_entry_slab, entry);
		}
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);
	}
	if (!err)
		allocate_new_segments(sbi);
	return err;
}

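/*
 * Entry point of roll forward recovery, called at mount time: collect the
 * fsynced inodes (step #1), replay their data (step #2), and then either
 * write a checkpoint on success or mark the checkpoint with CP_ERROR_FLAG
 * on failure.
 */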
int recover_fsync_data(struct f2fs_sb_info *sbi)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	struct list_head inode_list;
	block_t blkaddr;
	int err;
	bool need_writecp = false;

	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
			sizeof(struct fsync_inode_entry));
	if (!fsync_entry_slab)
		return -ENOMEM;

	INIT_LIST_HEAD(&inode_list);

	/* step #1: find fsynced inode numbers */
	set_sbi_flag(sbi, SBI_POR_DOING);

	/* prevent checkpoint */
	mutex_lock(&sbi->cp_mutex);

	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	err = find_fsync_dnodes(sbi, &inode_list);
	if (err)
		goto out;

	if (list_empty(&inode_list))
		goto out;

	need_writecp = true;

	/* step #2: recover data */
	err = recover_data(sbi, &inode_list, CURSEG_WARM_NODE);
	if (!err)
		f2fs_bug_on(sbi, !list_empty(&inode_list));
out:
	destroy_fsync_dnodes(&inode_list);
	kmem_cache_destroy(fsync_entry_slab);

	/* truncate meta pages used by the recovery */
	truncate_inode_pages_range(META_MAPPING(sbi),
			MAIN_BLKADDR(sbi) << PAGE_CACHE_SHIFT, -1);

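	/*
	 * On failure, drop everything staged in the node/meta caches, discard
	 * the next dnode position, flush dirty NAT/SIT pages and flag the
	 * checkpoint as erroneous; otherwise write a sync checkpoint if any
	 * data was actually recovered.
	 */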
	if (err) {
		truncate_inode_pages_final(NODE_MAPPING(sbi));
		truncate_inode_pages_final(META_MAPPING(sbi));
	}

	clear_sbi_flag(sbi, SBI_POR_DOING);
	if (err) {
		discard_next_dnode(sbi, blkaddr);

		/* Flush all the NAT/SIT pages */
		while (get_pages(sbi, F2FS_DIRTY_META))
			sync_meta_pages(sbi, META, LONG_MAX);
		set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
		mutex_unlock(&sbi->cp_mutex);
	} else if (need_writecp) {
		struct cp_control cpc = {
			.reason = CP_SYNC,
		};
		mutex_unlock(&sbi->cp_mutex);
		write_checkpoint(sbi, &cpc);
	} else {
		mutex_unlock(&sbi->cp_mutex);
	}
	return err;
}