/*
 * fs/f2fs/recovery.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"

static struct kmem_cache *fsync_entry_slab;

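/*
 * Check that the blocks logged since the last checkpoint still fit in the
 * user-visible block count, i.e. that a roll-forward replay cannot run out
 * of space (callers are presumably expected to fall back to writing a full
 * checkpoint when this returns false).
 */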
bool space_for_roll_forward(struct f2fs_sb_info *sbi)
{
	if (sbi->last_valid_block_count + sbi->alloc_valid_block_count
			> sbi->user_block_count)
		return false;
	return true;
}

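/* Find the entry for @ino in the list built by find_fsync_dnodes(), if any. */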
static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
								nid_t ino)
{
	struct list_head *this;
	struct fsync_inode_entry *entry;

	list_for_each(this, head) {
		entry = list_entry(this, struct fsync_inode_entry, list);
		if (entry->inode->i_ino == ino)
			return entry;
	}
	return NULL;
}

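/*
 * Re-link a recovered inode into its parent directory using the name stored
 * in its on-disk inode.  A stale entry holding the same name but a different
 * inode number is deleted first (hence the retry loop), and a parent fetched
 * via f2fs_iget() is marked FI_DELAY_IPUT so its reference is dropped later
 * by the dirty-dir handling rather than here.
 */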
static int recover_dentry(struct page *ipage, struct inode *inode)
{
	struct f2fs_node *raw_node = F2FS_NODE(ipage);
	struct f2fs_inode *raw_inode = &(raw_node->i);
	nid_t pino = le32_to_cpu(raw_inode->i_pino);
	struct f2fs_dir_entry *de;
	struct qstr name;
	struct page *page;
	struct inode *dir, *einode;
	int err = 0;

	dir = check_dirty_dir_inode(F2FS_SB(inode->i_sb), pino);
	if (!dir) {
		dir = f2fs_iget(inode->i_sb, pino);
		if (IS_ERR(dir)) {
			err = PTR_ERR(dir);
			goto out;
		}
		set_inode_flag(F2FS_I(dir), FI_DELAY_IPUT);
		add_dirty_dir_inode(dir);
	}

	name.len = le32_to_cpu(raw_inode->i_namelen);
	name.name = raw_inode->i_name;
retry:
	de = f2fs_find_entry(dir, &name, &page);
	if (de && inode->i_ino == le32_to_cpu(de->ino)) {
		kunmap(page);
		f2fs_put_page(page, 0);
		goto out;
	}
	if (de) {
		einode = f2fs_iget(inode->i_sb, le32_to_cpu(de->ino));
		if (IS_ERR(einode)) {
			WARN_ON(1);
			if (PTR_ERR(einode) == -ENOENT)
				err = -EEXIST;
			goto out;
		}
		f2fs_delete_entry(de, page, einode);
		iput(einode);
		goto retry;
	}
	err = __f2fs_add_link(dir, &name, inode);
out:
	f2fs_msg(inode->i_sb, KERN_NOTICE, "recover_inode and its dentry: "
			"ino = %x, name = %s, dir = %lx, err = %d",
			ino_of_node(ipage), raw_inode->i_name,
			IS_ERR(dir) ? 0 : dir->i_ino, err);
	return err;
}

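/*
 * Restore mode, size and timestamps of a recovered inode from its node page;
 * if the page is a dentry dnode, recover its dentry as well.  Note that
 * atime is restored from the on-disk mtime fields here.
 */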
static int recover_inode(struct inode *inode, struct page *node_page)
{
	struct f2fs_node *raw_node = F2FS_NODE(node_page);
	struct f2fs_inode *raw_inode = &(raw_node->i);

	if (!IS_INODE(node_page))
		return 0;

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	i_size_write(inode, le64_to_cpu(raw_inode->i_size));
	inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);

	if (is_dent_dnode(node_page))
		return recover_dentry(node_page, inode);

	f2fs_msg(inode->i_sb, KERN_NOTICE, "recover_inode: ino = %x, name = %s",
			ino_of_node(node_page), raw_inode->i_name);
	return 0;
}

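/*
 * Step #1 of roll-forward recovery: follow the chain of node pages written
 * after the last checkpoint, starting at the current warm node segment, and
 * collect every inode that owns fsynced dnodes into @head.  The walk ends at
 * the first page whose checkpoint version differs from the current one.
 */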
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
{
	unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
	struct curseg_info *curseg;
	struct page *page;
	block_t blkaddr;
	int err = 0;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = START_BLOCK(sbi, curseg->segno) + curseg->next_blkoff;

	/* read node page */
	page = alloc_page(GFP_F2FS_ZERO);
	if (!page)
		return -ENOMEM;
	lock_page(page);

	while (1) {
		struct fsync_inode_entry *entry;

		err = f2fs_readpage(sbi, page, blkaddr, READ_SYNC);
		if (err)
			goto out;

		lock_page(page);

		if (cp_ver != cpver_of_node(page))
			break;

		if (!is_fsync_dnode(page))
			goto next;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (entry) {
			if (IS_INODE(page) && is_dent_dnode(page))
				set_inode_flag(F2FS_I(entry->inode),
							FI_INC_LINK);
		} else {
			if (IS_INODE(page) && is_dent_dnode(page)) {
				err = recover_inode_page(sbi, page);
				if (err)
					break;
			}

			/* add this fsync inode to the list */
			entry = kmem_cache_alloc(fsync_entry_slab, GFP_NOFS);
			if (!entry) {
				err = -ENOMEM;
				break;
			}

			entry->inode = f2fs_iget(sbi->sb, ino_of_node(page));
			if (IS_ERR(entry->inode)) {
				err = PTR_ERR(entry->inode);
				kmem_cache_free(fsync_entry_slab, entry);
				break;
			}
			list_add_tail(&entry->list, head);
		}
		entry->blkaddr = blkaddr;

		err = recover_inode(entry->inode, page);
		if (err && err != -ENOENT)
			break;
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
	}
	unlock_page(page);
out:
	__free_pages(page, 0);
	return err;
}

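/* Release every entry on the fsync inode list along with its inode reference. */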
static void destroy_fsync_dnodes(struct list_head *head)
{
	struct fsync_inode_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, list) {
		iput(entry->inode);
		list_del(&entry->list);
		kmem_cache_free(fsync_entry_slab, entry);
	}
}

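/*
 * @blkaddr is about to be reused for recovered data, but if it is still
 * valid on disk an older node may reference it.  Find that owner through
 * the segment summary (a current segment, else the on-disk summary block)
 * and punch out the stale index: either in the dnode/inode page already
 * locked by @dn, or in the previous owner's inode via truncate_hole().
 */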
static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
			block_t blkaddr, struct dnode_of_data *dn)
{
	struct seg_entry *sentry;
	unsigned int segno = GET_SEGNO(sbi, blkaddr);
	unsigned short blkoff = GET_SEGOFF_FROM_SEG0(sbi, blkaddr) &
					(sbi->blocks_per_seg - 1);
	struct f2fs_summary sum;
	nid_t ino, nid;
	void *kaddr;
	struct inode *inode;
	struct page *node_page;
	unsigned int offset;
	block_t bidx;
	int i;

	sentry = get_seg_entry(sbi, segno);
	if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
		return 0;

	/* Get the previous summary */
	for (i = CURSEG_WARM_DATA; i <= CURSEG_COLD_DATA; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);
		if (curseg->segno == segno) {
			sum = curseg->sum_blk->entries[blkoff];
			break;
		}
	}
	if (i > CURSEG_COLD_DATA) {
		struct page *sum_page = get_sum_page(sbi, segno);
		struct f2fs_summary_block *sum_node;
		kaddr = page_address(sum_page);
		sum_node = (struct f2fs_summary_block *)kaddr;
		sum = sum_node->entries[blkoff];
		f2fs_put_page(sum_page, 1);
	}

	/* Use the locked dnode page and inode */
	nid = le32_to_cpu(sum.nid);
	if (dn->inode->i_ino == nid) {
		struct dnode_of_data tdn = *dn;
		tdn.nid = nid;
		tdn.node_page = dn->inode_page;
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		truncate_data_blocks_range(&tdn, 1);
		return 0;
	} else if (dn->nid == nid) {
		struct dnode_of_data tdn = *dn;
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		truncate_data_blocks_range(&tdn, 1);
		return 0;
	}

	/* Get the node page */
	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	offset = ofs_of_node(node_page);
	ino = ino_of_node(node_page);
	f2fs_put_page(node_page, 1);

	/* Deallocate previous index in the node page */
	inode = f2fs_iget(sbi->sb, ino);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	bidx = start_bidx_of_node(offset, F2FS_I(inode)) +
			le16_to_cpu(sum.ofs_in_node);

	truncate_hole(inode, bidx, bidx + 1);
	iput(inode);
	return 0;
}

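/*
 * Replay one fsynced dnode: for each block address that differs between the
 * logged page and the current dnode, reserve a new block if needed,
 * invalidate any stale owner of the target address, copy the data block,
 * and finally rewrite the node page in place with the logged footer.
 */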
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
					struct page *page, block_t blkaddr)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int start, end;
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	int err = 0, recovered = 0;
	int ilock;

	start = start_bidx_of_node(ofs_of_node(page), fi);
	if (IS_INODE(page))
		end = start + ADDRS_PER_INODE(fi);
	else
		end = start + ADDRS_PER_BLOCK;

	ilock = mutex_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);

	err = get_dnode_of_data(&dn, start, ALLOC_NODE);
	if (err) {
		mutex_unlock_op(sbi, ilock);
		return err;
	}

	wait_on_page_writeback(dn.node_page);

	get_node_info(sbi, dn.nid, &ni);
	BUG_ON(ni.ino != ino_of_node(page));
	BUG_ON(ofs_of_node(dn.node_page) != ofs_of_node(page));

	for (; start < end; start++) {
		block_t src, dest;

		src = datablock_addr(dn.node_page, dn.ofs_in_node);
		dest = datablock_addr(page, dn.ofs_in_node);

		if (src != dest && dest != NEW_ADDR && dest != NULL_ADDR) {
			if (src == NULL_ADDR) {
				int err = reserve_new_block(&dn);
				/* We should not get -ENOSPC */
				BUG_ON(err);
			}

			/* Check the previous node page having this index */
			err = check_index_in_prev_nodes(sbi, dest, &dn);
			if (err)
				goto err;

			set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

			/* write dummy data page */
			recover_data_page(sbi, NULL, &sum, src, dest);
			update_extent_cache(dest, &dn);
			recovered++;
		}
		dn.ofs_in_node++;
	}

	/* write node page in place */
	set_summary(&sum, dn.nid, 0, 0);
	if (IS_INODE(dn.node_page))
		sync_inode_page(&dn);

	copy_node_footer(dn.node_page, page);
	fill_node_footer(dn.node_page, dn.nid, ni.ino,
					ofs_of_node(page), false);
	set_page_dirty(dn.node_page);

	recover_node_page(sbi, dn.node_page, &sum, &ni, blkaddr);
err:
	f2fs_put_dnode(&dn);
	mutex_unlock_op(sbi, ilock);

	f2fs_msg(sbi->sb, KERN_NOTICE, "recover_data: ino = %lx, "
			"recovered_data = %d blocks, err = %d",
			inode->i_ino, recovered, err);
	return err;
}

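/*
 * Step #2 of roll-forward recovery: walk the node chain once more and replay
 * the data of every inode collected in step #1.  An entry is released once
 * its last logged dnode (entry->blkaddr) has been processed.
 */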
static int recover_data(struct f2fs_sb_info *sbi,
				struct list_head *head, int type)
{
	unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
	struct curseg_info *curseg;
	struct page *page;
	int err = 0;
	block_t blkaddr;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, type);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	/* read node page */
	page = alloc_page(GFP_NOFS | __GFP_ZERO);
	if (!page)
		return -ENOMEM;

	lock_page(page);

	while (1) {
		struct fsync_inode_entry *entry;

		err = f2fs_readpage(sbi, page, blkaddr, READ_SYNC);
		if (err)
			goto out;

		lock_page(page);

		if (cp_ver != cpver_of_node(page))
			break;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (!entry)
			goto next;

		err = do_recover_data(sbi, entry->inode, page, blkaddr);
		if (err)
			break;

		if (entry->blkaddr == blkaddr) {
			iput(entry->inode);
			list_del(&entry->list);
			kmem_cache_free(fsync_entry_slab, entry);
		}
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
	}
	unlock_page(page);
out:
	__free_pages(page, 0);

	if (!err)
		allocate_new_segments(sbi);
	return err;
}

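/*
 * Entry point of roll-forward recovery, run at mount time after an unclean
 * shutdown: collect the fsynced inodes (step #1), replay their data
 * (step #2), and write a checkpoint if everything succeeded.
 */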
int recover_fsync_data(struct f2fs_sb_info *sbi)
{
	struct list_head inode_list;
	int err;

	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
			sizeof(struct fsync_inode_entry), NULL);
	if (unlikely(!fsync_entry_slab))
		return -ENOMEM;

	INIT_LIST_HEAD(&inode_list);

	/* step #1: find fsynced inode numbers */
	sbi->por_doing = 1;
	err = find_fsync_dnodes(sbi, &inode_list);
	if (err)
		goto out;

	if (list_empty(&inode_list))
		goto out;

	/* step #2: recover data */
	err = recover_data(sbi, &inode_list, CURSEG_WARM_NODE);
	BUG_ON(!list_empty(&inode_list));
out:
	destroy_fsync_dnodes(&inode_list);
	kmem_cache_destroy(fsync_entry_slab);
	sbi->por_doing = 0;
	if (!err)
		write_checkpoint(sbi, false);
	return err;
}
Jaegeuk Kimd624c962012-11-02 17:13:32 +0900449}