/*
 * fs/f2fs/segment.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/prefetch.h>
#include <linux/vmalloc.h>

#include "f2fs.h"
#include "segment.h"
#include "node.h"
#include <trace/events/f2fs.h>

/*
 * This function balances dirty node and dentry pages.
 * In addition, it controls garbage collection.
 */
void f2fs_balance_fs(struct f2fs_sb_info *sbi)
{
	/*
	 * We should do GC or end up with checkpoint, if there are too many
	 * dirty dir/node pages without enough free segments.
	 */
	if (has_not_enough_free_secs(sbi, 0)) {
		mutex_lock(&sbi->gc_mutex);
		f2fs_gc(sbi);
	}
}

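/*
 * Illustrative caller sketch (added note, not part of the original file):
 * VFS operations that may dirty node/dentry pages call this helper so
 * that foreground GC can reclaim free sections when they run low, e.g.:
 *
 *	f2fs_balance_fs(sbi);	// may enter f2fs_gc() synchronously
 *	... proceed with the block allocation or page dirtying ...
 *
 * There is no unlock of gc_mutex above because f2fs_gc() is entered with
 * the mutex held and releases it itself before returning.
 */
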
void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
{
	/* check the # of cached NAT entries and prefree segments */
	if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK) ||
				excess_prefree_segs(sbi))
		f2fs_sync_fs(sbi->sb, true);
}

static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	/* need not be added */
	if (IS_CURSEG(sbi, segno))
		return;

	if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]++;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = sentry->type;

		if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t]))
			dirty_i->nr_dirty[t]++;
	}
}

static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]--;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = sentry->type;

		if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
			dirty_i->nr_dirty[t]--;

		if (get_valid_blocks(sbi, segno, sbi->segs_per_sec) == 0)
			clear_bit(GET_SECNO(sbi, segno),
						dirty_i->victim_secmap);
	}
}

/*
 * No error such as -ENOMEM should occur here.
 * Adding a dirty entry into the seglist is not a critical operation.
 * If a given segment is one of the current working segments, it won't be added.
 */
static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned short valid_blocks;

	if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
		return;

	mutex_lock(&dirty_i->seglist_lock);

	valid_blocks = get_valid_blocks(sbi, segno, 0);

	if (valid_blocks == 0) {
		__locate_dirty_segment(sbi, segno, PRE);
		__remove_dirty_segment(sbi, segno, DIRTY);
	} else if (valid_blocks < sbi->blocks_per_seg) {
		__locate_dirty_segment(sbi, segno, DIRTY);
	} else {
		/* Recovery routine with SSR needs this */
		__remove_dirty_segment(sbi, segno, DIRTY);
	}

	mutex_unlock(&dirty_i->seglist_lock);
}

/*
 * Should call clear_prefree_segments after checkpoint is done.
 */
static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int segno = -1;
	unsigned int total_segs = TOTAL_SEGS(sbi);

	mutex_lock(&dirty_i->seglist_lock);
	while (1) {
		segno = find_next_bit(dirty_i->dirty_segmap[PRE], total_segs,
				segno + 1);
		if (segno >= total_segs)
			break;
		__set_test_and_free(sbi, segno);
	}
	mutex_unlock(&dirty_i->seglist_lock);
}

void clear_prefree_segments(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
	unsigned int total_segs = TOTAL_SEGS(sbi);
	unsigned int start = 0, end = -1;

	mutex_lock(&dirty_i->seglist_lock);

	while (1) {
		int i;
		start = find_next_bit(prefree_map, total_segs, end + 1);
		if (start >= total_segs)
			break;
		end = find_next_zero_bit(prefree_map, total_segs, start + 1);

		for (i = start; i < end; i++)
			clear_bit(i, prefree_map);

		dirty_i->nr_dirty[PRE] -= end - start;

		if (!test_opt(sbi, DISCARD))
			continue;

		blkdev_issue_discard(sbi->sb->s_bdev,
				START_BLOCK(sbi, start) <<
				sbi->log_sectors_per_block,
				(1 << (sbi->log_sectors_per_block +
				sbi->log_blocks_per_seg)) * (end - start),
				GFP_NOFS, 0);
	}
	mutex_unlock(&dirty_i->seglist_lock);
}

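/*
 * Worked example for the discard arithmetic above (added illustration,
 * assuming 4KB blocks on a 512B-sector device, i.e. log_sectors_per_block
 * == 3, and the default 512 blocks per segment, i.e. log_blocks_per_seg
 * == 9): a run of prefree segments [start, end) maps to the sector range
 * beginning at START_BLOCK(sbi, start) << 3 and spanning
 * (1 << (3 + 9)) * (end - start) == 4096 * (end - start) sectors,
 * i.e. 2MB of discard per segment.
 */
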
static void __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap))
		sit_i->dirty_sentries++;
}

static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type,
					unsigned int segno, int modified)
{
	struct seg_entry *se = get_seg_entry(sbi, segno);
	se->type = type;
	if (modified)
		__mark_sit_entry_dirty(sbi, segno);
}

static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
{
	struct seg_entry *se;
	unsigned int segno, offset;
	long int new_vblocks;

	segno = GET_SEGNO(sbi, blkaddr);

	se = get_seg_entry(sbi, segno);
	new_vblocks = se->valid_blocks + del;
	offset = GET_SEGOFF_FROM_SEG0(sbi, blkaddr) & (sbi->blocks_per_seg - 1);

	f2fs_bug_on((new_vblocks >> (sizeof(unsigned short) << 3) ||
				(new_vblocks > sbi->blocks_per_seg)));

	se->valid_blocks = new_vblocks;
	se->mtime = get_mtime(sbi);
	SIT_I(sbi)->max_mtime = se->mtime;

	/* Update valid block bitmap */
	if (del > 0) {
		if (f2fs_set_bit(offset, se->cur_valid_map))
			BUG();
	} else {
		if (!f2fs_clear_bit(offset, se->cur_valid_map))
			BUG();
	}
	if (!f2fs_test_bit(offset, se->ckpt_valid_map))
		se->ckpt_valid_blocks += del;

	__mark_sit_entry_dirty(sbi, segno);

	/* update total number of valid blocks to be written in ckpt area */
	SIT_I(sbi)->written_valid_blocks += del;

	if (sbi->segs_per_sec > 1)
		get_sec_entry(sbi, segno)->valid_blocks += del;
}

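/*
 * Added note (not in the original source): cur_valid_map tracks block
 * validity in the running filesystem, while ckpt_valid_map is frozen at
 * the last checkpoint. Because the counter above only moves for offsets
 * whose checkpoint bit is clear, ckpt_valid_blocks ends up counting
 * blocks valid at the last checkpoint plus blocks allocated since then,
 * which is exactly the set SSR must not overwrite before the next
 * checkpoint (see __next_free_blkoff, which skips bits in either map).
 */
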
static void refresh_sit_entry(struct f2fs_sb_info *sbi,
			block_t old_blkaddr, block_t new_blkaddr)
{
	update_sit_entry(sbi, new_blkaddr, 1);
	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
		update_sit_entry(sbi, old_blkaddr, -1);
}

void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
{
	unsigned int segno = GET_SEGNO(sbi, addr);
	struct sit_info *sit_i = SIT_I(sbi);

	f2fs_bug_on(addr == NULL_ADDR);
	if (addr == NEW_ADDR)
		return;

	/* add it into sit main buffer */
	mutex_lock(&sit_i->sentry_lock);

	update_sit_entry(sbi, addr, -1);

	/* add it into dirty seglist */
	locate_dirty_segment(sbi, segno);

	mutex_unlock(&sit_i->sentry_lock);
}

/*
 * This function must be called with the curseg_mutex held.
 */
static void __add_sum_entry(struct f2fs_sb_info *sbi, int type,
					struct f2fs_summary *sum)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	void *addr = curseg->sum_blk;
	addr += curseg->next_blkoff * sizeof(struct f2fs_summary);
	memcpy(addr, sum, sizeof(struct f2fs_summary));
}

/*
 * Calculate the number of current summary pages for writing
 */
int npages_for_summary_flush(struct f2fs_sb_info *sbi)
{
	int valid_sum_count = 0;
	int i, sum_in_page;

	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		if (sbi->ckpt->alloc_type[i] == SSR)
			valid_sum_count += sbi->blocks_per_seg;
		else
			valid_sum_count += curseg_blkoff(sbi, i);
	}

	sum_in_page = (PAGE_CACHE_SIZE - 2 * SUM_JOURNAL_SIZE -
			SUM_FOOTER_SIZE) / SUMMARY_SIZE;
	if (valid_sum_count <= sum_in_page)
		return 1;
	else if ((valid_sum_count - sum_in_page) <=
		(PAGE_CACHE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
		return 2;
	return 3;
}

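/*
 * Worked example for the calculation above (added illustration, assuming
 * the usual on-disk constants: 4KB pages, SUMMARY_SIZE == 7,
 * SUM_FOOTER_SIZE == 5 and SUM_JOURNAL_SIZE == 507):
 *
 *	sum_in_page = (4096 - 2 * 507 - 5) / 7 = 439
 *
 * so up to 439 summary entries fit in the first compacted page beside the
 * two journals; a second or third page then holds up to
 * (4096 - 5) / 7 = 584 entries each. Three pages are always enough, since
 * the three data logs carry at most 3 * 512 = 1536 entries in total.
 */
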
/*
 * Caller should put this summary page
 */
struct page *get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
{
	return get_meta_page(sbi, GET_SUM_BLOCK(sbi, segno));
}

static void write_sum_page(struct f2fs_sb_info *sbi,
			struct f2fs_summary_block *sum_blk, block_t blk_addr)
{
	struct page *page = grab_meta_page(sbi, blk_addr);
	void *kaddr = page_address(page);
	memcpy(kaddr, sum_blk, PAGE_CACHE_SIZE);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
}

static int is_next_segment_free(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int segno = curseg->segno + 1;
	struct free_segmap_info *free_i = FREE_I(sbi);

	if (segno < TOTAL_SEGS(sbi) && segno % sbi->segs_per_sec)
		return !test_bit(segno, free_i->free_segmap);
	return 0;
}

/*
 * Find a new segment from the free segments bitmap in the right order.
 * This function must succeed; otherwise it is a BUG.
 */
static void get_new_segment(struct f2fs_sb_info *sbi,
			unsigned int *newseg, bool new_sec, int dir)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int segno, secno, zoneno;
	unsigned int total_zones = TOTAL_SECS(sbi) / sbi->secs_per_zone;
	unsigned int hint = *newseg / sbi->segs_per_sec;
	unsigned int old_zoneno = GET_ZONENO_FROM_SEGNO(sbi, *newseg);
	unsigned int left_start = hint;
	bool init = true;
	int go_left = 0;
	int i;

	write_lock(&free_i->segmap_lock);

	if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
		segno = find_next_zero_bit(free_i->free_segmap,
					TOTAL_SEGS(sbi), *newseg + 1);
		if (segno - *newseg < sbi->segs_per_sec -
					(*newseg % sbi->segs_per_sec))
			goto got_it;
	}
find_other_zone:
	secno = find_next_zero_bit(free_i->free_secmap, TOTAL_SECS(sbi), hint);
	if (secno >= TOTAL_SECS(sbi)) {
		if (dir == ALLOC_RIGHT) {
			secno = find_next_zero_bit(free_i->free_secmap,
							TOTAL_SECS(sbi), 0);
			f2fs_bug_on(secno >= TOTAL_SECS(sbi));
		} else {
			go_left = 1;
			left_start = hint - 1;
		}
	}
	if (go_left == 0)
		goto skip_left;

	while (test_bit(left_start, free_i->free_secmap)) {
		if (left_start > 0) {
			left_start--;
			continue;
		}
		left_start = find_next_zero_bit(free_i->free_secmap,
							TOTAL_SECS(sbi), 0);
		f2fs_bug_on(left_start >= TOTAL_SECS(sbi));
		break;
	}
	secno = left_start;
skip_left:
	hint = secno;
	segno = secno * sbi->segs_per_sec;
	zoneno = secno / sbi->secs_per_zone;

	/* give up on finding another zone */
	if (!init)
		goto got_it;
	if (sbi->secs_per_zone == 1)
		goto got_it;
	if (zoneno == old_zoneno)
		goto got_it;
	if (dir == ALLOC_LEFT) {
		if (!go_left && zoneno + 1 >= total_zones)
			goto got_it;
		if (go_left && zoneno == 0)
			goto got_it;
	}
	for (i = 0; i < NR_CURSEG_TYPE; i++)
		if (CURSEG_I(sbi, i)->zone == zoneno)
			break;

	if (i < NR_CURSEG_TYPE) {
		/* zone is in use, try another */
		if (go_left)
			hint = zoneno * sbi->secs_per_zone - 1;
		else if (zoneno + 1 >= total_zones)
			hint = 0;
		else
			hint = (zoneno + 1) * sbi->secs_per_zone;
		init = false;
		goto find_other_zone;
	}
got_it:
	/* set it as dirty segment in free segmap */
	f2fs_bug_on(test_bit(segno, free_i->free_segmap));
	__set_inuse(sbi, segno);
	*newseg = segno;
	write_unlock(&free_i->segmap_lock);
}

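/*
 * Brief summary of the search above (added note, not in the original):
 * allocation first tries to stay inside the current section, then scans
 * the free-section bitmap from the hint (rightwards, or leftwards for
 * ALLOC_LEFT when nothing is free to the right), and finally retries in
 * another zone when the chosen zone already hosts one of the current
 * segments, so the six active logs spread across zones where possible.
 */
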
static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	struct summary_footer *sum_footer;

	curseg->segno = curseg->next_segno;
	curseg->zone = GET_ZONENO_FROM_SEGNO(sbi, curseg->segno);
	curseg->next_blkoff = 0;
	curseg->next_segno = NULL_SEGNO;

	sum_footer = &(curseg->sum_blk->footer);
	memset(sum_footer, 0, sizeof(struct summary_footer));
	if (IS_DATASEG(type))
		SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
	if (IS_NODESEG(type))
		SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
	__set_sit_entry_type(sbi, type, curseg->segno, modified);
}

/*
 * Allocate a current working segment.
 * This function always allocates a free segment in LFS manner.
 */
static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int segno = curseg->segno;
	int dir = ALLOC_LEFT;

	write_sum_page(sbi, curseg->sum_blk,
				GET_SUM_BLOCK(sbi, segno));
	if (type == CURSEG_WARM_DATA || type == CURSEG_COLD_DATA)
		dir = ALLOC_RIGHT;

	if (test_opt(sbi, NOHEAP))
		dir = ALLOC_RIGHT;

	get_new_segment(sbi, &segno, new_sec, dir);
	curseg->next_segno = segno;
	reset_curseg(sbi, type, 1);
	curseg->alloc_type = LFS;
}

static void __next_free_blkoff(struct f2fs_sb_info *sbi,
			struct curseg_info *seg, block_t start)
{
	struct seg_entry *se = get_seg_entry(sbi, seg->segno);
	block_t ofs;
	for (ofs = start; ofs < sbi->blocks_per_seg; ofs++) {
		if (!f2fs_test_bit(ofs, se->ckpt_valid_map)
			&& !f2fs_test_bit(ofs, se->cur_valid_map))
			break;
	}
	seg->next_blkoff = ofs;
}

/*
 * If a segment is written in LFS manner, the next block offset is simply
 * obtained by increasing the current block offset. However, if a segment is
 * written in SSR manner, the next block offset is obtained by calling
 * __next_free_blkoff.
 */
static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
				struct curseg_info *seg)
{
	if (seg->alloc_type == SSR)
		__next_free_blkoff(sbi, seg, seg->next_blkoff + 1);
	else
		seg->next_blkoff++;
}

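/*
 * Illustration of the two allocation styles (added note): with LFS the
 * offsets simply advance 0, 1, 2, ... through a clean segment, while with
 * SSR __next_free_blkoff() skips any offset whose bit is set in either
 * cur_valid_map or ckpt_valid_map. For a segment whose valid maps cover
 * offsets {0, 1, 4}, SSR hands out offsets 2, 3, 5, 6, ... in turn.
 */
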
/*
 * This function always allocates a used segment (from the dirty seglist) in
 * SSR manner, so it needs to recover the existing segment information of
 * valid blocks.
 */
static void change_curseg(struct f2fs_sb_info *sbi, int type, bool reuse)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int new_segno = curseg->next_segno;
	struct f2fs_summary_block *sum_node;
	struct page *sum_page;

	write_sum_page(sbi, curseg->sum_blk,
				GET_SUM_BLOCK(sbi, curseg->segno));
	__set_test_and_inuse(sbi, new_segno);

	mutex_lock(&dirty_i->seglist_lock);
	__remove_dirty_segment(sbi, new_segno, PRE);
	__remove_dirty_segment(sbi, new_segno, DIRTY);
	mutex_unlock(&dirty_i->seglist_lock);

	reset_curseg(sbi, type, 1);
	curseg->alloc_type = SSR;
	__next_free_blkoff(sbi, curseg, 0);

	if (reuse) {
		sum_page = get_sum_page(sbi, new_segno);
		sum_node = (struct f2fs_summary_block *)page_address(sum_page);
		memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
		f2fs_put_page(sum_page, 1);
	}
}

static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;

	if (IS_NODESEG(type) || !has_not_enough_free_secs(sbi, 0))
		return v_ops->get_victim(sbi,
				&(curseg)->next_segno, BG_GC, type, SSR);

	/* For data segments, let's do SSR more intensively */
	for (; type >= CURSEG_HOT_DATA; type--)
		if (v_ops->get_victim(sbi, &(curseg)->next_segno,
						BG_GC, type, SSR))
			return 1;
	return 0;
}

/*
 * Flush out the current segment and replace it with a new segment.
 * This function must succeed; otherwise it is a BUG.
 */
static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
						int type, bool force)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);

	if (force)
		new_curseg(sbi, type, true);
	else if (type == CURSEG_WARM_NODE)
		new_curseg(sbi, type, false);
	else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type))
		new_curseg(sbi, type, false);
	else if (need_SSR(sbi) && get_ssr_segment(sbi, type))
		change_curseg(sbi, type, true);
	else
		new_curseg(sbi, type, false);

	stat_inc_seg_type(sbi, curseg);
}

void allocate_new_segments(struct f2fs_sb_info *sbi)
{
	struct curseg_info *curseg;
	unsigned int old_curseg;
	int i;

	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		curseg = CURSEG_I(sbi, i);
		old_curseg = curseg->segno;
		SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true);
		locate_dirty_segment(sbi, old_curseg);
	}
}

static const struct segment_allocation default_salloc_ops = {
	.allocate_segment = allocate_segment_by_default,
};

static void f2fs_end_io_write(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct bio_private *p = bio->bi_private;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);
		if (!uptodate) {
			SetPageError(page);
			if (page->mapping)
				set_bit(AS_EIO, &page->mapping->flags);
			set_ckpt_flags(p->sbi->ckpt, CP_ERROR_FLAG);
			p->sbi->sb->s_flags |= MS_RDONLY;
		}
		end_page_writeback(page);
		dec_page_count(p->sbi, F2FS_WRITEBACK);
	} while (bvec >= bio->bi_io_vec);

	if (p->is_sync)
		complete(p->wait);

	if (!get_pages(p->sbi, F2FS_WRITEBACK) &&
			!list_empty(&p->sbi->cp_wait.task_list))
		wake_up(&p->sbi->cp_wait);

	kfree(p);
	bio_put(bio);
}

struct bio *f2fs_bio_alloc(struct block_device *bdev, int npages)
{
	struct bio *bio;

	/* No failure on bio allocation */
	bio = bio_alloc(GFP_NOIO, npages);
	bio->bi_bdev = bdev;
	bio->bi_private = NULL;

	return bio;
}

static void do_submit_bio(struct f2fs_sb_info *sbi,
				enum page_type type, bool sync)
{
	int rw = sync ? WRITE_SYNC : WRITE;
	enum page_type btype = type > META ? META : type;

	if (type >= META_FLUSH)
		rw = WRITE_FLUSH_FUA;

	if (btype == META)
		rw |= REQ_META;

	if (sbi->bio[btype]) {
		struct bio_private *p = sbi->bio[btype]->bi_private;
		p->sbi = sbi;
		sbi->bio[btype]->bi_end_io = f2fs_end_io_write;

		trace_f2fs_do_submit_bio(sbi->sb, btype, sync, sbi->bio[btype]);

		if (type == META_FLUSH) {
			DECLARE_COMPLETION_ONSTACK(wait);
			p->is_sync = true;
			p->wait = &wait;
			submit_bio(rw, sbi->bio[btype]);
			wait_for_completion(&wait);
		} else {
			p->is_sync = false;
			submit_bio(rw, sbi->bio[btype]);
		}
		sbi->bio[btype] = NULL;
	}
}

void f2fs_submit_bio(struct f2fs_sb_info *sbi, enum page_type type, bool sync)
{
	down_write(&sbi->bio_sem);
	do_submit_bio(sbi, type, sync);
	up_write(&sbi->bio_sem);
}

static void submit_write_page(struct f2fs_sb_info *sbi, struct page *page,
				block_t blk_addr, enum page_type type)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	int bio_blocks;

	verify_block_addr(sbi, blk_addr);

	down_write(&sbi->bio_sem);

	inc_page_count(sbi, F2FS_WRITEBACK);

	if (sbi->bio[type] && sbi->last_block_in_bio[type] != blk_addr - 1)
		do_submit_bio(sbi, type, false);
alloc_new:
	if (sbi->bio[type] == NULL) {
		struct bio_private *priv;
retry:
		priv = kmalloc(sizeof(struct bio_private), GFP_NOFS);
		if (!priv) {
			cond_resched();
			goto retry;
		}

		bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
		sbi->bio[type] = f2fs_bio_alloc(bdev, bio_blocks);
		sbi->bio[type]->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
		sbi->bio[type]->bi_private = priv;
		/*
		 * The end_io will be assigned at the submission phase.
		 * Until then, let bio_add_page() merge consecutive IOs as much
		 * as possible.
		 */
	}

	if (bio_add_page(sbi->bio[type], page, PAGE_CACHE_SIZE, 0) <
							PAGE_CACHE_SIZE) {
		do_submit_bio(sbi, type, false);
		goto alloc_new;
	}

	sbi->last_block_in_bio[type] = blk_addr;

	up_write(&sbi->bio_sem);
	trace_f2fs_submit_write_page(page, blk_addr, type);
}

void f2fs_wait_on_page_writeback(struct page *page,
				enum page_type type, bool sync)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	if (PageWriteback(page)) {
		f2fs_submit_bio(sbi, type, sync);
		wait_on_page_writeback(page);
	}
}

static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	if (curseg->next_blkoff < sbi->blocks_per_seg)
		return true;
	return false;
}

static int __get_segment_type_2(struct page *page, enum page_type p_type)
{
	if (p_type == DATA)
		return CURSEG_HOT_DATA;
	else
		return CURSEG_HOT_NODE;
}

static int __get_segment_type_4(struct page *page, enum page_type p_type)
{
	if (p_type == DATA) {
		struct inode *inode = page->mapping->host;

		if (S_ISDIR(inode->i_mode))
			return CURSEG_HOT_DATA;
		else
			return CURSEG_COLD_DATA;
	} else {
		if (IS_DNODE(page) && !is_cold_node(page))
			return CURSEG_HOT_NODE;
		else
			return CURSEG_COLD_NODE;
	}
}

static int __get_segment_type_6(struct page *page, enum page_type p_type)
{
	if (p_type == DATA) {
		struct inode *inode = page->mapping->host;

		if (S_ISDIR(inode->i_mode))
			return CURSEG_HOT_DATA;
		else if (is_cold_data(page) || file_is_cold(inode))
			return CURSEG_COLD_DATA;
		else
			return CURSEG_WARM_DATA;
	} else {
		if (IS_DNODE(page))
			return is_cold_node(page) ? CURSEG_WARM_NODE :
						CURSEG_HOT_NODE;
		else
			return CURSEG_COLD_NODE;
	}
}

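/*
 * Summary of the six-log classification above (added note):
 *
 *	DATA: directory data        -> CURSEG_HOT_DATA
 *	      cold data / cold file -> CURSEG_COLD_DATA
 *	      everything else       -> CURSEG_WARM_DATA
 *	NODE: direct node, not cold -> CURSEG_HOT_NODE
 *	      direct node, cold     -> CURSEG_WARM_NODE
 *	      indirect node         -> CURSEG_COLD_NODE
 */
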
static int __get_segment_type(struct page *page, enum page_type p_type)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	switch (sbi->active_logs) {
	case 2:
		return __get_segment_type_2(page, p_type);
	case 4:
		return __get_segment_type_4(page, p_type);
	}
	/* NR_CURSEG_TYPE(6) logs by default */
	f2fs_bug_on(sbi->active_logs != NR_CURSEG_TYPE);
	return __get_segment_type_6(page, p_type);
}

static void do_write_page(struct f2fs_sb_info *sbi, struct page *page,
			block_t old_blkaddr, block_t *new_blkaddr,
			struct f2fs_summary *sum, enum page_type p_type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct curseg_info *curseg;
	unsigned int old_cursegno;
	int type;

	type = __get_segment_type(page, p_type);
	curseg = CURSEG_I(sbi, type);

	mutex_lock(&curseg->curseg_mutex);

	*new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
	old_cursegno = curseg->segno;

	/*
	 * __add_sum_entry should be called under the curseg_mutex,
	 * because this function updates a summary entry in the
	 * current summary block.
	 */
	__add_sum_entry(sbi, type, sum);

	mutex_lock(&sit_i->sentry_lock);
	__refresh_next_blkoff(sbi, curseg);

	stat_inc_block_count(sbi, curseg);

	/*
	 * SIT information should be updated before segment allocation,
	 * since SSR needs latest valid block information.
	 */
	refresh_sit_entry(sbi, old_blkaddr, *new_blkaddr);

	if (!__has_curseg_space(sbi, type))
		sit_i->s_ops->allocate_segment(sbi, type, false);

	locate_dirty_segment(sbi, old_cursegno);
	locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
	mutex_unlock(&sit_i->sentry_lock);

	if (p_type == NODE)
		fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));

	/* writeout dirty page into bdev */
	submit_write_page(sbi, page, *new_blkaddr, p_type);

	mutex_unlock(&curseg->curseg_mutex);
}

void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)
{
	set_page_writeback(page);
	submit_write_page(sbi, page, page->index, META);
}

void write_node_page(struct f2fs_sb_info *sbi, struct page *page,
		unsigned int nid, block_t old_blkaddr, block_t *new_blkaddr)
{
	struct f2fs_summary sum;
	set_summary(&sum, nid, 0, 0);
	do_write_page(sbi, page, old_blkaddr, new_blkaddr, &sum, NODE);
}

void write_data_page(struct inode *inode, struct page *page,
		struct dnode_of_data *dn, block_t old_blkaddr,
		block_t *new_blkaddr)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct f2fs_summary sum;
	struct node_info ni;

	f2fs_bug_on(old_blkaddr == NULL_ADDR);
	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	do_write_page(sbi, page, old_blkaddr,
			new_blkaddr, &sum, DATA);
}

void rewrite_data_page(struct f2fs_sb_info *sbi, struct page *page,
			block_t old_blk_addr)
{
	submit_write_page(sbi, page, old_blk_addr, DATA);
}

void recover_data_page(struct f2fs_sb_info *sbi,
			struct page *page, struct f2fs_summary *sum,
			block_t old_blkaddr, block_t new_blkaddr)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct curseg_info *curseg;
	unsigned int segno, old_cursegno;
	struct seg_entry *se;
	int type;

	segno = GET_SEGNO(sbi, new_blkaddr);
	se = get_seg_entry(sbi, segno);
	type = se->type;

	if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
		if (old_blkaddr == NULL_ADDR)
			type = CURSEG_COLD_DATA;
		else
			type = CURSEG_WARM_DATA;
	}
	curseg = CURSEG_I(sbi, type);

	mutex_lock(&curseg->curseg_mutex);
	mutex_lock(&sit_i->sentry_lock);

	old_cursegno = curseg->segno;

	/* change the current segment */
	if (segno != curseg->segno) {
		curseg->next_segno = segno;
		change_curseg(sbi, type, true);
	}

	curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, new_blkaddr) &
					(sbi->blocks_per_seg - 1);
	__add_sum_entry(sbi, type, sum);

	refresh_sit_entry(sbi, old_blkaddr, new_blkaddr);

	locate_dirty_segment(sbi, old_cursegno);
	locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));

	mutex_unlock(&sit_i->sentry_lock);
	mutex_unlock(&curseg->curseg_mutex);
}

void rewrite_node_page(struct f2fs_sb_info *sbi,
			struct page *page, struct f2fs_summary *sum,
			block_t old_blkaddr, block_t new_blkaddr)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int type = CURSEG_WARM_NODE;
	struct curseg_info *curseg;
	unsigned int segno, old_cursegno;
	block_t next_blkaddr = next_blkaddr_of_node(page);
	unsigned int next_segno = GET_SEGNO(sbi, next_blkaddr);

	curseg = CURSEG_I(sbi, type);

	mutex_lock(&curseg->curseg_mutex);
	mutex_lock(&sit_i->sentry_lock);

	segno = GET_SEGNO(sbi, new_blkaddr);
	old_cursegno = curseg->segno;

	/* change the current segment */
	if (segno != curseg->segno) {
		curseg->next_segno = segno;
		change_curseg(sbi, type, true);
	}
	curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, new_blkaddr) &
					(sbi->blocks_per_seg - 1);
	__add_sum_entry(sbi, type, sum);

	/* change the current log to the next block addr in advance */
	if (next_segno != segno) {
		curseg->next_segno = next_segno;
		change_curseg(sbi, type, true);
	}
	curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, next_blkaddr) &
					(sbi->blocks_per_seg - 1);

	/* rewrite node page */
	set_page_writeback(page);
	submit_write_page(sbi, page, new_blkaddr, NODE);
	f2fs_submit_bio(sbi, NODE, true);
	refresh_sit_entry(sbi, old_blkaddr, new_blkaddr);

	locate_dirty_segment(sbi, old_cursegno);
	locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));

	mutex_unlock(&sit_i->sentry_lock);
	mutex_unlock(&curseg->curseg_mutex);
}

static int read_compacted_summaries(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct curseg_info *seg_i;
	unsigned char *kaddr;
	struct page *page;
	block_t start;
	int i, j, offset;

	start = start_sum_block(sbi);

	page = get_meta_page(sbi, start++);
	kaddr = (unsigned char *)page_address(page);

	/* Step 1: restore nat cache */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
	memcpy(&seg_i->sum_blk->n_nats, kaddr, SUM_JOURNAL_SIZE);

	/* Step 2: restore sit cache */
	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
	memcpy(&seg_i->sum_blk->n_sits, kaddr + SUM_JOURNAL_SIZE,
						SUM_JOURNAL_SIZE);
	offset = 2 * SUM_JOURNAL_SIZE;

	/* Step 3: restore summary entries */
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		unsigned short blk_off;
		unsigned int segno;

		seg_i = CURSEG_I(sbi, i);
		segno = le32_to_cpu(ckpt->cur_data_segno[i]);
		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
		seg_i->next_segno = segno;
		reset_curseg(sbi, i, 0);
		seg_i->alloc_type = ckpt->alloc_type[i];
		seg_i->next_blkoff = blk_off;

		if (seg_i->alloc_type == SSR)
			blk_off = sbi->blocks_per_seg;

		for (j = 0; j < blk_off; j++) {
			struct f2fs_summary *s;
			s = (struct f2fs_summary *)(kaddr + offset);
			seg_i->sum_blk->entries[j] = *s;
			offset += SUMMARY_SIZE;
			if (offset + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
						SUM_FOOTER_SIZE)
				continue;

			f2fs_put_page(page, 1);
			page = NULL;

			page = get_meta_page(sbi, start++);
			kaddr = (unsigned char *)page_address(page);
			offset = 0;
		}
	}
	f2fs_put_page(page, 1);
	return 0;
}

static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_summary_block *sum;
	struct curseg_info *curseg;
	struct page *new;
	unsigned short blk_off;
	unsigned int segno = 0;
	block_t blk_addr = 0;

	/* get segment number and block addr */
	if (IS_DATASEG(type)) {
		segno = le32_to_cpu(ckpt->cur_data_segno[type]);
		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type -
							CURSEG_HOT_DATA]);
		if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG))
			blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type);
		else
			blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
	} else {
		segno = le32_to_cpu(ckpt->cur_node_segno[type -
							CURSEG_HOT_NODE]);
		blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type -
							CURSEG_HOT_NODE]);
		if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG))
			blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
							type - CURSEG_HOT_NODE);
		else
			blk_addr = GET_SUM_BLOCK(sbi, segno);
	}

	new = get_meta_page(sbi, blk_addr);
	sum = (struct f2fs_summary_block *)page_address(new);

	if (IS_NODESEG(type)) {
		if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG)) {
			struct f2fs_summary *ns = &sum->entries[0];
			int i;
			for (i = 0; i < sbi->blocks_per_seg; i++, ns++) {
				ns->version = 0;
				ns->ofs_in_node = 0;
			}
		} else {
			if (restore_node_summary(sbi, segno, sum)) {
				f2fs_put_page(new, 1);
				return -EINVAL;
			}
		}
	}

	/* set uncompleted segment to curseg */
	curseg = CURSEG_I(sbi, type);
	mutex_lock(&curseg->curseg_mutex);
	memcpy(curseg->sum_blk, sum, PAGE_CACHE_SIZE);
	curseg->next_segno = segno;
	reset_curseg(sbi, type, 0);
	curseg->alloc_type = ckpt->alloc_type[type];
	curseg->next_blkoff = blk_off;
	mutex_unlock(&curseg->curseg_mutex);
	f2fs_put_page(new, 1);
	return 0;
}

static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
{
	int type = CURSEG_HOT_DATA;

	if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) {
		/* restore for compacted data summary */
		if (read_compacted_summaries(sbi))
			return -EINVAL;
		type = CURSEG_HOT_NODE;
	}

	for (; type <= CURSEG_COLD_NODE; type++)
		if (read_normal_summaries(sbi, type))
			return -EINVAL;
	return 0;
}

static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	struct page *page;
	unsigned char *kaddr;
	struct f2fs_summary *summary;
	struct curseg_info *seg_i;
	int written_size = 0;
	int i, j;

	page = grab_meta_page(sbi, blkaddr++);
	kaddr = (unsigned char *)page_address(page);

	/* Step 1: write nat cache */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
	memcpy(kaddr, &seg_i->sum_blk->n_nats, SUM_JOURNAL_SIZE);
	written_size += SUM_JOURNAL_SIZE;

	/* Step 2: write sit cache */
	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
	memcpy(kaddr + written_size, &seg_i->sum_blk->n_sits,
						SUM_JOURNAL_SIZE);
	written_size += SUM_JOURNAL_SIZE;

	/* Step 3: write summary entries */
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		unsigned short blkoff;
		seg_i = CURSEG_I(sbi, i);
		if (sbi->ckpt->alloc_type[i] == SSR)
			blkoff = sbi->blocks_per_seg;
		else
			blkoff = curseg_blkoff(sbi, i);

		for (j = 0; j < blkoff; j++) {
			if (!page) {
				page = grab_meta_page(sbi, blkaddr++);
				kaddr = (unsigned char *)page_address(page);
				written_size = 0;
			}
			summary = (struct f2fs_summary *)(kaddr + written_size);
			*summary = seg_i->sum_blk->entries[j];
			written_size += SUMMARY_SIZE;

			if (written_size + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
							SUM_FOOTER_SIZE)
				continue;

			set_page_dirty(page);
			f2fs_put_page(page, 1);
			page = NULL;
		}
	}
	if (page) {
		set_page_dirty(page);
		f2fs_put_page(page, 1);
	}
}

static void write_normal_summaries(struct f2fs_sb_info *sbi,
					block_t blkaddr, int type)
{
	int i, end;
	if (IS_DATASEG(type))
		end = type + NR_CURSEG_DATA_TYPE;
	else
		end = type + NR_CURSEG_NODE_TYPE;

	for (i = type; i < end; i++) {
		struct curseg_info *sum = CURSEG_I(sbi, i);
		mutex_lock(&sum->curseg_mutex);
		write_sum_page(sbi, sum->sum_blk, blkaddr + (i - type));
		mutex_unlock(&sum->curseg_mutex);
	}
}

void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
{
	if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG))
		write_compacted_summaries(sbi, start_blk);
	else
		write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
}

void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
{
	if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG))
		write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
}

int lookup_journal_in_cursum(struct f2fs_summary_block *sum, int type,
					unsigned int val, int alloc)
{
	int i;

	if (type == NAT_JOURNAL) {
		for (i = 0; i < nats_in_cursum(sum); i++) {
			if (le32_to_cpu(nid_in_journal(sum, i)) == val)
				return i;
		}
		if (alloc && nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES)
			return update_nats_in_cursum(sum, 1);
	} else if (type == SIT_JOURNAL) {
		for (i = 0; i < sits_in_cursum(sum); i++)
			if (le32_to_cpu(segno_in_journal(sum, i)) == val)
				return i;
		if (alloc && sits_in_cursum(sum) < SIT_JOURNAL_ENTRIES)
			return update_sits_in_cursum(sum, 1);
	}
	return -1;
}

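/*
 * Usage sketch (added illustration; real callers live in node.c and in
 * flush_sit_entries() below): a caller finds or allocates a journal slot
 * in the cold data summary and fills it in, e.g. for the SIT journal:
 *
 *	int offset = lookup_journal_in_cursum(sum, SIT_JOURNAL, segno, 1);
 *	if (offset >= 0) {
 *		segno_in_journal(sum, offset) = cpu_to_le32(segno);
 *		seg_info_to_raw_sit(se, &sit_in_journal(sum, offset));
 *	}
 *
 * A negative return means the journal is full and the entry must be
 * written to the on-disk NAT/SIT area instead.
 */
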
static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
					unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int offset = SIT_BLOCK_OFFSET(sit_i, segno);
	block_t blk_addr = sit_i->sit_base_addr + offset;

	check_seg_range(sbi, segno);

	/* calculate sit block address */
	if (f2fs_test_bit(offset, sit_i->sit_bitmap))
		blk_addr += sit_i->sit_blocks;

	return get_meta_page(sbi, blk_addr);
}

static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
					unsigned int start)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct page *src_page, *dst_page;
	pgoff_t src_off, dst_off;
	void *src_addr, *dst_addr;

	src_off = current_sit_addr(sbi, start);
	dst_off = next_sit_addr(sbi, src_off);

	/* get current sit block page without lock */
	src_page = get_meta_page(sbi, src_off);
	dst_page = grab_meta_page(sbi, dst_off);
	f2fs_bug_on(PageDirty(src_page));

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);

	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_sit(sit_i, start);

	return dst_page;
}

static bool flush_sits_in_journal(struct f2fs_sb_info *sbi)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i;

	/*
	 * If the journal area in the current summary is full of sit entries,
	 * all the sit entries will be flushed. Otherwise the old sit entries
	 * cannot be replaced with newly hot sit entries.
	 */
	if (sits_in_cursum(sum) >= SIT_JOURNAL_ENTRIES) {
		for (i = sits_in_cursum(sum) - 1; i >= 0; i--) {
			unsigned int segno;
			segno = le32_to_cpu(segno_in_journal(sum, i));
			__mark_sit_entry_dirty(sbi, segno);
		}
		update_sits_in_cursum(sum, -sits_in_cursum(sum));
		return true;
	}
	return false;
}

/*
 * CP calls this function, which flushes SIT entries including sit_journal,
 * and moves prefree segs to free segs.
 */
void flush_sit_entries(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	unsigned long nsegs = TOTAL_SEGS(sbi);
	struct page *page = NULL;
	struct f2fs_sit_block *raw_sit = NULL;
	unsigned int start = 0, end = 0;
	unsigned int segno = -1;
	bool flushed;

	mutex_lock(&curseg->curseg_mutex);
	mutex_lock(&sit_i->sentry_lock);

	/*
	 * "flushed" indicates whether sit entries in journal are flushed
	 * to the SIT area or not.
	 */
	flushed = flush_sits_in_journal(sbi);

	while ((segno = find_next_bit(bitmap, nsegs, segno + 1)) < nsegs) {
		struct seg_entry *se = get_seg_entry(sbi, segno);
		int sit_offset, offset;

		sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);

		if (flushed)
			goto to_sit_page;

		offset = lookup_journal_in_cursum(sum, SIT_JOURNAL, segno, 1);
		if (offset >= 0) {
			segno_in_journal(sum, offset) = cpu_to_le32(segno);
			seg_info_to_raw_sit(se, &sit_in_journal(sum, offset));
			goto flush_done;
		}
to_sit_page:
		if (!page || (start > segno) || (segno > end)) {
			if (page) {
				f2fs_put_page(page, 1);
				page = NULL;
			}

			start = START_SEGNO(sit_i, segno);
			end = start + SIT_ENTRY_PER_BLOCK - 1;

			/* read sit block that will be updated */
			page = get_next_sit_page(sbi, start);
			raw_sit = page_address(page);
		}

		/* update entry in SIT block */
		seg_info_to_raw_sit(se, &raw_sit->entries[sit_offset]);
flush_done:
		__clear_bit(segno, bitmap);
		sit_i->dirty_sentries--;
	}
	mutex_unlock(&sit_i->sentry_lock);
	mutex_unlock(&curseg->curseg_mutex);

	/* writeout last modified SIT block */
	f2fs_put_page(page, 1);

	set_prefree_as_free_segments(sbi);
}

static int build_sit_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct sit_info *sit_i;
	unsigned int sit_segs, start;
	char *src_bitmap, *dst_bitmap;
	unsigned int bitmap_size;

	/* allocate memory for SIT information */
	sit_i = kzalloc(sizeof(struct sit_info), GFP_KERNEL);
	if (!sit_i)
		return -ENOMEM;

	SM_I(sbi)->sit_info = sit_i;

	sit_i->sentries = vzalloc(TOTAL_SEGS(sbi) * sizeof(struct seg_entry));
	if (!sit_i->sentries)
		return -ENOMEM;

	bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));
	sit_i->dirty_sentries_bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!sit_i->dirty_sentries_bitmap)
		return -ENOMEM;

	for (start = 0; start < TOTAL_SEGS(sbi); start++) {
		sit_i->sentries[start].cur_valid_map
			= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
		sit_i->sentries[start].ckpt_valid_map
			= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
		if (!sit_i->sentries[start].cur_valid_map
				|| !sit_i->sentries[start].ckpt_valid_map)
			return -ENOMEM;
	}

	if (sbi->segs_per_sec > 1) {
		sit_i->sec_entries = vzalloc(TOTAL_SECS(sbi) *
					sizeof(struct sec_entry));
		if (!sit_i->sec_entries)
			return -ENOMEM;
	}

	/* get information related with SIT */
	sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;

	/* setup SIT bitmap from checkpoint pack */
	bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
	src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);

	dst_bitmap = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
	if (!dst_bitmap)
		return -ENOMEM;

	/* init SIT information */
	sit_i->s_ops = &default_salloc_ops;

	sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
	sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
	sit_i->written_valid_blocks = le64_to_cpu(ckpt->valid_block_count);
	sit_i->sit_bitmap = dst_bitmap;
	sit_i->bitmap_size = bitmap_size;
	sit_i->dirty_sentries = 0;
	sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
	sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
	sit_i->mounted_time = CURRENT_TIME_SEC.tv_sec;
	mutex_init(&sit_i->sentry_lock);
	return 0;
}

static int build_free_segmap(struct f2fs_sb_info *sbi)
{
	struct f2fs_sm_info *sm_info = SM_I(sbi);
	struct free_segmap_info *free_i;
	unsigned int bitmap_size, sec_bitmap_size;

	/* allocate memory for free segmap information */
	free_i = kzalloc(sizeof(struct free_segmap_info), GFP_KERNEL);
	if (!free_i)
		return -ENOMEM;

	SM_I(sbi)->free_info = free_i;

	bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));
	free_i->free_segmap = kmalloc(bitmap_size, GFP_KERNEL);
	if (!free_i->free_segmap)
		return -ENOMEM;

	sec_bitmap_size = f2fs_bitmap_size(TOTAL_SECS(sbi));
	free_i->free_secmap = kmalloc(sec_bitmap_size, GFP_KERNEL);
	if (!free_i->free_secmap)
		return -ENOMEM;

	/* set all segments as dirty temporarily */
	memset(free_i->free_segmap, 0xff, bitmap_size);
	memset(free_i->free_secmap, 0xff, sec_bitmap_size);

	/* init free segmap information */
	free_i->start_segno =
		(unsigned int) GET_SEGNO_FROM_SEG0(sbi, sm_info->main_blkaddr);
	free_i->free_segments = 0;
	free_i->free_sections = 0;
	rwlock_init(&free_i->segmap_lock);
	return 0;
}

static int build_curseg(struct f2fs_sb_info *sbi)
{
	struct curseg_info *array;
	int i;

	array = kzalloc(sizeof(*array) * NR_CURSEG_TYPE, GFP_KERNEL);
	if (!array)
		return -ENOMEM;

	SM_I(sbi)->curseg_array = array;

	for (i = 0; i < NR_CURSEG_TYPE; i++) {
		mutex_init(&array[i].curseg_mutex);
		array[i].sum_blk = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
		if (!array[i].sum_blk)
			return -ENOMEM;
		array[i].segno = NULL_SEGNO;
		array[i].next_blkoff = 0;
	}
	return restore_curseg_summaries(sbi);
}

static void build_sit_entries(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	unsigned int start;

	for (start = 0; start < TOTAL_SEGS(sbi); start++) {
		struct seg_entry *se = &sit_i->sentries[start];
		struct f2fs_sit_block *sit_blk;
		struct f2fs_sit_entry sit;
		struct page *page;
		int i;

		mutex_lock(&curseg->curseg_mutex);
		for (i = 0; i < sits_in_cursum(sum); i++) {
			if (le32_to_cpu(segno_in_journal(sum, i)) == start) {
				sit = sit_in_journal(sum, i);
				mutex_unlock(&curseg->curseg_mutex);
				goto got_it;
			}
		}
		mutex_unlock(&curseg->curseg_mutex);
		page = get_current_sit_page(sbi, start);
		sit_blk = (struct f2fs_sit_block *)page_address(page);
		sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
		f2fs_put_page(page, 1);
got_it:
		check_block_count(sbi, start, &sit);
		seg_info_from_raw_sit(se, &sit);
		if (sbi->segs_per_sec > 1) {
			struct sec_entry *e = get_sec_entry(sbi, start);
			e->valid_blocks += se->valid_blocks;
		}
	}
}

static void init_free_segmap(struct f2fs_sb_info *sbi)
{
	unsigned int start;
	int type;

	for (start = 0; start < TOTAL_SEGS(sbi); start++) {
		struct seg_entry *sentry = get_seg_entry(sbi, start);
		if (!sentry->valid_blocks)
			__set_free(sbi, start);
	}

	/* mark the current segments as in-use */
	for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) {
		struct curseg_info *curseg_t = CURSEG_I(sbi, type);
		__set_test_and_inuse(sbi, curseg_t->segno);
	}
}

static void init_dirty_segmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int segno = 0, offset = 0, total_segs = TOTAL_SEGS(sbi);
	unsigned short valid_blocks;

	while (1) {
		/* find dirty segment based on free segmap */
		segno = find_next_inuse(free_i, total_segs, offset);
		if (segno >= total_segs)
			break;
		offset = segno + 1;
		valid_blocks = get_valid_blocks(sbi, segno, 0);
		if (valid_blocks >= sbi->blocks_per_seg || !valid_blocks)
			continue;
		mutex_lock(&dirty_i->seglist_lock);
		__locate_dirty_segment(sbi, segno, DIRTY);
		mutex_unlock(&dirty_i->seglist_lock);
	}
}

static int init_victim_secmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int bitmap_size = f2fs_bitmap_size(TOTAL_SECS(sbi));

	dirty_i->victim_secmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!dirty_i->victim_secmap)
		return -ENOMEM;
	return 0;
}

static int build_dirty_segmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i;
	unsigned int bitmap_size, i;

	/* allocate memory for dirty segments list information */
	dirty_i = kzalloc(sizeof(struct dirty_seglist_info), GFP_KERNEL);
	if (!dirty_i)
		return -ENOMEM;

	SM_I(sbi)->dirty_info = dirty_i;
	mutex_init(&dirty_i->seglist_lock);

	bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));

	for (i = 0; i < NR_DIRTY_TYPE; i++) {
		dirty_i->dirty_segmap[i] = kzalloc(bitmap_size, GFP_KERNEL);
		if (!dirty_i->dirty_segmap[i])
			return -ENOMEM;
	}

	init_dirty_segmap(sbi);
	return init_victim_secmap(sbi);
}

/*
 * Update min, max modified time for cost-benefit GC algorithm
 */
static void init_min_max_mtime(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int segno;

	mutex_lock(&sit_i->sentry_lock);

	sit_i->min_mtime = LLONG_MAX;

	for (segno = 0; segno < TOTAL_SEGS(sbi); segno += sbi->segs_per_sec) {
		unsigned int i;
		unsigned long long mtime = 0;

		for (i = 0; i < sbi->segs_per_sec; i++)
			mtime += get_seg_entry(sbi, segno + i)->mtime;

		mtime = div_u64(mtime, sbi->segs_per_sec);

		if (sit_i->min_mtime > mtime)
			sit_i->min_mtime = mtime;
	}
	sit_i->max_mtime = get_mtime(sbi);
	mutex_unlock(&sit_i->sentry_lock);
}

int build_segment_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_sm_info *sm_info;
	int err;

	sm_info = kzalloc(sizeof(struct f2fs_sm_info), GFP_KERNEL);
	if (!sm_info)
		return -ENOMEM;

	/* init sm info */
	sbi->sm_info = sm_info;
	INIT_LIST_HEAD(&sm_info->wblist_head);
	spin_lock_init(&sm_info->wblist_lock);
	sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
	sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
	sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
	sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
	sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
	sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
	sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
	sm_info->rec_prefree_segments = DEF_RECLAIM_PREFREE_SEGMENTS;

	err = build_sit_info(sbi);
	if (err)
		return err;
	err = build_free_segmap(sbi);
	if (err)
		return err;
	err = build_curseg(sbi);
	if (err)
		return err;

	/* reinit free segmap based on SIT */
	build_sit_entries(sbi);

	init_free_segmap(sbi);
	err = build_dirty_segmap(sbi);
	if (err)
		return err;

	init_min_max_mtime(sbi);
	return 0;
}

static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	mutex_lock(&dirty_i->seglist_lock);
	kfree(dirty_i->dirty_segmap[dirty_type]);
	dirty_i->nr_dirty[dirty_type] = 0;
	mutex_unlock(&dirty_i->seglist_lock);
}

static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	kfree(dirty_i->victim_secmap);
}

static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	int i;

	if (!dirty_i)
		return;

	/* discard pre-free/dirty segments list */
	for (i = 0; i < NR_DIRTY_TYPE; i++)
		discard_dirty_segmap(sbi, i);

	destroy_victim_secmap(sbi);
	SM_I(sbi)->dirty_info = NULL;
	kfree(dirty_i);
}

static void destroy_curseg(struct f2fs_sb_info *sbi)
{
	struct curseg_info *array = SM_I(sbi)->curseg_array;
	int i;

	if (!array)
		return;
	SM_I(sbi)->curseg_array = NULL;
	for (i = 0; i < NR_CURSEG_TYPE; i++)
		kfree(array[i].sum_blk);
	kfree(array);
}

static void destroy_free_segmap(struct f2fs_sb_info *sbi)
{
	struct free_segmap_info *free_i = SM_I(sbi)->free_info;
	if (!free_i)
		return;
	SM_I(sbi)->free_info = NULL;
	kfree(free_i->free_segmap);
	kfree(free_i->free_secmap);
	kfree(free_i);
}

static void destroy_sit_info(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int start;

	if (!sit_i)
		return;

	if (sit_i->sentries) {
		for (start = 0; start < TOTAL_SEGS(sbi); start++) {
			kfree(sit_i->sentries[start].cur_valid_map);
			kfree(sit_i->sentries[start].ckpt_valid_map);
		}
	}
	vfree(sit_i->sentries);
	vfree(sit_i->sec_entries);
	kfree(sit_i->dirty_sentries_bitmap);

	SM_I(sbi)->sit_info = NULL;
	kfree(sit_i->sit_bitmap);
	kfree(sit_i);
}

void destroy_segment_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_sm_info *sm_info = SM_I(sbi);
	if (!sm_info)
		return;
	destroy_dirty_segmap(sbi);
	destroy_curseg(sbi);
	destroy_free_segmap(sbi);
	destroy_sit_info(sbi);
	sbi->sm_info = NULL;
	kfree(sm_info);
}