/*
 * fs/f2fs/segment.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/prefetch.h>
#include <linux/vmalloc.h>

#include "f2fs.h"
#include "segment.h"
#include "node.h"
#include <trace/events/f2fs.h>

/*
 * This function balances dirty node and dentry pages.
 * In addition, it controls garbage collection.
 */
void f2fs_balance_fs(struct f2fs_sb_info *sbi)
{
	/*
	 * We should do GC, or end up with a checkpoint, if there are too many
	 * dirty dir/node pages without enough free segments.
	 */
	if (has_not_enough_free_secs(sbi, 0)) {
		mutex_lock(&sbi->gc_mutex);
		f2fs_gc(sbi);
	}
}

void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
{
	/* check the # of cached NAT entries and prefree segments */
	if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK) ||
				excess_prefree_segs(sbi))
		f2fs_sync_fs(sbi->sb, true);
}

static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	/* need not be added */
	if (IS_CURSEG(sbi, segno))
		return;

	if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]++;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = DIRTY_HOT_DATA;

		dirty_type = sentry->type;

		if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
			dirty_i->nr_dirty[dirty_type]++;

		/* Only one bitmap should be set */
		for (; t <= DIRTY_COLD_NODE; t++) {
			if (t == dirty_type)
				continue;
			if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
				dirty_i->nr_dirty[t]--;
		}
	}
}

static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]--;

	if (dirty_type == DIRTY) {
		enum dirty_type t = DIRTY_HOT_DATA;

		/* clear its dirty bitmap */
		for (; t <= DIRTY_COLD_NODE; t++) {
			if (test_and_clear_bit(segno,
					dirty_i->dirty_segmap[t])) {
				dirty_i->nr_dirty[t]--;
				break;
			}
		}

		if (get_valid_blocks(sbi, segno, sbi->segs_per_sec) == 0)
			clear_bit(GET_SECNO(sbi, segno),
						dirty_i->victim_secmap);
	}
}

/*
 * Errors such as -ENOMEM should not occur here, since adding a dirty entry
 * into the seglist is not a critical operation.
 * If a given segment is one of the current working segments, it won't be added.
 */
static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned short valid_blocks;

	if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
		return;

	mutex_lock(&dirty_i->seglist_lock);

	valid_blocks = get_valid_blocks(sbi, segno, 0);

	if (valid_blocks == 0) {
		__locate_dirty_segment(sbi, segno, PRE);
		__remove_dirty_segment(sbi, segno, DIRTY);
	} else if (valid_blocks < sbi->blocks_per_seg) {
		__locate_dirty_segment(sbi, segno, DIRTY);
	} else {
		/* Recovery routine with SSR needs this */
		__remove_dirty_segment(sbi, segno, DIRTY);
	}

	mutex_unlock(&dirty_i->seglist_lock);
}

/*
 * Should call clear_prefree_segments after checkpoint is done.
 */
static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int segno = -1;
	unsigned int total_segs = TOTAL_SEGS(sbi);

	mutex_lock(&dirty_i->seglist_lock);
	while (1) {
		segno = find_next_bit(dirty_i->dirty_segmap[PRE], total_segs,
				segno + 1);
		if (segno >= total_segs)
			break;
		__set_test_and_free(sbi, segno);
	}
	mutex_unlock(&dirty_i->seglist_lock);
}

void clear_prefree_segments(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int segno = -1;
	unsigned int total_segs = TOTAL_SEGS(sbi);

	mutex_lock(&dirty_i->seglist_lock);
	while (1) {
		segno = find_next_bit(dirty_i->dirty_segmap[PRE], total_segs,
				segno + 1);
		if (segno >= total_segs)
			break;

		if (test_and_clear_bit(segno, dirty_i->dirty_segmap[PRE]))
			dirty_i->nr_dirty[PRE]--;

		/* Let's use trim */
		if (test_opt(sbi, DISCARD))
			blkdev_issue_discard(sbi->sb->s_bdev,
					START_BLOCK(sbi, segno) <<
					sbi->log_sectors_per_block,
					1 << (sbi->log_sectors_per_block +
					sbi->log_blocks_per_seg),
					GFP_NOFS, 0);
	}
	mutex_unlock(&dirty_i->seglist_lock);
}

static void __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap))
		sit_i->dirty_sentries++;
}

static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type,
					unsigned int segno, int modified)
{
	struct seg_entry *se = get_seg_entry(sbi, segno);
	se->type = type;
	if (modified)
		__mark_sit_entry_dirty(sbi, segno);
}

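/*
 * Update the SIT entry of the segment that contains @blkaddr: @del is +1
 * when the block becomes valid and -1 when it is invalidated.
 */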
static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
{
	struct seg_entry *se;
	unsigned int segno, offset;
	long int new_vblocks;

	segno = GET_SEGNO(sbi, blkaddr);

	se = get_seg_entry(sbi, segno);
	new_vblocks = se->valid_blocks + del;
	offset = GET_SEGOFF_FROM_SEG0(sbi, blkaddr) & (sbi->blocks_per_seg - 1);

	BUG_ON((new_vblocks >> (sizeof(unsigned short) << 3) ||
				(new_vblocks > sbi->blocks_per_seg)));

	se->valid_blocks = new_vblocks;
	se->mtime = get_mtime(sbi);
	SIT_I(sbi)->max_mtime = se->mtime;

	/* Update valid block bitmap */
	if (del > 0) {
		if (f2fs_set_bit(offset, se->cur_valid_map))
			BUG();
	} else {
		if (!f2fs_clear_bit(offset, se->cur_valid_map))
			BUG();
	}
	if (!f2fs_test_bit(offset, se->ckpt_valid_map))
		se->ckpt_valid_blocks += del;

	__mark_sit_entry_dirty(sbi, segno);

	/* update total number of valid blocks to be written in ckpt area */
	SIT_I(sbi)->written_valid_blocks += del;

	if (sbi->segs_per_sec > 1)
		get_sec_entry(sbi, segno)->valid_blocks += del;
}

static void refresh_sit_entry(struct f2fs_sb_info *sbi,
			block_t old_blkaddr, block_t new_blkaddr)
{
	update_sit_entry(sbi, new_blkaddr, 1);
	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
		update_sit_entry(sbi, old_blkaddr, -1);
}

void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
{
	unsigned int segno = GET_SEGNO(sbi, addr);
	struct sit_info *sit_i = SIT_I(sbi);

	BUG_ON(addr == NULL_ADDR);
	if (addr == NEW_ADDR)
		return;

	/* add it into sit main buffer */
	mutex_lock(&sit_i->sentry_lock);

	update_sit_entry(sbi, addr, -1);

	/* add it into dirty seglist */
	locate_dirty_segment(sbi, segno);

	mutex_unlock(&sit_i->sentry_lock);
}

/*
 * This function must be called with the curseg_mutex held.
 */
static void __add_sum_entry(struct f2fs_sb_info *sbi, int type,
					struct f2fs_summary *sum)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	void *addr = curseg->sum_blk;
	addr += curseg->next_blkoff * sizeof(struct f2fs_summary);
	memcpy(addr, sum, sizeof(struct f2fs_summary));
}

/*
 * Calculate the number of current summary pages for writing
 */
int npages_for_summary_flush(struct f2fs_sb_info *sbi)
{
	int total_size_bytes = 0;
	int valid_sum_count = 0;
	int i, sum_space;

	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		if (sbi->ckpt->alloc_type[i] == SSR)
			valid_sum_count += sbi->blocks_per_seg;
		else
			valid_sum_count += curseg_blkoff(sbi, i);
	}

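	/*
	 * A compacted summary area packs the NAT journal, the SIT journal
	 * and then the raw summary entries back to back; see
	 * write_compacted_summaries() for the matching on-disk layout.
	 */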
	total_size_bytes = valid_sum_count * (SUMMARY_SIZE + 1)
			+ sizeof(struct nat_journal) + 2
			+ sizeof(struct sit_journal) + 2;
	sum_space = PAGE_CACHE_SIZE - SUM_FOOTER_SIZE;
	if (total_size_bytes < sum_space)
		return 1;
	else if (total_size_bytes < 2 * sum_space)
		return 2;
	return 3;
}

/*
 * Caller should put this summary page
 */
struct page *get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
{
	return get_meta_page(sbi, GET_SUM_BLOCK(sbi, segno));
}

static void write_sum_page(struct f2fs_sb_info *sbi,
			struct f2fs_summary_block *sum_blk, block_t blk_addr)
{
	struct page *page = grab_meta_page(sbi, blk_addr);
	void *kaddr = page_address(page);
	memcpy(kaddr, sum_blk, PAGE_CACHE_SIZE);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
}

static int is_next_segment_free(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int segno = curseg->segno + 1;
	struct free_segmap_info *free_i = FREE_I(sbi);

	if (segno < TOTAL_SEGS(sbi) && segno % sbi->segs_per_sec)
		return !test_bit(segno, free_i->free_segmap);
	return 0;
}

/*
 * Find a new segment from the free segments bitmap in the right order.
 * This function must succeed; otherwise it is a BUG.
 */
static void get_new_segment(struct f2fs_sb_info *sbi,
			unsigned int *newseg, bool new_sec, int dir)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int segno, secno, zoneno;
	unsigned int total_zones = TOTAL_SECS(sbi) / sbi->secs_per_zone;
	unsigned int hint = *newseg / sbi->segs_per_sec;
	unsigned int old_zoneno = GET_ZONENO_FROM_SEGNO(sbi, *newseg);
	unsigned int left_start = hint;
	bool init = true;
	int go_left = 0;
	int i;

	write_lock(&free_i->segmap_lock);

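	/*
	 * If the caller does not require a new section, first try to pick
	 * the next free segment inside the section that *newseg belongs to.
	 */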
	if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
		segno = find_next_zero_bit(free_i->free_segmap,
					TOTAL_SEGS(sbi), *newseg + 1);
		if (segno - *newseg < sbi->segs_per_sec -
					(*newseg % sbi->segs_per_sec))
			goto got_it;
	}
find_other_zone:
	secno = find_next_zero_bit(free_i->free_secmap, TOTAL_SECS(sbi), hint);
	if (secno >= TOTAL_SECS(sbi)) {
		if (dir == ALLOC_RIGHT) {
			secno = find_next_zero_bit(free_i->free_secmap,
						TOTAL_SECS(sbi), 0);
			BUG_ON(secno >= TOTAL_SECS(sbi));
		} else {
			go_left = 1;
			left_start = hint - 1;
		}
	}
	if (go_left == 0)
		goto skip_left;

	while (test_bit(left_start, free_i->free_secmap)) {
		if (left_start > 0) {
			left_start--;
			continue;
		}
		left_start = find_next_zero_bit(free_i->free_secmap,
						TOTAL_SECS(sbi), 0);
		BUG_ON(left_start >= TOTAL_SECS(sbi));
		break;
	}
	secno = left_start;
skip_left:
	hint = secno;
	segno = secno * sbi->segs_per_sec;
	zoneno = secno / sbi->secs_per_zone;

	/* give up on finding another zone */
	if (!init)
		goto got_it;
	if (sbi->secs_per_zone == 1)
		goto got_it;
	if (zoneno == old_zoneno)
		goto got_it;
	if (dir == ALLOC_LEFT) {
		if (!go_left && zoneno + 1 >= total_zones)
			goto got_it;
		if (go_left && zoneno == 0)
			goto got_it;
	}
	for (i = 0; i < NR_CURSEG_TYPE; i++)
		if (CURSEG_I(sbi, i)->zone == zoneno)
			break;

	if (i < NR_CURSEG_TYPE) {
		/* zone is in use, try another */
		if (go_left)
			hint = zoneno * sbi->secs_per_zone - 1;
		else if (zoneno + 1 >= total_zones)
			hint = 0;
		else
			hint = (zoneno + 1) * sbi->secs_per_zone;
		init = false;
		goto find_other_zone;
	}
got_it:
	/* set it as dirty segment in free segmap */
	BUG_ON(test_bit(segno, free_i->free_segmap));
	__set_inuse(sbi, segno);
	*newseg = segno;
	write_unlock(&free_i->segmap_lock);
}

static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	struct summary_footer *sum_footer;

	curseg->segno = curseg->next_segno;
	curseg->zone = GET_ZONENO_FROM_SEGNO(sbi, curseg->segno);
	curseg->next_blkoff = 0;
	curseg->next_segno = NULL_SEGNO;

	sum_footer = &(curseg->sum_blk->footer);
	memset(sum_footer, 0, sizeof(struct summary_footer));
	if (IS_DATASEG(type))
		SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
	if (IS_NODESEG(type))
		SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
	__set_sit_entry_type(sbi, type, curseg->segno, modified);
}

/*
 * Allocate a current working segment.
 * This function always allocates a free segment in LFS manner.
 */
static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int segno = curseg->segno;
	int dir = ALLOC_LEFT;

	write_sum_page(sbi, curseg->sum_blk,
				GET_SUM_BLOCK(sbi, segno));
	if (type == CURSEG_WARM_DATA || type == CURSEG_COLD_DATA)
		dir = ALLOC_RIGHT;

	if (test_opt(sbi, NOHEAP))
		dir = ALLOC_RIGHT;

	get_new_segment(sbi, &segno, new_sec, dir);
	curseg->next_segno = segno;
	reset_curseg(sbi, type, 1);
	curseg->alloc_type = LFS;
}

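/*
 * Find the next block offset in the segment that is free both in the
 * checkpoint'ed validity bitmap and in the current one; SSR allocation
 * can only reuse such blocks.
 */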
static void __next_free_blkoff(struct f2fs_sb_info *sbi,
			struct curseg_info *seg, block_t start)
{
	struct seg_entry *se = get_seg_entry(sbi, seg->segno);
	block_t ofs;
	for (ofs = start; ofs < sbi->blocks_per_seg; ofs++) {
		if (!f2fs_test_bit(ofs, se->ckpt_valid_map)
			&& !f2fs_test_bit(ofs, se->cur_valid_map))
			break;
	}
	seg->next_blkoff = ofs;
}

/*
 * If a segment is written in LFS manner, the next block offset is simply
 * obtained by increasing the current block offset. However, if a segment is
 * written in SSR manner, the next block offset is obtained by calling
 * __next_free_blkoff().
 */
static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
				struct curseg_info *seg)
{
	if (seg->alloc_type == SSR)
		__next_free_blkoff(sbi, seg, seg->next_blkoff + 1);
	else
		seg->next_blkoff++;
}

/*
 * This function always allocates a used segment (from the dirty seglist) in
 * SSR manner, so it must recover the existing segment information of its
 * valid blocks.
 */
static void change_curseg(struct f2fs_sb_info *sbi, int type, bool reuse)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int new_segno = curseg->next_segno;
	struct f2fs_summary_block *sum_node;
	struct page *sum_page;

	write_sum_page(sbi, curseg->sum_blk,
				GET_SUM_BLOCK(sbi, curseg->segno));
	__set_test_and_inuse(sbi, new_segno);

	mutex_lock(&dirty_i->seglist_lock);
	__remove_dirty_segment(sbi, new_segno, PRE);
	__remove_dirty_segment(sbi, new_segno, DIRTY);
	mutex_unlock(&dirty_i->seglist_lock);

	reset_curseg(sbi, type, 1);
	curseg->alloc_type = SSR;
	__next_free_blkoff(sbi, curseg, 0);

	if (reuse) {
		sum_page = get_sum_page(sbi, new_segno);
		sum_node = (struct f2fs_summary_block *)page_address(sum_page);
		memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
		f2fs_put_page(sum_page, 1);
	}
}

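/*
 * Pick a victim segment for SSR allocation. Node logs, and data logs while
 * enough free sections remain, only consider their own log type; otherwise
 * data logs fall back to any of the data log types.
 */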
static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;

	if (IS_NODESEG(type) || !has_not_enough_free_secs(sbi, 0))
		return v_ops->get_victim(sbi,
				&(curseg)->next_segno, BG_GC, type, SSR);

	/* For data segments, let's do SSR more intensively */
	for (; type >= CURSEG_HOT_DATA; type--)
		if (v_ops->get_victim(sbi, &(curseg)->next_segno,
						BG_GC, type, SSR))
			return 1;
	return 0;
}

/*
 * Flush out the current segment and replace it with a new segment.
 * This function must succeed; otherwise it is a BUG.
 */
static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
						int type, bool force)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);

	if (force)
		new_curseg(sbi, type, true);
	else if (type == CURSEG_WARM_NODE)
		new_curseg(sbi, type, false);
	else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type))
		new_curseg(sbi, type, false);
	else if (need_SSR(sbi) && get_ssr_segment(sbi, type))
		change_curseg(sbi, type, true);
	else
		new_curseg(sbi, type, false);

	stat_inc_seg_type(sbi, curseg);
}

void allocate_new_segments(struct f2fs_sb_info *sbi)
{
	struct curseg_info *curseg;
	unsigned int old_curseg;
	int i;

	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		curseg = CURSEG_I(sbi, i);
		old_curseg = curseg->segno;
		SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true);
		locate_dirty_segment(sbi, old_curseg);
	}
}

static const struct segment_allocation default_salloc_ops = {
	.allocate_segment = allocate_segment_by_default,
};

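/*
 * bio completion handler for writes: on an I/O error, mark the pages,
 * set CP_ERROR_FLAG and switch the filesystem read-only; end page
 * writeback and wake up the checkpoint task once no writeback remains.
 */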
static void f2fs_end_io_write(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct bio_private *p = bio->bi_private;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);
		if (!uptodate) {
			SetPageError(page);
			if (page->mapping)
				set_bit(AS_EIO, &page->mapping->flags);
			set_ckpt_flags(p->sbi->ckpt, CP_ERROR_FLAG);
			p->sbi->sb->s_flags |= MS_RDONLY;
		}
		end_page_writeback(page);
		dec_page_count(p->sbi, F2FS_WRITEBACK);
	} while (bvec >= bio->bi_io_vec);

	if (p->is_sync)
		complete(p->wait);

	if (!get_pages(p->sbi, F2FS_WRITEBACK) && p->sbi->cp_task)
		wake_up_process(p->sbi->cp_task);

	kfree(p);
	bio_put(bio);
}

struct bio *f2fs_bio_alloc(struct block_device *bdev, int npages)
{
	struct bio *bio;

	/* No failure on bio allocation */
	bio = bio_alloc(GFP_NOIO, npages);
	bio->bi_bdev = bdev;
	bio->bi_private = NULL;

	return bio;
}

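/* caller holds sbi->bio_sem; submits and clears the cached bio of @type */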
static void do_submit_bio(struct f2fs_sb_info *sbi,
				enum page_type type, bool sync)
{
	int rw = sync ? WRITE_SYNC : WRITE;
	enum page_type btype = type > META ? META : type;

	if (type >= META_FLUSH)
		rw = WRITE_FLUSH_FUA;

	if (btype == META)
		rw |= REQ_META;

	if (sbi->bio[btype]) {
		struct bio_private *p = sbi->bio[btype]->bi_private;
		p->sbi = sbi;
		sbi->bio[btype]->bi_end_io = f2fs_end_io_write;

		trace_f2fs_do_submit_bio(sbi->sb, btype, sync, sbi->bio[btype]);

		if (type == META_FLUSH) {
			DECLARE_COMPLETION_ONSTACK(wait);
			p->is_sync = true;
			p->wait = &wait;
			submit_bio(rw, sbi->bio[btype]);
			wait_for_completion(&wait);
		} else {
			p->is_sync = false;
			submit_bio(rw, sbi->bio[btype]);
		}
		sbi->bio[btype] = NULL;
	}
}

void f2fs_submit_bio(struct f2fs_sb_info *sbi, enum page_type type, bool sync)
{
	down_write(&sbi->bio_sem);
	do_submit_bio(sbi, type, sync);
	up_write(&sbi->bio_sem);
}

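/*
 * Add the page to the cached bio of its type so that consecutive blocks
 * are merged into one request; the cached bio is flushed first whenever
 * the new block is not contiguous with the last one queued.
 */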
static void submit_write_page(struct f2fs_sb_info *sbi, struct page *page,
				block_t blk_addr, enum page_type type)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	int bio_blocks;

	verify_block_addr(sbi, blk_addr);

	down_write(&sbi->bio_sem);

	inc_page_count(sbi, F2FS_WRITEBACK);

	if (sbi->bio[type] && sbi->last_block_in_bio[type] != blk_addr - 1)
		do_submit_bio(sbi, type, false);
alloc_new:
	if (sbi->bio[type] == NULL) {
		struct bio_private *priv;
retry:
		priv = kmalloc(sizeof(struct bio_private), GFP_NOFS);
		if (!priv) {
			cond_resched();
			goto retry;
		}

		bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
		sbi->bio[type] = f2fs_bio_alloc(bdev, bio_blocks);
		sbi->bio[type]->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
		sbi->bio[type]->bi_private = priv;
		/*
		 * The end_io will be assigned at the submission phase.
		 * Until then, let bio_add_page() merge consecutive IOs as much
		 * as possible.
		 */
	}

	if (bio_add_page(sbi->bio[type], page, PAGE_CACHE_SIZE, 0) <
							PAGE_CACHE_SIZE) {
		do_submit_bio(sbi, type, false);
		goto alloc_new;
	}

	sbi->last_block_in_bio[type] = blk_addr;

	up_write(&sbi->bio_sem);
	trace_f2fs_submit_write_page(page, blk_addr, type);
}

void f2fs_wait_on_page_writeback(struct page *page,
				enum page_type type, bool sync)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	if (PageWriteback(page)) {
		f2fs_submit_bio(sbi, type, sync);
		wait_on_page_writeback(page);
	}
}

static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	if (curseg->next_blkoff < sbi->blocks_per_seg)
		return true;
	return false;
}

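/*
 * Select the log (temperature) a page should go to, for the 2-, 4- and
 * 6-active-log configurations: with fewer active logs, the hot/warm/cold
 * data and node types are collapsed together.
 */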
static int __get_segment_type_2(struct page *page, enum page_type p_type)
{
	if (p_type == DATA)
		return CURSEG_HOT_DATA;
	else
		return CURSEG_HOT_NODE;
}

static int __get_segment_type_4(struct page *page, enum page_type p_type)
{
	if (p_type == DATA) {
		struct inode *inode = page->mapping->host;

		if (S_ISDIR(inode->i_mode))
			return CURSEG_HOT_DATA;
		else
			return CURSEG_COLD_DATA;
	} else {
		if (IS_DNODE(page) && !is_cold_node(page))
			return CURSEG_HOT_NODE;
		else
			return CURSEG_COLD_NODE;
	}
}

static int __get_segment_type_6(struct page *page, enum page_type p_type)
{
	if (p_type == DATA) {
		struct inode *inode = page->mapping->host;

		if (S_ISDIR(inode->i_mode))
			return CURSEG_HOT_DATA;
		else if (is_cold_data(page) || file_is_cold(inode))
			return CURSEG_COLD_DATA;
		else
			return CURSEG_WARM_DATA;
	} else {
		if (IS_DNODE(page))
			return is_cold_node(page) ? CURSEG_WARM_NODE :
						CURSEG_HOT_NODE;
		else
			return CURSEG_COLD_NODE;
	}
}

static int __get_segment_type(struct page *page, enum page_type p_type)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	switch (sbi->active_logs) {
	case 2:
		return __get_segment_type_2(page, p_type);
	case 4:
		return __get_segment_type_4(page, p_type);
	}
	/* NR_CURSEG_TYPE(6) logs by default */
	BUG_ON(sbi->active_logs != NR_CURSEG_TYPE);
	return __get_segment_type_6(page, p_type);
}

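/*
 * Allocate the next free block of the matching current segment for this
 * page, record its summary entry and SIT changes, and queue the page for
 * writeback; a new segment is allocated when the current one is full.
 */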
static void do_write_page(struct f2fs_sb_info *sbi, struct page *page,
			block_t old_blkaddr, block_t *new_blkaddr,
			struct f2fs_summary *sum, enum page_type p_type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct curseg_info *curseg;
	unsigned int old_cursegno;
	int type;

	type = __get_segment_type(page, p_type);
	curseg = CURSEG_I(sbi, type);

	mutex_lock(&curseg->curseg_mutex);

	*new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
	old_cursegno = curseg->segno;

	/*
	 * __add_sum_entry must be called with the curseg_mutex held,
	 * because it updates a summary entry in the current summary block.
	 */
	__add_sum_entry(sbi, type, sum);

	mutex_lock(&sit_i->sentry_lock);
	__refresh_next_blkoff(sbi, curseg);

	stat_inc_block_count(sbi, curseg);

	/*
	 * SIT information should be updated before segment allocation,
	 * since SSR needs latest valid block information.
	 */
	refresh_sit_entry(sbi, old_blkaddr, *new_blkaddr);

	if (!__has_curseg_space(sbi, type))
		sit_i->s_ops->allocate_segment(sbi, type, false);

	locate_dirty_segment(sbi, old_cursegno);
	locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
	mutex_unlock(&sit_i->sentry_lock);

	if (p_type == NODE)
		fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));

	/* writeout dirty page into bdev */
	submit_write_page(sbi, page, *new_blkaddr, p_type);

	mutex_unlock(&curseg->curseg_mutex);
}

void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)
{
	set_page_writeback(page);
	submit_write_page(sbi, page, page->index, META);
}

void write_node_page(struct f2fs_sb_info *sbi, struct page *page,
		unsigned int nid, block_t old_blkaddr, block_t *new_blkaddr)
{
	struct f2fs_summary sum;
	set_summary(&sum, nid, 0, 0);
	do_write_page(sbi, page, old_blkaddr, new_blkaddr, &sum, NODE);
}

void write_data_page(struct inode *inode, struct page *page,
		struct dnode_of_data *dn, block_t old_blkaddr,
		block_t *new_blkaddr)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct f2fs_summary sum;
	struct node_info ni;

	BUG_ON(old_blkaddr == NULL_ADDR);
	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	do_write_page(sbi, page, old_blkaddr,
			new_blkaddr, &sum, DATA);
}

void rewrite_data_page(struct f2fs_sb_info *sbi, struct page *page,
					block_t old_blk_addr)
{
	submit_write_page(sbi, page, old_blk_addr, DATA);
}

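/*
 * Used by roll-forward recovery: write a recovered data page in place at
 * new_blkaddr, temporarily switching the current segment when the target
 * block lies in a different segment.
 */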
void recover_data_page(struct f2fs_sb_info *sbi,
			struct page *page, struct f2fs_summary *sum,
			block_t old_blkaddr, block_t new_blkaddr)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct curseg_info *curseg;
	unsigned int segno, old_cursegno;
	struct seg_entry *se;
	int type;

	segno = GET_SEGNO(sbi, new_blkaddr);
	se = get_seg_entry(sbi, segno);
	type = se->type;

	if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
		if (old_blkaddr == NULL_ADDR)
			type = CURSEG_COLD_DATA;
		else
			type = CURSEG_WARM_DATA;
	}
	curseg = CURSEG_I(sbi, type);

	mutex_lock(&curseg->curseg_mutex);
	mutex_lock(&sit_i->sentry_lock);

	old_cursegno = curseg->segno;

	/* change the current segment */
	if (segno != curseg->segno) {
		curseg->next_segno = segno;
		change_curseg(sbi, type, true);
	}

	curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, new_blkaddr) &
					(sbi->blocks_per_seg - 1);
	__add_sum_entry(sbi, type, sum);

	refresh_sit_entry(sbi, old_blkaddr, new_blkaddr);

	locate_dirty_segment(sbi, old_cursegno);
	locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));

	mutex_unlock(&sit_i->sentry_lock);
	mutex_unlock(&curseg->curseg_mutex);
}

void rewrite_node_page(struct f2fs_sb_info *sbi,
			struct page *page, struct f2fs_summary *sum,
			block_t old_blkaddr, block_t new_blkaddr)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int type = CURSEG_WARM_NODE;
	struct curseg_info *curseg;
	unsigned int segno, old_cursegno;
	block_t next_blkaddr = next_blkaddr_of_node(page);
	unsigned int next_segno = GET_SEGNO(sbi, next_blkaddr);

	curseg = CURSEG_I(sbi, type);

	mutex_lock(&curseg->curseg_mutex);
	mutex_lock(&sit_i->sentry_lock);

	segno = GET_SEGNO(sbi, new_blkaddr);
	old_cursegno = curseg->segno;

	/* change the current segment */
	if (segno != curseg->segno) {
		curseg->next_segno = segno;
		change_curseg(sbi, type, true);
	}
	curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, new_blkaddr) &
					(sbi->blocks_per_seg - 1);
	__add_sum_entry(sbi, type, sum);

	/* change the current log to the next block addr in advance */
	if (next_segno != segno) {
		curseg->next_segno = next_segno;
		change_curseg(sbi, type, true);
	}
	curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, next_blkaddr) &
					(sbi->blocks_per_seg - 1);

	/* rewrite node page */
	set_page_writeback(page);
	submit_write_page(sbi, page, new_blkaddr, NODE);
	f2fs_submit_bio(sbi, NODE, true);
	refresh_sit_entry(sbi, old_blkaddr, new_blkaddr);

	locate_dirty_segment(sbi, old_cursegno);
	locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));

	mutex_unlock(&sit_i->sentry_lock);
	mutex_unlock(&curseg->curseg_mutex);
}

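/*
 * The compacted summary area stores, in order: the NAT journal, the SIT
 * journal, and then the per-block summary entries of the three data logs.
 */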
static int read_compacted_summaries(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct curseg_info *seg_i;
	unsigned char *kaddr;
	struct page *page;
	block_t start;
	int i, j, offset;

	start = start_sum_block(sbi);

	page = get_meta_page(sbi, start++);
	kaddr = (unsigned char *)page_address(page);

	/* Step 1: restore nat cache */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
	memcpy(&seg_i->sum_blk->n_nats, kaddr, SUM_JOURNAL_SIZE);

	/* Step 2: restore sit cache */
	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
	memcpy(&seg_i->sum_blk->n_sits, kaddr + SUM_JOURNAL_SIZE,
						SUM_JOURNAL_SIZE);
	offset = 2 * SUM_JOURNAL_SIZE;

	/* Step 3: restore summary entries */
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		unsigned short blk_off;
		unsigned int segno;

		seg_i = CURSEG_I(sbi, i);
		segno = le32_to_cpu(ckpt->cur_data_segno[i]);
		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
		seg_i->next_segno = segno;
		reset_curseg(sbi, i, 0);
		seg_i->alloc_type = ckpt->alloc_type[i];
		seg_i->next_blkoff = blk_off;

		if (seg_i->alloc_type == SSR)
			blk_off = sbi->blocks_per_seg;

		for (j = 0; j < blk_off; j++) {
			struct f2fs_summary *s;
			s = (struct f2fs_summary *)(kaddr + offset);
			seg_i->sum_blk->entries[j] = *s;
			offset += SUMMARY_SIZE;
			if (offset + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
						SUM_FOOTER_SIZE)
				continue;

			f2fs_put_page(page, 1);
			page = NULL;

			page = get_meta_page(sbi, start++);
			kaddr = (unsigned char *)page_address(page);
			offset = 0;
		}
	}
	f2fs_put_page(page, 1);
	return 0;
}

static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_summary_block *sum;
	struct curseg_info *curseg;
	struct page *new;
	unsigned short blk_off;
	unsigned int segno = 0;
	block_t blk_addr = 0;

	/* get segment number and block addr */
	if (IS_DATASEG(type)) {
		segno = le32_to_cpu(ckpt->cur_data_segno[type]);
		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type -
							CURSEG_HOT_DATA]);
		if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG))
			blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type);
		else
			blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
	} else {
		segno = le32_to_cpu(ckpt->cur_node_segno[type -
							CURSEG_HOT_NODE]);
		blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type -
							CURSEG_HOT_NODE]);
		if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG))
			blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
							type - CURSEG_HOT_NODE);
		else
			blk_addr = GET_SUM_BLOCK(sbi, segno);
	}

	new = get_meta_page(sbi, blk_addr);
	sum = (struct f2fs_summary_block *)page_address(new);

	if (IS_NODESEG(type)) {
		if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG)) {
			struct f2fs_summary *ns = &sum->entries[0];
			int i;
			for (i = 0; i < sbi->blocks_per_seg; i++, ns++) {
				ns->version = 0;
				ns->ofs_in_node = 0;
			}
		} else {
			if (restore_node_summary(sbi, segno, sum)) {
				f2fs_put_page(new, 1);
				return -EINVAL;
			}
		}
	}

	/* set uncompleted segment to curseg */
	curseg = CURSEG_I(sbi, type);
	mutex_lock(&curseg->curseg_mutex);
	memcpy(curseg->sum_blk, sum, PAGE_CACHE_SIZE);
	curseg->next_segno = segno;
	reset_curseg(sbi, type, 0);
	curseg->alloc_type = ckpt->alloc_type[type];
	curseg->next_blkoff = blk_off;
	mutex_unlock(&curseg->curseg_mutex);
	f2fs_put_page(new, 1);
	return 0;
}

static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
{
	int type = CURSEG_HOT_DATA;

	if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) {
		/* restore for compacted data summary */
		if (read_compacted_summaries(sbi))
			return -EINVAL;
		type = CURSEG_HOT_NODE;
	}

	for (; type <= CURSEG_COLD_NODE; type++)
		if (read_normal_summaries(sbi, type))
			return -EINVAL;
	return 0;
}

static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	struct page *page;
	unsigned char *kaddr;
	struct f2fs_summary *summary;
	struct curseg_info *seg_i;
	int written_size = 0;
	int i, j;

	page = grab_meta_page(sbi, blkaddr++);
	kaddr = (unsigned char *)page_address(page);

	/* Step 1: write nat cache */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
	memcpy(kaddr, &seg_i->sum_blk->n_nats, SUM_JOURNAL_SIZE);
	written_size += SUM_JOURNAL_SIZE;

	/* Step 2: write sit cache */
	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
	memcpy(kaddr + written_size, &seg_i->sum_blk->n_sits,
						SUM_JOURNAL_SIZE);
	written_size += SUM_JOURNAL_SIZE;

	/* Step 3: write summary entries */
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		unsigned short blkoff;
		seg_i = CURSEG_I(sbi, i);
		if (sbi->ckpt->alloc_type[i] == SSR)
			blkoff = sbi->blocks_per_seg;
		else
			blkoff = curseg_blkoff(sbi, i);

		for (j = 0; j < blkoff; j++) {
			if (!page) {
				page = grab_meta_page(sbi, blkaddr++);
				kaddr = (unsigned char *)page_address(page);
				written_size = 0;
			}
			summary = (struct f2fs_summary *)(kaddr + written_size);
			*summary = seg_i->sum_blk->entries[j];
			written_size += SUMMARY_SIZE;

			if (written_size + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
						SUM_FOOTER_SIZE)
				continue;

			set_page_dirty(page);
			f2fs_put_page(page, 1);
			page = NULL;
		}
	}
	if (page) {
		set_page_dirty(page);
		f2fs_put_page(page, 1);
	}
}

static void write_normal_summaries(struct f2fs_sb_info *sbi,
					block_t blkaddr, int type)
{
	int i, end;
	if (IS_DATASEG(type))
		end = type + NR_CURSEG_DATA_TYPE;
	else
		end = type + NR_CURSEG_NODE_TYPE;

	for (i = type; i < end; i++) {
		struct curseg_info *sum = CURSEG_I(sbi, i);
		mutex_lock(&sum->curseg_mutex);
		write_sum_page(sbi, sum->sum_blk, blkaddr + (i - type));
		mutex_unlock(&sum->curseg_mutex);
	}
}

void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
{
	if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG))
		write_compacted_summaries(sbi, start_blk);
	else
		write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
}

void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
{
	if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG))
		write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
}

int lookup_journal_in_cursum(struct f2fs_summary_block *sum, int type,
					unsigned int val, int alloc)
{
	int i;

	if (type == NAT_JOURNAL) {
		for (i = 0; i < nats_in_cursum(sum); i++) {
			if (le32_to_cpu(nid_in_journal(sum, i)) == val)
				return i;
		}
		if (alloc && nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES)
			return update_nats_in_cursum(sum, 1);
	} else if (type == SIT_JOURNAL) {
		for (i = 0; i < sits_in_cursum(sum); i++)
			if (le32_to_cpu(segno_in_journal(sum, i)) == val)
				return i;
		if (alloc && sits_in_cursum(sum) < SIT_JOURNAL_ENTRIES)
			return update_sits_in_cursum(sum, 1);
	}
	return -1;
}

static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
					unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int offset = SIT_BLOCK_OFFSET(sit_i, segno);
	block_t blk_addr = sit_i->sit_base_addr + offset;

	check_seg_range(sbi, segno);

	/* calculate sit block address */
	if (f2fs_test_bit(offset, sit_i->sit_bitmap))
		blk_addr += sit_i->sit_blocks;

	return get_meta_page(sbi, blk_addr);
}

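/*
 * SIT blocks are double-buffered on disk: copy the current block to its
 * pair location, flip the corresponding bit in sit_bitmap, and return the
 * writable copy.
 */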
static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
					unsigned int start)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct page *src_page, *dst_page;
	pgoff_t src_off, dst_off;
	void *src_addr, *dst_addr;

	src_off = current_sit_addr(sbi, start);
	dst_off = next_sit_addr(sbi, src_off);

	/* get current sit block page without lock */
	src_page = get_meta_page(sbi, src_off);
	dst_page = grab_meta_page(sbi, dst_off);
	BUG_ON(PageDirty(src_page));

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);

	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_sit(sit_i, start);

	return dst_page;
}

static bool flush_sits_in_journal(struct f2fs_sb_info *sbi)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i;

	/*
	 * If the journal area in the current summary is full of sit entries,
	 * all of them are flushed out to the SIT area. Otherwise, newly hot
	 * sit entries could not replace the existing journal entries.
	 */
	if (sits_in_cursum(sum) >= SIT_JOURNAL_ENTRIES) {
		for (i = sits_in_cursum(sum) - 1; i >= 0; i--) {
			unsigned int segno;
			segno = le32_to_cpu(segno_in_journal(sum, i));
			__mark_sit_entry_dirty(sbi, segno);
		}
		update_sits_in_cursum(sum, -sits_in_cursum(sum));
		return true;
	}
	return false;
}

/*
 * CP calls this function, which flushes SIT entries including sit_journal,
 * and moves prefree segs to free segs.
 */
void flush_sit_entries(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	unsigned long nsegs = TOTAL_SEGS(sbi);
	struct page *page = NULL;
	struct f2fs_sit_block *raw_sit = NULL;
	unsigned int start = 0, end = 0;
	unsigned int segno = -1;
	bool flushed;

	mutex_lock(&curseg->curseg_mutex);
	mutex_lock(&sit_i->sentry_lock);

	/*
	 * "flushed" indicates whether sit entries in journal are flushed
	 * to the SIT area or not.
	 */
	flushed = flush_sits_in_journal(sbi);

	while ((segno = find_next_bit(bitmap, nsegs, segno + 1)) < nsegs) {
		struct seg_entry *se = get_seg_entry(sbi, segno);
		int sit_offset, offset;

		sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);

		if (flushed)
			goto to_sit_page;

		offset = lookup_journal_in_cursum(sum, SIT_JOURNAL, segno, 1);
		if (offset >= 0) {
			segno_in_journal(sum, offset) = cpu_to_le32(segno);
			seg_info_to_raw_sit(se, &sit_in_journal(sum, offset));
			goto flush_done;
		}
to_sit_page:
		if (!page || (start > segno) || (segno > end)) {
			if (page) {
				f2fs_put_page(page, 1);
				page = NULL;
			}

			start = START_SEGNO(sit_i, segno);
			end = start + SIT_ENTRY_PER_BLOCK - 1;

			/* read sit block that will be updated */
			page = get_next_sit_page(sbi, start);
			raw_sit = page_address(page);
		}

		/* update entry in SIT block */
		seg_info_to_raw_sit(se, &raw_sit->entries[sit_offset]);
flush_done:
		__clear_bit(segno, bitmap);
		sit_i->dirty_sentries--;
	}
	mutex_unlock(&sit_i->sentry_lock);
	mutex_unlock(&curseg->curseg_mutex);

	/* writeout last modified SIT block */
	f2fs_put_page(page, 1);

	set_prefree_as_free_segments(sbi);
}

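/*
 * Allocate and initialize the in-memory SIT state: per-segment entries,
 * the dirty-sentries bitmap, and a private copy of the checkpoint's SIT
 * bitmap.
 */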
static int build_sit_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct sit_info *sit_i;
	unsigned int sit_segs, start;
	char *src_bitmap, *dst_bitmap;
	unsigned int bitmap_size;

	/* allocate memory for SIT information */
	sit_i = kzalloc(sizeof(struct sit_info), GFP_KERNEL);
	if (!sit_i)
		return -ENOMEM;

	SM_I(sbi)->sit_info = sit_i;

	sit_i->sentries = vzalloc(TOTAL_SEGS(sbi) * sizeof(struct seg_entry));
	if (!sit_i->sentries)
		return -ENOMEM;

	bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));
	sit_i->dirty_sentries_bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!sit_i->dirty_sentries_bitmap)
		return -ENOMEM;

	for (start = 0; start < TOTAL_SEGS(sbi); start++) {
		sit_i->sentries[start].cur_valid_map
			= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
		sit_i->sentries[start].ckpt_valid_map
			= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
		if (!sit_i->sentries[start].cur_valid_map
				|| !sit_i->sentries[start].ckpt_valid_map)
			return -ENOMEM;
	}

	if (sbi->segs_per_sec > 1) {
		sit_i->sec_entries = vzalloc(TOTAL_SECS(sbi) *
					sizeof(struct sec_entry));
		if (!sit_i->sec_entries)
			return -ENOMEM;
	}

	/* get information related with SIT */
	sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;

	/* setup SIT bitmap from checkpoint pack */
	bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
	src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);

	dst_bitmap = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
	if (!dst_bitmap)
		return -ENOMEM;

	/* init SIT information */
	sit_i->s_ops = &default_salloc_ops;

	sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
	sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
	sit_i->written_valid_blocks = le64_to_cpu(ckpt->valid_block_count);
	sit_i->sit_bitmap = dst_bitmap;
	sit_i->bitmap_size = bitmap_size;
	sit_i->dirty_sentries = 0;
	sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
	sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
	sit_i->mounted_time = CURRENT_TIME_SEC.tv_sec;
	mutex_init(&sit_i->sentry_lock);
	return 0;
}

static int build_free_segmap(struct f2fs_sb_info *sbi)
{
	struct f2fs_sm_info *sm_info = SM_I(sbi);
	struct free_segmap_info *free_i;
	unsigned int bitmap_size, sec_bitmap_size;

	/* allocate memory for free segmap information */
	free_i = kzalloc(sizeof(struct free_segmap_info), GFP_KERNEL);
	if (!free_i)
		return -ENOMEM;

	SM_I(sbi)->free_info = free_i;

	bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));
	free_i->free_segmap = kmalloc(bitmap_size, GFP_KERNEL);
	if (!free_i->free_segmap)
		return -ENOMEM;

	sec_bitmap_size = f2fs_bitmap_size(TOTAL_SECS(sbi));
	free_i->free_secmap = kmalloc(sec_bitmap_size, GFP_KERNEL);
	if (!free_i->free_secmap)
		return -ENOMEM;

	/* set all segments as dirty temporarily */
	memset(free_i->free_segmap, 0xff, bitmap_size);
	memset(free_i->free_secmap, 0xff, sec_bitmap_size);

	/* init free segmap information */
	free_i->start_segno =
		(unsigned int) GET_SEGNO_FROM_SEG0(sbi, sm_info->main_blkaddr);
	free_i->free_segments = 0;
	free_i->free_sections = 0;
	rwlock_init(&free_i->segmap_lock);
	return 0;
}

static int build_curseg(struct f2fs_sb_info *sbi)
{
	struct curseg_info *array;
	int i;

	array = kzalloc(sizeof(*array) * NR_CURSEG_TYPE, GFP_KERNEL);
	if (!array)
		return -ENOMEM;

	SM_I(sbi)->curseg_array = array;

	for (i = 0; i < NR_CURSEG_TYPE; i++) {
		mutex_init(&array[i].curseg_mutex);
		array[i].sum_blk = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
		if (!array[i].sum_blk)
			return -ENOMEM;
		array[i].segno = NULL_SEGNO;
		array[i].next_blkoff = 0;
	}
	return restore_curseg_summaries(sbi);
}

static void build_sit_entries(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	unsigned int start;

	for (start = 0; start < TOTAL_SEGS(sbi); start++) {
		struct seg_entry *se = &sit_i->sentries[start];
		struct f2fs_sit_block *sit_blk;
		struct f2fs_sit_entry sit;
		struct page *page;
		int i;

		mutex_lock(&curseg->curseg_mutex);
		for (i = 0; i < sits_in_cursum(sum); i++) {
			if (le32_to_cpu(segno_in_journal(sum, i)) == start) {
				sit = sit_in_journal(sum, i);
				mutex_unlock(&curseg->curseg_mutex);
				goto got_it;
			}
		}
		mutex_unlock(&curseg->curseg_mutex);
		page = get_current_sit_page(sbi, start);
		sit_blk = (struct f2fs_sit_block *)page_address(page);
		sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
		f2fs_put_page(page, 1);
got_it:
		check_block_count(sbi, start, &sit);
		seg_info_from_raw_sit(se, &sit);
		if (sbi->segs_per_sec > 1) {
			struct sec_entry *e = get_sec_entry(sbi, start);
			e->valid_blocks += se->valid_blocks;
		}
	}
}

static void init_free_segmap(struct f2fs_sb_info *sbi)
{
	unsigned int start;
	int type;

	for (start = 0; start < TOTAL_SEGS(sbi); start++) {
		struct seg_entry *sentry = get_seg_entry(sbi, start);
		if (!sentry->valid_blocks)
			__set_free(sbi, start);
	}

	/* set the current segments as in-use */
	for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) {
		struct curseg_info *curseg_t = CURSEG_I(sbi, type);
		__set_test_and_inuse(sbi, curseg_t->segno);
	}
}

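/* mark every in-use, partially valid segment as DIRTY */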
static void init_dirty_segmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int segno = 0, offset = 0, total_segs = TOTAL_SEGS(sbi);
	unsigned short valid_blocks;

	while (1) {
		/* find dirty segment based on free segmap */
		segno = find_next_inuse(free_i, total_segs, offset);
		if (segno >= total_segs)
			break;
		offset = segno + 1;
		valid_blocks = get_valid_blocks(sbi, segno, 0);
		if (valid_blocks >= sbi->blocks_per_seg || !valid_blocks)
			continue;
		mutex_lock(&dirty_i->seglist_lock);
		__locate_dirty_segment(sbi, segno, DIRTY);
		mutex_unlock(&dirty_i->seglist_lock);
	}
}

static int init_victim_secmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int bitmap_size = f2fs_bitmap_size(TOTAL_SECS(sbi));

	dirty_i->victim_secmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!dirty_i->victim_secmap)
		return -ENOMEM;
	return 0;
}

static int build_dirty_segmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i;
	unsigned int bitmap_size, i;

	/* allocate memory for dirty segments list information */
	dirty_i = kzalloc(sizeof(struct dirty_seglist_info), GFP_KERNEL);
	if (!dirty_i)
		return -ENOMEM;

	SM_I(sbi)->dirty_info = dirty_i;
	mutex_init(&dirty_i->seglist_lock);

	bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));

	for (i = 0; i < NR_DIRTY_TYPE; i++) {
		dirty_i->dirty_segmap[i] = kzalloc(bitmap_size, GFP_KERNEL);
		if (!dirty_i->dirty_segmap[i])
			return -ENOMEM;
	}

	init_dirty_segmap(sbi);
	return init_victim_secmap(sbi);
}

/*
 * Update min, max modified time for cost-benefit GC algorithm
 */
static void init_min_max_mtime(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int segno;

	mutex_lock(&sit_i->sentry_lock);

	sit_i->min_mtime = LLONG_MAX;

	for (segno = 0; segno < TOTAL_SEGS(sbi); segno += sbi->segs_per_sec) {
		unsigned int i;
		unsigned long long mtime = 0;

		for (i = 0; i < sbi->segs_per_sec; i++)
			mtime += get_seg_entry(sbi, segno + i)->mtime;

		mtime = div_u64(mtime, sbi->segs_per_sec);

		if (sit_i->min_mtime > mtime)
			sit_i->min_mtime = mtime;
	}
	sit_i->max_mtime = get_mtime(sbi);
	mutex_unlock(&sit_i->sentry_lock);
}

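/*
 * Build all segment manager structures from the superblock and checkpoint:
 * SIT, free segmap, current segments, and the dirty segment lists.
 */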
int build_segment_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_sm_info *sm_info;
	int err;

	sm_info = kzalloc(sizeof(struct f2fs_sm_info), GFP_KERNEL);
	if (!sm_info)
		return -ENOMEM;

	/* init sm info */
	sbi->sm_info = sm_info;
	INIT_LIST_HEAD(&sm_info->wblist_head);
	spin_lock_init(&sm_info->wblist_lock);
	sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
	sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
	sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
	sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
	sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
	sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
	sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
	sm_info->rec_prefree_segments = DEF_RECLAIM_PREFREE_SEGMENTS;

	err = build_sit_info(sbi);
	if (err)
		return err;
	err = build_free_segmap(sbi);
	if (err)
		return err;
	err = build_curseg(sbi);
	if (err)
		return err;

	/* reinit free segmap based on SIT */
	build_sit_entries(sbi);

	init_free_segmap(sbi);
	err = build_dirty_segmap(sbi);
	if (err)
		return err;

	init_min_max_mtime(sbi);
	return 0;
}

static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
					enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	mutex_lock(&dirty_i->seglist_lock);
	kfree(dirty_i->dirty_segmap[dirty_type]);
	dirty_i->nr_dirty[dirty_type] = 0;
	mutex_unlock(&dirty_i->seglist_lock);
}

static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	kfree(dirty_i->victim_secmap);
}

static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	int i;

	if (!dirty_i)
		return;

	/* discard pre-free/dirty segments list */
	for (i = 0; i < NR_DIRTY_TYPE; i++)
		discard_dirty_segmap(sbi, i);

	destroy_victim_secmap(sbi);
	SM_I(sbi)->dirty_info = NULL;
	kfree(dirty_i);
}

static void destroy_curseg(struct f2fs_sb_info *sbi)
{
	struct curseg_info *array = SM_I(sbi)->curseg_array;
	int i;

	if (!array)
		return;
	SM_I(sbi)->curseg_array = NULL;
	for (i = 0; i < NR_CURSEG_TYPE; i++)
		kfree(array[i].sum_blk);
	kfree(array);
}

static void destroy_free_segmap(struct f2fs_sb_info *sbi)
{
	struct free_segmap_info *free_i = SM_I(sbi)->free_info;
	if (!free_i)
		return;
	SM_I(sbi)->free_info = NULL;
	kfree(free_i->free_segmap);
	kfree(free_i->free_secmap);
	kfree(free_i);
}

static void destroy_sit_info(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int start;

	if (!sit_i)
		return;

	if (sit_i->sentries) {
		for (start = 0; start < TOTAL_SEGS(sbi); start++) {
			kfree(sit_i->sentries[start].cur_valid_map);
			kfree(sit_i->sentries[start].ckpt_valid_map);
		}
	}
	vfree(sit_i->sentries);
	vfree(sit_i->sec_entries);
	kfree(sit_i->dirty_sentries_bitmap);

	SM_I(sbi)->sit_info = NULL;
	kfree(sit_i->sit_bitmap);
	kfree(sit_i);
}

void destroy_segment_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_sm_info *sm_info = SM_I(sbi);
	destroy_dirty_segmap(sbi);
	destroy_curseg(sbi);
	destroy_free_segmap(sbi);
	destroy_sit_info(sbi);
	sbi->sm_info = NULL;
	kfree(sm_info);
}