Jaegeuk Kim0a8165d2012-11-29 13:28:09 +09001/*
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002 * fs/f2fs/segment.c
3 *
4 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com/
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11#include <linux/fs.h>
12#include <linux/f2fs_fs.h>
13#include <linux/bio.h>
14#include <linux/blkdev.h>
Geert Uytterhoeven690e4a32012-12-19 22:19:30 +010015#include <linux/prefetch.h>
Jaegeuk Kim351df4b2012-11-02 17:09:16 +090016#include <linux/vmalloc.h>
17
18#include "f2fs.h"
19#include "segment.h"
20#include "node.h"
Namjae Jeon6ec178d2013-04-23 17:51:43 +090021#include <trace/events/f2fs.h>
Jaegeuk Kim351df4b2012-11-02 17:09:16 +090022
Jaegeuk Kim0a8165d2012-11-29 13:28:09 +090023/*
Jaegeuk Kim351df4b2012-11-02 17:09:16 +090024 * This function balances dirty node and dentry pages.
25 * In addition, it controls garbage collection.
26 */
27void f2fs_balance_fs(struct f2fs_sb_info *sbi)
28{
Jaegeuk Kim351df4b2012-11-02 17:09:16 +090029 /*
Jaegeuk Kim029cd282012-12-21 17:20:21 +090030 * We should do GC or end up with a checkpoint if there are too many dirty
31 * dir/node pages without enough free segments.
Jaegeuk Kim351df4b2012-11-02 17:09:16 +090032 */
Jaegeuk Kim43727522013-02-04 15:11:17 +090033 if (has_not_enough_free_secs(sbi, 0)) {
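		/* too few free sections: grab gc_mutex and reclaim space with GC */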
Jaegeuk Kim351df4b2012-11-02 17:09:16 +090034 mutex_lock(&sbi->gc_mutex);
Jaegeuk Kim408e9372013-01-03 17:55:52 +090035 f2fs_gc(sbi);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +090036 }
37}
38
39static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
40 enum dirty_type dirty_type)
41{
42 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
43
44 /* need not be added */
45 if (IS_CURSEG(sbi, segno))
46 return;
47
48 if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
49 dirty_i->nr_dirty[dirty_type]++;
50
51 if (dirty_type == DIRTY) {
52 struct seg_entry *sentry = get_seg_entry(sbi, segno);
Jaegeuk Kimb2f2c392013-04-01 13:52:09 +090053 enum dirty_type t = DIRTY_HOT_DATA;
54
Jaegeuk Kim351df4b2012-11-02 17:09:16 +090055 dirty_type = sentry->type;
Jaegeuk Kimb2f2c392013-04-01 13:52:09 +090056
Jaegeuk Kim351df4b2012-11-02 17:09:16 +090057 if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
58 dirty_i->nr_dirty[dirty_type]++;
Jaegeuk Kimb2f2c392013-04-01 13:52:09 +090059
60 /* Only one bitmap should be set */
61 for (; t <= DIRTY_COLD_NODE; t++) {
62 if (t == dirty_type)
63 continue;
64 if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
65 dirty_i->nr_dirty[t]--;
66 }
Jaegeuk Kim351df4b2012-11-02 17:09:16 +090067 }
68}
69
70static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
71 enum dirty_type dirty_type)
72{
73 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
74
75 if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
76 dirty_i->nr_dirty[dirty_type]--;
77
78 if (dirty_type == DIRTY) {
Jaegeuk Kimb2f2c392013-04-01 13:52:09 +090079 enum dirty_type t = DIRTY_HOT_DATA;
80
81 /* clear all the bitmaps */
82 for (; t <= DIRTY_COLD_NODE; t++)
83 if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
84 dirty_i->nr_dirty[t]--;
85
Jaegeuk Kim5ec4e492013-03-31 13:26:03 +090086 if (get_valid_blocks(sbi, segno, sbi->segs_per_sec) == 0)
87 clear_bit(GET_SECNO(sbi, segno),
88 dirty_i->victim_secmap);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +090089 }
90}
91
Jaegeuk Kim0a8165d2012-11-29 13:28:09 +090092/*
Jaegeuk Kim351df4b2012-11-02 17:09:16 +090093 * No error such as -ENOMEM should occur here.
 94 * Adding a dirty entry into the seglist is not a critical operation.
 95 * If a given segment is one of the current working segments, it won't be added.
96 */
97void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
98{
99 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
100 unsigned short valid_blocks;
101
102 if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
103 return;
104
105 mutex_lock(&dirty_i->seglist_lock);
106
107 valid_blocks = get_valid_blocks(sbi, segno, 0);
108
109 if (valid_blocks == 0) {
110 __locate_dirty_segment(sbi, segno, PRE);
111 __remove_dirty_segment(sbi, segno, DIRTY);
112 } else if (valid_blocks < sbi->blocks_per_seg) {
113 __locate_dirty_segment(sbi, segno, DIRTY);
114 } else {
115 /* Recovery routine with SSR needs this */
116 __remove_dirty_segment(sbi, segno, DIRTY);
117 }
118
119 mutex_unlock(&dirty_i->seglist_lock);
120 return;
121}
122
Jaegeuk Kim0a8165d2012-11-29 13:28:09 +0900123/*
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900124 * Should call clear_prefree_segments after checkpoint is done.
125 */
126static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
127{
128 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
Haicheng Li81fb5e82013-05-14 18:20:28 +0800129 unsigned int segno = -1;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900130 unsigned int total_segs = TOTAL_SEGS(sbi);
131
132 mutex_lock(&dirty_i->seglist_lock);
133 while (1) {
134 segno = find_next_bit(dirty_i->dirty_segmap[PRE], total_segs,
Haicheng Li81fb5e82013-05-14 18:20:28 +0800135 segno + 1);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900136 if (segno >= total_segs)
137 break;
138 __set_test_and_free(sbi, segno);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900139 }
140 mutex_unlock(&dirty_i->seglist_lock);
141}
142
143void clear_prefree_segments(struct f2fs_sb_info *sbi)
144{
145 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
Haicheng Li81fb5e82013-05-14 18:20:28 +0800146 unsigned int segno = -1;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900147 unsigned int total_segs = TOTAL_SEGS(sbi);
148
149 mutex_lock(&dirty_i->seglist_lock);
150 while (1) {
151 segno = find_next_bit(dirty_i->dirty_segmap[PRE], total_segs,
Haicheng Li81fb5e82013-05-14 18:20:28 +0800152 segno + 1);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900153 if (segno >= total_segs)
154 break;
155
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900156 if (test_and_clear_bit(segno, dirty_i->dirty_segmap[PRE]))
157 dirty_i->nr_dirty[PRE]--;
158
159 /* Let's use trim */
160 if (test_opt(sbi, DISCARD))
161 blkdev_issue_discard(sbi->sb->s_bdev,
162 START_BLOCK(sbi, segno) <<
163 sbi->log_sectors_per_block,
164 1 << (sbi->log_sectors_per_block +
165 sbi->log_blocks_per_seg),
166 GFP_NOFS, 0);
167 }
168 mutex_unlock(&dirty_i->seglist_lock);
169}
170
171static void __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
172{
173 struct sit_info *sit_i = SIT_I(sbi);
174 if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap))
175 sit_i->dirty_sentries++;
176}
177
178static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type,
179 unsigned int segno, int modified)
180{
181 struct seg_entry *se = get_seg_entry(sbi, segno);
182 se->type = type;
183 if (modified)
184 __mark_sit_entry_dirty(sbi, segno);
185}
186
187static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
188{
189 struct seg_entry *se;
190 unsigned int segno, offset;
191 long int new_vblocks;
192
193 segno = GET_SEGNO(sbi, blkaddr);
194
195 se = get_seg_entry(sbi, segno);
196 new_vblocks = se->valid_blocks + del;
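	/* block offset of blkaddr inside its segment; blocks_per_seg is a power of two */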
197 offset = GET_SEGOFF_FROM_SEG0(sbi, blkaddr) & (sbi->blocks_per_seg - 1);
198
199 BUG_ON((new_vblocks >> (sizeof(unsigned short) << 3) ||
200 (new_vblocks > sbi->blocks_per_seg)));
201
202 se->valid_blocks = new_vblocks;
203 se->mtime = get_mtime(sbi);
204 SIT_I(sbi)->max_mtime = se->mtime;
205
206 /* Update valid block bitmap */
207 if (del > 0) {
208 if (f2fs_set_bit(offset, se->cur_valid_map))
209 BUG();
210 } else {
211 if (!f2fs_clear_bit(offset, se->cur_valid_map))
212 BUG();
213 }
214 if (!f2fs_test_bit(offset, se->ckpt_valid_map))
215 se->ckpt_valid_blocks += del;
216
217 __mark_sit_entry_dirty(sbi, segno);
218
219 /* update total number of valid blocks to be written in ckpt area */
220 SIT_I(sbi)->written_valid_blocks += del;
221
222 if (sbi->segs_per_sec > 1)
223 get_sec_entry(sbi, segno)->valid_blocks += del;
224}
225
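/*
 * Move one block's accounting from old_blkaddr to new_blkaddr: the segment
 * of the new address gains a valid block, and the old one (if any) loses one.
 */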
226static void refresh_sit_entry(struct f2fs_sb_info *sbi,
227 block_t old_blkaddr, block_t new_blkaddr)
228{
229 update_sit_entry(sbi, new_blkaddr, 1);
230 if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
231 update_sit_entry(sbi, old_blkaddr, -1);
232}
233
234void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
235{
236 unsigned int segno = GET_SEGNO(sbi, addr);
237 struct sit_info *sit_i = SIT_I(sbi);
238
239 BUG_ON(addr == NULL_ADDR);
240 if (addr == NEW_ADDR)
241 return;
242
243 /* add it into sit main buffer */
244 mutex_lock(&sit_i->sentry_lock);
245
246 update_sit_entry(sbi, addr, -1);
247
248 /* add it into dirty seglist */
249 locate_dirty_segment(sbi, segno);
250
251 mutex_unlock(&sit_i->sentry_lock);
252}
253
Jaegeuk Kim0a8165d2012-11-29 13:28:09 +0900254/*
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900255 * This function should be called with the curseg_mutex lock held.
256 */
257static void __add_sum_entry(struct f2fs_sb_info *sbi, int type,
258 struct f2fs_summary *sum, unsigned short offset)
259{
260 struct curseg_info *curseg = CURSEG_I(sbi, type);
261 void *addr = curseg->sum_blk;
262 addr += offset * sizeof(struct f2fs_summary);
263 memcpy(addr, sum, sizeof(struct f2fs_summary));
264 return;
265}
266
Jaegeuk Kim0a8165d2012-11-29 13:28:09 +0900267/*
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900268 * Calculate the number of current summary pages for writing
269 */
270int npages_for_summary_flush(struct f2fs_sb_info *sbi)
271{
272 int total_size_bytes = 0;
273 int valid_sum_count = 0;
274 int i, sum_space;
275
276 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
277 if (sbi->ckpt->alloc_type[i] == SSR)
278 valid_sum_count += sbi->blocks_per_seg;
279 else
280 valid_sum_count += curseg_blkoff(sbi, i);
281 }
282
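	/*
	 * Rough space estimate: one entry (plus a spare byte) per valid summary,
	 * the NAT and SIT journals with their 2-byte counters, compared against
	 * the usable space per page once the footer is reserved (1..3 pages).
	 */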
283 total_size_bytes = valid_sum_count * (SUMMARY_SIZE + 1)
284 + sizeof(struct nat_journal) + 2
285 + sizeof(struct sit_journal) + 2;
286 sum_space = PAGE_CACHE_SIZE - SUM_FOOTER_SIZE;
287 if (total_size_bytes < sum_space)
288 return 1;
289 else if (total_size_bytes < 2 * sum_space)
290 return 2;
291 return 3;
292}
293
Jaegeuk Kim0a8165d2012-11-29 13:28:09 +0900294/*
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900295 * Caller should put this summary page
296 */
297struct page *get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
298{
299 return get_meta_page(sbi, GET_SUM_BLOCK(sbi, segno));
300}
301
302static void write_sum_page(struct f2fs_sb_info *sbi,
303 struct f2fs_summary_block *sum_blk, block_t blk_addr)
304{
305 struct page *page = grab_meta_page(sbi, blk_addr);
306 void *kaddr = page_address(page);
307 memcpy(kaddr, sum_blk, PAGE_CACHE_SIZE);
308 set_page_dirty(page);
309 f2fs_put_page(page, 1);
310}
311
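/*
 * Pick a section that is entirely prefree (and had no valid blocks at the
 * last checkpoint) so its first segment can be reused as the next current
 * segment; returns NULL_SEGNO when no such section may be reused.
 */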
Jaegeuk Kim5ec4e492013-03-31 13:26:03 +0900312static unsigned int check_prefree_segments(struct f2fs_sb_info *sbi, int type)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900313{
314 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
315 unsigned long *prefree_segmap = dirty_i->dirty_segmap[PRE];
Jaegeuk Kim5ec4e492013-03-31 13:26:03 +0900316 unsigned int segno;
317 unsigned int ofs = 0;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900318
319 /*
 320 * If there are not enough reserved sections,
321 * we should not reuse prefree segments.
322 */
Jaegeuk Kim43727522013-02-04 15:11:17 +0900323 if (has_not_enough_free_secs(sbi, 0))
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900324 return NULL_SEGNO;
325
326 /*
 327 * A NODE page should not reuse a prefree segment,
 328 * since that information is used for SPOR.
329 */
330 if (IS_NODESEG(type))
331 return NULL_SEGNO;
332next:
Jaegeuk Kim5ec4e492013-03-31 13:26:03 +0900333 segno = find_next_bit(prefree_segmap, TOTAL_SEGS(sbi), ofs);
334 ofs += sbi->segs_per_sec;
335
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900336 if (segno < TOTAL_SEGS(sbi)) {
Jaegeuk Kim5ec4e492013-03-31 13:26:03 +0900337 int i;
338
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900339 /* skip intermediate segments in a section */
Jaegeuk Kim5ec4e492013-03-31 13:26:03 +0900340 if (segno % sbi->segs_per_sec)
341 goto next;
342
343 /* skip if the section is currently used */
344 if (sec_usage_check(sbi, GET_SECNO(sbi, segno)))
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900345 goto next;
346
347 /* skip if whole section is not prefree */
Jaegeuk Kim5ec4e492013-03-31 13:26:03 +0900348 for (i = 1; i < sbi->segs_per_sec; i++)
349 if (!test_bit(segno + i, prefree_segmap))
350 goto next;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900351
352 /* skip if whole section was not free at the last checkpoint */
Jaegeuk Kim5ec4e492013-03-31 13:26:03 +0900353 for (i = 0; i < sbi->segs_per_sec; i++)
354 if (get_seg_entry(sbi, segno + i)->ckpt_valid_blocks)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900355 goto next;
Jaegeuk Kim5ec4e492013-03-31 13:26:03 +0900356
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900357 return segno;
358 }
359 return NULL_SEGNO;
360}
361
Jaegeuk Kim60374682013-03-31 13:58:51 +0900362static int is_next_segment_free(struct f2fs_sb_info *sbi, int type)
363{
364 struct curseg_info *curseg = CURSEG_I(sbi, type);
Haicheng Li81fb5e82013-05-14 18:20:28 +0800365 unsigned int segno = curseg->segno + 1;
Jaegeuk Kim60374682013-03-31 13:58:51 +0900366 struct free_segmap_info *free_i = FREE_I(sbi);
367
Haicheng Li81fb5e82013-05-14 18:20:28 +0800368 if (segno < TOTAL_SEGS(sbi) && segno % sbi->segs_per_sec)
369 return !test_bit(segno, free_i->free_segmap);
Jaegeuk Kim60374682013-03-31 13:58:51 +0900370 return 0;
371}
372
Jaegeuk Kim0a8165d2012-11-29 13:28:09 +0900373/*
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900374 * Find a new segment from the free segments bitmap in the right order.
 375 * This function should always succeed; otherwise it is a BUG.
376 */
377static void get_new_segment(struct f2fs_sb_info *sbi,
378 unsigned int *newseg, bool new_sec, int dir)
379{
380 struct free_segmap_info *free_i = FREE_I(sbi);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900381 unsigned int segno, secno, zoneno;
Jaegeuk Kim53cf9522013-03-31 12:39:49 +0900382 unsigned int total_zones = TOTAL_SECS(sbi) / sbi->secs_per_zone;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900383 unsigned int hint = *newseg / sbi->segs_per_sec;
384 unsigned int old_zoneno = GET_ZONENO_FROM_SEGNO(sbi, *newseg);
385 unsigned int left_start = hint;
386 bool init = true;
387 int go_left = 0;
388 int i;
389
390 write_lock(&free_i->segmap_lock);
391
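	/* fast path: see if the next free segment lies in the same section */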
392 if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
393 segno = find_next_zero_bit(free_i->free_segmap,
394 TOTAL_SEGS(sbi), *newseg + 1);
Jaegeuk Kim33afa7f2013-03-31 12:59:53 +0900395 if (segno - *newseg < sbi->segs_per_sec -
396 (*newseg % sbi->segs_per_sec))
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900397 goto got_it;
398 }
399find_other_zone:
Jaegeuk Kim53cf9522013-03-31 12:39:49 +0900400 secno = find_next_zero_bit(free_i->free_secmap, TOTAL_SECS(sbi), hint);
401 if (secno >= TOTAL_SECS(sbi)) {
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900402 if (dir == ALLOC_RIGHT) {
403 secno = find_next_zero_bit(free_i->free_secmap,
Jaegeuk Kim53cf9522013-03-31 12:39:49 +0900404 TOTAL_SECS(sbi), 0);
405 BUG_ON(secno >= TOTAL_SECS(sbi));
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900406 } else {
407 go_left = 1;
408 left_start = hint - 1;
409 }
410 }
411 if (go_left == 0)
412 goto skip_left;
413
414 while (test_bit(left_start, free_i->free_secmap)) {
415 if (left_start > 0) {
416 left_start--;
417 continue;
418 }
419 left_start = find_next_zero_bit(free_i->free_secmap,
Jaegeuk Kim53cf9522013-03-31 12:39:49 +0900420 TOTAL_SECS(sbi), 0);
421 BUG_ON(left_start >= TOTAL_SECS(sbi));
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900422 break;
423 }
424 secno = left_start;
425skip_left:
426 hint = secno;
427 segno = secno * sbi->segs_per_sec;
428 zoneno = secno / sbi->secs_per_zone;
429
430 /* give up on finding another zone */
431 if (!init)
432 goto got_it;
433 if (sbi->secs_per_zone == 1)
434 goto got_it;
435 if (zoneno == old_zoneno)
436 goto got_it;
437 if (dir == ALLOC_LEFT) {
438 if (!go_left && zoneno + 1 >= total_zones)
439 goto got_it;
440 if (go_left && zoneno == 0)
441 goto got_it;
442 }
443 for (i = 0; i < NR_CURSEG_TYPE; i++)
444 if (CURSEG_I(sbi, i)->zone == zoneno)
445 break;
446
447 if (i < NR_CURSEG_TYPE) {
 448 /* zone is in use, try another */
449 if (go_left)
450 hint = zoneno * sbi->secs_per_zone - 1;
451 else if (zoneno + 1 >= total_zones)
452 hint = 0;
453 else
454 hint = (zoneno + 1) * sbi->secs_per_zone;
455 init = false;
456 goto find_other_zone;
457 }
458got_it:
459 /* set it as dirty segment in free segmap */
460 BUG_ON(test_bit(segno, free_i->free_segmap));
461 __set_inuse(sbi, segno);
462 *newseg = segno;
463 write_unlock(&free_i->segmap_lock);
464}
465
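/* Make next_segno the current segment and reinitialize its summary footer */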
466static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
467{
468 struct curseg_info *curseg = CURSEG_I(sbi, type);
469 struct summary_footer *sum_footer;
470
471 curseg->segno = curseg->next_segno;
472 curseg->zone = GET_ZONENO_FROM_SEGNO(sbi, curseg->segno);
473 curseg->next_blkoff = 0;
474 curseg->next_segno = NULL_SEGNO;
475
476 sum_footer = &(curseg->sum_blk->footer);
477 memset(sum_footer, 0, sizeof(struct summary_footer));
478 if (IS_DATASEG(type))
479 SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
480 if (IS_NODESEG(type))
481 SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
482 __set_sit_entry_type(sbi, type, curseg->segno, modified);
483}
484
Jaegeuk Kim0a8165d2012-11-29 13:28:09 +0900485/*
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900486 * Allocate a current working segment.
487 * This function always allocates a free segment in LFS manner.
488 */
489static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
490{
491 struct curseg_info *curseg = CURSEG_I(sbi, type);
492 unsigned int segno = curseg->segno;
493 int dir = ALLOC_LEFT;
494
495 write_sum_page(sbi, curseg->sum_blk,
Haicheng Li81fb5e82013-05-14 18:20:28 +0800496 GET_SUM_BLOCK(sbi, segno));
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900497 if (type == CURSEG_WARM_DATA || type == CURSEG_COLD_DATA)
498 dir = ALLOC_RIGHT;
499
500 if (test_opt(sbi, NOHEAP))
501 dir = ALLOC_RIGHT;
502
503 get_new_segment(sbi, &segno, new_sec, dir);
504 curseg->next_segno = segno;
505 reset_curseg(sbi, type, 1);
506 curseg->alloc_type = LFS;
507}
508
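/*
 * Find the first block at or after 'start' that is free in both the current
 * and the checkpointed valid-block bitmaps; used when filling an SSR segment.
 */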
509static void __next_free_blkoff(struct f2fs_sb_info *sbi,
510 struct curseg_info *seg, block_t start)
511{
512 struct seg_entry *se = get_seg_entry(sbi, seg->segno);
513 block_t ofs;
514 for (ofs = start; ofs < sbi->blocks_per_seg; ofs++) {
515 if (!f2fs_test_bit(ofs, se->ckpt_valid_map)
516 && !f2fs_test_bit(ofs, se->cur_valid_map))
517 break;
518 }
519 seg->next_blkoff = ofs;
520}
521
Jaegeuk Kim0a8165d2012-11-29 13:28:09 +0900522/*
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900523 * If a segment is written in LFS manner, the next block offset is just obtained
 524 * by increasing the current block offset. However, if a segment is written in
 525 * SSR manner, the next block offset is obtained by calling __next_free_blkoff.
526 */
527static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
528 struct curseg_info *seg)
529{
530 if (seg->alloc_type == SSR)
531 __next_free_blkoff(sbi, seg, seg->next_blkoff + 1);
532 else
533 seg->next_blkoff++;
534}
535
Jaegeuk Kim0a8165d2012-11-29 13:28:09 +0900536/*
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900537 * This function always allocates a used segment (from the dirty seglist) in SSR
 538 * manner, so it has to recover the existing valid block information of that segment.
539 */
540static void change_curseg(struct f2fs_sb_info *sbi, int type, bool reuse)
541{
542 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
543 struct curseg_info *curseg = CURSEG_I(sbi, type);
544 unsigned int new_segno = curseg->next_segno;
545 struct f2fs_summary_block *sum_node;
546 struct page *sum_page;
547
548 write_sum_page(sbi, curseg->sum_blk,
549 GET_SUM_BLOCK(sbi, curseg->segno));
550 __set_test_and_inuse(sbi, new_segno);
551
552 mutex_lock(&dirty_i->seglist_lock);
553 __remove_dirty_segment(sbi, new_segno, PRE);
554 __remove_dirty_segment(sbi, new_segno, DIRTY);
555 mutex_unlock(&dirty_i->seglist_lock);
556
557 reset_curseg(sbi, type, 1);
558 curseg->alloc_type = SSR;
559 __next_free_blkoff(sbi, curseg, 0);
560
561 if (reuse) {
562 sum_page = get_sum_page(sbi, new_segno);
563 sum_node = (struct f2fs_summary_block *)page_address(sum_page);
564 memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
565 f2fs_put_page(sum_page, 1);
566 }
567}
568
Jaegeuk Kim43727522013-02-04 15:11:17 +0900569static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
570{
571 struct curseg_info *curseg = CURSEG_I(sbi, type);
572 const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;
573
574 if (IS_NODESEG(type) || !has_not_enough_free_secs(sbi, 0))
575 return v_ops->get_victim(sbi,
576 &(curseg)->next_segno, BG_GC, type, SSR);
577
578 /* For data segments, let's do SSR more intensively */
579 for (; type >= CURSEG_HOT_DATA; type--)
580 if (v_ops->get_victim(sbi, &(curseg)->next_segno,
581 BG_GC, type, SSR))
582 return 1;
583 return 0;
584}
585
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900586/*
 587 * Flush out the current segment and replace it with a new segment.
 588 * This function should always succeed; otherwise it is a BUG.
589 */
590static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
591 int type, bool force)
592{
593 struct curseg_info *curseg = CURSEG_I(sbi, type);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900594
595 if (force) {
596 new_curseg(sbi, type, true);
597 goto out;
598 }
599
Jaegeuk Kim5ec4e492013-03-31 13:26:03 +0900600 curseg->next_segno = check_prefree_segments(sbi, type);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900601
602 if (curseg->next_segno != NULL_SEGNO)
603 change_curseg(sbi, type, false);
604 else if (type == CURSEG_WARM_NODE)
605 new_curseg(sbi, type, false);
Jaegeuk Kim60374682013-03-31 13:58:51 +0900606 else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type))
607 new_curseg(sbi, type, false);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900608 else if (need_SSR(sbi) && get_ssr_segment(sbi, type))
609 change_curseg(sbi, type, true);
610 else
611 new_curseg(sbi, type, false);
612out:
Namjae Jeon35b09d82013-05-23 22:57:53 +0900613#ifdef CONFIG_F2FS_STAT_FS
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900614 sbi->segment_count[curseg->alloc_type]++;
Namjae Jeon35b09d82013-05-23 22:57:53 +0900615#endif
616 return;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900617}
618
619void allocate_new_segments(struct f2fs_sb_info *sbi)
620{
621 struct curseg_info *curseg;
622 unsigned int old_curseg;
623 int i;
624
625 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
626 curseg = CURSEG_I(sbi, i);
627 old_curseg = curseg->segno;
628 SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true);
629 locate_dirty_segment(sbi, old_curseg);
630 }
631}
632
633static const struct segment_allocation default_salloc_ops = {
634 .allocate_segment = allocate_segment_by_default,
635};
636
637static void f2fs_end_io_write(struct bio *bio, int err)
638{
639 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
640 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
641 struct bio_private *p = bio->bi_private;
642
643 do {
644 struct page *page = bvec->bv_page;
645
646 if (--bvec >= bio->bi_io_vec)
647 prefetchw(&bvec->bv_page->flags);
648 if (!uptodate) {
649 SetPageError(page);
650 if (page->mapping)
651 set_bit(AS_EIO, &page->mapping->flags);
Jaegeuk Kim25ca9232012-11-28 16:12:41 +0900652 set_ckpt_flags(p->sbi->ckpt, CP_ERROR_FLAG);
Jaegeuk Kim577e3492013-01-24 19:56:11 +0900653 p->sbi->sb->s_flags |= MS_RDONLY;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900654 }
655 end_page_writeback(page);
656 dec_page_count(p->sbi, F2FS_WRITEBACK);
657 } while (bvec >= bio->bi_io_vec);
658
659 if (p->is_sync)
660 complete(p->wait);
661 kfree(p);
662 bio_put(bio);
663}
664
Jaegeuk Kim3cd8a232012-12-10 09:26:05 +0900665struct bio *f2fs_bio_alloc(struct block_device *bdev, int npages)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900666{
667 struct bio *bio;
Jaegeuk Kim3cd8a232012-12-10 09:26:05 +0900668 struct bio_private *priv;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900669retry:
Jaegeuk Kim3cd8a232012-12-10 09:26:05 +0900670 priv = kmalloc(sizeof(struct bio_private), GFP_NOFS);
671 if (!priv) {
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900672 cond_resched();
Namjae Jeonc2129912012-12-08 14:53:40 +0900673 goto retry;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900674 }
Jaegeuk Kim3cd8a232012-12-10 09:26:05 +0900675
676 /* No failure on bio allocation */
677 bio = bio_alloc(GFP_NOIO, npages);
678 bio->bi_bdev = bdev;
679 bio->bi_private = priv;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900680 return bio;
681}
682
683static void do_submit_bio(struct f2fs_sb_info *sbi,
684 enum page_type type, bool sync)
685{
686 int rw = sync ? WRITE_SYNC : WRITE;
687 enum page_type btype = type > META ? META : type;
688
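	/* checkpoint-related writes (META_FLUSH) are forced to media with flush + FUA */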
689 if (type >= META_FLUSH)
690 rw = WRITE_FLUSH_FUA;
691
Namjae Jeon86804412013-04-25 11:45:21 +0900692 if (btype == META)
693 rw |= REQ_META;
694
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900695 if (sbi->bio[btype]) {
696 struct bio_private *p = sbi->bio[btype]->bi_private;
697 p->sbi = sbi;
698 sbi->bio[btype]->bi_end_io = f2fs_end_io_write;
Namjae Jeon6ec178d2013-04-23 17:51:43 +0900699
700 trace_f2fs_do_submit_bio(sbi->sb, btype, sync, sbi->bio[btype]);
701
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900702 if (type == META_FLUSH) {
703 DECLARE_COMPLETION_ONSTACK(wait);
704 p->is_sync = true;
705 p->wait = &wait;
706 submit_bio(rw, sbi->bio[btype]);
707 wait_for_completion(&wait);
708 } else {
709 p->is_sync = false;
710 submit_bio(rw, sbi->bio[btype]);
711 }
712 sbi->bio[btype] = NULL;
713 }
714}
715
716void f2fs_submit_bio(struct f2fs_sb_info *sbi, enum page_type type, bool sync)
717{
718 down_write(&sbi->bio_sem);
719 do_submit_bio(sbi, type, sync);
720 up_write(&sbi->bio_sem);
721}
722
723static void submit_write_page(struct f2fs_sb_info *sbi, struct page *page,
724 block_t blk_addr, enum page_type type)
725{
726 struct block_device *bdev = sbi->sb->s_bdev;
727
728 verify_block_addr(sbi, blk_addr);
729
730 down_write(&sbi->bio_sem);
731
732 inc_page_count(sbi, F2FS_WRITEBACK);
733
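	/* not contiguous with the pending bio: submit it before adding this page */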
734 if (sbi->bio[type] && sbi->last_block_in_bio[type] != blk_addr - 1)
735 do_submit_bio(sbi, type, false);
736alloc_new:
Jaegeuk Kim3cd8a232012-12-10 09:26:05 +0900737 if (sbi->bio[type] == NULL) {
Jaegeuk Kimac5d1562013-04-29 16:58:39 +0900738 sbi->bio[type] = f2fs_bio_alloc(bdev, max_hw_blocks(sbi));
Jaegeuk Kim3cd8a232012-12-10 09:26:05 +0900739 sbi->bio[type]->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
740 /*
 741 * The end_io callback will be assigned at the submission phase.
742 * Until then, let bio_add_page() merge consecutive IOs as much
743 * as possible.
744 */
745 }
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900746
747 if (bio_add_page(sbi->bio[type], page, PAGE_CACHE_SIZE, 0) <
748 PAGE_CACHE_SIZE) {
749 do_submit_bio(sbi, type, false);
750 goto alloc_new;
751 }
752
753 sbi->last_block_in_bio[type] = blk_addr;
754
755 up_write(&sbi->bio_sem);
Namjae Jeon6ec178d2013-04-23 17:51:43 +0900756 trace_f2fs_submit_write_page(page, blk_addr, type);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900757}
758
759static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type)
760{
761 struct curseg_info *curseg = CURSEG_I(sbi, type);
762 if (curseg->next_blkoff < sbi->blocks_per_seg)
763 return true;
764 return false;
765}
766
767static int __get_segment_type_2(struct page *page, enum page_type p_type)
768{
769 if (p_type == DATA)
770 return CURSEG_HOT_DATA;
771 else
772 return CURSEG_HOT_NODE;
773}
774
775static int __get_segment_type_4(struct page *page, enum page_type p_type)
776{
777 if (p_type == DATA) {
778 struct inode *inode = page->mapping->host;
779
780 if (S_ISDIR(inode->i_mode))
781 return CURSEG_HOT_DATA;
782 else
783 return CURSEG_COLD_DATA;
784 } else {
785 if (IS_DNODE(page) && !is_cold_node(page))
786 return CURSEG_HOT_NODE;
787 else
788 return CURSEG_COLD_NODE;
789 }
790}
791
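/*
 * Six active logs: directory data is hot, data marked cold (or in a cold
 * file) is cold, other data is warm; direct node pages are hot or warm,
 * and indirect node pages are cold.
 */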
792static int __get_segment_type_6(struct page *page, enum page_type p_type)
793{
794 if (p_type == DATA) {
795 struct inode *inode = page->mapping->host;
796
797 if (S_ISDIR(inode->i_mode))
798 return CURSEG_HOT_DATA;
799 else if (is_cold_data(page) || is_cold_file(inode))
800 return CURSEG_COLD_DATA;
801 else
802 return CURSEG_WARM_DATA;
803 } else {
804 if (IS_DNODE(page))
805 return is_cold_node(page) ? CURSEG_WARM_NODE :
806 CURSEG_HOT_NODE;
807 else
808 return CURSEG_COLD_NODE;
809 }
810}
811
812static int __get_segment_type(struct page *page, enum page_type p_type)
813{
814 struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
815 switch (sbi->active_logs) {
816 case 2:
817 return __get_segment_type_2(page, p_type);
818 case 4:
819 return __get_segment_type_4(page, p_type);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900820 }
Jaegeuk Kim12a67142012-12-21 11:47:05 +0900821 /* NR_CURSEG_TYPE(6) logs by default */
822 BUG_ON(sbi->active_logs != NR_CURSEG_TYPE);
823 return __get_segment_type_6(page, p_type);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900824}
825
826static void do_write_page(struct f2fs_sb_info *sbi, struct page *page,
827 block_t old_blkaddr, block_t *new_blkaddr,
828 struct f2fs_summary *sum, enum page_type p_type)
829{
830 struct sit_info *sit_i = SIT_I(sbi);
831 struct curseg_info *curseg;
832 unsigned int old_cursegno;
833 int type;
834
835 type = __get_segment_type(page, p_type);
836 curseg = CURSEG_I(sbi, type);
837
838 mutex_lock(&curseg->curseg_mutex);
839
840 *new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
841 old_cursegno = curseg->segno;
842
843 /*
 844 * __add_sum_entry should be called with the curseg_mutex held,
 845 * because this function updates a summary entry in the
 846 * current summary block.
847 */
848 __add_sum_entry(sbi, type, sum, curseg->next_blkoff);
849
850 mutex_lock(&sit_i->sentry_lock);
851 __refresh_next_blkoff(sbi, curseg);
Namjae Jeon35b09d82013-05-23 22:57:53 +0900852#ifdef CONFIG_F2FS_STAT_FS
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900853 sbi->block_count[curseg->alloc_type]++;
Namjae Jeon35b09d82013-05-23 22:57:53 +0900854#endif
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900855
856 /*
857 * SIT information should be updated before segment allocation,
 858 * since SSR needs the latest valid block information.
859 */
860 refresh_sit_entry(sbi, old_blkaddr, *new_blkaddr);
861
862 if (!__has_curseg_space(sbi, type))
863 sit_i->s_ops->allocate_segment(sbi, type, false);
864
865 locate_dirty_segment(sbi, old_cursegno);
866 locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
867 mutex_unlock(&sit_i->sentry_lock);
868
869 if (p_type == NODE)
870 fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));
871
872 /* writeout dirty page into bdev */
873 submit_write_page(sbi, page, *new_blkaddr, p_type);
874
875 mutex_unlock(&curseg->curseg_mutex);
876}
877
Jaegeuk Kim577e3492013-01-24 19:56:11 +0900878void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900879{
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900880 set_page_writeback(page);
881 submit_write_page(sbi, page, page->index, META);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900882}
883
884void write_node_page(struct f2fs_sb_info *sbi, struct page *page,
885 unsigned int nid, block_t old_blkaddr, block_t *new_blkaddr)
886{
887 struct f2fs_summary sum;
888 set_summary(&sum, nid, 0, 0);
889 do_write_page(sbi, page, old_blkaddr, new_blkaddr, &sum, NODE);
890}
891
892void write_data_page(struct inode *inode, struct page *page,
893 struct dnode_of_data *dn, block_t old_blkaddr,
894 block_t *new_blkaddr)
895{
896 struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
897 struct f2fs_summary sum;
898 struct node_info ni;
899
900 BUG_ON(old_blkaddr == NULL_ADDR);
901 get_node_info(sbi, dn->nid, &ni);
902 set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
903
904 do_write_page(sbi, page, old_blkaddr,
905 new_blkaddr, &sum, DATA);
906}
907
908void rewrite_data_page(struct f2fs_sb_info *sbi, struct page *page,
909 block_t old_blk_addr)
910{
911 submit_write_page(sbi, page, old_blk_addr, DATA);
912}
913
914void recover_data_page(struct f2fs_sb_info *sbi,
915 struct page *page, struct f2fs_summary *sum,
916 block_t old_blkaddr, block_t new_blkaddr)
917{
918 struct sit_info *sit_i = SIT_I(sbi);
919 struct curseg_info *curseg;
920 unsigned int segno, old_cursegno;
921 struct seg_entry *se;
922 int type;
923
924 segno = GET_SEGNO(sbi, new_blkaddr);
925 se = get_seg_entry(sbi, segno);
926 type = se->type;
927
928 if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
929 if (old_blkaddr == NULL_ADDR)
930 type = CURSEG_COLD_DATA;
931 else
932 type = CURSEG_WARM_DATA;
933 }
934 curseg = CURSEG_I(sbi, type);
935
936 mutex_lock(&curseg->curseg_mutex);
937 mutex_lock(&sit_i->sentry_lock);
938
939 old_cursegno = curseg->segno;
940
941 /* change the current segment */
942 if (segno != curseg->segno) {
943 curseg->next_segno = segno;
944 change_curseg(sbi, type, true);
945 }
946
947 curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, new_blkaddr) &
948 (sbi->blocks_per_seg - 1);
949 __add_sum_entry(sbi, type, sum, curseg->next_blkoff);
950
951 refresh_sit_entry(sbi, old_blkaddr, new_blkaddr);
952
953 locate_dirty_segment(sbi, old_cursegno);
954 locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
955
956 mutex_unlock(&sit_i->sentry_lock);
957 mutex_unlock(&curseg->curseg_mutex);
958}
959
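/*
 * Rewrite a node page at new_blkaddr through the warm node log, write it
 * out synchronously, and leave the log positioned at the node's recorded
 * next block address.
 */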
960void rewrite_node_page(struct f2fs_sb_info *sbi,
961 struct page *page, struct f2fs_summary *sum,
962 block_t old_blkaddr, block_t new_blkaddr)
963{
964 struct sit_info *sit_i = SIT_I(sbi);
965 int type = CURSEG_WARM_NODE;
966 struct curseg_info *curseg;
967 unsigned int segno, old_cursegno;
968 block_t next_blkaddr = next_blkaddr_of_node(page);
969 unsigned int next_segno = GET_SEGNO(sbi, next_blkaddr);
970
971 curseg = CURSEG_I(sbi, type);
972
973 mutex_lock(&curseg->curseg_mutex);
974 mutex_lock(&sit_i->sentry_lock);
975
976 segno = GET_SEGNO(sbi, new_blkaddr);
977 old_cursegno = curseg->segno;
978
979 /* change the current segment */
980 if (segno != curseg->segno) {
981 curseg->next_segno = segno;
982 change_curseg(sbi, type, true);
983 }
984 curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, new_blkaddr) &
985 (sbi->blocks_per_seg - 1);
986 __add_sum_entry(sbi, type, sum, curseg->next_blkoff);
987
988 /* change the current log to the next block addr in advance */
989 if (next_segno != segno) {
990 curseg->next_segno = next_segno;
991 change_curseg(sbi, type, true);
992 }
993 curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, next_blkaddr) &
994 (sbi->blocks_per_seg - 1);
995
996 /* rewrite node page */
997 set_page_writeback(page);
998 submit_write_page(sbi, page, new_blkaddr, NODE);
999 f2fs_submit_bio(sbi, NODE, true);
1000 refresh_sit_entry(sbi, old_blkaddr, new_blkaddr);
1001
1002 locate_dirty_segment(sbi, old_cursegno);
1003 locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
1004
1005 mutex_unlock(&sit_i->sentry_lock);
1006 mutex_unlock(&curseg->curseg_mutex);
1007}
1008
1009static int read_compacted_summaries(struct f2fs_sb_info *sbi)
1010{
1011 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
1012 struct curseg_info *seg_i;
1013 unsigned char *kaddr;
1014 struct page *page;
1015 block_t start;
1016 int i, j, offset;
1017
1018 start = start_sum_block(sbi);
1019
1020 page = get_meta_page(sbi, start++);
1021 kaddr = (unsigned char *)page_address(page);
1022
1023 /* Step 1: restore nat cache */
1024 seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
1025 memcpy(&seg_i->sum_blk->n_nats, kaddr, SUM_JOURNAL_SIZE);
1026
1027 /* Step 2: restore sit cache */
1028 seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
1029 memcpy(&seg_i->sum_blk->n_sits, kaddr + SUM_JOURNAL_SIZE,
1030 SUM_JOURNAL_SIZE);
1031 offset = 2 * SUM_JOURNAL_SIZE;
1032
1033 /* Step 3: restore summary entries */
1034 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
1035 unsigned short blk_off;
1036 unsigned int segno;
1037
1038 seg_i = CURSEG_I(sbi, i);
1039 segno = le32_to_cpu(ckpt->cur_data_segno[i]);
1040 blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
1041 seg_i->next_segno = segno;
1042 reset_curseg(sbi, i, 0);
1043 seg_i->alloc_type = ckpt->alloc_type[i];
1044 seg_i->next_blkoff = blk_off;
1045
1046 if (seg_i->alloc_type == SSR)
1047 blk_off = sbi->blocks_per_seg;
1048
1049 for (j = 0; j < blk_off; j++) {
1050 struct f2fs_summary *s;
1051 s = (struct f2fs_summary *)(kaddr + offset);
1052 seg_i->sum_blk->entries[j] = *s;
1053 offset += SUMMARY_SIZE;
1054 if (offset + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
1055 SUM_FOOTER_SIZE)
1056 continue;
1057
1058 f2fs_put_page(page, 1);
1059 page = NULL;
1060
1061 page = get_meta_page(sbi, start++);
1062 kaddr = (unsigned char *)page_address(page);
1063 offset = 0;
1064 }
1065 }
1066 f2fs_put_page(page, 1);
1067 return 0;
1068}
1069
1070static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
1071{
1072 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
1073 struct f2fs_summary_block *sum;
1074 struct curseg_info *curseg;
1075 struct page *new;
1076 unsigned short blk_off;
1077 unsigned int segno = 0;
1078 block_t blk_addr = 0;
1079
1080 /* get segment number and block addr */
1081 if (IS_DATASEG(type)) {
1082 segno = le32_to_cpu(ckpt->cur_data_segno[type]);
1083 blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type -
1084 CURSEG_HOT_DATA]);
Jaegeuk Kim25ca9232012-11-28 16:12:41 +09001085 if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG))
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001086 blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type);
1087 else
1088 blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
1089 } else {
1090 segno = le32_to_cpu(ckpt->cur_node_segno[type -
1091 CURSEG_HOT_NODE]);
1092 blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type -
1093 CURSEG_HOT_NODE]);
Jaegeuk Kim25ca9232012-11-28 16:12:41 +09001094 if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG))
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001095 blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
1096 type - CURSEG_HOT_NODE);
1097 else
1098 blk_addr = GET_SUM_BLOCK(sbi, segno);
1099 }
1100
1101 new = get_meta_page(sbi, blk_addr);
1102 sum = (struct f2fs_summary_block *)page_address(new);
1103
1104 if (IS_NODESEG(type)) {
Jaegeuk Kim25ca9232012-11-28 16:12:41 +09001105 if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG)) {
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001106 struct f2fs_summary *ns = &sum->entries[0];
1107 int i;
1108 for (i = 0; i < sbi->blocks_per_seg; i++, ns++) {
1109 ns->version = 0;
1110 ns->ofs_in_node = 0;
1111 }
1112 } else {
1113 if (restore_node_summary(sbi, segno, sum)) {
1114 f2fs_put_page(new, 1);
1115 return -EINVAL;
1116 }
1117 }
1118 }
1119
1120 /* set uncompleted segment to curseg */
1121 curseg = CURSEG_I(sbi, type);
1122 mutex_lock(&curseg->curseg_mutex);
1123 memcpy(curseg->sum_blk, sum, PAGE_CACHE_SIZE);
1124 curseg->next_segno = segno;
1125 reset_curseg(sbi, type, 0);
1126 curseg->alloc_type = ckpt->alloc_type[type];
1127 curseg->next_blkoff = blk_off;
1128 mutex_unlock(&curseg->curseg_mutex);
1129 f2fs_put_page(new, 1);
1130 return 0;
1131}
1132
1133static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
1134{
1135 int type = CURSEG_HOT_DATA;
1136
Jaegeuk Kim25ca9232012-11-28 16:12:41 +09001137 if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) {
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001138 /* restore for compacted data summary */
1139 if (read_compacted_summaries(sbi))
1140 return -EINVAL;
1141 type = CURSEG_HOT_NODE;
1142 }
1143
1144 for (; type <= CURSEG_COLD_NODE; type++)
1145 if (read_normal_summaries(sbi, type))
1146 return -EINVAL;
1147 return 0;
1148}
1149
1150static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
1151{
1152 struct page *page;
1153 unsigned char *kaddr;
1154 struct f2fs_summary *summary;
1155 struct curseg_info *seg_i;
1156 int written_size = 0;
1157 int i, j;
1158
1159 page = grab_meta_page(sbi, blkaddr++);
1160 kaddr = (unsigned char *)page_address(page);
1161
1162 /* Step 1: write nat cache */
1163 seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
1164 memcpy(kaddr, &seg_i->sum_blk->n_nats, SUM_JOURNAL_SIZE);
1165 written_size += SUM_JOURNAL_SIZE;
1166
1167 /* Step 2: write sit cache */
1168 seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
1169 memcpy(kaddr + written_size, &seg_i->sum_blk->n_sits,
1170 SUM_JOURNAL_SIZE);
1171 written_size += SUM_JOURNAL_SIZE;
1172
1173 set_page_dirty(page);
1174
1175 /* Step 3: write summary entries */
1176 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
1177 unsigned short blkoff;
1178 seg_i = CURSEG_I(sbi, i);
1179 if (sbi->ckpt->alloc_type[i] == SSR)
1180 blkoff = sbi->blocks_per_seg;
1181 else
1182 blkoff = curseg_blkoff(sbi, i);
1183
1184 for (j = 0; j < blkoff; j++) {
1185 if (!page) {
1186 page = grab_meta_page(sbi, blkaddr++);
1187 kaddr = (unsigned char *)page_address(page);
1188 written_size = 0;
1189 }
1190 summary = (struct f2fs_summary *)(kaddr + written_size);
1191 *summary = seg_i->sum_blk->entries[j];
1192 written_size += SUMMARY_SIZE;
1193 set_page_dirty(page);
1194
1195 if (written_size + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
1196 SUM_FOOTER_SIZE)
1197 continue;
1198
1199 f2fs_put_page(page, 1);
1200 page = NULL;
1201 }
1202 }
1203 if (page)
1204 f2fs_put_page(page, 1);
1205}
1206
1207static void write_normal_summaries(struct f2fs_sb_info *sbi,
1208 block_t blkaddr, int type)
1209{
1210 int i, end;
1211 if (IS_DATASEG(type))
1212 end = type + NR_CURSEG_DATA_TYPE;
1213 else
1214 end = type + NR_CURSEG_NODE_TYPE;
1215
1216 for (i = type; i < end; i++) {
1217 struct curseg_info *sum = CURSEG_I(sbi, i);
1218 mutex_lock(&sum->curseg_mutex);
1219 write_sum_page(sbi, sum->sum_blk, blkaddr + (i - type));
1220 mutex_unlock(&sum->curseg_mutex);
1221 }
1222}
1223
1224void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
1225{
Jaegeuk Kim25ca9232012-11-28 16:12:41 +09001226 if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG))
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001227 write_compacted_summaries(sbi, start_blk);
1228 else
1229 write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
1230}
1231
1232void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
1233{
Jaegeuk Kim25ca9232012-11-28 16:12:41 +09001234 if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG))
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001235 write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
1236 return;
1237}
1238
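/*
 * Search the NAT or SIT journal in the summary block for 'val' (a nid or a
 * segno); if it is absent and 'alloc' is set, reserve a new slot and return
 * its index, otherwise return -1.
 */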
1239int lookup_journal_in_cursum(struct f2fs_summary_block *sum, int type,
1240 unsigned int val, int alloc)
1241{
1242 int i;
1243
1244 if (type == NAT_JOURNAL) {
1245 for (i = 0; i < nats_in_cursum(sum); i++) {
1246 if (le32_to_cpu(nid_in_journal(sum, i)) == val)
1247 return i;
1248 }
1249 if (alloc && nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES)
1250 return update_nats_in_cursum(sum, 1);
1251 } else if (type == SIT_JOURNAL) {
1252 for (i = 0; i < sits_in_cursum(sum); i++)
1253 if (le32_to_cpu(segno_in_journal(sum, i)) == val)
1254 return i;
1255 if (alloc && sits_in_cursum(sum) < SIT_JOURNAL_ENTRIES)
1256 return update_sits_in_cursum(sum, 1);
1257 }
1258 return -1;
1259}
1260
1261static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
1262 unsigned int segno)
1263{
1264 struct sit_info *sit_i = SIT_I(sbi);
1265 unsigned int offset = SIT_BLOCK_OFFSET(sit_i, segno);
1266 block_t blk_addr = sit_i->sit_base_addr + offset;
1267
1268 check_seg_range(sbi, segno);
1269
1270 /* calculate sit block address */
1271 if (f2fs_test_bit(offset, sit_i->sit_bitmap))
1272 blk_addr += sit_i->sit_blocks;
1273
1274 return get_meta_page(sbi, blk_addr);
1275}
1276
1277static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
1278 unsigned int start)
1279{
1280 struct sit_info *sit_i = SIT_I(sbi);
1281 struct page *src_page, *dst_page;
1282 pgoff_t src_off, dst_off;
1283 void *src_addr, *dst_addr;
1284
1285 src_off = current_sit_addr(sbi, start);
1286 dst_off = next_sit_addr(sbi, src_off);
1287
1288 /* get current sit block page without lock */
1289 src_page = get_meta_page(sbi, src_off);
1290 dst_page = grab_meta_page(sbi, dst_off);
1291 BUG_ON(PageDirty(src_page));
1292
1293 src_addr = page_address(src_page);
1294 dst_addr = page_address(dst_page);
1295 memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
1296
1297 set_page_dirty(dst_page);
1298 f2fs_put_page(src_page, 1);
1299
1300 set_to_next_sit(sit_i, start);
1301
1302 return dst_page;
1303}
1304
1305static bool flush_sits_in_journal(struct f2fs_sb_info *sbi)
1306{
1307 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
1308 struct f2fs_summary_block *sum = curseg->sum_blk;
1309 int i;
1310
1311 /*
1312 * If the journal area in the current summary is full of sit entries,
1313 * all the sit entries will be flushed. Otherwise the sit entries
 1314 * cannot be replaced with newly hot sit entries.
1315 */
1316 if (sits_in_cursum(sum) >= SIT_JOURNAL_ENTRIES) {
1317 for (i = sits_in_cursum(sum) - 1; i >= 0; i--) {
1318 unsigned int segno;
1319 segno = le32_to_cpu(segno_in_journal(sum, i));
1320 __mark_sit_entry_dirty(sbi, segno);
1321 }
1322 update_sits_in_cursum(sum, -sits_in_cursum(sum));
1323 return 1;
1324 }
1325 return 0;
1326}
1327
Jaegeuk Kim0a8165d2012-11-29 13:28:09 +09001328/*
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001329 * CP calls this function, which flushes SIT entries including sit_journal,
1330 * and moves prefree segs to free segs.
1331 */
1332void flush_sit_entries(struct f2fs_sb_info *sbi)
1333{
1334 struct sit_info *sit_i = SIT_I(sbi);
1335 unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
1336 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
1337 struct f2fs_summary_block *sum = curseg->sum_blk;
1338 unsigned long nsegs = TOTAL_SEGS(sbi);
1339 struct page *page = NULL;
1340 struct f2fs_sit_block *raw_sit = NULL;
1341 unsigned int start = 0, end = 0;
1342 unsigned int segno = -1;
1343 bool flushed;
1344
1345 mutex_lock(&curseg->curseg_mutex);
1346 mutex_lock(&sit_i->sentry_lock);
1347
1348 /*
1349 * "flushed" indicates whether sit entries in journal are flushed
1350 * to the SIT area or not.
1351 */
1352 flushed = flush_sits_in_journal(sbi);
1353
1354 while ((segno = find_next_bit(bitmap, nsegs, segno + 1)) < nsegs) {
1355 struct seg_entry *se = get_seg_entry(sbi, segno);
1356 int sit_offset, offset;
1357
1358 sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);
1359
1360 if (flushed)
1361 goto to_sit_page;
1362
1363 offset = lookup_journal_in_cursum(sum, SIT_JOURNAL, segno, 1);
1364 if (offset >= 0) {
1365 segno_in_journal(sum, offset) = cpu_to_le32(segno);
1366 seg_info_to_raw_sit(se, &sit_in_journal(sum, offset));
1367 goto flush_done;
1368 }
1369to_sit_page:
1370 if (!page || (start > segno) || (segno > end)) {
1371 if (page) {
1372 f2fs_put_page(page, 1);
1373 page = NULL;
1374 }
1375
1376 start = START_SEGNO(sit_i, segno);
1377 end = start + SIT_ENTRY_PER_BLOCK - 1;
1378
1379 /* read sit block that will be updated */
1380 page = get_next_sit_page(sbi, start);
1381 raw_sit = page_address(page);
1382 }
1383
 1384 /* update entry in SIT block */
1385 seg_info_to_raw_sit(se, &raw_sit->entries[sit_offset]);
1386flush_done:
1387 __clear_bit(segno, bitmap);
1388 sit_i->dirty_sentries--;
1389 }
1390 mutex_unlock(&sit_i->sentry_lock);
1391 mutex_unlock(&curseg->curseg_mutex);
1392
1393 /* writeout last modified SIT block */
1394 f2fs_put_page(page, 1);
1395
1396 set_prefree_as_free_segments(sbi);
1397}
1398
1399static int build_sit_info(struct f2fs_sb_info *sbi)
1400{
1401 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
1402 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
1403 struct sit_info *sit_i;
1404 unsigned int sit_segs, start;
1405 char *src_bitmap, *dst_bitmap;
1406 unsigned int bitmap_size;
1407
1408 /* allocate memory for SIT information */
1409 sit_i = kzalloc(sizeof(struct sit_info), GFP_KERNEL);
1410 if (!sit_i)
1411 return -ENOMEM;
1412
1413 SM_I(sbi)->sit_info = sit_i;
1414
1415 sit_i->sentries = vzalloc(TOTAL_SEGS(sbi) * sizeof(struct seg_entry));
1416 if (!sit_i->sentries)
1417 return -ENOMEM;
1418
1419 bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));
1420 sit_i->dirty_sentries_bitmap = kzalloc(bitmap_size, GFP_KERNEL);
1421 if (!sit_i->dirty_sentries_bitmap)
1422 return -ENOMEM;
1423
1424 for (start = 0; start < TOTAL_SEGS(sbi); start++) {
1425 sit_i->sentries[start].cur_valid_map
1426 = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
1427 sit_i->sentries[start].ckpt_valid_map
1428 = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
1429 if (!sit_i->sentries[start].cur_valid_map
1430 || !sit_i->sentries[start].ckpt_valid_map)
1431 return -ENOMEM;
1432 }
1433
1434 if (sbi->segs_per_sec > 1) {
Jaegeuk Kim53cf9522013-03-31 12:39:49 +09001435 sit_i->sec_entries = vzalloc(TOTAL_SECS(sbi) *
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001436 sizeof(struct sec_entry));
1437 if (!sit_i->sec_entries)
1438 return -ENOMEM;
1439 }
1440
 1441 /* get information related to SIT */
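	/* the SIT area stores two copies of each block; segment_count_sit counts both */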
1442 sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;
1443
 1444 /* set up SIT bitmap from checkpoint pack */
1445 bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
1446 src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);
1447
Alexandru Gheorghiu79b57932013-03-28 02:24:53 +02001448 dst_bitmap = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001449 if (!dst_bitmap)
1450 return -ENOMEM;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001451
1452 /* init SIT information */
1453 sit_i->s_ops = &default_salloc_ops;
1454
1455 sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
1456 sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
1457 sit_i->written_valid_blocks = le64_to_cpu(ckpt->valid_block_count);
1458 sit_i->sit_bitmap = dst_bitmap;
1459 sit_i->bitmap_size = bitmap_size;
1460 sit_i->dirty_sentries = 0;
1461 sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
1462 sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
1463 sit_i->mounted_time = CURRENT_TIME_SEC.tv_sec;
1464 mutex_init(&sit_i->sentry_lock);
1465 return 0;
1466}
1467
1468static int build_free_segmap(struct f2fs_sb_info *sbi)
1469{
1470 struct f2fs_sm_info *sm_info = SM_I(sbi);
1471 struct free_segmap_info *free_i;
1472 unsigned int bitmap_size, sec_bitmap_size;
1473
1474 /* allocate memory for free segmap information */
1475 free_i = kzalloc(sizeof(struct free_segmap_info), GFP_KERNEL);
1476 if (!free_i)
1477 return -ENOMEM;
1478
1479 SM_I(sbi)->free_info = free_i;
1480
1481 bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));
1482 free_i->free_segmap = kmalloc(bitmap_size, GFP_KERNEL);
1483 if (!free_i->free_segmap)
1484 return -ENOMEM;
1485
Jaegeuk Kim53cf9522013-03-31 12:39:49 +09001486 sec_bitmap_size = f2fs_bitmap_size(TOTAL_SECS(sbi));
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001487 free_i->free_secmap = kmalloc(sec_bitmap_size, GFP_KERNEL);
1488 if (!free_i->free_secmap)
1489 return -ENOMEM;
1490
1491 /* set all segments as dirty temporarily */
1492 memset(free_i->free_segmap, 0xff, bitmap_size);
1493 memset(free_i->free_secmap, 0xff, sec_bitmap_size);
1494
1495 /* init free segmap information */
1496 free_i->start_segno =
1497 (unsigned int) GET_SEGNO_FROM_SEG0(sbi, sm_info->main_blkaddr);
1498 free_i->free_segments = 0;
1499 free_i->free_sections = 0;
1500 rwlock_init(&free_i->segmap_lock);
1501 return 0;
1502}
1503
1504static int build_curseg(struct f2fs_sb_info *sbi)
1505{
Namjae Jeon1042d602012-12-01 10:56:13 +09001506 struct curseg_info *array;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001507 int i;
1508
1509 array = kzalloc(sizeof(*array) * NR_CURSEG_TYPE, GFP_KERNEL);
1510 if (!array)
1511 return -ENOMEM;
1512
1513 SM_I(sbi)->curseg_array = array;
1514
1515 for (i = 0; i < NR_CURSEG_TYPE; i++) {
1516 mutex_init(&array[i].curseg_mutex);
1517 array[i].sum_blk = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
1518 if (!array[i].sum_blk)
1519 return -ENOMEM;
1520 array[i].segno = NULL_SEGNO;
1521 array[i].next_blkoff = 0;
1522 }
1523 return restore_curseg_summaries(sbi);
1524}
1525
1526static void build_sit_entries(struct f2fs_sb_info *sbi)
1527{
1528 struct sit_info *sit_i = SIT_I(sbi);
1529 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
1530 struct f2fs_summary_block *sum = curseg->sum_blk;
1531 unsigned int start;
1532
1533 for (start = 0; start < TOTAL_SEGS(sbi); start++) {
1534 struct seg_entry *se = &sit_i->sentries[start];
1535 struct f2fs_sit_block *sit_blk;
1536 struct f2fs_sit_entry sit;
1537 struct page *page;
1538 int i;
1539
1540 mutex_lock(&curseg->curseg_mutex);
1541 for (i = 0; i < sits_in_cursum(sum); i++) {
1542 if (le32_to_cpu(segno_in_journal(sum, i)) == start) {
1543 sit = sit_in_journal(sum, i);
1544 mutex_unlock(&curseg->curseg_mutex);
1545 goto got_it;
1546 }
1547 }
1548 mutex_unlock(&curseg->curseg_mutex);
1549 page = get_current_sit_page(sbi, start);
1550 sit_blk = (struct f2fs_sit_block *)page_address(page);
1551 sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
1552 f2fs_put_page(page, 1);
1553got_it:
1554 check_block_count(sbi, start, &sit);
1555 seg_info_from_raw_sit(se, &sit);
1556 if (sbi->segs_per_sec > 1) {
1557 struct sec_entry *e = get_sec_entry(sbi, start);
1558 e->valid_blocks += se->valid_blocks;
1559 }
1560 }
1561}
1562
1563static void init_free_segmap(struct f2fs_sb_info *sbi)
1564{
1565 unsigned int start;
1566 int type;
1567
1568 for (start = 0; start < TOTAL_SEGS(sbi); start++) {
1569 struct seg_entry *sentry = get_seg_entry(sbi, start);
1570 if (!sentry->valid_blocks)
1571 __set_free(sbi, start);
1572 }
1573
 1574 /* mark the current segments as in use */
1575 for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) {
1576 struct curseg_info *curseg_t = CURSEG_I(sbi, type);
1577 __set_test_and_inuse(sbi, curseg_t->segno);
1578 }
1579}
1580
1581static void init_dirty_segmap(struct f2fs_sb_info *sbi)
1582{
1583 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
1584 struct free_segmap_info *free_i = FREE_I(sbi);
1585 unsigned int segno = 0, offset = 0;
1586 unsigned short valid_blocks;
1587
1588 while (segno < TOTAL_SEGS(sbi)) {
1589 /* find dirty segment based on free segmap */
1590 segno = find_next_inuse(free_i, TOTAL_SEGS(sbi), offset);
1591 if (segno >= TOTAL_SEGS(sbi))
1592 break;
1593 offset = segno + 1;
1594 valid_blocks = get_valid_blocks(sbi, segno, 0);
1595 if (valid_blocks >= sbi->blocks_per_seg || !valid_blocks)
1596 continue;
1597 mutex_lock(&dirty_i->seglist_lock);
1598 __locate_dirty_segment(sbi, segno, DIRTY);
1599 mutex_unlock(&dirty_i->seglist_lock);
1600 }
1601}
1602
Jaegeuk Kim5ec4e492013-03-31 13:26:03 +09001603static int init_victim_secmap(struct f2fs_sb_info *sbi)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001604{
1605 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
Jaegeuk Kim5ec4e492013-03-31 13:26:03 +09001606 unsigned int bitmap_size = f2fs_bitmap_size(TOTAL_SECS(sbi));
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001607
Jaegeuk Kim5ec4e492013-03-31 13:26:03 +09001608 dirty_i->victim_secmap = kzalloc(bitmap_size, GFP_KERNEL);
1609 if (!dirty_i->victim_secmap)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001610 return -ENOMEM;
1611 return 0;
1612}
1613
1614static int build_dirty_segmap(struct f2fs_sb_info *sbi)
1615{
1616 struct dirty_seglist_info *dirty_i;
1617 unsigned int bitmap_size, i;
1618
1619 /* allocate memory for dirty segments list information */
1620 dirty_i = kzalloc(sizeof(struct dirty_seglist_info), GFP_KERNEL);
1621 if (!dirty_i)
1622 return -ENOMEM;
1623
1624 SM_I(sbi)->dirty_info = dirty_i;
1625 mutex_init(&dirty_i->seglist_lock);
1626
1627 bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));
1628
1629 for (i = 0; i < NR_DIRTY_TYPE; i++) {
1630 dirty_i->dirty_segmap[i] = kzalloc(bitmap_size, GFP_KERNEL);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001631 if (!dirty_i->dirty_segmap[i])
1632 return -ENOMEM;
1633 }
1634
1635 init_dirty_segmap(sbi);
Jaegeuk Kim5ec4e492013-03-31 13:26:03 +09001636 return init_victim_secmap(sbi);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001637}
1638
Jaegeuk Kim0a8165d2012-11-29 13:28:09 +09001639/*
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001640 * Update min, max modified time for cost-benefit GC algorithm
1641 */
1642static void init_min_max_mtime(struct f2fs_sb_info *sbi)
1643{
1644 struct sit_info *sit_i = SIT_I(sbi);
1645 unsigned int segno;
1646
1647 mutex_lock(&sit_i->sentry_lock);
1648
1649 sit_i->min_mtime = LLONG_MAX;
1650
1651 for (segno = 0; segno < TOTAL_SEGS(sbi); segno += sbi->segs_per_sec) {
1652 unsigned int i;
1653 unsigned long long mtime = 0;
1654
1655 for (i = 0; i < sbi->segs_per_sec; i++)
1656 mtime += get_seg_entry(sbi, segno + i)->mtime;
1657
1658 mtime = div_u64(mtime, sbi->segs_per_sec);
1659
1660 if (sit_i->min_mtime > mtime)
1661 sit_i->min_mtime = mtime;
1662 }
1663 sit_i->max_mtime = get_mtime(sbi);
1664 mutex_unlock(&sit_i->sentry_lock);
1665}
1666
1667int build_segment_manager(struct f2fs_sb_info *sbi)
1668{
1669 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
1670 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
Namjae Jeon1042d602012-12-01 10:56:13 +09001671 struct f2fs_sm_info *sm_info;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001672 int err;
1673
1674 sm_info = kzalloc(sizeof(struct f2fs_sm_info), GFP_KERNEL);
1675 if (!sm_info)
1676 return -ENOMEM;
1677
1678 /* init sm info */
1679 sbi->sm_info = sm_info;
1680 INIT_LIST_HEAD(&sm_info->wblist_head);
1681 spin_lock_init(&sm_info->wblist_lock);
1682 sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
1683 sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
1684 sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
1685 sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
1686 sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
1687 sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
1688 sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
1689
1690 err = build_sit_info(sbi);
1691 if (err)
1692 return err;
1693 err = build_free_segmap(sbi);
1694 if (err)
1695 return err;
1696 err = build_curseg(sbi);
1697 if (err)
1698 return err;
1699
1700 /* reinit free segmap based on SIT */
1701 build_sit_entries(sbi);
1702
1703 init_free_segmap(sbi);
1704 err = build_dirty_segmap(sbi);
1705 if (err)
1706 return err;
1707
1708 init_min_max_mtime(sbi);
1709 return 0;
1710}
1711
1712static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
1713 enum dirty_type dirty_type)
1714{
1715 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
1716
1717 mutex_lock(&dirty_i->seglist_lock);
1718 kfree(dirty_i->dirty_segmap[dirty_type]);
1719 dirty_i->nr_dirty[dirty_type] = 0;
1720 mutex_unlock(&dirty_i->seglist_lock);
1721}
1722
Jaegeuk Kim5ec4e492013-03-31 13:26:03 +09001723static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001724{
1725 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
Jaegeuk Kim5ec4e492013-03-31 13:26:03 +09001726 kfree(dirty_i->victim_secmap);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001727}
1728
1729static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
1730{
1731 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
1732 int i;
1733
1734 if (!dirty_i)
1735 return;
1736
1737 /* discard pre-free/dirty segments list */
1738 for (i = 0; i < NR_DIRTY_TYPE; i++)
1739 discard_dirty_segmap(sbi, i);
1740
Jaegeuk Kim5ec4e492013-03-31 13:26:03 +09001741 destroy_victim_secmap(sbi);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001742 SM_I(sbi)->dirty_info = NULL;
1743 kfree(dirty_i);
1744}
1745
1746static void destroy_curseg(struct f2fs_sb_info *sbi)
1747{
1748 struct curseg_info *array = SM_I(sbi)->curseg_array;
1749 int i;
1750
1751 if (!array)
1752 return;
1753 SM_I(sbi)->curseg_array = NULL;
1754 for (i = 0; i < NR_CURSEG_TYPE; i++)
1755 kfree(array[i].sum_blk);
1756 kfree(array);
1757}
1758
1759static void destroy_free_segmap(struct f2fs_sb_info *sbi)
1760{
1761 struct free_segmap_info *free_i = SM_I(sbi)->free_info;
1762 if (!free_i)
1763 return;
1764 SM_I(sbi)->free_info = NULL;
1765 kfree(free_i->free_segmap);
1766 kfree(free_i->free_secmap);
1767 kfree(free_i);
1768}
1769
1770static void destroy_sit_info(struct f2fs_sb_info *sbi)
1771{
1772 struct sit_info *sit_i = SIT_I(sbi);
1773 unsigned int start;
1774
1775 if (!sit_i)
1776 return;
1777
1778 if (sit_i->sentries) {
1779 for (start = 0; start < TOTAL_SEGS(sbi); start++) {
1780 kfree(sit_i->sentries[start].cur_valid_map);
1781 kfree(sit_i->sentries[start].ckpt_valid_map);
1782 }
1783 }
1784 vfree(sit_i->sentries);
1785 vfree(sit_i->sec_entries);
1786 kfree(sit_i->dirty_sentries_bitmap);
1787
1788 SM_I(sbi)->sit_info = NULL;
1789 kfree(sit_i->sit_bitmap);
1790 kfree(sit_i);
1791}
1792
1793void destroy_segment_manager(struct f2fs_sb_info *sbi)
1794{
1795 struct f2fs_sm_info *sm_info = SM_I(sbi);
1796 destroy_dirty_segmap(sbi);
1797 destroy_curseg(sbi);
1798 destroy_free_segmap(sbi);
1799 destroy_sit_info(sbi);
1800 sbi->sm_info = NULL;
1801 kfree(sm_info);
1802}