/*
 * fs/f2fs/segment.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/prefetch.h>
#include <linux/vmalloc.h>

#include "f2fs.h"
#include "segment.h"
#include "node.h"
#include <trace/events/f2fs.h>

#define __reverse_ffz(x) __reverse_ffs(~(x))

/*
 * __reverse_ffs is copied from include/asm-generic/bitops/__ffs.h since
 * MSB and LSB are reversed in a byte by f2fs_set_bit.
 */
static inline unsigned long __reverse_ffs(unsigned long word)
{
	int num = 0;

#if BITS_PER_LONG == 64
	if ((word & 0xffffffff) == 0) {
		num += 32;
		word >>= 32;
	}
#endif
	if ((word & 0xffff) == 0) {
		num += 16;
		word >>= 16;
	}
	if ((word & 0xff) == 0) {
		num += 8;
		word >>= 8;
	}
	if ((word & 0xf0) == 0)
		num += 4;
	else
		word >>= 4;
	if ((word & 0xc) == 0)
		num += 2;
	else
		word >>= 2;
	if ((word & 0x2) == 0)
		num += 1;
	return num;
}
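
/*
 * Worked example (editorial, not from the original source): within each
 * byte the search above is mirrored (0xf0/0xc/0x2 instead of 0x0f/0x3/0x1)
 * because f2fs stores bit 0 of a byte at the MSB position.  Thus
 * __reverse_ffs(0x80) returns 0 and __reverse_ffs(0x10) returns 3,
 * matching f2fs_set_bit()'s bit numbering.
 */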

/*
 * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because
 * f2fs_set_bit makes MSB and LSB reversed in a byte.
 * Example:
 *                             LSB <--> MSB
 *   f2fs_set_bit(0, bitmap) => 0000 0001
 *   f2fs_set_bit(7, bitmap) => 1000 0000
 */
static unsigned long __find_rev_next_bit(const unsigned long *addr,
			unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + BIT_WORD(offset);
	unsigned long result = offset & ~(BITS_PER_LONG - 1);
	unsigned long tmp;
	unsigned long mask, submask;
	unsigned long quot, rest;

	if (offset >= size)
		return size;

	size -= result;
	offset %= BITS_PER_LONG;
	if (!offset)
		goto aligned;

	tmp = *(p++);
	quot = (offset >> 3) << 3;
	rest = offset & 0x7;
	mask = ~0UL << quot;
	submask = (unsigned char)(0xff << rest) >> rest;
	submask <<= quot;
	mask &= submask;
	tmp &= mask;
	if (size < BITS_PER_LONG)
		goto found_first;
	if (tmp)
		goto found_middle;

	size -= BITS_PER_LONG;
	result += BITS_PER_LONG;
aligned:
	while (size & ~(BITS_PER_LONG - 1)) {
		tmp = *(p++);
		if (tmp)
			goto found_middle;
		result += BITS_PER_LONG;
		size -= BITS_PER_LONG;
	}
	if (!size)
		return result;
	tmp = *p;
found_first:
	tmp &= (~0UL >> (BITS_PER_LONG - size));
	if (tmp == 0UL)		/* Are any bits set? */
		return result + size;	/* Nope. */
found_middle:
	return result + __reverse_ffs(tmp);
}
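
/*
 * Editorial note: for an unaligned offset, "quot" selects whole bytes to
 * skip and "rest" masks off already-scanned bits inside the first byte.
 * Sketch (not in the original): offset = 3 gives quot = 0, rest = 3, so
 * submask = (unsigned char)(0xff << 3) >> 3 = 0x1f, which drops the three
 * MSB-side bits, i.e. f2fs bits 0..2 of that byte.
 */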

static unsigned long __find_rev_next_zero_bit(const unsigned long *addr,
			unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + BIT_WORD(offset);
	unsigned long result = offset & ~(BITS_PER_LONG - 1);
	unsigned long tmp;
	unsigned long mask, submask;
	unsigned long quot, rest;

	if (offset >= size)
		return size;

	size -= result;
	offset %= BITS_PER_LONG;
	if (!offset)
		goto aligned;

	tmp = *(p++);
	quot = (offset >> 3) << 3;
	rest = offset & 0x7;
	mask = ~(~0UL << quot);
	submask = (unsigned char)~((unsigned char)(0xff << rest) >> rest);
	submask <<= quot;
	mask += submask;
	tmp |= mask;
	if (size < BITS_PER_LONG)
		goto found_first;
	if (~tmp)
		goto found_middle;

	size -= BITS_PER_LONG;
	result += BITS_PER_LONG;
aligned:
	while (size & ~(BITS_PER_LONG - 1)) {
		tmp = *(p++);
		if (~tmp)
			goto found_middle;
		result += BITS_PER_LONG;
		size -= BITS_PER_LONG;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp |= ~0UL << size;
	if (tmp == ~0UL)	/* Are any bits zero? */
		return result + size;	/* Nope. */
found_middle:
	return result + __reverse_ffz(tmp);
}
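
/*
 * Editorial note: this is the dual of __find_rev_next_bit().  Bits that
 * must be ignored are forced to 1 (tmp |= mask) instead of masked to 0,
 * and the scan uses __reverse_ffz(), i.e. __reverse_ffs() on the
 * complement.
 */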

/*
 * This function balances dirty node and dentry pages.
 * In addition, it controls garbage collection.
 */
void f2fs_balance_fs(struct f2fs_sb_info *sbi)
{
	/*
	 * We should do GC or end up with checkpoint, if there are too many
	 * dirty dir/node pages without enough free segments.
	 */
	if (has_not_enough_free_secs(sbi, 0)) {
		mutex_lock(&sbi->gc_mutex);
		f2fs_gc(sbi);
	}
}
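
/*
 * Editorial note: gc_mutex is taken here but not released in this
 * function; in this version of the code f2fs_gc() is expected to drop
 * the mutex on its own exit paths (an assumption from the lock pairing,
 * not stated in this file).
 */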

void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
{
	/* check the # of cached NAT entries and prefree segments */
	if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK) ||
				excess_prefree_segs(sbi))
		f2fs_sync_fs(sbi->sb, true);
}

static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	/* need not be added */
	if (IS_CURSEG(sbi, segno))
		return;

	if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]++;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = sentry->type;

		if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t]))
			dirty_i->nr_dirty[t]++;
	}
}

static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]--;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = sentry->type;

		if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
			dirty_i->nr_dirty[t]--;

		if (get_valid_blocks(sbi, segno, sbi->segs_per_sec) == 0)
			clear_bit(GET_SECNO(sbi, segno),
						dirty_i->victim_secmap);
	}
}
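
/*
 * Editorial note: a DIRTY segment is tracked twice -- once in the generic
 * dirty_segmap[DIRTY] and once in the per-type map dirty_segmap[t] keyed
 * by the segment's temperature (hot/warm/cold data or node), so victim
 * selection can scan candidates of a single type.
 */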

/*
 * Errors such as -ENOMEM should not occur here.
 * Adding a dirty entry into the seglist is not a critical operation.
 * If a given segment is one of the current working segments, it won't be added.
 */
static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned short valid_blocks;

	if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
		return;

	mutex_lock(&dirty_i->seglist_lock);

	valid_blocks = get_valid_blocks(sbi, segno, 0);

	if (valid_blocks == 0) {
		__locate_dirty_segment(sbi, segno, PRE);
		__remove_dirty_segment(sbi, segno, DIRTY);
	} else if (valid_blocks < sbi->blocks_per_seg) {
		__locate_dirty_segment(sbi, segno, DIRTY);
	} else {
		/* Recovery routine with SSR needs this */
		__remove_dirty_segment(sbi, segno, DIRTY);
	}

	mutex_unlock(&dirty_i->seglist_lock);
}

/*
 * Should call clear_prefree_segments after checkpoint is done.
 */
static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int segno = -1;
	unsigned int total_segs = TOTAL_SEGS(sbi);

	mutex_lock(&dirty_i->seglist_lock);
	while (1) {
		segno = find_next_bit(dirty_i->dirty_segmap[PRE], total_segs,
				segno + 1);
		if (segno >= total_segs)
			break;
		__set_test_and_free(sbi, segno);
	}
	mutex_unlock(&dirty_i->seglist_lock);
}

void clear_prefree_segments(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
	unsigned int total_segs = TOTAL_SEGS(sbi);
	unsigned int start = 0, end = -1;

	mutex_lock(&dirty_i->seglist_lock);

	while (1) {
		int i;
		start = find_next_bit(prefree_map, total_segs, end + 1);
		if (start >= total_segs)
			break;
		end = find_next_zero_bit(prefree_map, total_segs, start + 1);

		for (i = start; i < end; i++)
			clear_bit(i, prefree_map);

		dirty_i->nr_dirty[PRE] -= end - start;

		if (!test_opt(sbi, DISCARD))
			continue;

		blkdev_issue_discard(sbi->sb->s_bdev,
				START_BLOCK(sbi, start) <<
				sbi->log_sectors_per_block,
				(1 << (sbi->log_sectors_per_block +
				sbi->log_blocks_per_seg)) * (end - start),
				GFP_NOFS, 0);
	}
	mutex_unlock(&dirty_i->seglist_lock);
}
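
/*
 * Example arithmetic (editorial, assuming 4KB blocks on 512B sectors):
 * log_sectors_per_block = 3 and log_blocks_per_seg = 9, so each segment
 * spans 1 << (3 + 9) = 4096 sectors (2MB), and a run of (end - start)
 * prefree segments is discarded as a single contiguous request.
 */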

static void __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap))
		sit_i->dirty_sentries++;
}

static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type,
					unsigned int segno, int modified)
{
	struct seg_entry *se = get_seg_entry(sbi, segno);
	se->type = type;
	if (modified)
		__mark_sit_entry_dirty(sbi, segno);
}

static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
{
	struct seg_entry *se;
	unsigned int segno, offset;
	long int new_vblocks;

	segno = GET_SEGNO(sbi, blkaddr);

	se = get_seg_entry(sbi, segno);
	new_vblocks = se->valid_blocks + del;
	offset = GET_SEGOFF_FROM_SEG0(sbi, blkaddr) & (sbi->blocks_per_seg - 1);

	f2fs_bug_on((new_vblocks >> (sizeof(unsigned short) << 3) ||
				(new_vblocks > sbi->blocks_per_seg)));

	se->valid_blocks = new_vblocks;
	se->mtime = get_mtime(sbi);
	SIT_I(sbi)->max_mtime = se->mtime;

	/* Update valid block bitmap */
	if (del > 0) {
		if (f2fs_set_bit(offset, se->cur_valid_map))
			BUG();
	} else {
		if (!f2fs_clear_bit(offset, se->cur_valid_map))
			BUG();
	}
	if (!f2fs_test_bit(offset, se->ckpt_valid_map))
		se->ckpt_valid_blocks += del;

	__mark_sit_entry_dirty(sbi, segno);

	/* update total number of valid blocks to be written in ckpt area */
	SIT_I(sbi)->written_valid_blocks += del;

	if (sbi->segs_per_sec > 1)
		get_sec_entry(sbi, segno)->valid_blocks += del;
}
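
/*
 * Editorial note: del is +1 when the block at blkaddr becomes valid and
 * -1 when it is invalidated; ckpt_valid_blocks only changes for blocks
 * whose bit is clear in ckpt_valid_map, i.e. blocks that were not valid
 * as of the last checkpoint.
 */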

static void refresh_sit_entry(struct f2fs_sb_info *sbi,
			block_t old_blkaddr, block_t new_blkaddr)
{
	update_sit_entry(sbi, new_blkaddr, 1);
	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
		update_sit_entry(sbi, old_blkaddr, -1);
}

void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
{
	unsigned int segno = GET_SEGNO(sbi, addr);
	struct sit_info *sit_i = SIT_I(sbi);

	f2fs_bug_on(addr == NULL_ADDR);
	if (addr == NEW_ADDR)
		return;

	/* add it into sit main buffer */
	mutex_lock(&sit_i->sentry_lock);

	update_sit_entry(sbi, addr, -1);

	/* add it into dirty seglist */
	locate_dirty_segment(sbi, segno);

	mutex_unlock(&sit_i->sentry_lock);
}

/*
 * This function must be called with the curseg_mutex held.
 */
static void __add_sum_entry(struct f2fs_sb_info *sbi, int type,
					struct f2fs_summary *sum)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	void *addr = curseg->sum_blk;
	addr += curseg->next_blkoff * sizeof(struct f2fs_summary);
	memcpy(addr, sum, sizeof(struct f2fs_summary));
}

/*
 * Calculate the number of current summary pages for writing
 */
int npages_for_summary_flush(struct f2fs_sb_info *sbi)
{
	int valid_sum_count = 0;
	int i, sum_in_page;

	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		if (sbi->ckpt->alloc_type[i] == SSR)
			valid_sum_count += sbi->blocks_per_seg;
		else
			valid_sum_count += curseg_blkoff(sbi, i);
	}

	sum_in_page = (PAGE_CACHE_SIZE - 2 * SUM_JOURNAL_SIZE -
			SUM_FOOTER_SIZE) / SUMMARY_SIZE;
	if (valid_sum_count <= sum_in_page)
		return 1;
	else if ((valid_sum_count - sum_in_page) <=
		(PAGE_CACHE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
		return 2;
	return 3;
}
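
/*
 * Editorial sketch: the first compacted-summary page also carries the NAT
 * and SIT journals, so it holds fewer data summaries than later pages,
 * which only reserve SUM_FOOTER_SIZE.  Hence the three cases: everything
 * fits in one page, the overflow fits in a second page, or at most three
 * pages are needed (there are three data logs in total).
 */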

/*
 * Caller should put this summary page
 */
struct page *get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
{
	return get_meta_page(sbi, GET_SUM_BLOCK(sbi, segno));
}

static void write_sum_page(struct f2fs_sb_info *sbi,
			struct f2fs_summary_block *sum_blk, block_t blk_addr)
{
	struct page *page = grab_meta_page(sbi, blk_addr);
	void *kaddr = page_address(page);
	memcpy(kaddr, sum_blk, PAGE_CACHE_SIZE);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
}

static int is_next_segment_free(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int segno = curseg->segno + 1;
	struct free_segmap_info *free_i = FREE_I(sbi);

	if (segno < TOTAL_SEGS(sbi) && segno % sbi->segs_per_sec)
		return !test_bit(segno, free_i->free_segmap);
	return 0;
}

/*
 * Find a new segment from the free segments bitmap in the requested
 * direction.  This function must succeed; otherwise it is a BUG.
 */
static void get_new_segment(struct f2fs_sb_info *sbi,
			unsigned int *newseg, bool new_sec, int dir)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int segno, secno, zoneno;
	unsigned int total_zones = TOTAL_SECS(sbi) / sbi->secs_per_zone;
	unsigned int hint = *newseg / sbi->segs_per_sec;
	unsigned int old_zoneno = GET_ZONENO_FROM_SEGNO(sbi, *newseg);
	unsigned int left_start = hint;
	bool init = true;
	int go_left = 0;
	int i;

	write_lock(&free_i->segmap_lock);

	if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
		segno = find_next_zero_bit(free_i->free_segmap,
					TOTAL_SEGS(sbi), *newseg + 1);
		if (segno - *newseg < sbi->segs_per_sec -
					(*newseg % sbi->segs_per_sec))
			goto got_it;
	}
find_other_zone:
	secno = find_next_zero_bit(free_i->free_secmap, TOTAL_SECS(sbi), hint);
	if (secno >= TOTAL_SECS(sbi)) {
		if (dir == ALLOC_RIGHT) {
			secno = find_next_zero_bit(free_i->free_secmap,
							TOTAL_SECS(sbi), 0);
			f2fs_bug_on(secno >= TOTAL_SECS(sbi));
		} else {
			go_left = 1;
			left_start = hint - 1;
		}
	}
	if (go_left == 0)
		goto skip_left;

	while (test_bit(left_start, free_i->free_secmap)) {
		if (left_start > 0) {
			left_start--;
			continue;
		}
		left_start = find_next_zero_bit(free_i->free_secmap,
							TOTAL_SECS(sbi), 0);
		f2fs_bug_on(left_start >= TOTAL_SECS(sbi));
		break;
	}
	secno = left_start;
skip_left:
	hint = secno;
	segno = secno * sbi->segs_per_sec;
	zoneno = secno / sbi->secs_per_zone;

	/* give up on finding another zone */
	if (!init)
		goto got_it;
	if (sbi->secs_per_zone == 1)
		goto got_it;
	if (zoneno == old_zoneno)
		goto got_it;
	if (dir == ALLOC_LEFT) {
		if (!go_left && zoneno + 1 >= total_zones)
			goto got_it;
		if (go_left && zoneno == 0)
			goto got_it;
	}
	for (i = 0; i < NR_CURSEG_TYPE; i++)
		if (CURSEG_I(sbi, i)->zone == zoneno)
			break;

	if (i < NR_CURSEG_TYPE) {
		/* zone is in use, try another */
		if (go_left)
			hint = zoneno * sbi->secs_per_zone - 1;
		else if (zoneno + 1 >= total_zones)
			hint = 0;
		else
			hint = (zoneno + 1) * sbi->secs_per_zone;
		init = false;
		goto find_other_zone;
	}
got_it:
	/* set it as dirty segment in free segmap */
	f2fs_bug_on(test_bit(segno, free_i->free_segmap));
	__set_inuse(sbi, segno);
	*newseg = segno;
	write_unlock(&free_i->segmap_lock);
}
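
/*
 * Editorial summary of the policy above: first try the next free segment
 * inside the current section; failing that, scan for a whole free section
 * (rightward, or leftward for ALLOC_LEFT); finally, make one attempt to
 * land in a zone that no current log is using, to spread the active logs
 * across zones, before settling for whatever section was found.
 */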

static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	struct summary_footer *sum_footer;

	curseg->segno = curseg->next_segno;
	curseg->zone = GET_ZONENO_FROM_SEGNO(sbi, curseg->segno);
	curseg->next_blkoff = 0;
	curseg->next_segno = NULL_SEGNO;

	sum_footer = &(curseg->sum_blk->footer);
	memset(sum_footer, 0, sizeof(struct summary_footer));
	if (IS_DATASEG(type))
		SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
	if (IS_NODESEG(type))
		SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
	__set_sit_entry_type(sbi, type, curseg->segno, modified);
}

/*
 * Allocate a current working segment.
 * This function always allocates a free segment in LFS manner.
 */
static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int segno = curseg->segno;
	int dir = ALLOC_LEFT;

	write_sum_page(sbi, curseg->sum_blk,
				GET_SUM_BLOCK(sbi, segno));
	if (type == CURSEG_WARM_DATA || type == CURSEG_COLD_DATA)
		dir = ALLOC_RIGHT;

	if (test_opt(sbi, NOHEAP))
		dir = ALLOC_RIGHT;

	get_new_segment(sbi, &segno, new_sec, dir);
	curseg->next_segno = segno;
	reset_curseg(sbi, type, 1);
	curseg->alloc_type = LFS;
}

static void __next_free_blkoff(struct f2fs_sb_info *sbi,
			struct curseg_info *seg, block_t start)
{
	struct seg_entry *se = get_seg_entry(sbi, seg->segno);
	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
	unsigned long target_map[entries];
	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
	int i, pos;

	for (i = 0; i < entries; i++)
		target_map[i] = ckpt_map[i] | cur_map[i];

	pos = __find_rev_next_zero_bit(target_map, sbi->blocks_per_seg, start);

	seg->next_blkoff = pos;
}

/*
 * If a segment is written in LFS manner, the next block offset is simply
 * obtained by increasing the current block offset. However, if a segment
 * is written in SSR manner, the next block offset is obtained by calling
 * __next_free_blkoff.
 */
static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
				struct curseg_info *seg)
{
	if (seg->alloc_type == SSR)
		__next_free_blkoff(sbi, seg, seg->next_blkoff + 1);
	else
		seg->next_blkoff++;
}

/*
 * This function always allocates a used segment (from the dirty seglist)
 * in SSR manner, so it must recover the existing segment information of
 * valid blocks.
 */
static void change_curseg(struct f2fs_sb_info *sbi, int type, bool reuse)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int new_segno = curseg->next_segno;
	struct f2fs_summary_block *sum_node;
	struct page *sum_page;

	write_sum_page(sbi, curseg->sum_blk,
				GET_SUM_BLOCK(sbi, curseg->segno));
	__set_test_and_inuse(sbi, new_segno);

	mutex_lock(&dirty_i->seglist_lock);
	__remove_dirty_segment(sbi, new_segno, PRE);
	__remove_dirty_segment(sbi, new_segno, DIRTY);
	mutex_unlock(&dirty_i->seglist_lock);

	reset_curseg(sbi, type, 1);
	curseg->alloc_type = SSR;
	__next_free_blkoff(sbi, curseg, 0);

	if (reuse) {
		sum_page = get_sum_page(sbi, new_segno);
		sum_node = (struct f2fs_summary_block *)page_address(sum_page);
		memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
		f2fs_put_page(sum_page, 1);
	}
}
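
/*
 * Editorial note: with reuse set, the on-disk summary block of the
 * reclaimed segment is copied back into curseg so that the summaries of
 * still-valid blocks are preserved while SSR fills the holes between
 * them.
 */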

static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;

	if (IS_NODESEG(type) || !has_not_enough_free_secs(sbi, 0))
		return v_ops->get_victim(sbi,
				&(curseg)->next_segno, BG_GC, type, SSR);

	/* For data segments, let's do SSR more intensively */
	for (; type >= CURSEG_HOT_DATA; type--)
		if (v_ops->get_victim(sbi, &(curseg)->next_segno,
						BG_GC, type, SSR))
			return 1;
	return 0;
}
687
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900688/*
689 * flush out current segment and replace it with new segment
690 * This function should be returned with success, otherwise BUG
691 */
692static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
693 int type, bool force)
694{
695 struct curseg_info *curseg = CURSEG_I(sbi, type);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900696
Gu Zheng7b405272013-08-19 09:41:15 +0800697 if (force)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900698 new_curseg(sbi, type, true);
Gu Zheng7b405272013-08-19 09:41:15 +0800699 else if (type == CURSEG_WARM_NODE)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900700 new_curseg(sbi, type, false);
Jaegeuk Kim60374682013-03-31 13:58:51 +0900701 else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type))
702 new_curseg(sbi, type, false);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900703 else if (need_SSR(sbi) && get_ssr_segment(sbi, type))
704 change_curseg(sbi, type, true);
705 else
706 new_curseg(sbi, type, false);
Jaegeuk Kimdcdfff62013-10-22 20:56:10 +0900707
708 stat_inc_seg_type(sbi, curseg);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900709}

void allocate_new_segments(struct f2fs_sb_info *sbi)
{
	struct curseg_info *curseg;
	unsigned int old_curseg;
	int i;

	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		curseg = CURSEG_I(sbi, i);
		old_curseg = curseg->segno;
		SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true);
		locate_dirty_segment(sbi, old_curseg);
	}
}

static const struct segment_allocation default_salloc_ops = {
	.allocate_segment = allocate_segment_by_default,
};

static void f2fs_end_io_write(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct bio_private *p = bio->bi_private;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);
		if (!uptodate) {
			SetPageError(page);
			if (page->mapping)
				set_bit(AS_EIO, &page->mapping->flags);
			set_ckpt_flags(p->sbi->ckpt, CP_ERROR_FLAG);
			p->sbi->sb->s_flags |= MS_RDONLY;
		}
		end_page_writeback(page);
		dec_page_count(p->sbi, F2FS_WRITEBACK);
	} while (bvec >= bio->bi_io_vec);

	if (p->is_sync)
		complete(p->wait);

	if (!get_pages(p->sbi, F2FS_WRITEBACK) &&
			!list_empty(&p->sbi->cp_wait.task_list))
		wake_up(&p->sbi->cp_wait);

	kfree(p);
	bio_put(bio);
}
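
/*
 * Editorial note: the bvec loop above walks the bio's pages back to
 * front, prefetching the next page's flags word; a write error marks the
 * checkpoint with CP_ERROR_FLAG and flips the superblock read-only
 * (MS_RDONLY) rather than retrying the I/O.
 */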

struct bio *f2fs_bio_alloc(struct block_device *bdev, int npages)
{
	struct bio *bio;

	/* No failure on bio allocation */
	bio = bio_alloc(GFP_NOIO, npages);
	bio->bi_bdev = bdev;
	bio->bi_private = NULL;

	return bio;
}

static void do_submit_bio(struct f2fs_sb_info *sbi,
				enum page_type type, bool sync)
{
	int rw = sync ? WRITE_SYNC : WRITE;
	enum page_type btype = type > META ? META : type;

	if (type >= META_FLUSH)
		rw = WRITE_FLUSH_FUA;

	if (btype == META)
		rw |= REQ_META;

	if (sbi->bio[btype]) {
		struct bio_private *p = sbi->bio[btype]->bi_private;
		p->sbi = sbi;
		sbi->bio[btype]->bi_end_io = f2fs_end_io_write;

		trace_f2fs_do_submit_bio(sbi->sb, btype, sync, sbi->bio[btype]);

		if (type == META_FLUSH) {
			DECLARE_COMPLETION_ONSTACK(wait);
			p->is_sync = true;
			p->wait = &wait;
			submit_bio(rw, sbi->bio[btype]);
			wait_for_completion(&wait);
		} else {
			p->is_sync = false;
			submit_bio(rw, sbi->bio[btype]);
		}
		sbi->bio[btype] = NULL;
	}
}

void f2fs_submit_bio(struct f2fs_sb_info *sbi, enum page_type type, bool sync)
{
	down_write(&sbi->bio_sem);
	do_submit_bio(sbi, type, sync);
	up_write(&sbi->bio_sem);
}

static void submit_write_page(struct f2fs_sb_info *sbi, struct page *page,
				block_t blk_addr, enum page_type type)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	int bio_blocks;

	verify_block_addr(sbi, blk_addr);

	down_write(&sbi->bio_sem);

	inc_page_count(sbi, F2FS_WRITEBACK);

	if (sbi->bio[type] && sbi->last_block_in_bio[type] != blk_addr - 1)
		do_submit_bio(sbi, type, false);
alloc_new:
	if (sbi->bio[type] == NULL) {
		struct bio_private *priv;
retry:
		priv = kmalloc(sizeof(struct bio_private), GFP_NOFS);
		if (!priv) {
			cond_resched();
			goto retry;
		}

		bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
		sbi->bio[type] = f2fs_bio_alloc(bdev, bio_blocks);
		sbi->bio[type]->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
		sbi->bio[type]->bi_private = priv;
		/*
		 * The end_io will be assigned at the submission phase.
		 * Until then, let bio_add_page() merge consecutive IOs as much
		 * as possible.
		 */
	}

	if (bio_add_page(sbi->bio[type], page, PAGE_CACHE_SIZE, 0) <
							PAGE_CACHE_SIZE) {
		do_submit_bio(sbi, type, false);
		goto alloc_new;
	}

	sbi->last_block_in_bio[type] = blk_addr;

	up_write(&sbi->bio_sem);
	trace_f2fs_submit_write_page(page, blk_addr, type);
}
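
/*
 * Editorial note: pages are batched per page type into one open bio as
 * long as their block addresses stay physically consecutive
 * (last_block_in_bio + 1); any gap, or a full bio, forces submission and
 * a fresh bio, which keeps the write path largely sequential on disk.
 */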
860
Jin Xua5694692013-08-05 20:02:04 +0800861void f2fs_wait_on_page_writeback(struct page *page,
862 enum page_type type, bool sync)
863{
864 struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
865 if (PageWriteback(page)) {
866 f2fs_submit_bio(sbi, type, sync);
867 wait_on_page_writeback(page);
868 }
869}
870
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900871static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type)
872{
873 struct curseg_info *curseg = CURSEG_I(sbi, type);
874 if (curseg->next_blkoff < sbi->blocks_per_seg)
875 return true;
876 return false;
877}
878
879static int __get_segment_type_2(struct page *page, enum page_type p_type)
880{
881 if (p_type == DATA)
882 return CURSEG_HOT_DATA;
883 else
884 return CURSEG_HOT_NODE;
885}
886
887static int __get_segment_type_4(struct page *page, enum page_type p_type)
888{
889 if (p_type == DATA) {
890 struct inode *inode = page->mapping->host;
891
892 if (S_ISDIR(inode->i_mode))
893 return CURSEG_HOT_DATA;
894 else
895 return CURSEG_COLD_DATA;
896 } else {
897 if (IS_DNODE(page) && !is_cold_node(page))
898 return CURSEG_HOT_NODE;
899 else
900 return CURSEG_COLD_NODE;
901 }
902}
903
904static int __get_segment_type_6(struct page *page, enum page_type p_type)
905{
906 if (p_type == DATA) {
907 struct inode *inode = page->mapping->host;
908
909 if (S_ISDIR(inode->i_mode))
910 return CURSEG_HOT_DATA;
Jaegeuk Kim354a3392013-06-14 08:52:35 +0900911 else if (is_cold_data(page) || file_is_cold(inode))
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900912 return CURSEG_COLD_DATA;
913 else
914 return CURSEG_WARM_DATA;
915 } else {
916 if (IS_DNODE(page))
917 return is_cold_node(page) ? CURSEG_WARM_NODE :
918 CURSEG_HOT_NODE;
919 else
920 return CURSEG_COLD_NODE;
921 }
922}
923
924static int __get_segment_type(struct page *page, enum page_type p_type)
925{
926 struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
927 switch (sbi->active_logs) {
928 case 2:
929 return __get_segment_type_2(page, p_type);
930 case 4:
931 return __get_segment_type_4(page, p_type);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900932 }
Jaegeuk Kim12a67142012-12-21 11:47:05 +0900933 /* NR_CURSEG_TYPE(6) logs by default */
Jaegeuk Kim5d56b672013-10-29 15:14:54 +0900934 f2fs_bug_on(sbi->active_logs != NR_CURSEG_TYPE);
Jaegeuk Kim12a67142012-12-21 11:47:05 +0900935 return __get_segment_type_6(page, p_type);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900936}

static void do_write_page(struct f2fs_sb_info *sbi, struct page *page,
			block_t old_blkaddr, block_t *new_blkaddr,
			struct f2fs_summary *sum, enum page_type p_type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct curseg_info *curseg;
	unsigned int old_cursegno;
	int type;

	type = __get_segment_type(page, p_type);
	curseg = CURSEG_I(sbi, type);

	mutex_lock(&curseg->curseg_mutex);

	*new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
	old_cursegno = curseg->segno;

	/*
	 * __add_sum_entry should be called under the curseg_mutex
	 * because this function updates a summary entry in the
	 * current summary block.
	 */
	__add_sum_entry(sbi, type, sum);

	mutex_lock(&sit_i->sentry_lock);
	__refresh_next_blkoff(sbi, curseg);

	stat_inc_block_count(sbi, curseg);

	/*
	 * SIT information should be updated before segment allocation,
	 * since SSR needs latest valid block information.
	 */
	refresh_sit_entry(sbi, old_blkaddr, *new_blkaddr);

	if (!__has_curseg_space(sbi, type))
		sit_i->s_ops->allocate_segment(sbi, type, false);

	locate_dirty_segment(sbi, old_cursegno);
	locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
	mutex_unlock(&sit_i->sentry_lock);

	if (p_type == NODE)
		fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));

	/* writeout dirty page into bdev */
	submit_write_page(sbi, page, *new_blkaddr, p_type);

	mutex_unlock(&curseg->curseg_mutex);
}

void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)
{
	set_page_writeback(page);
	submit_write_page(sbi, page, page->index, META);
}

void write_node_page(struct f2fs_sb_info *sbi, struct page *page,
		unsigned int nid, block_t old_blkaddr, block_t *new_blkaddr)
{
	struct f2fs_summary sum;
	set_summary(&sum, nid, 0, 0);
	do_write_page(sbi, page, old_blkaddr, new_blkaddr, &sum, NODE);
}

void write_data_page(struct inode *inode, struct page *page,
		struct dnode_of_data *dn, block_t old_blkaddr,
		block_t *new_blkaddr)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct f2fs_summary sum;
	struct node_info ni;

	f2fs_bug_on(old_blkaddr == NULL_ADDR);
	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	do_write_page(sbi, page, old_blkaddr,
			new_blkaddr, &sum, DATA);
}

void rewrite_data_page(struct f2fs_sb_info *sbi, struct page *page,
					block_t old_blk_addr)
{
	submit_write_page(sbi, page, old_blk_addr, DATA);
}

void recover_data_page(struct f2fs_sb_info *sbi,
			struct page *page, struct f2fs_summary *sum,
			block_t old_blkaddr, block_t new_blkaddr)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct curseg_info *curseg;
	unsigned int segno, old_cursegno;
	struct seg_entry *se;
	int type;

	segno = GET_SEGNO(sbi, new_blkaddr);
	se = get_seg_entry(sbi, segno);
	type = se->type;

	if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
		if (old_blkaddr == NULL_ADDR)
			type = CURSEG_COLD_DATA;
		else
			type = CURSEG_WARM_DATA;
	}
	curseg = CURSEG_I(sbi, type);

	mutex_lock(&curseg->curseg_mutex);
	mutex_lock(&sit_i->sentry_lock);

	old_cursegno = curseg->segno;

	/* change the current segment */
	if (segno != curseg->segno) {
		curseg->next_segno = segno;
		change_curseg(sbi, type, true);
	}

	curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, new_blkaddr) &
					(sbi->blocks_per_seg - 1);
	__add_sum_entry(sbi, type, sum);

	refresh_sit_entry(sbi, old_blkaddr, new_blkaddr);

	locate_dirty_segment(sbi, old_cursegno);
	locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));

	mutex_unlock(&sit_i->sentry_lock);
	mutex_unlock(&curseg->curseg_mutex);
}

void rewrite_node_page(struct f2fs_sb_info *sbi,
			struct page *page, struct f2fs_summary *sum,
			block_t old_blkaddr, block_t new_blkaddr)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int type = CURSEG_WARM_NODE;
	struct curseg_info *curseg;
	unsigned int segno, old_cursegno;
	block_t next_blkaddr = next_blkaddr_of_node(page);
	unsigned int next_segno = GET_SEGNO(sbi, next_blkaddr);

	curseg = CURSEG_I(sbi, type);

	mutex_lock(&curseg->curseg_mutex);
	mutex_lock(&sit_i->sentry_lock);

	segno = GET_SEGNO(sbi, new_blkaddr);
	old_cursegno = curseg->segno;

	/* change the current segment */
	if (segno != curseg->segno) {
		curseg->next_segno = segno;
		change_curseg(sbi, type, true);
	}
	curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, new_blkaddr) &
					(sbi->blocks_per_seg - 1);
	__add_sum_entry(sbi, type, sum);

	/* change the current log to the next block addr in advance */
	if (next_segno != segno) {
		curseg->next_segno = next_segno;
		change_curseg(sbi, type, true);
	}
	curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, next_blkaddr) &
					(sbi->blocks_per_seg - 1);

	/* rewrite node page */
	set_page_writeback(page);
	submit_write_page(sbi, page, new_blkaddr, NODE);
	f2fs_submit_bio(sbi, NODE, true);
	refresh_sit_entry(sbi, old_blkaddr, new_blkaddr);

	locate_dirty_segment(sbi, old_cursegno);
	locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));

	mutex_unlock(&sit_i->sentry_lock);
	mutex_unlock(&curseg->curseg_mutex);
}

static int read_compacted_summaries(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct curseg_info *seg_i;
	unsigned char *kaddr;
	struct page *page;
	block_t start;
	int i, j, offset;

	start = start_sum_block(sbi);

	page = get_meta_page(sbi, start++);
	kaddr = (unsigned char *)page_address(page);

	/* Step 1: restore nat cache */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
	memcpy(&seg_i->sum_blk->n_nats, kaddr, SUM_JOURNAL_SIZE);

	/* Step 2: restore sit cache */
	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
	memcpy(&seg_i->sum_blk->n_sits, kaddr + SUM_JOURNAL_SIZE,
						SUM_JOURNAL_SIZE);
	offset = 2 * SUM_JOURNAL_SIZE;

	/* Step 3: restore summary entries */
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		unsigned short blk_off;
		unsigned int segno;

		seg_i = CURSEG_I(sbi, i);
		segno = le32_to_cpu(ckpt->cur_data_segno[i]);
		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
		seg_i->next_segno = segno;
		reset_curseg(sbi, i, 0);
		seg_i->alloc_type = ckpt->alloc_type[i];
		seg_i->next_blkoff = blk_off;

		if (seg_i->alloc_type == SSR)
			blk_off = sbi->blocks_per_seg;

		for (j = 0; j < blk_off; j++) {
			struct f2fs_summary *s;
			s = (struct f2fs_summary *)(kaddr + offset);
			seg_i->sum_blk->entries[j] = *s;
			offset += SUMMARY_SIZE;
			if (offset + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
						SUM_FOOTER_SIZE)
				continue;

			f2fs_put_page(page, 1);
			page = NULL;

			page = get_meta_page(sbi, start++);
			kaddr = (unsigned char *)page_address(page);
			offset = 0;
		}
	}
	f2fs_put_page(page, 1);
	return 0;
}

static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_summary_block *sum;
	struct curseg_info *curseg;
	struct page *new;
	unsigned short blk_off;
	unsigned int segno = 0;
	block_t blk_addr = 0;

	/* get segment number and block addr */
	if (IS_DATASEG(type)) {
		segno = le32_to_cpu(ckpt->cur_data_segno[type]);
		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type -
							CURSEG_HOT_DATA]);
		if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG))
			blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type);
		else
			blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
	} else {
		segno = le32_to_cpu(ckpt->cur_node_segno[type -
							CURSEG_HOT_NODE]);
		blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type -
							CURSEG_HOT_NODE]);
		if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG))
			blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
							type - CURSEG_HOT_NODE);
		else
			blk_addr = GET_SUM_BLOCK(sbi, segno);
	}

	new = get_meta_page(sbi, blk_addr);
	sum = (struct f2fs_summary_block *)page_address(new);

	if (IS_NODESEG(type)) {
		if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG)) {
			struct f2fs_summary *ns = &sum->entries[0];
			int i;
			for (i = 0; i < sbi->blocks_per_seg; i++, ns++) {
				ns->version = 0;
				ns->ofs_in_node = 0;
			}
		} else {
			if (restore_node_summary(sbi, segno, sum)) {
				f2fs_put_page(new, 1);
				return -EINVAL;
			}
		}
	}

	/* set uncompleted segment to curseg */
	curseg = CURSEG_I(sbi, type);
	mutex_lock(&curseg->curseg_mutex);
	memcpy(curseg->sum_blk, sum, PAGE_CACHE_SIZE);
	curseg->next_segno = segno;
	reset_curseg(sbi, type, 0);
	curseg->alloc_type = ckpt->alloc_type[type];
	curseg->next_blkoff = blk_off;
	mutex_unlock(&curseg->curseg_mutex);
	f2fs_put_page(new, 1);
	return 0;
}

static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
{
	int type = CURSEG_HOT_DATA;

	if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) {
		/* restore for compacted data summary */
		if (read_compacted_summaries(sbi))
			return -EINVAL;
		type = CURSEG_HOT_NODE;
	}

	for (; type <= CURSEG_COLD_NODE; type++)
		if (read_normal_summaries(sbi, type))
			return -EINVAL;
	return 0;
}

static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	struct page *page;
	unsigned char *kaddr;
	struct f2fs_summary *summary;
	struct curseg_info *seg_i;
	int written_size = 0;
	int i, j;

	page = grab_meta_page(sbi, blkaddr++);
	kaddr = (unsigned char *)page_address(page);

	/* Step 1: write nat cache */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
	memcpy(kaddr, &seg_i->sum_blk->n_nats, SUM_JOURNAL_SIZE);
	written_size += SUM_JOURNAL_SIZE;

	/* Step 2: write sit cache */
	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
	memcpy(kaddr + written_size, &seg_i->sum_blk->n_sits,
						SUM_JOURNAL_SIZE);
	written_size += SUM_JOURNAL_SIZE;

	/* Step 3: write summary entries */
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		unsigned short blkoff;
		seg_i = CURSEG_I(sbi, i);
		if (sbi->ckpt->alloc_type[i] == SSR)
			blkoff = sbi->blocks_per_seg;
		else
			blkoff = curseg_blkoff(sbi, i);

		for (j = 0; j < blkoff; j++) {
			if (!page) {
				page = grab_meta_page(sbi, blkaddr++);
				kaddr = (unsigned char *)page_address(page);
				written_size = 0;
			}
			summary = (struct f2fs_summary *)(kaddr + written_size);
			*summary = seg_i->sum_blk->entries[j];
			written_size += SUMMARY_SIZE;

			if (written_size + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
							SUM_FOOTER_SIZE)
				continue;

			set_page_dirty(page);
			f2fs_put_page(page, 1);
			page = NULL;
		}
	}
	if (page) {
		set_page_dirty(page);
		f2fs_put_page(page, 1);
	}
}

static void write_normal_summaries(struct f2fs_sb_info *sbi,
					block_t blkaddr, int type)
{
	int i, end;
	if (IS_DATASEG(type))
		end = type + NR_CURSEG_DATA_TYPE;
	else
		end = type + NR_CURSEG_NODE_TYPE;

	for (i = type; i < end; i++) {
		struct curseg_info *sum = CURSEG_I(sbi, i);
		mutex_lock(&sum->curseg_mutex);
		write_sum_page(sbi, sum->sum_blk, blkaddr + (i - type));
		mutex_unlock(&sum->curseg_mutex);
	}
}

void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
{
	if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG))
		write_compacted_summaries(sbi, start_blk);
	else
		write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
}

void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
{
	if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG))
		write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
}

int lookup_journal_in_cursum(struct f2fs_summary_block *sum, int type,
					unsigned int val, int alloc)
{
	int i;

	if (type == NAT_JOURNAL) {
		for (i = 0; i < nats_in_cursum(sum); i++) {
			if (le32_to_cpu(nid_in_journal(sum, i)) == val)
				return i;
		}
		if (alloc && nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES)
			return update_nats_in_cursum(sum, 1);
	} else if (type == SIT_JOURNAL) {
		for (i = 0; i < sits_in_cursum(sum); i++)
			if (le32_to_cpu(segno_in_journal(sum, i)) == val)
				return i;
		if (alloc && sits_in_cursum(sum) < SIT_JOURNAL_ENTRIES)
			return update_sits_in_cursum(sum, 1);
	}
	return -1;
}
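
/*
 * Editorial note on the return convention: a hit returns the index of the
 * matching journal entry; with alloc set and free space available, a new
 * slot is reserved via update_*_in_cursum() and its index returned; -1
 * means neither found nor allocated, so callers typically fall back to
 * the on-disk NAT/SIT block.
 */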

static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
					unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int offset = SIT_BLOCK_OFFSET(sit_i, segno);
	block_t blk_addr = sit_i->sit_base_addr + offset;

	check_seg_range(sbi, segno);

	/* calculate sit block address */
	if (f2fs_test_bit(offset, sit_i->sit_bitmap))
		blk_addr += sit_i->sit_blocks;

	return get_meta_page(sbi, blk_addr);
}

static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
					unsigned int start)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct page *src_page, *dst_page;
	pgoff_t src_off, dst_off;
	void *src_addr, *dst_addr;

	src_off = current_sit_addr(sbi, start);
	dst_off = next_sit_addr(sbi, src_off);

	/* get current sit block page without lock */
	src_page = get_meta_page(sbi, src_off);
	dst_page = grab_meta_page(sbi, dst_off);
	f2fs_bug_on(PageDirty(src_page));

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);

	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_sit(sit_i, start);

	return dst_page;
}

static bool flush_sits_in_journal(struct f2fs_sb_info *sbi)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i;

	/*
	 * If the journal area in the current summary is full of sit entries,
	 * all the sit entries will be flushed. Otherwise the sit entries
	 * cannot be replaced with newly hot sit entries.
	 */
	if (sits_in_cursum(sum) >= SIT_JOURNAL_ENTRIES) {
		for (i = sits_in_cursum(sum) - 1; i >= 0; i--) {
			unsigned int segno;
			segno = le32_to_cpu(segno_in_journal(sum, i));
			__mark_sit_entry_dirty(sbi, segno);
		}
		update_sits_in_cursum(sum, -sits_in_cursum(sum));
		return true;
	}
	return false;
}

/*
 * CP calls this function, which flushes SIT entries including sit_journal,
 * and moves prefree segs to free segs.
 */
void flush_sit_entries(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	unsigned long nsegs = TOTAL_SEGS(sbi);
	struct page *page = NULL;
	struct f2fs_sit_block *raw_sit = NULL;
	unsigned int start = 0, end = 0;
	unsigned int segno = -1;
	bool flushed;

	mutex_lock(&curseg->curseg_mutex);
	mutex_lock(&sit_i->sentry_lock);

	/*
	 * "flushed" indicates whether sit entries in journal are flushed
	 * to the SIT area or not.
	 */
	flushed = flush_sits_in_journal(sbi);

	while ((segno = find_next_bit(bitmap, nsegs, segno + 1)) < nsegs) {
		struct seg_entry *se = get_seg_entry(sbi, segno);
		int sit_offset, offset;

		sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);

		if (flushed)
			goto to_sit_page;

		offset = lookup_journal_in_cursum(sum, SIT_JOURNAL, segno, 1);
		if (offset >= 0) {
			segno_in_journal(sum, offset) = cpu_to_le32(segno);
			seg_info_to_raw_sit(se, &sit_in_journal(sum, offset));
			goto flush_done;
		}
to_sit_page:
		if (!page || (start > segno) || (segno > end)) {
			if (page) {
				f2fs_put_page(page, 1);
				page = NULL;
			}

			start = START_SEGNO(sit_i, segno);
			end = start + SIT_ENTRY_PER_BLOCK - 1;

			/* read sit block that will be updated */
			page = get_next_sit_page(sbi, start);
			raw_sit = page_address(page);
		}

		/* update entry in SIT block */
		seg_info_to_raw_sit(se, &raw_sit->entries[sit_offset]);
flush_done:
		__clear_bit(segno, bitmap);
		sit_i->dirty_sentries--;
	}
	mutex_unlock(&sit_i->sentry_lock);
	mutex_unlock(&curseg->curseg_mutex);

	/* writeout last modified SIT block */
	f2fs_put_page(page, 1);

	set_prefree_as_free_segments(sbi);
}
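
/*
 * Editorial note: each dirty entry is written either into the SIT journal
 * kept in the cold-data summary (cheap, piggybacks on the checkpoint) or,
 * once the journal is full or was just flushed, into the proper SIT block
 * of the ping-pong pair selected by get_next_sit_page().
 */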

static int build_sit_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct sit_info *sit_i;
	unsigned int sit_segs, start;
	char *src_bitmap, *dst_bitmap;
	unsigned int bitmap_size;

	/* allocate memory for SIT information */
	sit_i = kzalloc(sizeof(struct sit_info), GFP_KERNEL);
	if (!sit_i)
		return -ENOMEM;

	SM_I(sbi)->sit_info = sit_i;

	sit_i->sentries = vzalloc(TOTAL_SEGS(sbi) * sizeof(struct seg_entry));
	if (!sit_i->sentries)
		return -ENOMEM;

	bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));
	sit_i->dirty_sentries_bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!sit_i->dirty_sentries_bitmap)
		return -ENOMEM;

	for (start = 0; start < TOTAL_SEGS(sbi); start++) {
		sit_i->sentries[start].cur_valid_map
			= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
		sit_i->sentries[start].ckpt_valid_map
			= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
		if (!sit_i->sentries[start].cur_valid_map
				|| !sit_i->sentries[start].ckpt_valid_map)
			return -ENOMEM;
	}

	if (sbi->segs_per_sec > 1) {
		sit_i->sec_entries = vzalloc(TOTAL_SECS(sbi) *
					sizeof(struct sec_entry));
		if (!sit_i->sec_entries)
			return -ENOMEM;
	}

	/* get information related with SIT */
	sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;

	/* setup SIT bitmap from checkpoint pack */
	bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
	src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);

	dst_bitmap = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
	if (!dst_bitmap)
		return -ENOMEM;

	/* init SIT information */
	sit_i->s_ops = &default_salloc_ops;

	sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
	sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
	sit_i->written_valid_blocks = le64_to_cpu(ckpt->valid_block_count);
	sit_i->sit_bitmap = dst_bitmap;
	sit_i->bitmap_size = bitmap_size;
	sit_i->dirty_sentries = 0;
	sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
	sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
	sit_i->mounted_time = CURRENT_TIME_SEC.tv_sec;
	mutex_init(&sit_i->sentry_lock);
	return 0;
}

static int build_free_segmap(struct f2fs_sb_info *sbi)
{
	struct f2fs_sm_info *sm_info = SM_I(sbi);
	struct free_segmap_info *free_i;
	unsigned int bitmap_size, sec_bitmap_size;

	/* allocate memory for free segmap information */
	free_i = kzalloc(sizeof(struct free_segmap_info), GFP_KERNEL);
	if (!free_i)
		return -ENOMEM;

	SM_I(sbi)->free_info = free_i;

	bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));
	free_i->free_segmap = kmalloc(bitmap_size, GFP_KERNEL);
	if (!free_i->free_segmap)
		return -ENOMEM;

	sec_bitmap_size = f2fs_bitmap_size(TOTAL_SECS(sbi));
	free_i->free_secmap = kmalloc(sec_bitmap_size, GFP_KERNEL);
	if (!free_i->free_secmap)
		return -ENOMEM;

	/* set all segments as dirty temporarily */
	memset(free_i->free_segmap, 0xff, bitmap_size);
	memset(free_i->free_secmap, 0xff, sec_bitmap_size);

	/* init free segmap information */
	free_i->start_segno =
		(unsigned int) GET_SEGNO_FROM_SEG0(sbi, sm_info->main_blkaddr);
	free_i->free_segments = 0;
	free_i->free_sections = 0;
	rwlock_init(&free_i->segmap_lock);
	return 0;
}

static int build_curseg(struct f2fs_sb_info *sbi)
{
	struct curseg_info *array;
	int i;

	array = kzalloc(sizeof(*array) * NR_CURSEG_TYPE, GFP_KERNEL);
	if (!array)
		return -ENOMEM;

	SM_I(sbi)->curseg_array = array;

	for (i = 0; i < NR_CURSEG_TYPE; i++) {
		mutex_init(&array[i].curseg_mutex);
		array[i].sum_blk = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
		if (!array[i].sum_blk)
			return -ENOMEM;
		array[i].segno = NULL_SEGNO;
		array[i].next_blkoff = 0;
	}
	return restore_curseg_summaries(sbi);
}

static void build_sit_entries(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	unsigned int start;

	for (start = 0; start < TOTAL_SEGS(sbi); start++) {
		struct seg_entry *se = &sit_i->sentries[start];
		struct f2fs_sit_block *sit_blk;
		struct f2fs_sit_entry sit;
		struct page *page;
		int i;

		mutex_lock(&curseg->curseg_mutex);
		for (i = 0; i < sits_in_cursum(sum); i++) {
			if (le32_to_cpu(segno_in_journal(sum, i)) == start) {
				sit = sit_in_journal(sum, i);
				mutex_unlock(&curseg->curseg_mutex);
				goto got_it;
			}
		}
		mutex_unlock(&curseg->curseg_mutex);
		page = get_current_sit_page(sbi, start);
		sit_blk = (struct f2fs_sit_block *)page_address(page);
		sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
		f2fs_put_page(page, 1);
got_it:
		check_block_count(sbi, start, &sit);
		seg_info_from_raw_sit(se, &sit);
		if (sbi->segs_per_sec > 1) {
			struct sec_entry *e = get_sec_entry(sbi, start);
			e->valid_blocks += se->valid_blocks;
		}
	}
}
1672
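/*
 * Clear the in-use bit of every segment that has no valid blocks, then
 * make sure the segments opened by the current logs stay marked in-use.
 */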
static void init_free_segmap(struct f2fs_sb_info *sbi)
{
	unsigned int start;
	int type;

	for (start = 0; start < TOTAL_SEGS(sbi); start++) {
		struct seg_entry *sentry = get_seg_entry(sbi, start);
		if (!sentry->valid_blocks)
			__set_free(sbi, start);
	}

	/* mark the current segments as in-use */
	for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) {
		struct curseg_info *curseg_t = CURSEG_I(sbi, type);
		__set_test_and_inuse(sbi, curseg_t->segno);
	}
}

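/*
 * Walk the in-use segments and tag the partially valid ones (neither
 * full nor empty) as DIRTY, i.e. candidates for cleaning.
 */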
static void init_dirty_segmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int segno = 0, offset = 0, total_segs = TOTAL_SEGS(sbi);
	unsigned short valid_blocks;

	while (1) {
		/* find dirty segment based on free segmap */
		segno = find_next_inuse(free_i, total_segs, offset);
		if (segno >= total_segs)
			break;
		offset = segno + 1;
		valid_blocks = get_valid_blocks(sbi, segno, 0);
		if (valid_blocks >= sbi->blocks_per_seg || !valid_blocks)
			continue;
		mutex_lock(&dirty_i->seglist_lock);
		__locate_dirty_segment(sbi, segno, DIRTY);
		mutex_unlock(&dirty_i->seglist_lock);
	}
}

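/*
 * The victim section bitmap tracks sections that have already been
 * selected as victims, so the GC does not pick them again.
 */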
static int init_victim_secmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int bitmap_size = f2fs_bitmap_size(TOTAL_SECS(sbi));

	dirty_i->victim_secmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!dirty_i->victim_secmap)
		return -ENOMEM;
	return 0;
}

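/*
 * Allocate one bitmap per dirty type and populate the DIRTY one from
 * the current SIT state.
 */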
static int build_dirty_segmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i;
	unsigned int bitmap_size, i;

	/* allocate memory for dirty segments list information */
	dirty_i = kzalloc(sizeof(struct dirty_seglist_info), GFP_KERNEL);
	if (!dirty_i)
		return -ENOMEM;

	SM_I(sbi)->dirty_info = dirty_i;
	mutex_init(&dirty_i->seglist_lock);

	bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));

	for (i = 0; i < NR_DIRTY_TYPE; i++) {
		dirty_i->dirty_segmap[i] = kzalloc(bitmap_size, GFP_KERNEL);
		if (!dirty_i->dirty_segmap[i])
			return -ENOMEM;
	}

	init_dirty_segmap(sbi);
	return init_victim_secmap(sbi);
}

/*
 * Update min, max modified time for cost-benefit GC algorithm
 */
static void init_min_max_mtime(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int segno;

	mutex_lock(&sit_i->sentry_lock);

	sit_i->min_mtime = LLONG_MAX;

	for (segno = 0; segno < TOTAL_SEGS(sbi); segno += sbi->segs_per_sec) {
		unsigned int i;
		unsigned long long mtime = 0;

		for (i = 0; i < sbi->segs_per_sec; i++)
			mtime += get_seg_entry(sbi, segno + i)->mtime;

		mtime = div_u64(mtime, sbi->segs_per_sec);

		if (sit_i->min_mtime > mtime)
			sit_i->min_mtime = mtime;
	}
	sit_i->max_mtime = get_mtime(sbi);
	mutex_unlock(&sit_i->sentry_lock);
}

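/*
 * Build the in-memory segment management structures in dependency
 * order: SIT info first, then the free segmap and current segments,
 * then the free segmap is rebuilt from the SIT entries and the dirty
 * segmap is derived from the result.
 */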
int build_segment_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_sm_info *sm_info;
	int err;

	sm_info = kzalloc(sizeof(struct f2fs_sm_info), GFP_KERNEL);
	if (!sm_info)
		return -ENOMEM;

	/* init sm info */
	sbi->sm_info = sm_info;
	INIT_LIST_HEAD(&sm_info->wblist_head);
	spin_lock_init(&sm_info->wblist_lock);
	sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
	sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
	sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
	sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
	sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
	sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
	sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
	sm_info->rec_prefree_segments = DEF_RECLAIM_PREFREE_SEGMENTS;

	err = build_sit_info(sbi);
	if (err)
		return err;
	err = build_free_segmap(sbi);
	if (err)
		return err;
	err = build_curseg(sbi);
	if (err)
		return err;

	/* reinit free segmap based on SIT */
	build_sit_entries(sbi);

	init_free_segmap(sbi);
	err = build_dirty_segmap(sbi);
	if (err)
		return err;

	init_min_max_mtime(sbi);
	return 0;
}

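/* free a single dirty-type bitmap under the seglist lock */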
static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
					enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	mutex_lock(&dirty_i->seglist_lock);
	kfree(dirty_i->dirty_segmap[dirty_type]);
	dirty_i->nr_dirty[dirty_type] = 0;
	mutex_unlock(&dirty_i->seglist_lock);
}

static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	kfree(dirty_i->victim_secmap);
}

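/*
 * Tear down everything allocated by build_dirty_segmap(), including
 * the victim section bitmap.
 */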
static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	int i;

	if (!dirty_i)
		return;

	/* discard pre-free/dirty segments list */
	for (i = 0; i < NR_DIRTY_TYPE; i++)
		discard_dirty_segmap(sbi, i);

	destroy_victim_secmap(sbi);
	SM_I(sbi)->dirty_info = NULL;
	kfree(dirty_i);
}

static void destroy_curseg(struct f2fs_sb_info *sbi)
{
	struct curseg_info *array = SM_I(sbi)->curseg_array;
	int i;

	if (!array)
		return;
	SM_I(sbi)->curseg_array = NULL;
	for (i = 0; i < NR_CURSEG_TYPE; i++)
		kfree(array[i].sum_blk);
	kfree(array);
}

static void destroy_free_segmap(struct f2fs_sb_info *sbi)
{
	struct free_segmap_info *free_i = SM_I(sbi)->free_info;
	if (!free_i)
		return;
	SM_I(sbi)->free_info = NULL;
	kfree(free_i->free_segmap);
	kfree(free_i->free_secmap);
	kfree(free_i);
}

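/*
 * Free the per-segment validity bitmaps before releasing the sentry
 * and section entry arrays themselves.
 */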
static void destroy_sit_info(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int start;

	if (!sit_i)
		return;

	if (sit_i->sentries) {
		for (start = 0; start < TOTAL_SEGS(sbi); start++) {
			kfree(sit_i->sentries[start].cur_valid_map);
			kfree(sit_i->sentries[start].ckpt_valid_map);
		}
	}
	vfree(sit_i->sentries);
	vfree(sit_i->sec_entries);
	kfree(sit_i->dirty_sentries_bitmap);

	SM_I(sbi)->sit_info = NULL;
	kfree(sit_i->sit_bitmap);
	kfree(sit_i);
}

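/*
 * Counterpart of build_segment_manager(); safe to call even when the
 * segment manager was never built.
 */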
void destroy_segment_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_sm_info *sm_info = SM_I(sbi);
	if (!sm_info)
		return;
	destroy_dirty_segmap(sbi);
	destroy_curseg(sbi);
	destroy_free_segmap(sbi);
	destroy_sit_info(sbi);
	sbi->sm_info = NULL;
	kfree(sm_info);
}