Jaegeuk Kim0a8165d2012-11-29 13:28:09 +09001/*
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002 * fs/f2fs/segment.c
3 *
4 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com/
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11#include <linux/fs.h>
12#include <linux/f2fs_fs.h>
13#include <linux/bio.h>
14#include <linux/blkdev.h>
Geert Uytterhoeven690e4a32012-12-19 22:19:30 +010015#include <linux/prefetch.h>
Jaegeuk Kim351df4b2012-11-02 17:09:16 +090016#include <linux/vmalloc.h>
Chao Yu74de5932013-11-22 09:09:59 +080017#include <linux/swap.h>
Jaegeuk Kim351df4b2012-11-02 17:09:16 +090018
19#include "f2fs.h"
20#include "segment.h"
21#include "node.h"
Namjae Jeon6ec178d2013-04-23 17:51:43 +090022#include <trace/events/f2fs.h>
Jaegeuk Kim351df4b2012-11-02 17:09:16 +090023
Changman Lee9a7f1432013-11-15 10:42:51 +090024#define __reverse_ffz(x) __reverse_ffs(~(x))
25
Jaegeuk Kim7fd9e542013-11-15 13:55:58 +090026static struct kmem_cache *discard_entry_slab;
27
Changman Lee9a7f1432013-11-15 10:42:51 +090028/*
29 * __reverse_ffs is copied from include/asm-generic/bitops/__ffs.h since
30 * MSB and LSB are reversed in a byte by f2fs_set_bit.
31 */
32static inline unsigned long __reverse_ffs(unsigned long word)
33{
34 int num = 0;
35
36#if BITS_PER_LONG == 64
37 if ((word & 0xffffffff) == 0) {
38 num += 32;
39 word >>= 32;
40 }
41#endif
42 if ((word & 0xffff) == 0) {
43 num += 16;
44 word >>= 16;
45 }
46 if ((word & 0xff) == 0) {
47 num += 8;
48 word >>= 8;
49 }
50 if ((word & 0xf0) == 0)
51 num += 4;
52 else
53 word >>= 4;
54 if ((word & 0xc) == 0)
55 num += 2;
56 else
57 word >>= 2;
58 if ((word & 0x2) == 0)
59 num += 1;
60 return num;
61}
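/*
 * Worked example of the reversed-byte convention used here (for
 * illustration only): f2fs_set_bit(0, bitmap) stores 0x80 in the byte, and
 * __reverse_ffs(0x80UL) returns 0; f2fs_set_bit(4, bitmap) stores 0x08, and
 * __reverse_ffs(0x08UL) returns 4. __reverse_ffz(x) finds the first zero
 * bit in the same reversed order by scanning ~x.
 */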
62
63/*
 64 * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because
65 * f2fs_set_bit makes MSB and LSB reversed in a byte.
66 * Example:
67 * LSB <--> MSB
68 * f2fs_set_bit(0, bitmap) => 0000 0001
69 * f2fs_set_bit(7, bitmap) => 1000 0000
70 */
71static unsigned long __find_rev_next_bit(const unsigned long *addr,
72 unsigned long size, unsigned long offset)
73{
74 const unsigned long *p = addr + BIT_WORD(offset);
75 unsigned long result = offset & ~(BITS_PER_LONG - 1);
76 unsigned long tmp;
77 unsigned long mask, submask;
78 unsigned long quot, rest;
79
80 if (offset >= size)
81 return size;
82
83 size -= result;
84 offset %= BITS_PER_LONG;
85 if (!offset)
86 goto aligned;
87
88 tmp = *(p++);
89 quot = (offset >> 3) << 3;
90 rest = offset & 0x7;
91 mask = ~0UL << quot;
92 submask = (unsigned char)(0xff << rest) >> rest;
93 submask <<= quot;
94 mask &= submask;
95 tmp &= mask;
96 if (size < BITS_PER_LONG)
97 goto found_first;
98 if (tmp)
99 goto found_middle;
100
101 size -= BITS_PER_LONG;
102 result += BITS_PER_LONG;
103aligned:
104 while (size & ~(BITS_PER_LONG-1)) {
105 tmp = *(p++);
106 if (tmp)
107 goto found_middle;
108 result += BITS_PER_LONG;
109 size -= BITS_PER_LONG;
110 }
111 if (!size)
112 return result;
113 tmp = *p;
114found_first:
115 tmp &= (~0UL >> (BITS_PER_LONG - size));
116 if (tmp == 0UL) /* Are any bits set? */
117 return result + size; /* Nope. */
118found_middle:
119 return result + __reverse_ffs(tmp);
120}
121
122static unsigned long __find_rev_next_zero_bit(const unsigned long *addr,
123 unsigned long size, unsigned long offset)
124{
125 const unsigned long *p = addr + BIT_WORD(offset);
126 unsigned long result = offset & ~(BITS_PER_LONG - 1);
127 unsigned long tmp;
128 unsigned long mask, submask;
129 unsigned long quot, rest;
130
131 if (offset >= size)
132 return size;
133
134 size -= result;
135 offset %= BITS_PER_LONG;
136 if (!offset)
137 goto aligned;
138
139 tmp = *(p++);
140 quot = (offset >> 3) << 3;
141 rest = offset & 0x7;
142 mask = ~(~0UL << quot);
143 submask = (unsigned char)~((unsigned char)(0xff << rest) >> rest);
144 submask <<= quot;
145 mask += submask;
146 tmp |= mask;
147 if (size < BITS_PER_LONG)
148 goto found_first;
149 if (~tmp)
150 goto found_middle;
151
152 size -= BITS_PER_LONG;
153 result += BITS_PER_LONG;
154aligned:
155 while (size & ~(BITS_PER_LONG - 1)) {
156 tmp = *(p++);
157 if (~tmp)
158 goto found_middle;
159 result += BITS_PER_LONG;
160 size -= BITS_PER_LONG;
161 }
162 if (!size)
163 return result;
164 tmp = *p;
165
166found_first:
167 tmp |= ~0UL << size;
168 if (tmp == ~0UL) /* Are any bits zero? */
169 return result + size; /* Nope. */
170found_middle:
171 return result + __reverse_ffz(tmp);
172}
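/*
 * These reversed-bitmap helpers are used below on the per-segment validity
 * bitmaps (se->cur_valid_map and se->ckpt_valid_map): to locate runs of
 * blocks to discard (add_discard_addrs) and to find the next free block
 * offset for SSR allocation (__next_free_blkoff).
 */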
173
Jaegeuk Kim0a8165d2012-11-29 13:28:09 +0900174/*
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900175 * This function balances dirty node and dentry pages.
176 * In addition, it controls garbage collection.
177 */
178void f2fs_balance_fs(struct f2fs_sb_info *sbi)
179{
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900180 /*
Jaegeuk Kim029cd282012-12-21 17:20:21 +0900181 * We should do GC or end up with a checkpoint, if there are too many dirty
182 * dir/node pages without enough free segments.
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900183 */
Jaegeuk Kim43727522013-02-04 15:11:17 +0900184 if (has_not_enough_free_secs(sbi, 0)) {
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900185 mutex_lock(&sbi->gc_mutex);
Jaegeuk Kim408e9372013-01-03 17:55:52 +0900186 f2fs_gc(sbi);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900187 }
188}
189
Jaegeuk Kim4660f9c2013-10-24 14:19:18 +0900190void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
191{
192 /* check the # of cached NAT entries and prefree segments */
193 if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK) ||
194 excess_prefree_segs(sbi))
195 f2fs_sync_fs(sbi->sb, true);
196}
197
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900198static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
199 enum dirty_type dirty_type)
200{
201 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
202
203 /* need not be added */
204 if (IS_CURSEG(sbi, segno))
205 return;
206
207 if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
208 dirty_i->nr_dirty[dirty_type]++;
209
210 if (dirty_type == DIRTY) {
211 struct seg_entry *sentry = get_seg_entry(sbi, segno);
Changman Lee4625d6a2013-10-25 17:31:57 +0900212 enum dirty_type t = sentry->type;
Jaegeuk Kimb2f2c392013-04-01 13:52:09 +0900213
Changman Lee4625d6a2013-10-25 17:31:57 +0900214 if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t]))
215 dirty_i->nr_dirty[t]++;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900216 }
217}
218
219static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
220 enum dirty_type dirty_type)
221{
222 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
223
224 if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
225 dirty_i->nr_dirty[dirty_type]--;
226
227 if (dirty_type == DIRTY) {
Changman Lee4625d6a2013-10-25 17:31:57 +0900228 struct seg_entry *sentry = get_seg_entry(sbi, segno);
229 enum dirty_type t = sentry->type;
Jaegeuk Kimb2f2c392013-04-01 13:52:09 +0900230
Changman Lee4625d6a2013-10-25 17:31:57 +0900231 if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
232 dirty_i->nr_dirty[t]--;
Jaegeuk Kimb2f2c392013-04-01 13:52:09 +0900233
Jaegeuk Kim5ec4e492013-03-31 13:26:03 +0900234 if (get_valid_blocks(sbi, segno, sbi->segs_per_sec) == 0)
235 clear_bit(GET_SECNO(sbi, segno),
236 dirty_i->victim_secmap);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900237 }
238}
239
Jaegeuk Kim0a8165d2012-11-29 13:28:09 +0900240/*
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900241 * Errors such as -ENOMEM should not occur here.
 242 * Adding a dirty entry into the seglist is not a critical operation.
 243 * If a given segment is one of the current working segments, it won't be added.
244 */
Haicheng Li8d8451a2013-06-13 16:59:28 +0800245static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900246{
247 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
248 unsigned short valid_blocks;
249
250 if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
251 return;
252
253 mutex_lock(&dirty_i->seglist_lock);
254
255 valid_blocks = get_valid_blocks(sbi, segno, 0);
256
257 if (valid_blocks == 0) {
258 __locate_dirty_segment(sbi, segno, PRE);
259 __remove_dirty_segment(sbi, segno, DIRTY);
260 } else if (valid_blocks < sbi->blocks_per_seg) {
261 __locate_dirty_segment(sbi, segno, DIRTY);
262 } else {
263 /* Recovery routine with SSR needs this */
264 __remove_dirty_segment(sbi, segno, DIRTY);
265 }
266
267 mutex_unlock(&dirty_i->seglist_lock);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900268}
269
Jaegeuk Kim37208872013-11-12 16:55:17 +0900270static void f2fs_issue_discard(struct f2fs_sb_info *sbi,
271 block_t blkstart, block_t blklen)
272{
Jaegeuk Kimf9a4e6d2013-11-28 12:44:05 +0900273 sector_t start = SECTOR_FROM_BLOCK(sbi, blkstart);
274 sector_t len = SECTOR_FROM_BLOCK(sbi, blklen);
Jaegeuk Kim37208872013-11-12 16:55:17 +0900275 blkdev_issue_discard(sbi->sb->s_bdev, start, len, GFP_NOFS, 0);
Jaegeuk Kim1661d072013-11-12 17:01:00 +0900276 trace_f2fs_issue_discard(sbi->sb, blkstart, blklen);
Jaegeuk Kim37208872013-11-12 16:55:17 +0900277}
278
Jaegeuk Kimb2955552013-11-12 14:49:56 +0900279static void add_discard_addrs(struct f2fs_sb_info *sbi,
280 unsigned int segno, struct seg_entry *se)
281{
282 struct list_head *head = &SM_I(sbi)->discard_list;
283 struct discard_entry *new;
284 int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
285 int max_blocks = sbi->blocks_per_seg;
286 unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
287 unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
288 unsigned long dmap[entries];
289 unsigned int start = 0, end = -1;
290 int i;
291
292 if (!test_opt(sbi, DISCARD))
293 return;
294
 295 /* a segment with no valid blocks will be discarded through the prefree list */
296 if (!se->valid_blocks || se->valid_blocks == max_blocks)
297 return;
298
299 /* SIT_VBLOCK_MAP_SIZE should be multiple of sizeof(unsigned long) */
300 for (i = 0; i < entries; i++)
301 dmap[i] = (cur_map[i] ^ ckpt_map[i]) & ckpt_map[i];
302
303 while (SM_I(sbi)->nr_discards <= SM_I(sbi)->max_discards) {
304 start = __find_rev_next_bit(dmap, max_blocks, end + 1);
305 if (start >= max_blocks)
306 break;
307
308 end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1);
309
310 new = f2fs_kmem_cache_alloc(discard_entry_slab, GFP_NOFS);
311 INIT_LIST_HEAD(&new->list);
312 new->blkaddr = START_BLOCK(sbi, segno) + start;
313 new->len = end - start;
314
315 list_add_tail(&new->list, head);
316 SM_I(sbi)->nr_discards += end - start;
317 }
318}
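/*
 * Note on add_discard_addrs(): dmap marks blocks that were valid at the last
 * checkpoint (ckpt_valid_map bit set) but have been invalidated since
 * (cur_valid_map bit clear); each contiguous run [start, end) of such blocks
 * becomes one small discard candidate, bounded by max_discards.
 */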
319
Jaegeuk Kim0a8165d2012-11-29 13:28:09 +0900320/*
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900321 * Should call clear_prefree_segments after the checkpoint is done.
322 */
323static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
324{
325 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
Haicheng Li81fb5e82013-05-14 18:20:28 +0800326 unsigned int segno = -1;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900327 unsigned int total_segs = TOTAL_SEGS(sbi);
328
329 mutex_lock(&dirty_i->seglist_lock);
330 while (1) {
331 segno = find_next_bit(dirty_i->dirty_segmap[PRE], total_segs,
Haicheng Li81fb5e82013-05-14 18:20:28 +0800332 segno + 1);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900333 if (segno >= total_segs)
334 break;
335 __set_test_and_free(sbi, segno);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900336 }
337 mutex_unlock(&dirty_i->seglist_lock);
338}
339
340void clear_prefree_segments(struct f2fs_sb_info *sbi)
341{
Jaegeuk Kimb2955552013-11-12 14:49:56 +0900342 struct list_head *head = &(SM_I(sbi)->discard_list);
343 struct list_head *this, *next;
344 struct discard_entry *entry;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900345 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
Changman Lee29e59c12013-11-11 09:24:37 +0900346 unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900347 unsigned int total_segs = TOTAL_SEGS(sbi);
Changman Lee29e59c12013-11-11 09:24:37 +0900348 unsigned int start = 0, end = -1;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900349
350 mutex_lock(&dirty_i->seglist_lock);
Changman Lee29e59c12013-11-11 09:24:37 +0900351
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900352 while (1) {
Changman Lee29e59c12013-11-11 09:24:37 +0900353 int i;
354 start = find_next_bit(prefree_map, total_segs, end + 1);
355 if (start >= total_segs)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900356 break;
Changman Lee29e59c12013-11-11 09:24:37 +0900357 end = find_next_zero_bit(prefree_map, total_segs, start + 1);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900358
Changman Lee29e59c12013-11-11 09:24:37 +0900359 for (i = start; i < end; i++)
360 clear_bit(i, prefree_map);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900361
Changman Lee29e59c12013-11-11 09:24:37 +0900362 dirty_i->nr_dirty[PRE] -= end - start;
363
364 if (!test_opt(sbi, DISCARD))
365 continue;
366
Jaegeuk Kim37208872013-11-12 16:55:17 +0900367 f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
368 (end - start) << sbi->log_blocks_per_seg);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900369 }
370 mutex_unlock(&dirty_i->seglist_lock);
Jaegeuk Kimb2955552013-11-12 14:49:56 +0900371
372 /* send small discards */
373 list_for_each_safe(this, next, head) {
374 entry = list_entry(this, struct discard_entry, list);
Jaegeuk Kim37208872013-11-12 16:55:17 +0900375 f2fs_issue_discard(sbi, entry->blkaddr, entry->len);
Jaegeuk Kimb2955552013-11-12 14:49:56 +0900376 list_del(&entry->list);
377 SM_I(sbi)->nr_discards -= entry->len;
378 kmem_cache_free(discard_entry_slab, entry);
379 }
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900380}
381
382static void __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
383{
384 struct sit_info *sit_i = SIT_I(sbi);
385 if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap))
386 sit_i->dirty_sentries++;
387}
388
389static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type,
390 unsigned int segno, int modified)
391{
392 struct seg_entry *se = get_seg_entry(sbi, segno);
393 se->type = type;
394 if (modified)
395 __mark_sit_entry_dirty(sbi, segno);
396}
397
398static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
399{
400 struct seg_entry *se;
401 unsigned int segno, offset;
402 long int new_vblocks;
403
404 segno = GET_SEGNO(sbi, blkaddr);
405
406 se = get_seg_entry(sbi, segno);
407 new_vblocks = se->valid_blocks + del;
408 offset = GET_SEGOFF_FROM_SEG0(sbi, blkaddr) & (sbi->blocks_per_seg - 1);
409
Jaegeuk Kim5d56b672013-10-29 15:14:54 +0900410 f2fs_bug_on((new_vblocks >> (sizeof(unsigned short) << 3) ||
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900411 (new_vblocks > sbi->blocks_per_seg)));
412
413 se->valid_blocks = new_vblocks;
414 se->mtime = get_mtime(sbi);
415 SIT_I(sbi)->max_mtime = se->mtime;
416
417 /* Update valid block bitmap */
418 if (del > 0) {
419 if (f2fs_set_bit(offset, se->cur_valid_map))
420 BUG();
421 } else {
422 if (!f2fs_clear_bit(offset, se->cur_valid_map))
423 BUG();
424 }
425 if (!f2fs_test_bit(offset, se->ckpt_valid_map))
426 se->ckpt_valid_blocks += del;
427
428 __mark_sit_entry_dirty(sbi, segno);
429
430 /* update total number of valid blocks to be written in ckpt area */
431 SIT_I(sbi)->written_valid_blocks += del;
432
433 if (sbi->segs_per_sec > 1)
434 get_sec_entry(sbi, segno)->valid_blocks += del;
435}
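/*
 * update_sit_entry() is called with del = 1 when the block at blkaddr
 * becomes valid and del = -1 when it is invalidated; it keeps cur_valid_map,
 * ckpt_valid_blocks, the segment mtime and the per-section valid block count
 * consistent, and marks the SIT entry dirty for the next checkpoint.
 */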
436
437static void refresh_sit_entry(struct f2fs_sb_info *sbi,
438 block_t old_blkaddr, block_t new_blkaddr)
439{
440 update_sit_entry(sbi, new_blkaddr, 1);
441 if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
442 update_sit_entry(sbi, old_blkaddr, -1);
443}
444
445void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
446{
447 unsigned int segno = GET_SEGNO(sbi, addr);
448 struct sit_info *sit_i = SIT_I(sbi);
449
Jaegeuk Kim5d56b672013-10-29 15:14:54 +0900450 f2fs_bug_on(addr == NULL_ADDR);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900451 if (addr == NEW_ADDR)
452 return;
453
454 /* add it into sit main buffer */
455 mutex_lock(&sit_i->sentry_lock);
456
457 update_sit_entry(sbi, addr, -1);
458
459 /* add it into dirty seglist */
460 locate_dirty_segment(sbi, segno);
461
462 mutex_unlock(&sit_i->sentry_lock);
463}
464
Jaegeuk Kim0a8165d2012-11-29 13:28:09 +0900465/*
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900466 * This function should be called with the curseg_mutex held
467 */
468static void __add_sum_entry(struct f2fs_sb_info *sbi, int type,
Haicheng Lie79efe32013-06-13 16:59:27 +0800469 struct f2fs_summary *sum)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900470{
471 struct curseg_info *curseg = CURSEG_I(sbi, type);
472 void *addr = curseg->sum_blk;
Haicheng Lie79efe32013-06-13 16:59:27 +0800473 addr += curseg->next_blkoff * sizeof(struct f2fs_summary);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900474 memcpy(addr, sum, sizeof(struct f2fs_summary));
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900475}
476
Jaegeuk Kim0a8165d2012-11-29 13:28:09 +0900477/*
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900478 * Calculate the number of current summary pages for writing
479 */
480int npages_for_summary_flush(struct f2fs_sb_info *sbi)
481{
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900482 int valid_sum_count = 0;
Fan Li9a479382013-10-29 16:21:47 +0800483 int i, sum_in_page;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900484
485 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
486 if (sbi->ckpt->alloc_type[i] == SSR)
487 valid_sum_count += sbi->blocks_per_seg;
488 else
489 valid_sum_count += curseg_blkoff(sbi, i);
490 }
491
Fan Li9a479382013-10-29 16:21:47 +0800492 sum_in_page = (PAGE_CACHE_SIZE - 2 * SUM_JOURNAL_SIZE -
493 SUM_FOOTER_SIZE) / SUMMARY_SIZE;
494 if (valid_sum_count <= sum_in_page)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900495 return 1;
Fan Li9a479382013-10-29 16:21:47 +0800496 else if ((valid_sum_count - sum_in_page) <=
497 (PAGE_CACHE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900498 return 2;
499 return 3;
500}
501
Jaegeuk Kim0a8165d2012-11-29 13:28:09 +0900502/*
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900503 * Caller should put this summary page
504 */
505struct page *get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
506{
507 return get_meta_page(sbi, GET_SUM_BLOCK(sbi, segno));
508}
509
510static void write_sum_page(struct f2fs_sb_info *sbi,
511 struct f2fs_summary_block *sum_blk, block_t blk_addr)
512{
513 struct page *page = grab_meta_page(sbi, blk_addr);
514 void *kaddr = page_address(page);
515 memcpy(kaddr, sum_blk, PAGE_CACHE_SIZE);
516 set_page_dirty(page);
517 f2fs_put_page(page, 1);
518}
519
Jaegeuk Kim60374682013-03-31 13:58:51 +0900520static int is_next_segment_free(struct f2fs_sb_info *sbi, int type)
521{
522 struct curseg_info *curseg = CURSEG_I(sbi, type);
Haicheng Li81fb5e82013-05-14 18:20:28 +0800523 unsigned int segno = curseg->segno + 1;
Jaegeuk Kim60374682013-03-31 13:58:51 +0900524 struct free_segmap_info *free_i = FREE_I(sbi);
525
Haicheng Li81fb5e82013-05-14 18:20:28 +0800526 if (segno < TOTAL_SEGS(sbi) && segno % sbi->segs_per_sec)
527 return !test_bit(segno, free_i->free_segmap);
Jaegeuk Kim60374682013-03-31 13:58:51 +0900528 return 0;
529}
530
Jaegeuk Kim0a8165d2012-11-29 13:28:09 +0900531/*
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900532 * Find a new segment in the free segment bitmap in the right order.
 533 * This function must succeed; otherwise it is a BUG.
534 */
535static void get_new_segment(struct f2fs_sb_info *sbi,
536 unsigned int *newseg, bool new_sec, int dir)
537{
538 struct free_segmap_info *free_i = FREE_I(sbi);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900539 unsigned int segno, secno, zoneno;
Jaegeuk Kim53cf9522013-03-31 12:39:49 +0900540 unsigned int total_zones = TOTAL_SECS(sbi) / sbi->secs_per_zone;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900541 unsigned int hint = *newseg / sbi->segs_per_sec;
542 unsigned int old_zoneno = GET_ZONENO_FROM_SEGNO(sbi, *newseg);
543 unsigned int left_start = hint;
544 bool init = true;
545 int go_left = 0;
546 int i;
547
548 write_lock(&free_i->segmap_lock);
549
550 if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
551 segno = find_next_zero_bit(free_i->free_segmap,
552 TOTAL_SEGS(sbi), *newseg + 1);
Jaegeuk Kim33afa7f2013-03-31 12:59:53 +0900553 if (segno - *newseg < sbi->segs_per_sec -
554 (*newseg % sbi->segs_per_sec))
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900555 goto got_it;
556 }
557find_other_zone:
Jaegeuk Kim53cf9522013-03-31 12:39:49 +0900558 secno = find_next_zero_bit(free_i->free_secmap, TOTAL_SECS(sbi), hint);
559 if (secno >= TOTAL_SECS(sbi)) {
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900560 if (dir == ALLOC_RIGHT) {
561 secno = find_next_zero_bit(free_i->free_secmap,
Jaegeuk Kim53cf9522013-03-31 12:39:49 +0900562 TOTAL_SECS(sbi), 0);
Jaegeuk Kim5d56b672013-10-29 15:14:54 +0900563 f2fs_bug_on(secno >= TOTAL_SECS(sbi));
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900564 } else {
565 go_left = 1;
566 left_start = hint - 1;
567 }
568 }
569 if (go_left == 0)
570 goto skip_left;
571
572 while (test_bit(left_start, free_i->free_secmap)) {
573 if (left_start > 0) {
574 left_start--;
575 continue;
576 }
577 left_start = find_next_zero_bit(free_i->free_secmap,
Jaegeuk Kim53cf9522013-03-31 12:39:49 +0900578 TOTAL_SECS(sbi), 0);
Jaegeuk Kim5d56b672013-10-29 15:14:54 +0900579 f2fs_bug_on(left_start >= TOTAL_SECS(sbi));
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900580 break;
581 }
582 secno = left_start;
583skip_left:
584 hint = secno;
585 segno = secno * sbi->segs_per_sec;
586 zoneno = secno / sbi->secs_per_zone;
587
588 /* give up on finding another zone */
589 if (!init)
590 goto got_it;
591 if (sbi->secs_per_zone == 1)
592 goto got_it;
593 if (zoneno == old_zoneno)
594 goto got_it;
595 if (dir == ALLOC_LEFT) {
596 if (!go_left && zoneno + 1 >= total_zones)
597 goto got_it;
598 if (go_left && zoneno == 0)
599 goto got_it;
600 }
601 for (i = 0; i < NR_CURSEG_TYPE; i++)
602 if (CURSEG_I(sbi, i)->zone == zoneno)
603 break;
604
605 if (i < NR_CURSEG_TYPE) {
 606 /* zone is in use, try another */
607 if (go_left)
608 hint = zoneno * sbi->secs_per_zone - 1;
609 else if (zoneno + 1 >= total_zones)
610 hint = 0;
611 else
612 hint = (zoneno + 1) * sbi->secs_per_zone;
613 init = false;
614 goto find_other_zone;
615 }
616got_it:
617 /* set it as dirty segment in free segmap */
Jaegeuk Kim5d56b672013-10-29 15:14:54 +0900618 f2fs_bug_on(test_bit(segno, free_i->free_segmap));
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900619 __set_inuse(sbi, segno);
620 *newseg = segno;
621 write_unlock(&free_i->segmap_lock);
622}
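/*
 * get_new_segment() roughly proceeds as follows: stay in the current section
 * when new_sec is false and a neighboring segment is free, otherwise search
 * free_secmap from the hint in the requested direction, and finally skip
 * zones that already host another current segment so the active logs land in
 * different zones where possible.
 */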
623
624static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
625{
626 struct curseg_info *curseg = CURSEG_I(sbi, type);
627 struct summary_footer *sum_footer;
628
629 curseg->segno = curseg->next_segno;
630 curseg->zone = GET_ZONENO_FROM_SEGNO(sbi, curseg->segno);
631 curseg->next_blkoff = 0;
632 curseg->next_segno = NULL_SEGNO;
633
634 sum_footer = &(curseg->sum_blk->footer);
635 memset(sum_footer, 0, sizeof(struct summary_footer));
636 if (IS_DATASEG(type))
637 SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
638 if (IS_NODESEG(type))
639 SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
640 __set_sit_entry_type(sbi, type, curseg->segno, modified);
641}
642
Jaegeuk Kim0a8165d2012-11-29 13:28:09 +0900643/*
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900644 * Allocate a current working segment.
645 * This function always allocates a free segment in LFS manner.
646 */
647static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
648{
649 struct curseg_info *curseg = CURSEG_I(sbi, type);
650 unsigned int segno = curseg->segno;
651 int dir = ALLOC_LEFT;
652
653 write_sum_page(sbi, curseg->sum_blk,
Haicheng Li81fb5e82013-05-14 18:20:28 +0800654 GET_SUM_BLOCK(sbi, segno));
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900655 if (type == CURSEG_WARM_DATA || type == CURSEG_COLD_DATA)
656 dir = ALLOC_RIGHT;
657
658 if (test_opt(sbi, NOHEAP))
659 dir = ALLOC_RIGHT;
660
661 get_new_segment(sbi, &segno, new_sec, dir);
662 curseg->next_segno = segno;
663 reset_curseg(sbi, type, 1);
664 curseg->alloc_type = LFS;
665}
666
667static void __next_free_blkoff(struct f2fs_sb_info *sbi,
668 struct curseg_info *seg, block_t start)
669{
670 struct seg_entry *se = get_seg_entry(sbi, seg->segno);
Changman Leee81c93c2013-11-15 13:21:16 +0900671 int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
672 unsigned long target_map[entries];
673 unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
674 unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
675 int i, pos;
676
677 for (i = 0; i < entries; i++)
678 target_map[i] = ckpt_map[i] | cur_map[i];
679
680 pos = __find_rev_next_zero_bit(target_map, sbi->blocks_per_seg, start);
681
682 seg->next_blkoff = pos;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900683}
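/*
 * For SSR, target_map above is the union of the checkpointed and current
 * validity bitmaps, so only block offsets that are free in both views are
 * handed out; blocks still referenced by the last checkpoint are never
 * reused.
 */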
684
Jaegeuk Kim0a8165d2012-11-29 13:28:09 +0900685/*
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900686 * If a segment is written in LFS manner, the next block offset is just obtained
 687 * by increasing the current block offset. However, if a segment is written in
 688 * SSR manner, the next block offset is obtained by calling __next_free_blkoff.
689 */
690static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
691 struct curseg_info *seg)
692{
693 if (seg->alloc_type == SSR)
694 __next_free_blkoff(sbi, seg, seg->next_blkoff + 1);
695 else
696 seg->next_blkoff++;
697}
698
Jaegeuk Kim0a8165d2012-11-29 13:28:09 +0900699/*
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900700 * This function always allocates a used segment (from the dirty seglist) in SSR
 701 * manner, so it should recover the existing segment information of valid blocks.
702 */
703static void change_curseg(struct f2fs_sb_info *sbi, int type, bool reuse)
704{
705 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
706 struct curseg_info *curseg = CURSEG_I(sbi, type);
707 unsigned int new_segno = curseg->next_segno;
708 struct f2fs_summary_block *sum_node;
709 struct page *sum_page;
710
711 write_sum_page(sbi, curseg->sum_blk,
712 GET_SUM_BLOCK(sbi, curseg->segno));
713 __set_test_and_inuse(sbi, new_segno);
714
715 mutex_lock(&dirty_i->seglist_lock);
716 __remove_dirty_segment(sbi, new_segno, PRE);
717 __remove_dirty_segment(sbi, new_segno, DIRTY);
718 mutex_unlock(&dirty_i->seglist_lock);
719
720 reset_curseg(sbi, type, 1);
721 curseg->alloc_type = SSR;
722 __next_free_blkoff(sbi, curseg, 0);
723
724 if (reuse) {
725 sum_page = get_sum_page(sbi, new_segno);
726 sum_node = (struct f2fs_summary_block *)page_address(sum_page);
727 memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
728 f2fs_put_page(sum_page, 1);
729 }
730}
731
Jaegeuk Kim43727522013-02-04 15:11:17 +0900732static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
733{
734 struct curseg_info *curseg = CURSEG_I(sbi, type);
735 const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;
736
737 if (IS_NODESEG(type) || !has_not_enough_free_secs(sbi, 0))
738 return v_ops->get_victim(sbi,
739 &(curseg)->next_segno, BG_GC, type, SSR);
740
741 /* For data segments, let's do SSR more intensively */
742 for (; type >= CURSEG_HOT_DATA; type--)
743 if (v_ops->get_victim(sbi, &(curseg)->next_segno,
744 BG_GC, type, SSR))
745 return 1;
746 return 0;
747}
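/*
 * get_ssr_segment() picks a victim segment of the requested log type; for
 * data logs under free-section pressure it also tries the other data
 * temperatures (from the given type down to hot) before giving up.
 */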
748
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900749/*
 750 * Flush out the current segment and replace it with a new segment.
 751 * This function must succeed; otherwise it is a BUG.
752 */
753static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
754 int type, bool force)
755{
756 struct curseg_info *curseg = CURSEG_I(sbi, type);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900757
Gu Zheng7b405272013-08-19 09:41:15 +0800758 if (force)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900759 new_curseg(sbi, type, true);
Gu Zheng7b405272013-08-19 09:41:15 +0800760 else if (type == CURSEG_WARM_NODE)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900761 new_curseg(sbi, type, false);
Jaegeuk Kim60374682013-03-31 13:58:51 +0900762 else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type))
763 new_curseg(sbi, type, false);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900764 else if (need_SSR(sbi) && get_ssr_segment(sbi, type))
765 change_curseg(sbi, type, true);
766 else
767 new_curseg(sbi, type, false);
Jaegeuk Kimdcdfff62013-10-22 20:56:10 +0900768
769 stat_inc_seg_type(sbi, curseg);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900770}
771
772void allocate_new_segments(struct f2fs_sb_info *sbi)
773{
774 struct curseg_info *curseg;
775 unsigned int old_curseg;
776 int i;
777
778 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
779 curseg = CURSEG_I(sbi, i);
780 old_curseg = curseg->segno;
781 SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true);
782 locate_dirty_segment(sbi, old_curseg);
783 }
784}
785
786static const struct segment_allocation default_salloc_ops = {
787 .allocate_segment = allocate_segment_by_default,
788};
789
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900790static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type)
791{
792 struct curseg_info *curseg = CURSEG_I(sbi, type);
793 if (curseg->next_blkoff < sbi->blocks_per_seg)
794 return true;
795 return false;
796}
797
798static int __get_segment_type_2(struct page *page, enum page_type p_type)
799{
800 if (p_type == DATA)
801 return CURSEG_HOT_DATA;
802 else
803 return CURSEG_HOT_NODE;
804}
805
806static int __get_segment_type_4(struct page *page, enum page_type p_type)
807{
808 if (p_type == DATA) {
809 struct inode *inode = page->mapping->host;
810
811 if (S_ISDIR(inode->i_mode))
812 return CURSEG_HOT_DATA;
813 else
814 return CURSEG_COLD_DATA;
815 } else {
816 if (IS_DNODE(page) && !is_cold_node(page))
817 return CURSEG_HOT_NODE;
818 else
819 return CURSEG_COLD_NODE;
820 }
821}
822
823static int __get_segment_type_6(struct page *page, enum page_type p_type)
824{
825 if (p_type == DATA) {
826 struct inode *inode = page->mapping->host;
827
828 if (S_ISDIR(inode->i_mode))
829 return CURSEG_HOT_DATA;
Jaegeuk Kim354a3392013-06-14 08:52:35 +0900830 else if (is_cold_data(page) || file_is_cold(inode))
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900831 return CURSEG_COLD_DATA;
832 else
833 return CURSEG_WARM_DATA;
834 } else {
835 if (IS_DNODE(page))
836 return is_cold_node(page) ? CURSEG_WARM_NODE :
837 CURSEG_HOT_NODE;
838 else
839 return CURSEG_COLD_NODE;
840 }
841}
842
843static int __get_segment_type(struct page *page, enum page_type p_type)
844{
845 struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
846 switch (sbi->active_logs) {
847 case 2:
848 return __get_segment_type_2(page, p_type);
849 case 4:
850 return __get_segment_type_4(page, p_type);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900851 }
Jaegeuk Kim12a67142012-12-21 11:47:05 +0900852 /* NR_CURSEG_TYPE(6) logs by default */
Jaegeuk Kim5d56b672013-10-29 15:14:54 +0900853 f2fs_bug_on(sbi->active_logs != NR_CURSEG_TYPE);
Jaegeuk Kim12a67142012-12-21 11:47:05 +0900854 return __get_segment_type_6(page, p_type);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900855}
856
857static void do_write_page(struct f2fs_sb_info *sbi, struct page *page,
858 block_t old_blkaddr, block_t *new_blkaddr,
Fan Li63a0b7cb2013-12-09 16:09:00 +0800859 struct f2fs_summary *sum, enum page_type p_type,
860 struct writeback_control *wbc)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900861{
862 struct sit_info *sit_i = SIT_I(sbi);
863 struct curseg_info *curseg;
864 unsigned int old_cursegno;
Fan Li63a0b7cb2013-12-09 16:09:00 +0800865 int type, rw = WRITE;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900866
867 type = __get_segment_type(page, p_type);
868 curseg = CURSEG_I(sbi, type);
869
870 mutex_lock(&curseg->curseg_mutex);
871
872 *new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
873 old_cursegno = curseg->segno;
874
875 /*
 876 * __add_sum_entry should be called with the curseg_mutex held
 877 * because this function updates a summary entry in the
878 * current summary block.
879 */
Haicheng Lie79efe32013-06-13 16:59:27 +0800880 __add_sum_entry(sbi, type, sum);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900881
882 mutex_lock(&sit_i->sentry_lock);
883 __refresh_next_blkoff(sbi, curseg);
Jaegeuk Kimdcdfff62013-10-22 20:56:10 +0900884
885 stat_inc_block_count(sbi, curseg);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900886
887 /*
888 * SIT information should be updated before segment allocation,
889 * since SSR needs latest valid block information.
890 */
891 refresh_sit_entry(sbi, old_blkaddr, *new_blkaddr);
892
893 if (!__has_curseg_space(sbi, type))
894 sit_i->s_ops->allocate_segment(sbi, type, false);
895
896 locate_dirty_segment(sbi, old_cursegno);
897 locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
898 mutex_unlock(&sit_i->sentry_lock);
899
900 if (p_type == NODE)
901 fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));
902
903 /* writeout dirty page into bdev */
Fan Li63a0b7cb2013-12-09 16:09:00 +0800904 if (wbc->sync_mode == WB_SYNC_ALL)
905 rw |= WRITE_SYNC;
906 f2fs_submit_page_mbio(sbi, page, *new_blkaddr, p_type, rw);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900907
908 mutex_unlock(&curseg->curseg_mutex);
909}
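/*
 * Write path summary: do_write_page() selects the log type for the page,
 * takes the next free block of that curseg, records the summary entry,
 * refreshes the SIT entries for the old and new block addresses, switches to
 * a new segment if the curseg is exhausted, and finally submits the page
 * through the merged-bio interface.
 */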
910
Jaegeuk Kim577e3492013-01-24 19:56:11 +0900911void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900912{
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900913 set_page_writeback(page);
Jaegeuk Kim93dfe2a2013-11-30 12:51:14 +0900914 f2fs_submit_page_mbio(sbi, page, page->index, META, WRITE);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900915}
916
917void write_node_page(struct f2fs_sb_info *sbi, struct page *page,
918 unsigned int nid, block_t old_blkaddr, block_t *new_blkaddr)
919{
920 struct f2fs_summary sum;
Fan Li63a0b7cb2013-12-09 16:09:00 +0800921 struct writeback_control wbc = {
922 .sync_mode = 1,
923 };
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900924 set_summary(&sum, nid, 0, 0);
Fan Li63a0b7cb2013-12-09 16:09:00 +0800925 do_write_page(sbi, page, old_blkaddr, new_blkaddr, &sum, NODE, &wbc);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900926}
927
928void write_data_page(struct inode *inode, struct page *page,
929 struct dnode_of_data *dn, block_t old_blkaddr,
Fan Li63a0b7cb2013-12-09 16:09:00 +0800930 block_t *new_blkaddr, struct writeback_control *wbc)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900931{
932 struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
933 struct f2fs_summary sum;
934 struct node_info ni;
935
Jaegeuk Kim5d56b672013-10-29 15:14:54 +0900936 f2fs_bug_on(old_blkaddr == NULL_ADDR);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900937 get_node_info(sbi, dn->nid, &ni);
938 set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
939
940 do_write_page(sbi, page, old_blkaddr,
Fan Li63a0b7cb2013-12-09 16:09:00 +0800941 new_blkaddr, &sum, DATA, wbc);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900942}
943
944void rewrite_data_page(struct f2fs_sb_info *sbi, struct page *page,
Fan Li63a0b7cb2013-12-09 16:09:00 +0800945 block_t old_blk_addr, struct writeback_control *wbc)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900946{
Fan Li63a0b7cb2013-12-09 16:09:00 +0800947 int rw = wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE;
948 f2fs_submit_page_mbio(sbi, page, old_blk_addr, DATA, rw);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900949}
950
951void recover_data_page(struct f2fs_sb_info *sbi,
952 struct page *page, struct f2fs_summary *sum,
953 block_t old_blkaddr, block_t new_blkaddr)
954{
955 struct sit_info *sit_i = SIT_I(sbi);
956 struct curseg_info *curseg;
957 unsigned int segno, old_cursegno;
958 struct seg_entry *se;
959 int type;
960
961 segno = GET_SEGNO(sbi, new_blkaddr);
962 se = get_seg_entry(sbi, segno);
963 type = se->type;
964
965 if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
966 if (old_blkaddr == NULL_ADDR)
967 type = CURSEG_COLD_DATA;
968 else
969 type = CURSEG_WARM_DATA;
970 }
971 curseg = CURSEG_I(sbi, type);
972
973 mutex_lock(&curseg->curseg_mutex);
974 mutex_lock(&sit_i->sentry_lock);
975
976 old_cursegno = curseg->segno;
977
978 /* change the current segment */
979 if (segno != curseg->segno) {
980 curseg->next_segno = segno;
981 change_curseg(sbi, type, true);
982 }
983
984 curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, new_blkaddr) &
985 (sbi->blocks_per_seg - 1);
Haicheng Lie79efe32013-06-13 16:59:27 +0800986 __add_sum_entry(sbi, type, sum);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +0900987
988 refresh_sit_entry(sbi, old_blkaddr, new_blkaddr);
989
990 locate_dirty_segment(sbi, old_cursegno);
991 locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
992
993 mutex_unlock(&sit_i->sentry_lock);
994 mutex_unlock(&curseg->curseg_mutex);
995}
996
997void rewrite_node_page(struct f2fs_sb_info *sbi,
998 struct page *page, struct f2fs_summary *sum,
999 block_t old_blkaddr, block_t new_blkaddr)
1000{
1001 struct sit_info *sit_i = SIT_I(sbi);
1002 int type = CURSEG_WARM_NODE;
1003 struct curseg_info *curseg;
1004 unsigned int segno, old_cursegno;
1005 block_t next_blkaddr = next_blkaddr_of_node(page);
1006 unsigned int next_segno = GET_SEGNO(sbi, next_blkaddr);
1007
1008 curseg = CURSEG_I(sbi, type);
1009
1010 mutex_lock(&curseg->curseg_mutex);
1011 mutex_lock(&sit_i->sentry_lock);
1012
1013 segno = GET_SEGNO(sbi, new_blkaddr);
1014 old_cursegno = curseg->segno;
1015
1016 /* change the current segment */
1017 if (segno != curseg->segno) {
1018 curseg->next_segno = segno;
1019 change_curseg(sbi, type, true);
1020 }
1021 curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, new_blkaddr) &
1022 (sbi->blocks_per_seg - 1);
Haicheng Lie79efe32013-06-13 16:59:27 +08001023 __add_sum_entry(sbi, type, sum);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001024
1025 /* change the current log to the next block addr in advance */
1026 if (next_segno != segno) {
1027 curseg->next_segno = next_segno;
1028 change_curseg(sbi, type, true);
1029 }
1030 curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, next_blkaddr) &
1031 (sbi->blocks_per_seg - 1);
1032
1033 /* rewrite node page */
1034 set_page_writeback(page);
Fan Li63a0b7cb2013-12-09 16:09:00 +08001035 f2fs_submit_page_mbio(sbi, page, new_blkaddr, NODE, WRITE_SYNC);
Jaegeuk Kim93dfe2a2013-11-30 12:51:14 +09001036 f2fs_submit_merged_bio(sbi, NODE, true, WRITE);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001037 refresh_sit_entry(sbi, old_blkaddr, new_blkaddr);
1038
1039 locate_dirty_segment(sbi, old_cursegno);
1040 locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
1041
1042 mutex_unlock(&sit_i->sentry_lock);
1043 mutex_unlock(&curseg->curseg_mutex);
1044}
1045
Jaegeuk Kim93dfe2a2013-11-30 12:51:14 +09001046void f2fs_wait_on_page_writeback(struct page *page,
1047 enum page_type type, bool sync)
1048{
1049 struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
1050 if (PageWriteback(page)) {
1051 f2fs_submit_merged_bio(sbi, type, sync, WRITE);
1052 wait_on_page_writeback(page);
1053 }
1054}
1055
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001056static int read_compacted_summaries(struct f2fs_sb_info *sbi)
1057{
1058 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
1059 struct curseg_info *seg_i;
1060 unsigned char *kaddr;
1061 struct page *page;
1062 block_t start;
1063 int i, j, offset;
1064
1065 start = start_sum_block(sbi);
1066
1067 page = get_meta_page(sbi, start++);
1068 kaddr = (unsigned char *)page_address(page);
1069
1070 /* Step 1: restore nat cache */
1071 seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
1072 memcpy(&seg_i->sum_blk->n_nats, kaddr, SUM_JOURNAL_SIZE);
1073
1074 /* Step 2: restore sit cache */
1075 seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
1076 memcpy(&seg_i->sum_blk->n_sits, kaddr + SUM_JOURNAL_SIZE,
1077 SUM_JOURNAL_SIZE);
1078 offset = 2 * SUM_JOURNAL_SIZE;
1079
1080 /* Step 3: restore summary entries */
1081 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
1082 unsigned short blk_off;
1083 unsigned int segno;
1084
1085 seg_i = CURSEG_I(sbi, i);
1086 segno = le32_to_cpu(ckpt->cur_data_segno[i]);
1087 blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
1088 seg_i->next_segno = segno;
1089 reset_curseg(sbi, i, 0);
1090 seg_i->alloc_type = ckpt->alloc_type[i];
1091 seg_i->next_blkoff = blk_off;
1092
1093 if (seg_i->alloc_type == SSR)
1094 blk_off = sbi->blocks_per_seg;
1095
1096 for (j = 0; j < blk_off; j++) {
1097 struct f2fs_summary *s;
1098 s = (struct f2fs_summary *)(kaddr + offset);
1099 seg_i->sum_blk->entries[j] = *s;
1100 offset += SUMMARY_SIZE;
1101 if (offset + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
1102 SUM_FOOTER_SIZE)
1103 continue;
1104
1105 f2fs_put_page(page, 1);
1106 page = NULL;
1107
1108 page = get_meta_page(sbi, start++);
1109 kaddr = (unsigned char *)page_address(page);
1110 offset = 0;
1111 }
1112 }
1113 f2fs_put_page(page, 1);
1114 return 0;
1115}
1116
1117static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
1118{
1119 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
1120 struct f2fs_summary_block *sum;
1121 struct curseg_info *curseg;
1122 struct page *new;
1123 unsigned short blk_off;
1124 unsigned int segno = 0;
1125 block_t blk_addr = 0;
1126
1127 /* get segment number and block addr */
1128 if (IS_DATASEG(type)) {
1129 segno = le32_to_cpu(ckpt->cur_data_segno[type]);
1130 blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type -
1131 CURSEG_HOT_DATA]);
Jaegeuk Kim25ca9232012-11-28 16:12:41 +09001132 if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG))
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001133 blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type);
1134 else
1135 blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
1136 } else {
1137 segno = le32_to_cpu(ckpt->cur_node_segno[type -
1138 CURSEG_HOT_NODE]);
1139 blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type -
1140 CURSEG_HOT_NODE]);
Jaegeuk Kim25ca9232012-11-28 16:12:41 +09001141 if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG))
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001142 blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
1143 type - CURSEG_HOT_NODE);
1144 else
1145 blk_addr = GET_SUM_BLOCK(sbi, segno);
1146 }
1147
1148 new = get_meta_page(sbi, blk_addr);
1149 sum = (struct f2fs_summary_block *)page_address(new);
1150
1151 if (IS_NODESEG(type)) {
Jaegeuk Kim25ca9232012-11-28 16:12:41 +09001152 if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG)) {
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001153 struct f2fs_summary *ns = &sum->entries[0];
1154 int i;
1155 for (i = 0; i < sbi->blocks_per_seg; i++, ns++) {
1156 ns->version = 0;
1157 ns->ofs_in_node = 0;
1158 }
1159 } else {
1160 if (restore_node_summary(sbi, segno, sum)) {
1161 f2fs_put_page(new, 1);
1162 return -EINVAL;
1163 }
1164 }
1165 }
1166
1167 /* set uncompleted segment to curseg */
1168 curseg = CURSEG_I(sbi, type);
1169 mutex_lock(&curseg->curseg_mutex);
1170 memcpy(curseg->sum_blk, sum, PAGE_CACHE_SIZE);
1171 curseg->next_segno = segno;
1172 reset_curseg(sbi, type, 0);
1173 curseg->alloc_type = ckpt->alloc_type[type];
1174 curseg->next_blkoff = blk_off;
1175 mutex_unlock(&curseg->curseg_mutex);
1176 f2fs_put_page(new, 1);
1177 return 0;
1178}
1179
1180static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
1181{
1182 int type = CURSEG_HOT_DATA;
1183
Jaegeuk Kim25ca9232012-11-28 16:12:41 +09001184 if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) {
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001185 /* restore for compacted data summary */
1186 if (read_compacted_summaries(sbi))
1187 return -EINVAL;
1188 type = CURSEG_HOT_NODE;
1189 }
1190
1191 for (; type <= CURSEG_COLD_NODE; type++)
1192 if (read_normal_summaries(sbi, type))
1193 return -EINVAL;
1194 return 0;
1195}
1196
1197static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
1198{
1199 struct page *page;
1200 unsigned char *kaddr;
1201 struct f2fs_summary *summary;
1202 struct curseg_info *seg_i;
1203 int written_size = 0;
1204 int i, j;
1205
1206 page = grab_meta_page(sbi, blkaddr++);
1207 kaddr = (unsigned char *)page_address(page);
1208
1209 /* Step 1: write nat cache */
1210 seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
1211 memcpy(kaddr, &seg_i->sum_blk->n_nats, SUM_JOURNAL_SIZE);
1212 written_size += SUM_JOURNAL_SIZE;
1213
1214 /* Step 2: write sit cache */
1215 seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
1216 memcpy(kaddr + written_size, &seg_i->sum_blk->n_sits,
1217 SUM_JOURNAL_SIZE);
1218 written_size += SUM_JOURNAL_SIZE;
1219
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001220 /* Step 3: write summary entries */
1221 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
1222 unsigned short blkoff;
1223 seg_i = CURSEG_I(sbi, i);
1224 if (sbi->ckpt->alloc_type[i] == SSR)
1225 blkoff = sbi->blocks_per_seg;
1226 else
1227 blkoff = curseg_blkoff(sbi, i);
1228
1229 for (j = 0; j < blkoff; j++) {
1230 if (!page) {
1231 page = grab_meta_page(sbi, blkaddr++);
1232 kaddr = (unsigned char *)page_address(page);
1233 written_size = 0;
1234 }
1235 summary = (struct f2fs_summary *)(kaddr + written_size);
1236 *summary = seg_i->sum_blk->entries[j];
1237 written_size += SUMMARY_SIZE;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001238
1239 if (written_size + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
1240 SUM_FOOTER_SIZE)
1241 continue;
1242
Chao Yue8d61a72013-10-24 15:08:28 +08001243 set_page_dirty(page);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001244 f2fs_put_page(page, 1);
1245 page = NULL;
1246 }
1247 }
Chao Yue8d61a72013-10-24 15:08:28 +08001248 if (page) {
1249 set_page_dirty(page);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001250 f2fs_put_page(page, 1);
Chao Yue8d61a72013-10-24 15:08:28 +08001251 }
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001252}
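/*
 * Compacted summary layout (as written above): the first page starts with
 * the NAT journal and the SIT journal, followed by the packed summary
 * entries of the three data logs; a new meta page is grabbed whenever the
 * footer area of the current page would be overrun.
 */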
1253
1254static void write_normal_summaries(struct f2fs_sb_info *sbi,
1255 block_t blkaddr, int type)
1256{
1257 int i, end;
1258 if (IS_DATASEG(type))
1259 end = type + NR_CURSEG_DATA_TYPE;
1260 else
1261 end = type + NR_CURSEG_NODE_TYPE;
1262
1263 for (i = type; i < end; i++) {
1264 struct curseg_info *sum = CURSEG_I(sbi, i);
1265 mutex_lock(&sum->curseg_mutex);
1266 write_sum_page(sbi, sum->sum_blk, blkaddr + (i - type));
1267 mutex_unlock(&sum->curseg_mutex);
1268 }
1269}
1270
1271void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
1272{
Jaegeuk Kim25ca9232012-11-28 16:12:41 +09001273 if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG))
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001274 write_compacted_summaries(sbi, start_blk);
1275 else
1276 write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
1277}
1278
1279void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
1280{
Jaegeuk Kim25ca9232012-11-28 16:12:41 +09001281 if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG))
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001282 write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001283}
1284
1285int lookup_journal_in_cursum(struct f2fs_summary_block *sum, int type,
1286 unsigned int val, int alloc)
1287{
1288 int i;
1289
1290 if (type == NAT_JOURNAL) {
1291 for (i = 0; i < nats_in_cursum(sum); i++) {
1292 if (le32_to_cpu(nid_in_journal(sum, i)) == val)
1293 return i;
1294 }
1295 if (alloc && nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES)
1296 return update_nats_in_cursum(sum, 1);
1297 } else if (type == SIT_JOURNAL) {
1298 for (i = 0; i < sits_in_cursum(sum); i++)
1299 if (le32_to_cpu(segno_in_journal(sum, i)) == val)
1300 return i;
1301 if (alloc && sits_in_cursum(sum) < SIT_JOURNAL_ENTRIES)
1302 return update_sits_in_cursum(sum, 1);
1303 }
1304 return -1;
1305}
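/*
 * lookup_journal_in_cursum() returns the journal slot holding the given nid
 * (NAT_JOURNAL) or segno (SIT_JOURNAL); when it is not found and @alloc is
 * set, a new slot is appended if the journal still has room, otherwise -1 is
 * returned.
 */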
1306
1307static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
1308 unsigned int segno)
1309{
1310 struct sit_info *sit_i = SIT_I(sbi);
1311 unsigned int offset = SIT_BLOCK_OFFSET(sit_i, segno);
1312 block_t blk_addr = sit_i->sit_base_addr + offset;
1313
1314 check_seg_range(sbi, segno);
1315
1316 /* calculate sit block address */
1317 if (f2fs_test_bit(offset, sit_i->sit_bitmap))
1318 blk_addr += sit_i->sit_blocks;
1319
1320 return get_meta_page(sbi, blk_addr);
1321}
1322
1323static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
1324 unsigned int start)
1325{
1326 struct sit_info *sit_i = SIT_I(sbi);
1327 struct page *src_page, *dst_page;
1328 pgoff_t src_off, dst_off;
1329 void *src_addr, *dst_addr;
1330
1331 src_off = current_sit_addr(sbi, start);
1332 dst_off = next_sit_addr(sbi, src_off);
1333
1334 /* get current sit block page without lock */
1335 src_page = get_meta_page(sbi, src_off);
1336 dst_page = grab_meta_page(sbi, dst_off);
Jaegeuk Kim5d56b672013-10-29 15:14:54 +09001337 f2fs_bug_on(PageDirty(src_page));
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001338
1339 src_addr = page_address(src_page);
1340 dst_addr = page_address(dst_page);
1341 memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
1342
1343 set_page_dirty(dst_page);
1344 f2fs_put_page(src_page, 1);
1345
1346 set_to_next_sit(sit_i, start);
1347
1348 return dst_page;
1349}
1350
1351static bool flush_sits_in_journal(struct f2fs_sb_info *sbi)
1352{
1353 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
1354 struct f2fs_summary_block *sum = curseg->sum_blk;
1355 int i;
1356
1357 /*
1358 * If the journal area in the current summary is full of sit entries,
 1359 * all the sit entries will be flushed. Otherwise, the sit entries
 1360 * cannot be replaced with newly hot sit entries.
1361 */
1362 if (sits_in_cursum(sum) >= SIT_JOURNAL_ENTRIES) {
1363 for (i = sits_in_cursum(sum) - 1; i >= 0; i--) {
1364 unsigned int segno;
1365 segno = le32_to_cpu(segno_in_journal(sum, i));
1366 __mark_sit_entry_dirty(sbi, segno);
1367 }
1368 update_sits_in_cursum(sum, -sits_in_cursum(sum));
Haicheng Licffbfa62013-10-18 17:24:07 +08001369 return true;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001370 }
Haicheng Licffbfa62013-10-18 17:24:07 +08001371 return false;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001372}
1373
Jaegeuk Kim0a8165d2012-11-29 13:28:09 +09001374/*
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001375 * CP calls this function, which flushes SIT entries including sit_journal,
1376 * and moves prefree segs to free segs.
1377 */
1378void flush_sit_entries(struct f2fs_sb_info *sbi)
1379{
1380 struct sit_info *sit_i = SIT_I(sbi);
1381 unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
1382 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
1383 struct f2fs_summary_block *sum = curseg->sum_blk;
1384 unsigned long nsegs = TOTAL_SEGS(sbi);
1385 struct page *page = NULL;
1386 struct f2fs_sit_block *raw_sit = NULL;
1387 unsigned int start = 0, end = 0;
1388 unsigned int segno = -1;
1389 bool flushed;
1390
1391 mutex_lock(&curseg->curseg_mutex);
1392 mutex_lock(&sit_i->sentry_lock);
1393
1394 /*
1395 * "flushed" indicates whether sit entries in journal are flushed
1396 * to the SIT area or not.
1397 */
1398 flushed = flush_sits_in_journal(sbi);
1399
1400 while ((segno = find_next_bit(bitmap, nsegs, segno + 1)) < nsegs) {
1401 struct seg_entry *se = get_seg_entry(sbi, segno);
1402 int sit_offset, offset;
1403
1404 sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);
1405
Jaegeuk Kimb2955552013-11-12 14:49:56 +09001406 /* add discard candidates */
1407 if (SM_I(sbi)->nr_discards < SM_I(sbi)->max_discards)
1408 add_discard_addrs(sbi, segno, se);
1409
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001410 if (flushed)
1411 goto to_sit_page;
1412
1413 offset = lookup_journal_in_cursum(sum, SIT_JOURNAL, segno, 1);
1414 if (offset >= 0) {
1415 segno_in_journal(sum, offset) = cpu_to_le32(segno);
1416 seg_info_to_raw_sit(se, &sit_in_journal(sum, offset));
1417 goto flush_done;
1418 }
1419to_sit_page:
1420 if (!page || (start > segno) || (segno > end)) {
1421 if (page) {
1422 f2fs_put_page(page, 1);
1423 page = NULL;
1424 }
1425
1426 start = START_SEGNO(sit_i, segno);
1427 end = start + SIT_ENTRY_PER_BLOCK - 1;
1428
1429 /* read sit block that will be updated */
1430 page = get_next_sit_page(sbi, start);
1431 raw_sit = page_address(page);
1432 }
1433
 1434 /* update entry in SIT block */
1435 seg_info_to_raw_sit(se, &raw_sit->entries[sit_offset]);
1436flush_done:
1437 __clear_bit(segno, bitmap);
1438 sit_i->dirty_sentries--;
1439 }
1440 mutex_unlock(&sit_i->sentry_lock);
1441 mutex_unlock(&curseg->curseg_mutex);
1442
1443 /* writeout last modified SIT block */
1444 f2fs_put_page(page, 1);
1445
1446 set_prefree_as_free_segments(sbi);
1447}
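/*
 * flush_sit_entries() flow: if the in-summary journal is full, its entries
 * are first marked dirty so they get rewritten through the SIT blocks; then
 * every dirty segment is either recorded in a journal slot or written into
 * its on-disk SIT block (collecting small discard candidates along the way),
 * and finally the prefree segments are released as free.
 */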
1448
1449static int build_sit_info(struct f2fs_sb_info *sbi)
1450{
1451 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
1452 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
1453 struct sit_info *sit_i;
1454 unsigned int sit_segs, start;
1455 char *src_bitmap, *dst_bitmap;
1456 unsigned int bitmap_size;
1457
1458 /* allocate memory for SIT information */
1459 sit_i = kzalloc(sizeof(struct sit_info), GFP_KERNEL);
1460 if (!sit_i)
1461 return -ENOMEM;
1462
1463 SM_I(sbi)->sit_info = sit_i;
1464
1465 sit_i->sentries = vzalloc(TOTAL_SEGS(sbi) * sizeof(struct seg_entry));
1466 if (!sit_i->sentries)
1467 return -ENOMEM;
1468
1469 bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));
1470 sit_i->dirty_sentries_bitmap = kzalloc(bitmap_size, GFP_KERNEL);
1471 if (!sit_i->dirty_sentries_bitmap)
1472 return -ENOMEM;
1473
1474 for (start = 0; start < TOTAL_SEGS(sbi); start++) {
1475 sit_i->sentries[start].cur_valid_map
1476 = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
1477 sit_i->sentries[start].ckpt_valid_map
1478 = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
1479 if (!sit_i->sentries[start].cur_valid_map
1480 || !sit_i->sentries[start].ckpt_valid_map)
1481 return -ENOMEM;
1482 }
1483
1484 if (sbi->segs_per_sec > 1) {
Jaegeuk Kim53cf9522013-03-31 12:39:49 +09001485 sit_i->sec_entries = vzalloc(TOTAL_SECS(sbi) *
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001486 sizeof(struct sec_entry));
1487 if (!sit_i->sec_entries)
1488 return -ENOMEM;
1489 }
1490
1491 /* get information related with SIT */
1492 sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;
1493
 1494 /* setup SIT bitmap from checkpoint pack */
1495 bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
1496 src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);
1497
Alexandru Gheorghiu79b57932013-03-28 02:24:53 +02001498 dst_bitmap = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001499 if (!dst_bitmap)
1500 return -ENOMEM;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001501
1502 /* init SIT information */
1503 sit_i->s_ops = &default_salloc_ops;
1504
1505 sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
1506 sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
1507 sit_i->written_valid_blocks = le64_to_cpu(ckpt->valid_block_count);
1508 sit_i->sit_bitmap = dst_bitmap;
1509 sit_i->bitmap_size = bitmap_size;
1510 sit_i->dirty_sentries = 0;
1511 sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
1512 sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
1513 sit_i->mounted_time = CURRENT_TIME_SEC.tv_sec;
1514 mutex_init(&sit_i->sentry_lock);
1515 return 0;
1516}
1517
1518static int build_free_segmap(struct f2fs_sb_info *sbi)
1519{
1520 struct f2fs_sm_info *sm_info = SM_I(sbi);
1521 struct free_segmap_info *free_i;
1522 unsigned int bitmap_size, sec_bitmap_size;
1523
1524 /* allocate memory for free segmap information */
1525 free_i = kzalloc(sizeof(struct free_segmap_info), GFP_KERNEL);
1526 if (!free_i)
1527 return -ENOMEM;
1528
1529 SM_I(sbi)->free_info = free_i;
1530
1531 bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));
1532 free_i->free_segmap = kmalloc(bitmap_size, GFP_KERNEL);
1533 if (!free_i->free_segmap)
1534 return -ENOMEM;
1535
Jaegeuk Kim53cf9522013-03-31 12:39:49 +09001536 sec_bitmap_size = f2fs_bitmap_size(TOTAL_SECS(sbi));
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001537 free_i->free_secmap = kmalloc(sec_bitmap_size, GFP_KERNEL);
1538 if (!free_i->free_secmap)
1539 return -ENOMEM;
1540
1541 /* set all segments as dirty temporarily */
1542 memset(free_i->free_segmap, 0xff, bitmap_size);
1543 memset(free_i->free_secmap, 0xff, sec_bitmap_size);
1544
1545 /* init free segmap information */
1546 free_i->start_segno =
1547 (unsigned int) GET_SEGNO_FROM_SEG0(sbi, sm_info->main_blkaddr);
1548 free_i->free_segments = 0;
1549 free_i->free_sections = 0;
1550 rwlock_init(&free_i->segmap_lock);
1551 return 0;
1552}
1553
1554static int build_curseg(struct f2fs_sb_info *sbi)
1555{
Namjae Jeon1042d602012-12-01 10:56:13 +09001556 struct curseg_info *array;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001557 int i;
1558
1559 array = kzalloc(sizeof(*array) * NR_CURSEG_TYPE, GFP_KERNEL);
1560 if (!array)
1561 return -ENOMEM;
1562
1563 SM_I(sbi)->curseg_array = array;
1564
1565 for (i = 0; i < NR_CURSEG_TYPE; i++) {
1566 mutex_init(&array[i].curseg_mutex);
1567 array[i].sum_blk = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
1568 if (!array[i].sum_blk)
1569 return -ENOMEM;
1570 array[i].segno = NULL_SEGNO;
1571 array[i].next_blkoff = 0;
1572 }
1573 return restore_curseg_summaries(sbi);
1574}
1575
Chao Yu74de5932013-11-22 09:09:59 +08001576static int ra_sit_pages(struct f2fs_sb_info *sbi, int start, int nrpages)
1577{
1578 struct address_space *mapping = sbi->meta_inode->i_mapping;
1579 struct page *page;
1580 block_t blk_addr, prev_blk_addr = 0;
1581 int sit_blk_cnt = SIT_BLK_CNT(sbi);
1582 int blkno = start;
1583
1584 for (; blkno < start + nrpages && blkno < sit_blk_cnt; blkno++) {
1585
1586 blk_addr = current_sit_addr(sbi, blkno * SIT_ENTRY_PER_BLOCK);
1587
1588 if (blkno != start && prev_blk_addr + 1 != blk_addr)
1589 break;
1590 prev_blk_addr = blk_addr;
1591repeat:
1592 page = grab_cache_page(mapping, blk_addr);
1593 if (!page) {
1594 cond_resched();
1595 goto repeat;
1596 }
1597 if (PageUptodate(page)) {
1598 mark_page_accessed(page);
1599 f2fs_put_page(page, 1);
1600 continue;
1601 }
1602
Fan Li63a0b7cb2013-12-09 16:09:00 +08001603 f2fs_submit_page_mbio(sbi, page, blk_addr, META, READ_SYNC);
Chao Yu74de5932013-11-22 09:09:59 +08001604
1605 mark_page_accessed(page);
1606 f2fs_put_page(page, 0);
1607 }
1608
Jaegeuk Kim93dfe2a2013-11-30 12:51:14 +09001609 f2fs_submit_merged_bio(sbi, META, true, READ);
Chao Yu74de5932013-11-22 09:09:59 +08001610 return blkno - start;
1611}
1612
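/*
 * Load every SIT entry into memory.  SIT blocks are read ahead in bursts via
 * ra_sit_pages(); for each segment, an entry journaled in the cold-data
 * summary block takes precedence over the on-disk SIT block.
 */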
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001613static void build_sit_entries(struct f2fs_sb_info *sbi)
1614{
1615 struct sit_info *sit_i = SIT_I(sbi);
1616 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
1617 struct f2fs_summary_block *sum = curseg->sum_blk;
Chao Yu74de5932013-11-22 09:09:59 +08001618 int sit_blk_cnt = SIT_BLK_CNT(sbi);
1619 unsigned int i, start, end;
1620 unsigned int readed, start_blk = 0;
1621 int nrpages = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001622
Chao Yu74de5932013-11-22 09:09:59 +08001623 do {
1624 readed = ra_sit_pages(sbi, start_blk, nrpages);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001625
Chao Yu74de5932013-11-22 09:09:59 +08001626 start = start_blk * sit_i->sents_per_block;
1627 end = (start_blk + readed) * sit_i->sents_per_block;
1628
1629 for (; start < end && start < TOTAL_SEGS(sbi); start++) {
1630 struct seg_entry *se = &sit_i->sentries[start];
1631 struct f2fs_sit_block *sit_blk;
1632 struct f2fs_sit_entry sit;
1633 struct page *page;
1634
1635 mutex_lock(&curseg->curseg_mutex);
1636 for (i = 0; i < sits_in_cursum(sum); i++) {
1637 if (le32_to_cpu(segno_in_journal(sum, i)) == start) {
1638 sit = sit_in_journal(sum, i);
1639 mutex_unlock(&curseg->curseg_mutex);
1640 goto got_it;
1641 }
1642 }
1643 mutex_unlock(&curseg->curseg_mutex);
1644
1645 page = get_current_sit_page(sbi, start);
1646 sit_blk = (struct f2fs_sit_block *)page_address(page);
1647 sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
1648 f2fs_put_page(page, 1);
1649got_it:
1650 check_block_count(sbi, start, &sit);
1651 seg_info_from_raw_sit(se, &sit);
1652 if (sbi->segs_per_sec > 1) {
1653 struct sec_entry *e = get_sec_entry(sbi, start);
1654 e->valid_blocks += se->valid_blocks;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001655 }
1656 }
Chao Yu74de5932013-11-22 09:09:59 +08001657 start_blk += readed;
1658 } while (start_blk < sit_blk_cnt);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001659}
1660
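/*
 * Mark segments with no valid blocks as free, then flag the segments backing
 * the active logs as in-use.
 */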
1661static void init_free_segmap(struct f2fs_sb_info *sbi)
1662{
1663 unsigned int start;
1664 int type;
1665
1666 for (start = 0; start < TOTAL_SEGS(sbi); start++) {
1667 struct seg_entry *sentry = get_seg_entry(sbi, start);
1668 if (!sentry->valid_blocks)
1669 __set_free(sbi, start);
1670 }
1671
1672	/* mark the current segments as in-use */
1673 for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) {
1674 struct curseg_info *curseg_t = CURSEG_I(sbi, type);
1675 __set_test_and_inuse(sbi, curseg_t->segno);
1676 }
1677}
1678
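/*
 * Walk the in-use segments and put every partially valid one (neither empty
 * nor completely full) on the DIRTY list.
 */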
1679static void init_dirty_segmap(struct f2fs_sb_info *sbi)
1680{
1681 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
1682 struct free_segmap_info *free_i = FREE_I(sbi);
Namjae Jeon8736fbf2013-06-16 09:49:11 +09001683 unsigned int segno = 0, offset = 0, total_segs = TOTAL_SEGS(sbi);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001684 unsigned short valid_blocks;
1685
Namjae Jeon8736fbf2013-06-16 09:49:11 +09001686 while (1) {
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001687 /* find dirty segment based on free segmap */
Namjae Jeon8736fbf2013-06-16 09:49:11 +09001688 segno = find_next_inuse(free_i, total_segs, offset);
1689 if (segno >= total_segs)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001690 break;
1691 offset = segno + 1;
1692 valid_blocks = get_valid_blocks(sbi, segno, 0);
1693 if (valid_blocks >= sbi->blocks_per_seg || !valid_blocks)
1694 continue;
1695 mutex_lock(&dirty_i->seglist_lock);
1696 __locate_dirty_segment(sbi, segno, DIRTY);
1697 mutex_unlock(&dirty_i->seglist_lock);
1698 }
1699}
1700
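/*
 * Allocate the per-section bitmap used by the garbage collector to mark
 * victim sections.
 */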
Jaegeuk Kim5ec4e492013-03-31 13:26:03 +09001701static int init_victim_secmap(struct f2fs_sb_info *sbi)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001702{
1703 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
Jaegeuk Kim5ec4e492013-03-31 13:26:03 +09001704 unsigned int bitmap_size = f2fs_bitmap_size(TOTAL_SECS(sbi));
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001705
Jaegeuk Kim5ec4e492013-03-31 13:26:03 +09001706 dirty_i->victim_secmap = kzalloc(bitmap_size, GFP_KERNEL);
1707 if (!dirty_i->victim_secmap)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001708 return -ENOMEM;
1709 return 0;
1710}
1711
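/*
 * Allocate the dirty segment bitmaps (one per dirty type), populate the
 * DIRTY list from the current SIT state, and set up the victim section map.
 */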
1712static int build_dirty_segmap(struct f2fs_sb_info *sbi)
1713{
1714 struct dirty_seglist_info *dirty_i;
1715 unsigned int bitmap_size, i;
1716
1717 /* allocate memory for dirty segments list information */
1718 dirty_i = kzalloc(sizeof(struct dirty_seglist_info), GFP_KERNEL);
1719 if (!dirty_i)
1720 return -ENOMEM;
1721
1722 SM_I(sbi)->dirty_info = dirty_i;
1723 mutex_init(&dirty_i->seglist_lock);
1724
1725 bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));
1726
1727 for (i = 0; i < NR_DIRTY_TYPE; i++) {
1728 dirty_i->dirty_segmap[i] = kzalloc(bitmap_size, GFP_KERNEL);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001729 if (!dirty_i->dirty_segmap[i])
1730 return -ENOMEM;
1731 }
1732
1733 init_dirty_segmap(sbi);
Jaegeuk Kim5ec4e492013-03-31 13:26:03 +09001734 return init_victim_secmap(sbi);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001735}
1736
Jaegeuk Kim0a8165d2012-11-29 13:28:09 +09001737/*
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001738 * Update min, max modified time for cost-benefit GC algorithm
1739 */
1740static void init_min_max_mtime(struct f2fs_sb_info *sbi)
1741{
1742 struct sit_info *sit_i = SIT_I(sbi);
1743 unsigned int segno;
1744
1745 mutex_lock(&sit_i->sentry_lock);
1746
1747 sit_i->min_mtime = LLONG_MAX;
1748
1749 for (segno = 0; segno < TOTAL_SEGS(sbi); segno += sbi->segs_per_sec) {
1750 unsigned int i;
1751 unsigned long long mtime = 0;
1752
1753 for (i = 0; i < sbi->segs_per_sec; i++)
1754 mtime += get_seg_entry(sbi, segno + i)->mtime;
1755
1756 mtime = div_u64(mtime, sbi->segs_per_sec);
1757
1758 if (sit_i->min_mtime > mtime)
1759 sit_i->min_mtime = mtime;
1760 }
1761 sit_i->max_mtime = get_mtime(sbi);
1762 mutex_unlock(&sit_i->sentry_lock);
1763}
1764
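/*
 * Top-level constructor for the segment manager: allocate f2fs_sm_info, copy
 * the static layout (segment0/main/SSA addresses, segment counts, reserved
 * and overprovision segments) from the raw superblock and checkpoint, then
 * build the SIT, free segmap, current segments and dirty segmap in order.
 * A rough, hypothetical sketch of the mount-time call (the real call site
 * lives in super.c and is not shown here):
 *
 *	err = build_segment_manager(sbi);
 *	if (err)
 *		destroy_segment_manager(sbi);	/\* sketch only: unwind on error *\/
 */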
1765int build_segment_manager(struct f2fs_sb_info *sbi)
1766{
1767 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
1768 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
Namjae Jeon1042d602012-12-01 10:56:13 +09001769 struct f2fs_sm_info *sm_info;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001770 int err;
1771
1772 sm_info = kzalloc(sizeof(struct f2fs_sm_info), GFP_KERNEL);
1773 if (!sm_info)
1774 return -ENOMEM;
1775
1776 /* init sm info */
1777 sbi->sm_info = sm_info;
1778 INIT_LIST_HEAD(&sm_info->wblist_head);
1779 spin_lock_init(&sm_info->wblist_lock);
1780 sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
1781 sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
1782 sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
1783 sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
1784 sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
1785 sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
1786 sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
Jaegeuk Kim81eb8d62013-10-24 13:31:34 +09001787 sm_info->rec_prefree_segments = DEF_RECLAIM_PREFREE_SEGMENTS;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001788
Jaegeuk Kim7fd9e542013-11-15 13:55:58 +09001789 INIT_LIST_HEAD(&sm_info->discard_list);
1790 sm_info->nr_discards = 0;
1791 sm_info->max_discards = 0;
1792
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001793 err = build_sit_info(sbi);
1794 if (err)
1795 return err;
1796 err = build_free_segmap(sbi);
1797 if (err)
1798 return err;
1799 err = build_curseg(sbi);
1800 if (err)
1801 return err;
1802
1803 /* reinit free segmap based on SIT */
1804 build_sit_entries(sbi);
1805
1806 init_free_segmap(sbi);
1807 err = build_dirty_segmap(sbi);
1808 if (err)
1809 return err;
1810
1811 init_min_max_mtime(sbi);
1812 return 0;
1813}
1814
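/*
 * Teardown path: the helpers below free what build_segment_manager() and its
 * sub-builders allocated.  discard_dirty_segmap() drops a single dirty-type
 * bitmap under the seglist lock.
 */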
1815static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
1816 enum dirty_type dirty_type)
1817{
1818 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
1819
1820 mutex_lock(&dirty_i->seglist_lock);
1821 kfree(dirty_i->dirty_segmap[dirty_type]);
1822 dirty_i->nr_dirty[dirty_type] = 0;
1823 mutex_unlock(&dirty_i->seglist_lock);
1824}
1825
Jaegeuk Kim5ec4e492013-03-31 13:26:03 +09001826static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001827{
1828 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
Jaegeuk Kim5ec4e492013-03-31 13:26:03 +09001829 kfree(dirty_i->victim_secmap);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001830}
1831
1832static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
1833{
1834 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
1835 int i;
1836
1837 if (!dirty_i)
1838 return;
1839
1840 /* discard pre-free/dirty segments list */
1841 for (i = 0; i < NR_DIRTY_TYPE; i++)
1842 discard_dirty_segmap(sbi, i);
1843
Jaegeuk Kim5ec4e492013-03-31 13:26:03 +09001844 destroy_victim_secmap(sbi);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001845 SM_I(sbi)->dirty_info = NULL;
1846 kfree(dirty_i);
1847}
1848
1849static void destroy_curseg(struct f2fs_sb_info *sbi)
1850{
1851 struct curseg_info *array = SM_I(sbi)->curseg_array;
1852 int i;
1853
1854 if (!array)
1855 return;
1856 SM_I(sbi)->curseg_array = NULL;
1857 for (i = 0; i < NR_CURSEG_TYPE; i++)
1858 kfree(array[i].sum_blk);
1859 kfree(array);
1860}
1861
1862static void destroy_free_segmap(struct f2fs_sb_info *sbi)
1863{
1864 struct free_segmap_info *free_i = SM_I(sbi)->free_info;
1865 if (!free_i)
1866 return;
1867 SM_I(sbi)->free_info = NULL;
1868 kfree(free_i->free_segmap);
1869 kfree(free_i->free_secmap);
1870 kfree(free_i);
1871}
1872
1873static void destroy_sit_info(struct f2fs_sb_info *sbi)
1874{
1875 struct sit_info *sit_i = SIT_I(sbi);
1876 unsigned int start;
1877
1878 if (!sit_i)
1879 return;
1880
1881 if (sit_i->sentries) {
1882 for (start = 0; start < TOTAL_SEGS(sbi); start++) {
1883 kfree(sit_i->sentries[start].cur_valid_map);
1884 kfree(sit_i->sentries[start].ckpt_valid_map);
1885 }
1886 }
1887 vfree(sit_i->sentries);
1888 vfree(sit_i->sec_entries);
1889 kfree(sit_i->dirty_sentries_bitmap);
1890
1891 SM_I(sbi)->sit_info = NULL;
1892 kfree(sit_i->sit_bitmap);
1893 kfree(sit_i);
1894}
1895
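/*
 * Release all segment manager state in reverse order of construction; each
 * destroy helper tolerates structures that were never allocated.
 */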
1896void destroy_segment_manager(struct f2fs_sb_info *sbi)
1897{
1898 struct f2fs_sm_info *sm_info = SM_I(sbi);
Chao Yu3b03f722013-11-06 09:12:04 +08001899 if (!sm_info)
1900 return;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001901 destroy_dirty_segmap(sbi);
1902 destroy_curseg(sbi);
1903 destroy_free_segmap(sbi);
1904 destroy_sit_info(sbi);
1905 sbi->sm_info = NULL;
1906 kfree(sm_info);
1907}
Jaegeuk Kim7fd9e542013-11-15 13:55:58 +09001908
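/*
 * Slab cache backing struct discard_entry objects, which record candidate
 * discard ranges; created at module init and destroyed at module exit.
 */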
1909int __init create_segment_manager_caches(void)
1910{
1911 discard_entry_slab = f2fs_kmem_cache_create("discard_entry",
1912 sizeof(struct discard_entry), NULL);
1913 if (!discard_entry_slab)
1914 return -ENOMEM;
1915 return 0;
1916}
1917
1918void destroy_segment_manager_caches(void)
1919{
1920 kmem_cache_destroy(discard_entry_slab);
1921}