/*
 * fs/f2fs/segment.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/prefetch.h>
#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "segment.h"
#include "node.h"
#include <trace/events/f2fs.h>

#define __reverse_ffz(x) __reverse_ffs(~(x))

static struct kmem_cache *discard_entry_slab;

/*
 * __reverse_ffs is copied from include/asm-generic/bitops/__ffs.h since
 * MSB and LSB are reversed in a byte by f2fs_set_bit.
 */
static inline unsigned long __reverse_ffs(unsigned long word)
{
	int num = 0;

#if BITS_PER_LONG == 64
	if ((word & 0xffffffff) == 0) {
		num += 32;
		word >>= 32;
	}
#endif
	if ((word & 0xffff) == 0) {
		num += 16;
		word >>= 16;
	}
	if ((word & 0xff) == 0) {
		num += 8;
		word >>= 8;
	}
	if ((word & 0xf0) == 0)
		num += 4;
	else
		word >>= 4;
	if ((word & 0xc) == 0)
		num += 2;
	else
		word >>= 2;
	if ((word & 0x2) == 0)
		num += 1;
	return num;
}

/*
 * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because
 * f2fs_set_bit makes MSB and LSB reversed in a byte.
 * Example:
 *                             LSB <--> MSB
 *   f2fs_set_bit(0, bitmap) => 0000 0001
 *   f2fs_set_bit(7, bitmap) => 1000 0000
 */
static unsigned long __find_rev_next_bit(const unsigned long *addr,
			unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + BIT_WORD(offset);
	unsigned long result = offset & ~(BITS_PER_LONG - 1);
	unsigned long tmp;
	unsigned long mask, submask;
	unsigned long quot, rest;

	if (offset >= size)
		return size;

	size -= result;
	offset %= BITS_PER_LONG;
	if (!offset)
		goto aligned;

	tmp = *(p++);
	quot = (offset >> 3) << 3;
	rest = offset & 0x7;
	mask = ~0UL << quot;
	submask = (unsigned char)(0xff << rest) >> rest;
	submask <<= quot;
	mask &= submask;
	tmp &= mask;
	if (size < BITS_PER_LONG)
		goto found_first;
	if (tmp)
		goto found_middle;

	size -= BITS_PER_LONG;
	result += BITS_PER_LONG;
aligned:
	while (size & ~(BITS_PER_LONG-1)) {
		tmp = *(p++);
		if (tmp)
			goto found_middle;
		result += BITS_PER_LONG;
		size -= BITS_PER_LONG;
	}
	if (!size)
		return result;
	tmp = *p;
found_first:
	tmp &= (~0UL >> (BITS_PER_LONG - size));
	if (tmp == 0UL)		/* Are any bits set? */
		return result + size;	/* Nope. */
found_middle:
	return result + __reverse_ffs(tmp);
}

static unsigned long __find_rev_next_zero_bit(const unsigned long *addr,
			unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + BIT_WORD(offset);
	unsigned long result = offset & ~(BITS_PER_LONG - 1);
	unsigned long tmp;
	unsigned long mask, submask;
	unsigned long quot, rest;

	if (offset >= size)
		return size;

	size -= result;
	offset %= BITS_PER_LONG;
	if (!offset)
		goto aligned;

	tmp = *(p++);
	quot = (offset >> 3) << 3;
	rest = offset & 0x7;
	mask = ~(~0UL << quot);
	submask = (unsigned char)~((unsigned char)(0xff << rest) >> rest);
	submask <<= quot;
	mask += submask;
	tmp |= mask;
	if (size < BITS_PER_LONG)
		goto found_first;
	if (~tmp)
		goto found_middle;

	size -= BITS_PER_LONG;
	result += BITS_PER_LONG;
aligned:
	while (size & ~(BITS_PER_LONG - 1)) {
		tmp = *(p++);
		if (~tmp)
			goto found_middle;
		result += BITS_PER_LONG;
		size -= BITS_PER_LONG;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp |= ~0UL << size;
	if (tmp == ~0UL)	/* Are any bits zero? */
		return result + size;	/* Nope. */
found_middle:
	return result + __reverse_ffz(tmp);
}
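
/*
 * Example usage of the reversed-order helpers (illustrative only;
 * handle_valid_block() is a hypothetical callback, not a function in
 * this file). Walking every valid block in one segment's bitmap:
 *
 *	for (off = __find_rev_next_bit(map, max, 0); off < max;
 *	     off = __find_rev_next_bit(map, max, off + 1))
 *		handle_valid_block(off);
 *
 * Real callers appear in add_discard_addrs() and __next_free_blkoff()
 * below.
 */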

/*
 * This function balances dirty node and dentry pages.
 * In addition, it controls garbage collection.
 */
void f2fs_balance_fs(struct f2fs_sb_info *sbi)
{
	/*
	 * We should do GC or fall back to a checkpoint if there are
	 * too many dirty dir/node pages without enough free segments.
	 */
	if (has_not_enough_free_secs(sbi, 0)) {
		mutex_lock(&sbi->gc_mutex);
		f2fs_gc(sbi);
	}
}

void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
{
	/* check the # of cached NAT entries and prefree segments */
	if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK) ||
				excess_prefree_segs(sbi))
		f2fs_sync_fs(sbi->sb, true);
}

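/*
 * Issue all pending flush commands with a single WRITE_FLUSH bio.
 * f2fs_issue_flush() links waiters onto fcc->issue_list; this kthread
 * moves the whole list to fcc->dispatch_list, submits one flush, and
 * completes every waiter with the shared return value.
 */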
static int issue_flush_thread(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info;
	wait_queue_head_t *q = &fcc->flush_wait_queue;
repeat:
	if (kthread_should_stop())
		return 0;

	spin_lock(&fcc->issue_lock);
	if (fcc->issue_list) {
		fcc->dispatch_list = fcc->issue_list;
		fcc->issue_list = fcc->issue_tail = NULL;
	}
	spin_unlock(&fcc->issue_lock);

	if (fcc->dispatch_list) {
		struct bio *bio = bio_alloc(GFP_NOIO, 0);
		struct flush_cmd *cmd, *next;
		int ret;

		bio->bi_bdev = sbi->sb->s_bdev;
		ret = submit_bio_wait(WRITE_FLUSH, bio);

		for (cmd = fcc->dispatch_list; cmd; cmd = next) {
			cmd->ret = ret;
			next = cmd->next;
			complete(&cmd->wait);
		}
		bio_put(bio);
		fcc->dispatch_list = NULL;
	}

	wait_event_interruptible(*q,
			kthread_should_stop() || fcc->issue_list);
	goto repeat;
}

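/*
 * Ask the device to flush its write cache. NOBARRIER skips the flush
 * entirely. Without FLUSH_MERGE the flush is issued synchronously;
 * with it, the request is queued for issue_flush_thread() above and
 * the caller sleeps on a completion.
 */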
int f2fs_issue_flush(struct f2fs_sb_info *sbi)
{
	struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info;
	struct flush_cmd cmd;

	trace_f2fs_issue_flush(sbi->sb, test_opt(sbi, NOBARRIER),
					test_opt(sbi, FLUSH_MERGE));

	if (test_opt(sbi, NOBARRIER))
		return 0;

	if (!test_opt(sbi, FLUSH_MERGE))
		return blkdev_issue_flush(sbi->sb->s_bdev, GFP_KERNEL, NULL);

	init_completion(&cmd.wait);
	cmd.next = NULL;

	spin_lock(&fcc->issue_lock);
	if (fcc->issue_list)
		fcc->issue_tail->next = &cmd;
	else
		fcc->issue_list = &cmd;
	fcc->issue_tail = &cmd;
	spin_unlock(&fcc->issue_lock);

	if (!fcc->dispatch_list)
		wake_up(&fcc->flush_wait_queue);

	wait_for_completion(&cmd.wait);

	return cmd.ret;
}

int create_flush_cmd_control(struct f2fs_sb_info *sbi)
{
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	struct flush_cmd_control *fcc;
	int err = 0;

	fcc = kzalloc(sizeof(struct flush_cmd_control), GFP_KERNEL);
	if (!fcc)
		return -ENOMEM;
	spin_lock_init(&fcc->issue_lock);
	init_waitqueue_head(&fcc->flush_wait_queue);
	SM_I(sbi)->cmd_control_info = fcc;
	fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
				"f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(fcc->f2fs_issue_flush)) {
		err = PTR_ERR(fcc->f2fs_issue_flush);
		kfree(fcc);
		SM_I(sbi)->cmd_control_info = NULL;
		return err;
	}

	return err;
}

void destroy_flush_cmd_control(struct f2fs_sb_info *sbi)
{
	struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info;

	if (fcc && fcc->f2fs_issue_flush)
		kthread_stop(fcc->f2fs_issue_flush);
	kfree(fcc);
	SM_I(sbi)->cmd_control_info = NULL;
}

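/*
 * Dirty segment tracking: dirty_segmap[] keeps one bitmap per dirty
 * type (plus per-temperature bitmaps under DIRTY, keyed by the segment
 * entry's type), and nr_dirty[] mirrors the bit counts. The two
 * helpers below keep both in sync.
 */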
static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	/* need not be added */
	if (IS_CURSEG(sbi, segno))
		return;

	if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]++;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = sentry->type;

		if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t]))
			dirty_i->nr_dirty[t]++;
	}
}

static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]--;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = sentry->type;

		if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
			dirty_i->nr_dirty[t]--;

		if (get_valid_blocks(sbi, segno, sbi->segs_per_sec) == 0)
			clear_bit(GET_SECNO(sbi, segno),
						dirty_i->victim_secmap);
	}
}

/*
 * This should not incur errors such as -ENOMEM.
 * Adding a dirty entry into the seglist is not a critical operation.
 * If a given segment is one of the current working segments, it won't be added.
 */
static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned short valid_blocks;

	if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
		return;

	mutex_lock(&dirty_i->seglist_lock);

	valid_blocks = get_valid_blocks(sbi, segno, 0);

	if (valid_blocks == 0) {
		__locate_dirty_segment(sbi, segno, PRE);
		__remove_dirty_segment(sbi, segno, DIRTY);
	} else if (valid_blocks < sbi->blocks_per_seg) {
		__locate_dirty_segment(sbi, segno, DIRTY);
	} else {
		/* Recovery routine with SSR needs this */
		__remove_dirty_segment(sbi, segno, DIRTY);
	}

	mutex_unlock(&dirty_i->seglist_lock);
}

static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
				block_t blkstart, block_t blklen)
{
	sector_t start = SECTOR_FROM_BLOCK(sbi, blkstart);
	sector_t len = SECTOR_FROM_BLOCK(sbi, blklen);
	trace_f2fs_issue_discard(sbi->sb, blkstart, blklen);
	return blkdev_issue_discard(sbi->sb->s_bdev, start, len, GFP_NOFS, 0);
}

void discard_next_dnode(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	if (f2fs_issue_discard(sbi, blkaddr, 1)) {
		struct page *page = grab_meta_page(sbi, blkaddr);
		/* zero-filled page */
		set_page_dirty(page);
		f2fs_put_page(page, 1);
	}
}

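/*
 * Collect small discard candidates for one segment: dmap marks blocks
 * that were valid at the last checkpoint (ckpt_valid_map) but are no
 * longer valid now (cur_valid_map), so each contiguous run can be
 * queued for discard once the next checkpoint completes.
 */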
static void add_discard_addrs(struct f2fs_sb_info *sbi,
			unsigned int segno, struct seg_entry *se)
{
	struct list_head *head = &SM_I(sbi)->discard_list;
	struct discard_entry *new;
	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
	int max_blocks = sbi->blocks_per_seg;
	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
	unsigned long dmap[entries];
	unsigned int start = 0, end = -1;
	int i;

	if (!test_opt(sbi, DISCARD))
		return;

	/* zero block will be discarded through the prefree list */
	if (!se->valid_blocks || se->valid_blocks == max_blocks)
		return;

	/* SIT_VBLOCK_MAP_SIZE should be multiple of sizeof(unsigned long) */
	for (i = 0; i < entries; i++)
		dmap[i] = (cur_map[i] ^ ckpt_map[i]) & ckpt_map[i];

	while (SM_I(sbi)->nr_discards <= SM_I(sbi)->max_discards) {
		start = __find_rev_next_bit(dmap, max_blocks, end + 1);
		if (start >= max_blocks)
			break;

		end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1);

		new = f2fs_kmem_cache_alloc(discard_entry_slab, GFP_NOFS);
		INIT_LIST_HEAD(&new->list);
		new->blkaddr = START_BLOCK(sbi, segno) + start;
		new->len = end - start;

		list_add_tail(&new->list, head);
		SM_I(sbi)->nr_discards += end - start;
	}
}

/*
 * Should call clear_prefree_segments after checkpoint is done.
 */
static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int segno;
	unsigned int total_segs = TOTAL_SEGS(sbi);

	mutex_lock(&dirty_i->seglist_lock);
	for_each_set_bit(segno, dirty_i->dirty_segmap[PRE], total_segs)
		__set_test_and_free(sbi, segno);
	mutex_unlock(&dirty_i->seglist_lock);
}

void clear_prefree_segments(struct f2fs_sb_info *sbi)
{
	struct list_head *head = &(SM_I(sbi)->discard_list);
	struct discard_entry *entry, *this;
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
	unsigned int total_segs = TOTAL_SEGS(sbi);
	unsigned int start = 0, end = -1;

	mutex_lock(&dirty_i->seglist_lock);

	while (1) {
		int i;
		start = find_next_bit(prefree_map, total_segs, end + 1);
		if (start >= total_segs)
			break;
		end = find_next_zero_bit(prefree_map, total_segs, start + 1);

		for (i = start; i < end; i++)
			clear_bit(i, prefree_map);

		dirty_i->nr_dirty[PRE] -= end - start;

		if (!test_opt(sbi, DISCARD))
			continue;

		f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
				(end - start) << sbi->log_blocks_per_seg);
	}
	mutex_unlock(&dirty_i->seglist_lock);

	/* send small discards */
	list_for_each_entry_safe(entry, this, head, list) {
		f2fs_issue_discard(sbi, entry->blkaddr, entry->len);
		list_del(&entry->list);
		SM_I(sbi)->nr_discards -= entry->len;
		kmem_cache_free(discard_entry_slab, entry);
	}
}

static void __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap))
		sit_i->dirty_sentries++;
}

static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type,
					unsigned int segno, int modified)
{
	struct seg_entry *se = get_seg_entry(sbi, segno);
	se->type = type;
	if (modified)
		__mark_sit_entry_dirty(sbi, segno);
}

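/*
 * Apply a one-block change to the SIT state: del is +1 when a block
 * becomes valid and -1 when it is invalidated. Updates the in-memory
 * valid bitmap, valid block counts, and mtime, and marks the SIT
 * entry dirty for the next checkpoint.
 */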
static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
{
	struct seg_entry *se;
	unsigned int segno, offset;
	long int new_vblocks;

	segno = GET_SEGNO(sbi, blkaddr);

	se = get_seg_entry(sbi, segno);
	new_vblocks = se->valid_blocks + del;
	offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);

	f2fs_bug_on((new_vblocks >> (sizeof(unsigned short) << 3) ||
				(new_vblocks > sbi->blocks_per_seg)));

	se->valid_blocks = new_vblocks;
	se->mtime = get_mtime(sbi);
	SIT_I(sbi)->max_mtime = se->mtime;

	/* Update valid block bitmap */
	if (del > 0) {
		if (f2fs_set_bit(offset, se->cur_valid_map))
			BUG();
	} else {
		if (!f2fs_clear_bit(offset, se->cur_valid_map))
			BUG();
	}
	if (!f2fs_test_bit(offset, se->ckpt_valid_map))
		se->ckpt_valid_blocks += del;

	__mark_sit_entry_dirty(sbi, segno);

	/* update total number of valid blocks to be written in ckpt area */
	SIT_I(sbi)->written_valid_blocks += del;

	if (sbi->segs_per_sec > 1)
		get_sec_entry(sbi, segno)->valid_blocks += del;
}

void refresh_sit_entry(struct f2fs_sb_info *sbi, block_t old, block_t new)
{
	update_sit_entry(sbi, new, 1);
	if (GET_SEGNO(sbi, old) != NULL_SEGNO)
		update_sit_entry(sbi, old, -1);

	locate_dirty_segment(sbi, GET_SEGNO(sbi, old));
	locate_dirty_segment(sbi, GET_SEGNO(sbi, new));
}

void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
{
	unsigned int segno = GET_SEGNO(sbi, addr);
	struct sit_info *sit_i = SIT_I(sbi);

	f2fs_bug_on(addr == NULL_ADDR);
	if (addr == NEW_ADDR)
		return;

	/* add it into sit main buffer */
	mutex_lock(&sit_i->sentry_lock);

	update_sit_entry(sbi, addr, -1);

	/* add it into dirty seglist */
	locate_dirty_segment(sbi, segno);

	mutex_unlock(&sit_i->sentry_lock);
}

/*
 * This function should be called while holding the curseg_mutex lock.
 */
static void __add_sum_entry(struct f2fs_sb_info *sbi, int type,
					struct f2fs_summary *sum)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	void *addr = curseg->sum_blk;
	addr += curseg->next_blkoff * sizeof(struct f2fs_summary);
	memcpy(addr, sum, sizeof(struct f2fs_summary));
}

/*
 * Calculate the number of current summary pages for writing
 */
int npages_for_summary_flush(struct f2fs_sb_info *sbi)
{
	int valid_sum_count = 0;
	int i, sum_in_page;

	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		if (sbi->ckpt->alloc_type[i] == SSR)
			valid_sum_count += sbi->blocks_per_seg;
		else
			valid_sum_count += curseg_blkoff(sbi, i);
	}

	sum_in_page = (PAGE_CACHE_SIZE - 2 * SUM_JOURNAL_SIZE -
			SUM_FOOTER_SIZE) / SUMMARY_SIZE;
	if (valid_sum_count <= sum_in_page)
		return 1;
	else if ((valid_sum_count - sum_in_page) <=
		(PAGE_CACHE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
		return 2;
	return 3;
}

/*
 * Caller should put this summary page
 */
struct page *get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
{
	return get_meta_page(sbi, GET_SUM_BLOCK(sbi, segno));
}

static void write_sum_page(struct f2fs_sb_info *sbi,
			struct f2fs_summary_block *sum_blk, block_t blk_addr)
{
	struct page *page = grab_meta_page(sbi, blk_addr);
	void *kaddr = page_address(page);
	memcpy(kaddr, sum_blk, PAGE_CACHE_SIZE);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
}

static int is_next_segment_free(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int segno = curseg->segno + 1;
	struct free_segmap_info *free_i = FREE_I(sbi);

	if (segno < TOTAL_SEGS(sbi) && segno % sbi->segs_per_sec)
		return !test_bit(segno, free_i->free_segmap);
	return 0;
}

/*
 * Find a new segment from the free segments bitmap in the right order
 * This function always succeeds; otherwise it is a BUG.
 */
static void get_new_segment(struct f2fs_sb_info *sbi,
			unsigned int *newseg, bool new_sec, int dir)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int segno, secno, zoneno;
	unsigned int total_zones = TOTAL_SECS(sbi) / sbi->secs_per_zone;
	unsigned int hint = *newseg / sbi->segs_per_sec;
	unsigned int old_zoneno = GET_ZONENO_FROM_SEGNO(sbi, *newseg);
	unsigned int left_start = hint;
	bool init = true;
	int go_left = 0;
	int i;

	write_lock(&free_i->segmap_lock);

	if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
		segno = find_next_zero_bit(free_i->free_segmap,
					TOTAL_SEGS(sbi), *newseg + 1);
		if (segno - *newseg < sbi->segs_per_sec -
					(*newseg % sbi->segs_per_sec))
			goto got_it;
	}
find_other_zone:
	secno = find_next_zero_bit(free_i->free_secmap, TOTAL_SECS(sbi), hint);
	if (secno >= TOTAL_SECS(sbi)) {
		if (dir == ALLOC_RIGHT) {
			secno = find_next_zero_bit(free_i->free_secmap,
							TOTAL_SECS(sbi), 0);
			f2fs_bug_on(secno >= TOTAL_SECS(sbi));
		} else {
			go_left = 1;
			left_start = hint - 1;
		}
	}
	if (go_left == 0)
		goto skip_left;

	while (test_bit(left_start, free_i->free_secmap)) {
		if (left_start > 0) {
			left_start--;
			continue;
		}
		left_start = find_next_zero_bit(free_i->free_secmap,
							TOTAL_SECS(sbi), 0);
		f2fs_bug_on(left_start >= TOTAL_SECS(sbi));
		break;
	}
	secno = left_start;
skip_left:
	hint = secno;
	segno = secno * sbi->segs_per_sec;
	zoneno = secno / sbi->secs_per_zone;

	/* give up on finding another zone */
	if (!init)
		goto got_it;
	if (sbi->secs_per_zone == 1)
		goto got_it;
	if (zoneno == old_zoneno)
		goto got_it;
	if (dir == ALLOC_LEFT) {
		if (!go_left && zoneno + 1 >= total_zones)
			goto got_it;
		if (go_left && zoneno == 0)
			goto got_it;
	}
	for (i = 0; i < NR_CURSEG_TYPE; i++)
		if (CURSEG_I(sbi, i)->zone == zoneno)
			break;

	if (i < NR_CURSEG_TYPE) {
		/* zone is in use, try another */
		if (go_left)
			hint = zoneno * sbi->secs_per_zone - 1;
		else if (zoneno + 1 >= total_zones)
			hint = 0;
		else
			hint = (zoneno + 1) * sbi->secs_per_zone;
		init = false;
		goto find_other_zone;
	}
got_it:
	/* set it as dirty segment in free segmap */
	f2fs_bug_on(test_bit(segno, free_i->free_segmap));
	__set_inuse(sbi, segno);
	*newseg = segno;
	write_unlock(&free_i->segmap_lock);
}

static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	struct summary_footer *sum_footer;

	curseg->segno = curseg->next_segno;
	curseg->zone = GET_ZONENO_FROM_SEGNO(sbi, curseg->segno);
	curseg->next_blkoff = 0;
	curseg->next_segno = NULL_SEGNO;

	sum_footer = &(curseg->sum_blk->footer);
	memset(sum_footer, 0, sizeof(struct summary_footer));
	if (IS_DATASEG(type))
		SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
	if (IS_NODESEG(type))
		SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
	__set_sit_entry_type(sbi, type, curseg->segno, modified);
}

/*
 * Allocate a current working segment.
 * This function always allocates a free segment in LFS manner.
 */
static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int segno = curseg->segno;
	int dir = ALLOC_LEFT;

	write_sum_page(sbi, curseg->sum_blk,
				GET_SUM_BLOCK(sbi, segno));
	if (type == CURSEG_WARM_DATA || type == CURSEG_COLD_DATA)
		dir = ALLOC_RIGHT;

	if (test_opt(sbi, NOHEAP))
		dir = ALLOC_RIGHT;

	get_new_segment(sbi, &segno, new_sec, dir);
	curseg->next_segno = segno;
	reset_curseg(sbi, type, 1);
	curseg->alloc_type = LFS;
}

static void __next_free_blkoff(struct f2fs_sb_info *sbi,
			struct curseg_info *seg, block_t start)
{
	struct seg_entry *se = get_seg_entry(sbi, seg->segno);
	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
	unsigned long target_map[entries];
	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
	int i, pos;

	for (i = 0; i < entries; i++)
		target_map[i] = ckpt_map[i] | cur_map[i];

	pos = __find_rev_next_zero_bit(target_map, sbi->blocks_per_seg, start);

	seg->next_blkoff = pos;
}

/*
 * If a segment is written by LFS manner, next block offset is just obtained
 * by increasing the current block offset. However, if a segment is written by
 * SSR manner, next block offset is obtained by calling __next_free_blkoff
 */
static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
				struct curseg_info *seg)
{
	if (seg->alloc_type == SSR)
		__next_free_blkoff(sbi, seg, seg->next_blkoff + 1);
	else
		seg->next_blkoff++;
}

/*
 * This function always allocates a used segment (from dirty seglist) by SSR
 * manner, so it should recover the existing segment information of valid blocks
 */
static void change_curseg(struct f2fs_sb_info *sbi, int type, bool reuse)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int new_segno = curseg->next_segno;
	struct f2fs_summary_block *sum_node;
	struct page *sum_page;

	write_sum_page(sbi, curseg->sum_blk,
				GET_SUM_BLOCK(sbi, curseg->segno));
	__set_test_and_inuse(sbi, new_segno);

	mutex_lock(&dirty_i->seglist_lock);
	__remove_dirty_segment(sbi, new_segno, PRE);
	__remove_dirty_segment(sbi, new_segno, DIRTY);
	mutex_unlock(&dirty_i->seglist_lock);

	reset_curseg(sbi, type, 1);
	curseg->alloc_type = SSR;
	__next_free_blkoff(sbi, curseg, 0);

	if (reuse) {
		sum_page = get_sum_page(sbi, new_segno);
		sum_node = (struct f2fs_summary_block *)page_address(sum_page);
		memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
		f2fs_put_page(sum_page, 1);
	}
}

static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;

	if (IS_NODESEG(type) || !has_not_enough_free_secs(sbi, 0))
		return v_ops->get_victim(sbi,
				&(curseg)->next_segno, BG_GC, type, SSR);

	/* For data segments, let's do SSR more intensively */
	for (; type >= CURSEG_HOT_DATA; type--)
		if (v_ops->get_victim(sbi, &(curseg)->next_segno,
						BG_GC, type, SSR))
			return 1;
	return 0;
}

/*
 * flush out current segment and replace it with new segment
 * This function always succeeds; otherwise it is a BUG.
 */
static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
						int type, bool force)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);

	if (force)
		new_curseg(sbi, type, true);
	else if (type == CURSEG_WARM_NODE)
		new_curseg(sbi, type, false);
	else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type))
		new_curseg(sbi, type, false);
	else if (need_SSR(sbi) && get_ssr_segment(sbi, type))
		change_curseg(sbi, type, true);
	else
		new_curseg(sbi, type, false);

	stat_inc_seg_type(sbi, curseg);
}

void allocate_new_segments(struct f2fs_sb_info *sbi)
{
	struct curseg_info *curseg;
	unsigned int old_curseg;
	int i;

	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		curseg = CURSEG_I(sbi, i);
		old_curseg = curseg->segno;
		SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true);
		locate_dirty_segment(sbi, old_curseg);
	}
}

static const struct segment_allocation default_salloc_ops = {
	.allocate_segment = allocate_segment_by_default,
};

static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	if (curseg->next_blkoff < sbi->blocks_per_seg)
		return true;
	return false;
}

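/*
 * Pick a log type for a page about to be written. With 2 active logs
 * only the hot data/node logs are used; with 4, data pages are split
 * into hot (dir) and cold; with 6 (the default), data and node pages
 * are further separated into hot/warm/cold temperatures.
 */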
static int __get_segment_type_2(struct page *page, enum page_type p_type)
{
	if (p_type == DATA)
		return CURSEG_HOT_DATA;
	else
		return CURSEG_HOT_NODE;
}

static int __get_segment_type_4(struct page *page, enum page_type p_type)
{
	if (p_type == DATA) {
		struct inode *inode = page->mapping->host;

		if (S_ISDIR(inode->i_mode))
			return CURSEG_HOT_DATA;
		else
			return CURSEG_COLD_DATA;
	} else {
		if (IS_DNODE(page) && !is_cold_node(page))
			return CURSEG_HOT_NODE;
		else
			return CURSEG_COLD_NODE;
	}
}

static int __get_segment_type_6(struct page *page, enum page_type p_type)
{
	if (p_type == DATA) {
		struct inode *inode = page->mapping->host;

		if (S_ISDIR(inode->i_mode))
			return CURSEG_HOT_DATA;
		else if (is_cold_data(page) || file_is_cold(inode))
			return CURSEG_COLD_DATA;
		else
			return CURSEG_WARM_DATA;
	} else {
		if (IS_DNODE(page))
			return is_cold_node(page) ? CURSEG_WARM_NODE :
						CURSEG_HOT_NODE;
		else
			return CURSEG_COLD_NODE;
	}
}

static int __get_segment_type(struct page *page, enum page_type p_type)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	switch (sbi->active_logs) {
	case 2:
		return __get_segment_type_2(page, p_type);
	case 4:
		return __get_segment_type_4(page, p_type);
	}
	/* NR_CURSEG_TYPE(6) logs by default */
	f2fs_bug_on(sbi->active_logs != NR_CURSEG_TYPE);
	return __get_segment_type_6(page, p_type);
}

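/*
 * Reserve the next free block in the given log: take the current
 * segment's next free block as the new address, record the summary
 * entry, advance the log (allocating a new segment if this one is
 * full), and refresh the SIT entries for the old and new blocks.
 */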
void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
		block_t old_blkaddr, block_t *new_blkaddr,
		struct f2fs_summary *sum, int type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct curseg_info *curseg;

	curseg = CURSEG_I(sbi, type);

	mutex_lock(&curseg->curseg_mutex);

	*new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	/*
	 * __add_sum_entry must be called under the curseg_mutex
	 * because it updates a summary entry in the current
	 * summary block.
	 */
	__add_sum_entry(sbi, type, sum);

	mutex_lock(&sit_i->sentry_lock);
	__refresh_next_blkoff(sbi, curseg);

	stat_inc_block_count(sbi, curseg);

	if (!__has_curseg_space(sbi, type))
		sit_i->s_ops->allocate_segment(sbi, type, false);
	/*
	 * SIT information should be updated before segment allocation,
	 * since SSR needs latest valid block information.
	 */
	refresh_sit_entry(sbi, old_blkaddr, *new_blkaddr);

	mutex_unlock(&sit_i->sentry_lock);

	if (page && IS_NODESEG(type))
		fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));

	mutex_unlock(&curseg->curseg_mutex);
}

static void do_write_page(struct f2fs_sb_info *sbi, struct page *page,
			block_t old_blkaddr, block_t *new_blkaddr,
			struct f2fs_summary *sum, struct f2fs_io_info *fio)
{
	int type = __get_segment_type(page, fio->type);

	allocate_data_block(sbi, page, old_blkaddr, new_blkaddr, sum, type);

	/* writeout dirty page into bdev */
	f2fs_submit_page_mbio(sbi, page, *new_blkaddr, fio);
}

void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_io_info fio = {
		.type = META,
		.rw = WRITE_SYNC | REQ_META | REQ_PRIO
	};

	set_page_writeback(page);
	f2fs_submit_page_mbio(sbi, page, page->index, &fio);
}

void write_node_page(struct f2fs_sb_info *sbi, struct page *page,
			struct f2fs_io_info *fio,
			unsigned int nid, block_t old_blkaddr, block_t *new_blkaddr)
{
	struct f2fs_summary sum;
	set_summary(&sum, nid, 0, 0);
	do_write_page(sbi, page, old_blkaddr, new_blkaddr, &sum, fio);
}

void write_data_page(struct page *page, struct dnode_of_data *dn,
		block_t *new_blkaddr, struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct f2fs_summary sum;
	struct node_info ni;

	f2fs_bug_on(dn->data_blkaddr == NULL_ADDR);
	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	do_write_page(sbi, page, dn->data_blkaddr, new_blkaddr, &sum, fio);
}

void rewrite_data_page(struct page *page, block_t old_blkaddr,
					struct f2fs_io_info *fio)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	f2fs_submit_page_mbio(sbi, page, old_blkaddr, fio);
}

void recover_data_page(struct f2fs_sb_info *sbi,
			struct page *page, struct f2fs_summary *sum,
			block_t old_blkaddr, block_t new_blkaddr)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct curseg_info *curseg;
	unsigned int segno, old_cursegno;
	struct seg_entry *se;
	int type;

	segno = GET_SEGNO(sbi, new_blkaddr);
	se = get_seg_entry(sbi, segno);
	type = se->type;

	if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
		if (old_blkaddr == NULL_ADDR)
			type = CURSEG_COLD_DATA;
		else
			type = CURSEG_WARM_DATA;
	}
	curseg = CURSEG_I(sbi, type);

	mutex_lock(&curseg->curseg_mutex);
	mutex_lock(&sit_i->sentry_lock);

	old_cursegno = curseg->segno;

	/* change the current segment */
	if (segno != curseg->segno) {
		curseg->next_segno = segno;
		change_curseg(sbi, type, true);
	}

	curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr);
	__add_sum_entry(sbi, type, sum);

	refresh_sit_entry(sbi, old_blkaddr, new_blkaddr);
	locate_dirty_segment(sbi, old_cursegno);

	mutex_unlock(&sit_i->sentry_lock);
	mutex_unlock(&curseg->curseg_mutex);
}

void rewrite_node_page(struct f2fs_sb_info *sbi,
			struct page *page, struct f2fs_summary *sum,
			block_t old_blkaddr, block_t new_blkaddr)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int type = CURSEG_WARM_NODE;
	struct curseg_info *curseg;
	unsigned int segno, old_cursegno;
	block_t next_blkaddr = next_blkaddr_of_node(page);
	unsigned int next_segno = GET_SEGNO(sbi, next_blkaddr);
	struct f2fs_io_info fio = {
		.type = NODE,
		.rw = WRITE_SYNC,
	};

	curseg = CURSEG_I(sbi, type);

	mutex_lock(&curseg->curseg_mutex);
	mutex_lock(&sit_i->sentry_lock);

	segno = GET_SEGNO(sbi, new_blkaddr);
	old_cursegno = curseg->segno;

	/* change the current segment */
	if (segno != curseg->segno) {
		curseg->next_segno = segno;
		change_curseg(sbi, type, true);
	}
	curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr);
	__add_sum_entry(sbi, type, sum);

	/* change the current log to the next block addr in advance */
	if (next_segno != segno) {
		curseg->next_segno = next_segno;
		change_curseg(sbi, type, true);
	}
	curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, next_blkaddr);

	/* rewrite node page */
	set_page_writeback(page);
	f2fs_submit_page_mbio(sbi, page, new_blkaddr, &fio);
	f2fs_submit_merged_bio(sbi, NODE, WRITE);
	refresh_sit_entry(sbi, old_blkaddr, new_blkaddr);
	locate_dirty_segment(sbi, old_cursegno);

	mutex_unlock(&sit_i->sentry_lock);
	mutex_unlock(&curseg->curseg_mutex);
}

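/*
 * Check whether the page is still sitting in the write_io[] merge
 * buffer for this I/O type; if so, the pending merged bio must be
 * submitted before waiting for writeback to complete.
 */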
static inline bool is_merged_page(struct f2fs_sb_info *sbi,
					struct page *page, enum page_type type)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io = &sbi->write_io[btype];
	struct bio_vec *bvec;
	int i;

	down_read(&io->io_rwsem);
	if (!io->bio)
		goto out;

	bio_for_each_segment_all(bvec, io->bio, i) {
		if (page == bvec->bv_page) {
			up_read(&io->io_rwsem);
			return true;
		}
	}

out:
	up_read(&io->io_rwsem);
	return false;
}

void f2fs_wait_on_page_writeback(struct page *page,
				enum page_type type)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	if (PageWriteback(page)) {
		if (is_merged_page(sbi, page, type))
			f2fs_submit_merged_bio(sbi, type, WRITE);
		wait_on_page_writeback(page);
	}
}

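/*
 * A compacted summary block packs the NAT journal, the SIT journal,
 * and the summary entries of the data logs back to back instead of
 * storing one full summary block per log. This restores all of them
 * from the checkpoint area.
 */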
static int read_compacted_summaries(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct curseg_info *seg_i;
	unsigned char *kaddr;
	struct page *page;
	block_t start;
	int i, j, offset;

	start = start_sum_block(sbi);

	page = get_meta_page(sbi, start++);
	kaddr = (unsigned char *)page_address(page);

	/* Step 1: restore nat cache */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
	memcpy(&seg_i->sum_blk->n_nats, kaddr, SUM_JOURNAL_SIZE);

	/* Step 2: restore sit cache */
	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
	memcpy(&seg_i->sum_blk->n_sits, kaddr + SUM_JOURNAL_SIZE,
						SUM_JOURNAL_SIZE);
	offset = 2 * SUM_JOURNAL_SIZE;

	/* Step 3: restore summary entries */
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		unsigned short blk_off;
		unsigned int segno;

		seg_i = CURSEG_I(sbi, i);
		segno = le32_to_cpu(ckpt->cur_data_segno[i]);
		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
		seg_i->next_segno = segno;
		reset_curseg(sbi, i, 0);
		seg_i->alloc_type = ckpt->alloc_type[i];
		seg_i->next_blkoff = blk_off;

		if (seg_i->alloc_type == SSR)
			blk_off = sbi->blocks_per_seg;

		for (j = 0; j < blk_off; j++) {
			struct f2fs_summary *s;
			s = (struct f2fs_summary *)(kaddr + offset);
			seg_i->sum_blk->entries[j] = *s;
			offset += SUMMARY_SIZE;
			if (offset + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
						SUM_FOOTER_SIZE)
				continue;

			f2fs_put_page(page, 1);
			page = NULL;

			page = get_meta_page(sbi, start++);
			kaddr = (unsigned char *)page_address(page);
			offset = 0;
		}
	}
	f2fs_put_page(page, 1);
	return 0;
}

static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_summary_block *sum;
	struct curseg_info *curseg;
	struct page *new;
	unsigned short blk_off;
	unsigned int segno = 0;
	block_t blk_addr = 0;

	/* get segment number and block addr */
	if (IS_DATASEG(type)) {
		segno = le32_to_cpu(ckpt->cur_data_segno[type]);
		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type -
							CURSEG_HOT_DATA]);
		if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG))
			blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type);
		else
			blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
	} else {
		segno = le32_to_cpu(ckpt->cur_node_segno[type -
							CURSEG_HOT_NODE]);
		blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type -
							CURSEG_HOT_NODE]);
		if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG))
			blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
							type - CURSEG_HOT_NODE);
		else
			blk_addr = GET_SUM_BLOCK(sbi, segno);
	}

	new = get_meta_page(sbi, blk_addr);
	sum = (struct f2fs_summary_block *)page_address(new);

	if (IS_NODESEG(type)) {
		if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG)) {
			struct f2fs_summary *ns = &sum->entries[0];
			int i;
			for (i = 0; i < sbi->blocks_per_seg; i++, ns++) {
				ns->version = 0;
				ns->ofs_in_node = 0;
			}
		} else {
			int err;

			err = restore_node_summary(sbi, segno, sum);
			if (err) {
				f2fs_put_page(new, 1);
				return err;
			}
		}
	}

	/* set uncompleted segment to curseg */
	curseg = CURSEG_I(sbi, type);
	mutex_lock(&curseg->curseg_mutex);
	memcpy(curseg->sum_blk, sum, PAGE_CACHE_SIZE);
	curseg->next_segno = segno;
	reset_curseg(sbi, type, 0);
	curseg->alloc_type = ckpt->alloc_type[type];
	curseg->next_blkoff = blk_off;
	mutex_unlock(&curseg->curseg_mutex);
	f2fs_put_page(new, 1);
	return 0;
}

static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
{
	int type = CURSEG_HOT_DATA;
	int err;

	if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) {
		/* restore for compacted data summary */
		if (read_compacted_summaries(sbi))
			return -EINVAL;
		type = CURSEG_HOT_NODE;
	}

	for (; type <= CURSEG_COLD_NODE; type++) {
		err = read_normal_summaries(sbi, type);
		if (err)
			return err;
	}

	return 0;
}

static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	struct page *page;
	unsigned char *kaddr;
	struct f2fs_summary *summary;
	struct curseg_info *seg_i;
	int written_size = 0;
	int i, j;

	page = grab_meta_page(sbi, blkaddr++);
	kaddr = (unsigned char *)page_address(page);

	/* Step 1: write nat cache */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
	memcpy(kaddr, &seg_i->sum_blk->n_nats, SUM_JOURNAL_SIZE);
	written_size += SUM_JOURNAL_SIZE;

	/* Step 2: write sit cache */
	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
	memcpy(kaddr + written_size, &seg_i->sum_blk->n_sits,
						SUM_JOURNAL_SIZE);
	written_size += SUM_JOURNAL_SIZE;

	/* Step 3: write summary entries */
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		unsigned short blkoff;
		seg_i = CURSEG_I(sbi, i);
		if (sbi->ckpt->alloc_type[i] == SSR)
			blkoff = sbi->blocks_per_seg;
		else
			blkoff = curseg_blkoff(sbi, i);

		for (j = 0; j < blkoff; j++) {
			if (!page) {
				page = grab_meta_page(sbi, blkaddr++);
				kaddr = (unsigned char *)page_address(page);
				written_size = 0;
			}
			summary = (struct f2fs_summary *)(kaddr + written_size);
			*summary = seg_i->sum_blk->entries[j];
			written_size += SUMMARY_SIZE;

			if (written_size + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
							SUM_FOOTER_SIZE)
				continue;

			set_page_dirty(page);
			f2fs_put_page(page, 1);
			page = NULL;
		}
	}
	if (page) {
		set_page_dirty(page);
		f2fs_put_page(page, 1);
	}
}

static void write_normal_summaries(struct f2fs_sb_info *sbi,
					block_t blkaddr, int type)
{
	int i, end;
	if (IS_DATASEG(type))
		end = type + NR_CURSEG_DATA_TYPE;
	else
		end = type + NR_CURSEG_NODE_TYPE;

	for (i = type; i < end; i++) {
		struct curseg_info *sum = CURSEG_I(sbi, i);
		mutex_lock(&sum->curseg_mutex);
		write_sum_page(sbi, sum->sum_blk, blkaddr + (i - type));
		mutex_unlock(&sum->curseg_mutex);
	}
}

void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
{
	if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG))
		write_compacted_summaries(sbi, start_blk);
	else
		write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
}

void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
{
	if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG))
		write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
}

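/*
 * Look up a NAT or SIT journal entry in the current summary block.
 * Returns the slot index if val is found, a freshly allocated slot if
 * alloc is set and the journal has room, or -1 otherwise.
 */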
int lookup_journal_in_cursum(struct f2fs_summary_block *sum, int type,
					unsigned int val, int alloc)
{
	int i;

	if (type == NAT_JOURNAL) {
		for (i = 0; i < nats_in_cursum(sum); i++) {
			if (le32_to_cpu(nid_in_journal(sum, i)) == val)
				return i;
		}
		if (alloc && nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES)
			return update_nats_in_cursum(sum, 1);
	} else if (type == SIT_JOURNAL) {
		for (i = 0; i < sits_in_cursum(sum); i++)
			if (le32_to_cpu(segno_in_journal(sum, i)) == val)
				return i;
		if (alloc && sits_in_cursum(sum) < SIT_JOURNAL_ENTRIES)
			return update_sits_in_cursum(sum, 1);
	}
	return -1;
}

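/*
 * The SIT area keeps two copies of every SIT block; sit_bitmap records
 * which copy is current. Reads come from the current copy, while
 * updated blocks are written to the alternate copy and the bit is
 * flipped (set_to_next_sit) so that copy becomes current.
 */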
static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
					unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int offset = SIT_BLOCK_OFFSET(sit_i, segno);
	block_t blk_addr = sit_i->sit_base_addr + offset;

	check_seg_range(sbi, segno);

	/* calculate sit block address */
	if (f2fs_test_bit(offset, sit_i->sit_bitmap))
		blk_addr += sit_i->sit_blocks;

	return get_meta_page(sbi, blk_addr);
}

static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
					unsigned int start)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct page *src_page, *dst_page;
	pgoff_t src_off, dst_off;
	void *src_addr, *dst_addr;

	src_off = current_sit_addr(sbi, start);
	dst_off = next_sit_addr(sbi, src_off);

	/* get current sit block page without lock */
	src_page = get_meta_page(sbi, src_off);
	dst_page = grab_meta_page(sbi, dst_off);
	f2fs_bug_on(PageDirty(src_page));

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);

	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_sit(sit_i, start);

	return dst_page;
}

static bool flush_sits_in_journal(struct f2fs_sb_info *sbi)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i;

	/*
	 * If the journal area in the current summary is full of sit entries,
	 * all the sit entries will be flushed. Otherwise, the existing sit
	 * entries cannot be replaced with newly hot sit entries.
	 */
	if (sits_in_cursum(sum) >= SIT_JOURNAL_ENTRIES) {
		for (i = sits_in_cursum(sum) - 1; i >= 0; i--) {
			unsigned int segno;
			segno = le32_to_cpu(segno_in_journal(sum, i));
			__mark_sit_entry_dirty(sbi, segno);
		}
		update_sits_in_cursum(sum, -sits_in_cursum(sum));
		return true;
	}
	return false;
}

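/*
 * Note: "flushing" here only redirects the journalled entries back onto
 * the dirty-sentry bitmap (via __mark_sit_entry_dirty) and empties the
 * journal; the actual write to the SIT area happens in flush_sit_entries()
 * below.
 */
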
/*
 * CP calls this function, which flushes SIT entries including sit_journal,
 * and moves prefree segs to free segs.
 */
void flush_sit_entries(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	unsigned long nsegs = TOTAL_SEGS(sbi);
	struct page *page = NULL;
	struct f2fs_sit_block *raw_sit = NULL;
	unsigned int start = 0, end = 0;
	unsigned int segno;
	bool flushed;

	mutex_lock(&curseg->curseg_mutex);
	mutex_lock(&sit_i->sentry_lock);

	/*
	 * "flushed" indicates whether the sit entries in the journal have
	 * been flushed to the SIT area.
	 */
	flushed = flush_sits_in_journal(sbi);

	for_each_set_bit(segno, bitmap, nsegs) {
		struct seg_entry *se = get_seg_entry(sbi, segno);
		int sit_offset, offset;

		sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);

		/* add discard candidates */
		if (SM_I(sbi)->nr_discards < SM_I(sbi)->max_discards)
			add_discard_addrs(sbi, segno, se);

		if (flushed)
			goto to_sit_page;

		offset = lookup_journal_in_cursum(sum, SIT_JOURNAL, segno, 1);
		if (offset >= 0) {
			segno_in_journal(sum, offset) = cpu_to_le32(segno);
			seg_info_to_raw_sit(se, &sit_in_journal(sum, offset));
			goto flush_done;
		}
to_sit_page:
		if (!page || (start > segno) || (segno > end)) {
			if (page) {
				f2fs_put_page(page, 1);
				page = NULL;
			}

			start = START_SEGNO(sit_i, segno);
			end = start + SIT_ENTRY_PER_BLOCK - 1;

			/* read sit block that will be updated */
			page = get_next_sit_page(sbi, start);
			raw_sit = page_address(page);
		}

		/* update entry in SIT block */
		seg_info_to_raw_sit(se, &raw_sit->entries[sit_offset]);
flush_done:
		__clear_bit(segno, bitmap);
		sit_i->dirty_sentries--;
	}
	mutex_unlock(&sit_i->sentry_lock);
	mutex_unlock(&curseg->curseg_mutex);

	/* write out the last modified SIT block */
	f2fs_put_page(page, 1);

	set_prefree_as_free_segments(sbi);
}

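/*
 * Recap of the per-segment decision above (a summary, not code): if the
 * journal was just emptied, write straight to the SIT block; otherwise
 * prefer a free SIT journal slot and fall back to the copy-on-write SIT
 * block only when the journal has no room. Consecutive dirty segnos that
 * share a SIT block reuse one pinned page between loop iterations.
 */
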
static int build_sit_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct sit_info *sit_i;
	unsigned int sit_segs, start;
	char *src_bitmap, *dst_bitmap;
	unsigned int bitmap_size;

	/* allocate memory for SIT information */
	sit_i = kzalloc(sizeof(struct sit_info), GFP_KERNEL);
	if (!sit_i)
		return -ENOMEM;

	SM_I(sbi)->sit_info = sit_i;

	sit_i->sentries = vzalloc(TOTAL_SEGS(sbi) * sizeof(struct seg_entry));
	if (!sit_i->sentries)
		return -ENOMEM;

	bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));
	sit_i->dirty_sentries_bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!sit_i->dirty_sentries_bitmap)
		return -ENOMEM;

	for (start = 0; start < TOTAL_SEGS(sbi); start++) {
		sit_i->sentries[start].cur_valid_map
			= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
		sit_i->sentries[start].ckpt_valid_map
			= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
		if (!sit_i->sentries[start].cur_valid_map
				|| !sit_i->sentries[start].ckpt_valid_map)
			return -ENOMEM;
	}

	if (sbi->segs_per_sec > 1) {
		sit_i->sec_entries = vzalloc(TOTAL_SECS(sbi) *
					sizeof(struct sec_entry));
		if (!sit_i->sec_entries)
			return -ENOMEM;
	}

	/* get information related to SIT */
	sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;

	/* setup SIT bitmap from checkpoint pack */
	bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
	src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);

	dst_bitmap = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
	if (!dst_bitmap)
		return -ENOMEM;

	/* init SIT information */
	sit_i->s_ops = &default_salloc_ops;

	sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
	sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
	sit_i->written_valid_blocks = le64_to_cpu(ckpt->valid_block_count);
	sit_i->sit_bitmap = dst_bitmap;
	sit_i->bitmap_size = bitmap_size;
	sit_i->dirty_sentries = 0;
	sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
	sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
	sit_i->mounted_time = CURRENT_TIME_SEC.tv_sec;
	mutex_init(&sit_i->sentry_lock);
	return 0;
}

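/*
 * Sizing note: segment_count_sit covers both SIT copies, so halving it
 * (>> 1) yields the size of one copy, and sit_blocks is then the block
 * distance between the two copies used by get_current_sit_page() above.
 * On -ENOMEM, partial allocations are deliberately left in place; the
 * caller is expected to unwind everything via destroy_sit_info() below.
 */
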
static int build_free_segmap(struct f2fs_sb_info *sbi)
{
	struct f2fs_sm_info *sm_info = SM_I(sbi);
	struct free_segmap_info *free_i;
	unsigned int bitmap_size, sec_bitmap_size;

	/* allocate memory for free segmap information */
	free_i = kzalloc(sizeof(struct free_segmap_info), GFP_KERNEL);
	if (!free_i)
		return -ENOMEM;

	SM_I(sbi)->free_info = free_i;

	bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));
	free_i->free_segmap = kmalloc(bitmap_size, GFP_KERNEL);
	if (!free_i->free_segmap)
		return -ENOMEM;

	sec_bitmap_size = f2fs_bitmap_size(TOTAL_SECS(sbi));
	free_i->free_secmap = kmalloc(sec_bitmap_size, GFP_KERNEL);
	if (!free_i->free_secmap)
		return -ENOMEM;

	/* set all segments as dirty temporarily */
	memset(free_i->free_segmap, 0xff, bitmap_size);
	memset(free_i->free_secmap, 0xff, sec_bitmap_size);

	/* init free segmap information */
	free_i->start_segno =
		(unsigned int) GET_SEGNO_FROM_SEG0(sbi, sm_info->main_blkaddr);
	free_i->free_segments = 0;
	free_i->free_sections = 0;
	rwlock_init(&free_i->segmap_lock);
	return 0;
}

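/*
 * The all-ones initialization means "everything in use" until
 * init_free_segmap() below walks the loaded SIT entries and clears the
 * bits of truly empty segments, so free_segments/free_sections start at
 * zero and are incremented only as segments are proven free.
 */
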
static int build_curseg(struct f2fs_sb_info *sbi)
{
	struct curseg_info *array;
	int i;

	array = kcalloc(NR_CURSEG_TYPE, sizeof(*array), GFP_KERNEL);
	if (!array)
		return -ENOMEM;

	SM_I(sbi)->curseg_array = array;

	for (i = 0; i < NR_CURSEG_TYPE; i++) {
		mutex_init(&array[i].curseg_mutex);
		array[i].sum_blk = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
		if (!array[i].sum_blk)
			return -ENOMEM;
		array[i].segno = NULL_SEGNO;
		array[i].next_blkoff = 0;
	}
	return restore_curseg_summaries(sbi);
}

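/*
 * One curseg per active log: NR_CURSEG_TYPE covers the six logs
 * (hot/warm/cold x data/node). Each segno starts as NULL_SEGNO and is
 * filled in by restore_curseg_summaries() from the checkpoint, so a
 * curseg is never used before its on-disk state has been recovered.
 */
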
static void build_sit_entries(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int sit_blk_cnt = SIT_BLK_CNT(sbi);
	unsigned int i, start, end;
	unsigned int readed, start_blk = 0;
	int nrpages = MAX_BIO_BLOCKS(max_hw_blocks(sbi));

	do {
		readed = ra_meta_pages(sbi, start_blk, nrpages, META_SIT);

		start = start_blk * sit_i->sents_per_block;
		end = (start_blk + readed) * sit_i->sents_per_block;

		for (; start < end && start < TOTAL_SEGS(sbi); start++) {
			struct seg_entry *se = &sit_i->sentries[start];
			struct f2fs_sit_block *sit_blk;
			struct f2fs_sit_entry sit;
			struct page *page;

			mutex_lock(&curseg->curseg_mutex);
			for (i = 0; i < sits_in_cursum(sum); i++) {
				if (le32_to_cpu(segno_in_journal(sum, i))
						== start) {
					sit = sit_in_journal(sum, i);
					mutex_unlock(&curseg->curseg_mutex);
					goto got_it;
				}
			}
			mutex_unlock(&curseg->curseg_mutex);

			page = get_current_sit_page(sbi, start);
			sit_blk = (struct f2fs_sit_block *)page_address(page);
			sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
			f2fs_put_page(page, 1);
got_it:
			check_block_count(sbi, start, &sit);
			seg_info_from_raw_sit(se, &sit);
			if (sbi->segs_per_sec > 1) {
				struct sec_entry *e = get_sec_entry(sbi, start);
				e->valid_blocks += se->valid_blocks;
			}
		}
		start_blk += readed;
	} while (start_blk < sit_blk_cnt);
}

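/*
 * Note the precedence: an entry still sitting in the SIT journal is newer
 * than the one in the SIT area, so the journal is consulted first and the
 * on-disk block is read only on a journal miss. ra_meta_pages() batches
 * the SIT block reads (up to roughly one bio's worth, per nrpages above)
 * to speed up mount.
 */
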
static void init_free_segmap(struct f2fs_sb_info *sbi)
{
	unsigned int start;
	int type;

	for (start = 0; start < TOTAL_SEGS(sbi); start++) {
		struct seg_entry *sentry = get_seg_entry(sbi, start);
		if (!sentry->valid_blocks)
			__set_free(sbi, start);
	}

	/* mark the current segments as in-use */
	for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) {
		struct curseg_info *curseg_t = CURSEG_I(sbi, type);
		__set_test_and_inuse(sbi, curseg_t->segno);
	}
}

static void init_dirty_segmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int segno = 0, offset = 0, total_segs = TOTAL_SEGS(sbi);
	unsigned short valid_blocks;

	while (1) {
		/* find dirty segment based on free segmap */
		segno = find_next_inuse(free_i, total_segs, offset);
		if (segno >= total_segs)
			break;
		offset = segno + 1;
		valid_blocks = get_valid_blocks(sbi, segno, 0);
		if (valid_blocks >= sbi->blocks_per_seg || !valid_blocks)
			continue;
		mutex_lock(&dirty_i->seglist_lock);
		__locate_dirty_segment(sbi, segno, DIRTY);
		mutex_unlock(&dirty_i->seglist_lock);
	}
}

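/*
 * "Dirty" here means partially valid: a segment with zero valid blocks is
 * free (skipped above), and one that is completely full has nothing to
 * reclaim, so only in-use segments with 0 < valid_blocks < blocks_per_seg
 * become candidates for cleaning.
 */
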
static int init_victim_secmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int bitmap_size = f2fs_bitmap_size(TOTAL_SECS(sbi));

	dirty_i->victim_secmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!dirty_i->victim_secmap)
		return -ENOMEM;
	return 0;
}

static int build_dirty_segmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i;
	unsigned int bitmap_size, i;

	/* allocate memory for dirty segments list information */
	dirty_i = kzalloc(sizeof(struct dirty_seglist_info), GFP_KERNEL);
	if (!dirty_i)
		return -ENOMEM;

	SM_I(sbi)->dirty_info = dirty_i;
	mutex_init(&dirty_i->seglist_lock);

	bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));

	for (i = 0; i < NR_DIRTY_TYPE; i++) {
		dirty_i->dirty_segmap[i] = kzalloc(bitmap_size, GFP_KERNEL);
		if (!dirty_i->dirty_segmap[i])
			return -ENOMEM;
	}

	init_dirty_segmap(sbi);
	return init_victim_secmap(sbi);
}

/*
 * Update min, max modified time for cost-benefit GC algorithm
 */
static void init_min_max_mtime(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int segno;

	mutex_lock(&sit_i->sentry_lock);

	sit_i->min_mtime = LLONG_MAX;

	for (segno = 0; segno < TOTAL_SEGS(sbi); segno += sbi->segs_per_sec) {
		unsigned int i;
		unsigned long long mtime = 0;

		for (i = 0; i < sbi->segs_per_sec; i++)
			mtime += get_seg_entry(sbi, segno + i)->mtime;

		mtime = div_u64(mtime, sbi->segs_per_sec);

		if (sit_i->min_mtime > mtime)
			sit_i->min_mtime = mtime;
	}
	sit_i->max_mtime = get_mtime(sbi);
	mutex_unlock(&sit_i->sentry_lock);
}

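/*
 * min_mtime is thus the mean mtime of the coldest section rather than the
 * single oldest segment; together with max_mtime (the current time) it
 * lets the GC normalize a section's age, roughly
 * age = (max_mtime - mtime) / (max_mtime - min_mtime), when computing
 * cost-benefit victim scores.
 */
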
int build_segment_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_sm_info *sm_info;
	int err;

	sm_info = kzalloc(sizeof(struct f2fs_sm_info), GFP_KERNEL);
	if (!sm_info)
		return -ENOMEM;

	/* init sm info */
	sbi->sm_info = sm_info;
	sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
	sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
	sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
	sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
	sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
	sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
	sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
	sm_info->rec_prefree_segments = sm_info->main_segments *
					DEF_RECLAIM_PREFREE_SEGMENTS / 100;
	sm_info->ipu_policy = F2FS_IPU_DISABLE;
	sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;

	INIT_LIST_HEAD(&sm_info->discard_list);
	sm_info->nr_discards = 0;
	sm_info->max_discards = 0;

	if (test_opt(sbi, FLUSH_MERGE) && !f2fs_readonly(sbi->sb)) {
		err = create_flush_cmd_control(sbi);
		if (err)
			return err;
	}

	err = build_sit_info(sbi);
	if (err)
		return err;
	err = build_free_segmap(sbi);
	if (err)
		return err;
	err = build_curseg(sbi);
	if (err)
		return err;

	/* reinit free segmap based on SIT */
	build_sit_entries(sbi);

	init_free_segmap(sbi);
	err = build_dirty_segmap(sbi);
	if (err)
		return err;

	init_min_max_mtime(sbi);
	return 0;
}

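/*
 * Initialization order matters: build_curseg() must run before
 * build_sit_entries() (the SIT journal lives in the restored cold-data
 * summary), which in turn must precede init_free_segmap() and
 * build_dirty_segmap(), since both classify segments by the valid-block
 * counts the SIT walk just loaded. destroy_segment_manager() below undoes
 * these steps in roughly reverse order.
 */
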
static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
					enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	mutex_lock(&dirty_i->seglist_lock);
	kfree(dirty_i->dirty_segmap[dirty_type]);
	dirty_i->nr_dirty[dirty_type] = 0;
	mutex_unlock(&dirty_i->seglist_lock);
}

static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	kfree(dirty_i->victim_secmap);
}

static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	int i;

	if (!dirty_i)
		return;

	/* discard pre-free/dirty segments list */
	for (i = 0; i < NR_DIRTY_TYPE; i++)
		discard_dirty_segmap(sbi, i);

	destroy_victim_secmap(sbi);
	SM_I(sbi)->dirty_info = NULL;
	kfree(dirty_i);
}

static void destroy_curseg(struct f2fs_sb_info *sbi)
{
	struct curseg_info *array = SM_I(sbi)->curseg_array;
	int i;

	if (!array)
		return;
	SM_I(sbi)->curseg_array = NULL;
	for (i = 0; i < NR_CURSEG_TYPE; i++)
		kfree(array[i].sum_blk);
	kfree(array);
}

static void destroy_free_segmap(struct f2fs_sb_info *sbi)
{
	struct free_segmap_info *free_i = SM_I(sbi)->free_info;
	if (!free_i)
		return;
	SM_I(sbi)->free_info = NULL;
	kfree(free_i->free_segmap);
	kfree(free_i->free_secmap);
	kfree(free_i);
}

static void destroy_sit_info(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int start;

	if (!sit_i)
		return;

	if (sit_i->sentries) {
		for (start = 0; start < TOTAL_SEGS(sbi); start++) {
			kfree(sit_i->sentries[start].cur_valid_map);
			kfree(sit_i->sentries[start].ckpt_valid_map);
		}
	}
	vfree(sit_i->sentries);
	vfree(sit_i->sec_entries);
	kfree(sit_i->dirty_sentries_bitmap);

	SM_I(sbi)->sit_info = NULL;
	kfree(sit_i->sit_bitmap);
	kfree(sit_i);
}

void destroy_segment_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_sm_info *sm_info = SM_I(sbi);

	if (!sm_info)
		return;
	destroy_flush_cmd_control(sbi);
	destroy_dirty_segmap(sbi);
	destroy_curseg(sbi);
	destroy_free_segmap(sbi);
	destroy_sit_info(sbi);
	sbi->sm_info = NULL;
	kfree(sm_info);
}

int __init create_segment_manager_caches(void)
{
	discard_entry_slab = f2fs_kmem_cache_create("discard_entry",
			sizeof(struct discard_entry));
	if (!discard_entry_slab)
		return -ENOMEM;
	return 0;
}

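/*
 * Module-lifetime pairing (an assumption based on the usual f2fs init
 * path, not shown in this file): module init is expected to call
 * create_segment_manager_caches() once at load time and
 * destroy_segment_manager_caches() at unload, so discard_entry objects
 * allocated per checkpoint always come from this dedicated slab.
 */
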
void destroy_segment_manager_caches(void)
{
	kmem_cache_destroy(discard_entry_slab);
}