/*
 * fs/f2fs/segment.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/prefetch.h>
#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "segment.h"
#include "node.h"
#include "trace.h"
#include <trace/events/f2fs.h>

#define __reverse_ffz(x) __reverse_ffs(~(x))

static struct kmem_cache *discard_entry_slab;
static struct kmem_cache *sit_entry_set_slab;
static struct kmem_cache *inmem_entry_slab;

/*
 * __reverse_ffs is copied from include/asm-generic/bitops/__ffs.h since
 * MSB and LSB are reversed in a byte by f2fs_set_bit.
 */
static inline unsigned long __reverse_ffs(unsigned long word)
{
	int num = 0;

#if BITS_PER_LONG == 64
	if ((word & 0xffffffff) == 0) {
		num += 32;
		word >>= 32;
	}
#endif
	if ((word & 0xffff) == 0) {
		num += 16;
		word >>= 16;
	}
	if ((word & 0xff) == 0) {
		num += 8;
		word >>= 8;
	}
	if ((word & 0xf0) == 0)
		num += 4;
	else
		word >>= 4;
	if ((word & 0xc) == 0)
		num += 2;
	else
		word >>= 2;
	if ((word & 0x2) == 0)
		num += 1;
	return num;
}
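
/*
 * Worked example: f2fs_set_bit(3, bitmap) sets mask 0x10 in byte 0,
 * since bit 0 maps to 0x80. __reverse_ffs(0x10) then sees a non-zero
 * low byte, finds the high nibble set (word >>= 4 leaves 0x1), adds 2
 * for the clear 0xc half and 1 for the clear 0x2 bit, and returns 3.
 */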

/*
 * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because
 * f2fs_set_bit makes MSB and LSB reversed in a byte.
 * Example:
 *                             LSB <--> MSB
 *   f2fs_set_bit(0, bitmap) => 0000 0001
 *   f2fs_set_bit(7, bitmap) => 1000 0000
 */
static unsigned long __find_rev_next_bit(const unsigned long *addr,
			unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + BIT_WORD(offset);
	unsigned long result = offset & ~(BITS_PER_LONG - 1);
	unsigned long tmp;
	unsigned long mask, submask;
	unsigned long quot, rest;

	if (offset >= size)
		return size;

	size -= result;
	offset %= BITS_PER_LONG;
	if (!offset)
		goto aligned;

	tmp = *(p++);
	quot = (offset >> 3) << 3;
	rest = offset & 0x7;
	mask = ~0UL << quot;
	submask = (unsigned char)(0xff << rest) >> rest;
	submask <<= quot;
	mask &= submask;
	tmp &= mask;
	if (size < BITS_PER_LONG)
		goto found_first;
	if (tmp)
		goto found_middle;

	size -= BITS_PER_LONG;
	result += BITS_PER_LONG;
aligned:
	while (size & ~(BITS_PER_LONG - 1)) {
		tmp = *(p++);
		if (tmp)
			goto found_middle;
		result += BITS_PER_LONG;
		size -= BITS_PER_LONG;
	}
	if (!size)
		return result;
	tmp = *p;
found_first:
	tmp &= (~0UL >> (BITS_PER_LONG - size));
	if (tmp == 0UL)		/* Are any bits set? */
		return result + size;	/* Nope. */
found_middle:
	return result + __reverse_ffs(tmp);
}

static unsigned long __find_rev_next_zero_bit(const unsigned long *addr,
			unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + BIT_WORD(offset);
	unsigned long result = offset & ~(BITS_PER_LONG - 1);
	unsigned long tmp;
	unsigned long mask, submask;
	unsigned long quot, rest;

	if (offset >= size)
		return size;

	size -= result;
	offset %= BITS_PER_LONG;
	if (!offset)
		goto aligned;

	tmp = *(p++);
	quot = (offset >> 3) << 3;
	rest = offset & 0x7;
	mask = ~(~0UL << quot);
	submask = (unsigned char)~((unsigned char)(0xff << rest) >> rest);
	submask <<= quot;
	mask += submask;
	tmp |= mask;
	if (size < BITS_PER_LONG)
		goto found_first;
	if (~tmp)
		goto found_middle;

	size -= BITS_PER_LONG;
	result += BITS_PER_LONG;
aligned:
	while (size & ~(BITS_PER_LONG - 1)) {
		tmp = *(p++);
		if (~tmp)
			goto found_middle;
		result += BITS_PER_LONG;
		size -= BITS_PER_LONG;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp |= ~0UL << size;
	if (tmp == ~0UL)	/* Are any bits zero? */
		return result + size;	/* Nope. */
found_middle:
	return result + __reverse_ffz(tmp);
}
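
/*
 * Typical use of the pair above, a minimal sketch mirroring the scan in
 * add_discard_addrs() below: alternating calls walk a reversed-order
 * bitmap and yield each maximal run of set bits as a half-open range.
 *
 *	unsigned int start = 0, end = -1;
 *
 *	while (1) {
 *		start = __find_rev_next_bit(dmap, max_blocks, end + 1);
 *		if (start >= max_blocks)
 *			break;
 *		end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1);
 *		... blocks [start, end) form one candidate extent ...
 *	}
 */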

void register_inmem_page(struct inode *inode, struct page *page)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct inmem_pages *new;
	int err;

	SetPagePrivate(page);
	f2fs_trace_pid(page);

	new = f2fs_kmem_cache_alloc(inmem_entry_slab, GFP_NOFS);

	/* add atomic page indices to the list */
	new->page = page;
	INIT_LIST_HEAD(&new->list);
retry:
	/* increase reference count with clean state */
	mutex_lock(&fi->inmem_lock);
	err = radix_tree_insert(&fi->inmem_root, page->index, new);
	if (err == -EEXIST) {
		mutex_unlock(&fi->inmem_lock);
		kmem_cache_free(inmem_entry_slab, new);
		return;
	} else if (err) {
		mutex_unlock(&fi->inmem_lock);
		goto retry;
	}
	get_page(page);
	list_add_tail(&new->list, &fi->inmem_pages);
	inc_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
	mutex_unlock(&fi->inmem_lock);

	trace_f2fs_register_inmem_page(page, INMEM);
}

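/*
 * Write back (abort == false) or drop (abort == true) every page that
 * register_inmem_page() queued for this inode's atomic write.
 */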
void commit_inmem_pages(struct inode *inode, bool abort)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct inmem_pages *cur, *tmp;
	bool submit_bio = false;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = DATA,
		.rw = WRITE_SYNC | REQ_PRIO,
		.encrypted_page = NULL,
	};

	/*
	 * The abort is true only when f2fs_evict_inode is called.
	 * Basically, f2fs_evict_inode doesn't produce any data writes, so
	 * we don't need to call f2fs_balance_fs.
	 * Otherwise, f2fs_gc in f2fs_balance_fs can wait forever until this
	 * inode becomes free by iget_locked in f2fs_iget.
	 */
	if (!abort) {
		f2fs_balance_fs(sbi);
		f2fs_lock_op(sbi);
	}

	mutex_lock(&fi->inmem_lock);
	list_for_each_entry_safe(cur, tmp, &fi->inmem_pages, list) {
		if (!abort) {
			lock_page(cur->page);
			if (cur->page->mapping == inode->i_mapping) {
				f2fs_wait_on_page_writeback(cur->page, DATA);
				if (clear_page_dirty_for_io(cur->page))
					inode_dec_dirty_pages(inode);
				trace_f2fs_commit_inmem_page(cur->page, INMEM);
				fio.page = cur->page;
				do_write_data_page(&fio);
				submit_bio = true;
			}
			f2fs_put_page(cur->page, 1);
		} else {
			trace_f2fs_commit_inmem_page(cur->page, INMEM_DROP);
			put_page(cur->page);
		}
		radix_tree_delete(&fi->inmem_root, cur->page->index);
		list_del(&cur->list);
		kmem_cache_free(inmem_entry_slab, cur);
		dec_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
	}
	mutex_unlock(&fi->inmem_lock);

	if (!abort) {
		f2fs_unlock_op(sbi);
		if (submit_bio)
			f2fs_submit_merged_bio(sbi, DATA, WRITE);
	}
}

/*
 * This function balances dirty node and dentry pages.
 * In addition, it controls garbage collection.
 */
void f2fs_balance_fs(struct f2fs_sb_info *sbi)
{
	/*
	 * We should do GC or end up with checkpoint if there are too many
	 * dirty dir/node pages without enough free segments.
	 */
	if (has_not_enough_free_secs(sbi, 0)) {
		mutex_lock(&sbi->gc_mutex);
		f2fs_gc(sbi);
	}
}

void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
{
	/* try to shrink the extent cache when there is not enough memory */
	f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER);

	/* check the # of cached NAT entries and prefree segments */
	if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK) ||
			excess_prefree_segs(sbi) ||
			!available_free_memory(sbi, INO_ENTRIES))
		f2fs_sync_fs(sbi->sb, true);
}

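/*
 * Flush-merge worker: fsync callers queue a flush_cmd on fcc->issue_list
 * and sleep; this kthread drains the whole list behind a single
 * WRITE_FLUSH bio and completes every waiter with the shared result.
 */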
static int issue_flush_thread(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info;
	wait_queue_head_t *q = &fcc->flush_wait_queue;
repeat:
	if (kthread_should_stop())
		return 0;

	if (!llist_empty(&fcc->issue_list)) {
		struct bio *bio = bio_alloc(GFP_NOIO, 0);
		struct flush_cmd *cmd, *next;
		int ret;

		fcc->dispatch_list = llist_del_all(&fcc->issue_list);
		fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);

		bio->bi_bdev = sbi->sb->s_bdev;
		ret = submit_bio_wait(WRITE_FLUSH, bio);

		llist_for_each_entry_safe(cmd, next,
				fcc->dispatch_list, llnode) {
			cmd->ret = ret;
			complete(&cmd->wait);
		}
		bio_put(bio);
		fcc->dispatch_list = NULL;
	}

	wait_event_interruptible(*q,
		kthread_should_stop() || !llist_empty(&fcc->issue_list));
	goto repeat;
}

int f2fs_issue_flush(struct f2fs_sb_info *sbi)
{
	struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info;
	struct flush_cmd cmd;

	trace_f2fs_issue_flush(sbi->sb, test_opt(sbi, NOBARRIER),
					test_opt(sbi, FLUSH_MERGE));

	if (test_opt(sbi, NOBARRIER))
		return 0;

	if (!test_opt(sbi, FLUSH_MERGE))
		return blkdev_issue_flush(sbi->sb->s_bdev, GFP_KERNEL, NULL);

	init_completion(&cmd.wait);

	llist_add(&cmd.llnode, &fcc->issue_list);

	if (!fcc->dispatch_list)
		wake_up(&fcc->flush_wait_queue);

	wait_for_completion(&cmd.wait);

	return cmd.ret;
}

int create_flush_cmd_control(struct f2fs_sb_info *sbi)
{
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	struct flush_cmd_control *fcc;
	int err = 0;

	fcc = kzalloc(sizeof(struct flush_cmd_control), GFP_KERNEL);
	if (!fcc)
		return -ENOMEM;
	init_waitqueue_head(&fcc->flush_wait_queue);
	init_llist_head(&fcc->issue_list);
	SM_I(sbi)->cmd_control_info = fcc;
	fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
				"f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(fcc->f2fs_issue_flush)) {
		err = PTR_ERR(fcc->f2fs_issue_flush);
		kfree(fcc);
		SM_I(sbi)->cmd_control_info = NULL;
		return err;
	}

	return err;
}

void destroy_flush_cmd_control(struct f2fs_sb_info *sbi)
{
	struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info;

	if (fcc && fcc->f2fs_issue_flush)
		kthread_stop(fcc->f2fs_issue_flush);
	kfree(fcc);
	SM_I(sbi)->cmd_control_info = NULL;
}

static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	/* need not be added */
	if (IS_CURSEG(sbi, segno))
		return;

	if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]++;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = sentry->type;

		if (unlikely(t >= DIRTY)) {
			f2fs_bug_on(sbi, 1);
			return;
		}
		if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t]))
			dirty_i->nr_dirty[t]++;
	}
}

static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]--;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = sentry->type;

		if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
			dirty_i->nr_dirty[t]--;

		if (get_valid_blocks(sbi, segno, sbi->segs_per_sec) == 0)
			clear_bit(GET_SECNO(sbi, segno),
						dirty_i->victim_secmap);
	}
}

/*
 * Errors such as -ENOMEM should not occur here.
 * Adding a dirty entry into the seglist is not a critical operation.
 * If a given segment is one of the current working segments, it won't be added.
 */
static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned short valid_blocks;

	if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
		return;

	mutex_lock(&dirty_i->seglist_lock);

	valid_blocks = get_valid_blocks(sbi, segno, 0);

	if (valid_blocks == 0) {
		__locate_dirty_segment(sbi, segno, PRE);
		__remove_dirty_segment(sbi, segno, DIRTY);
	} else if (valid_blocks < sbi->blocks_per_seg) {
		__locate_dirty_segment(sbi, segno, DIRTY);
	} else {
		/* Recovery routine with SSR needs this */
		__remove_dirty_segment(sbi, segno, DIRTY);
	}

	mutex_unlock(&dirty_i->seglist_lock);
}

static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
				block_t blkstart, block_t blklen)
{
	sector_t start = SECTOR_FROM_BLOCK(blkstart);
	sector_t len = SECTOR_FROM_BLOCK(blklen);
	struct seg_entry *se;
	unsigned int offset;
	block_t i;

	for (i = blkstart; i < blkstart + blklen; i++) {
		se = get_seg_entry(sbi, GET_SEGNO(sbi, i));
		offset = GET_BLKOFF_FROM_SEG0(sbi, i);

		if (!f2fs_test_and_set_bit(offset, se->discard_map))
			sbi->discard_blks--;
	}
	trace_f2fs_issue_discard(sbi->sb, blkstart, blklen);
	return blkdev_issue_discard(sbi->sb->s_bdev, start, len, GFP_NOFS, 0);
}

void discard_next_dnode(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	int err = -ENOTSUPP;

	if (test_opt(sbi, DISCARD)) {
		struct seg_entry *se = get_seg_entry(sbi,
				GET_SEGNO(sbi, blkaddr));
		unsigned int offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);

		if (f2fs_test_bit(offset, se->discard_map))
			return;

		err = f2fs_issue_discard(sbi, blkaddr, 1);
	}

	if (err) {
		struct page *page = grab_meta_page(sbi, blkaddr);
		/* zero-filled page */
		set_page_dirty(page);
		f2fs_put_page(page, 1);
	}
}

static void __add_discard_entry(struct f2fs_sb_info *sbi,
		struct cp_control *cpc, struct seg_entry *se,
		unsigned int start, unsigned int end)
{
	struct list_head *head = &SM_I(sbi)->discard_list;
	struct discard_entry *new, *last;

	if (!list_empty(head)) {
		last = list_last_entry(head, struct discard_entry, list);
		if (START_BLOCK(sbi, cpc->trim_start) + start ==
						last->blkaddr + last->len) {
			last->len += end - start;
			goto done;
		}
	}

	new = f2fs_kmem_cache_alloc(discard_entry_slab, GFP_NOFS);
	INIT_LIST_HEAD(&new->list);
	new->blkaddr = START_BLOCK(sbi, cpc->trim_start) + start;
	new->len = end - start;
	list_add_tail(&new->list, head);
done:
	SM_I(sbi)->nr_discards += end - start;
	cpc->trimmed += end - start;
}

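/*
 * Collect discard candidates for the segment at cpc->trim_start: blocks
 * that were valid at the last checkpoint but have been invalidated since
 * (or, for FITRIM, free blocks not yet discarded), merged into extents
 * via __add_discard_entry().
 */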
static void add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
	int max_blocks = sbi->blocks_per_seg;
	struct seg_entry *se = get_seg_entry(sbi, cpc->trim_start);
	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
	unsigned long *discard_map = (unsigned long *)se->discard_map;
	unsigned long *dmap = SIT_I(sbi)->tmp_map;
	unsigned int start = 0, end = -1;
	bool force = (cpc->reason == CP_DISCARD);
	int i;

	if (se->valid_blocks == max_blocks)
		return;

	if (!force) {
		if (!test_opt(sbi, DISCARD) || !se->valid_blocks ||
		    SM_I(sbi)->nr_discards >= SM_I(sbi)->max_discards)
			return;
	}

	/* SIT_VBLOCK_MAP_SIZE should be multiple of sizeof(unsigned long) */
	for (i = 0; i < entries; i++)
		dmap[i] = force ? ~ckpt_map[i] & ~discard_map[i] :
				(cur_map[i] ^ ckpt_map[i]) & ckpt_map[i];

	while (force || SM_I(sbi)->nr_discards <= SM_I(sbi)->max_discards) {
		start = __find_rev_next_bit(dmap, max_blocks, end + 1);
		if (start >= max_blocks)
			break;

		end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1);
		__add_discard_entry(sbi, cpc, se, start, end);
	}
}

void release_discard_addrs(struct f2fs_sb_info *sbi)
{
	struct list_head *head = &(SM_I(sbi)->discard_list);
	struct discard_entry *entry, *this;

	/* drop caches */
	list_for_each_entry_safe(entry, this, head, list) {
		list_del(&entry->list);
		kmem_cache_free(discard_entry_slab, entry);
	}
}

/*
 * Should call clear_prefree_segments after checkpoint is done.
 */
static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int segno;

	mutex_lock(&dirty_i->seglist_lock);
	for_each_set_bit(segno, dirty_i->dirty_segmap[PRE], MAIN_SEGS(sbi))
		__set_test_and_free(sbi, segno);
	mutex_unlock(&dirty_i->seglist_lock);
}

void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	struct list_head *head = &(SM_I(sbi)->discard_list);
	struct discard_entry *entry, *this;
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
	unsigned int start = 0, end = -1;

	mutex_lock(&dirty_i->seglist_lock);

	while (1) {
		int i;
		start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1);
		if (start >= MAIN_SEGS(sbi))
			break;
		end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi),
								start + 1);

		for (i = start; i < end; i++)
			clear_bit(i, prefree_map);

		dirty_i->nr_dirty[PRE] -= end - start;

		if (!test_opt(sbi, DISCARD))
			continue;

		f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
				(end - start) << sbi->log_blocks_per_seg);
	}
	mutex_unlock(&dirty_i->seglist_lock);

	/* send small discards */
	list_for_each_entry_safe(entry, this, head, list) {
		if (cpc->reason == CP_DISCARD && entry->len < cpc->trim_minlen)
			goto skip;
		f2fs_issue_discard(sbi, entry->blkaddr, entry->len);
skip:
		list_del(&entry->list);
		SM_I(sbi)->nr_discards -= entry->len;
		kmem_cache_free(discard_entry_slab, entry);
	}
}

static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);

	if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap)) {
		sit_i->dirty_sentries++;
		return false;
	}

	return true;
}

static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type,
					unsigned int segno, int modified)
{
	struct seg_entry *se = get_seg_entry(sbi, segno);
	se->type = type;
	if (modified)
		__mark_sit_entry_dirty(sbi, segno);
}

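/*
 * Apply a valid-block delta of @del at @blkaddr: update the segment's
 * current and discard bitmaps, its mtime and valid-block counts, then
 * mark the SIT entry dirty so the change reaches the next checkpoint.
 */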
static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
{
	struct seg_entry *se;
	unsigned int segno, offset;
	long int new_vblocks;

	segno = GET_SEGNO(sbi, blkaddr);

	se = get_seg_entry(sbi, segno);
	new_vblocks = se->valid_blocks + del;
	offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);

	f2fs_bug_on(sbi, (new_vblocks >> (sizeof(unsigned short) << 3) ||
				(new_vblocks > sbi->blocks_per_seg)));

	se->valid_blocks = new_vblocks;
	se->mtime = get_mtime(sbi);
	SIT_I(sbi)->max_mtime = se->mtime;

	/* Update valid block bitmap */
	if (del > 0) {
		if (f2fs_test_and_set_bit(offset, se->cur_valid_map))
			f2fs_bug_on(sbi, 1);
		if (!f2fs_test_and_set_bit(offset, se->discard_map))
			sbi->discard_blks--;
	} else {
		if (!f2fs_test_and_clear_bit(offset, se->cur_valid_map))
			f2fs_bug_on(sbi, 1);
		if (f2fs_test_and_clear_bit(offset, se->discard_map))
			sbi->discard_blks++;
	}
	if (!f2fs_test_bit(offset, se->ckpt_valid_map))
		se->ckpt_valid_blocks += del;

	__mark_sit_entry_dirty(sbi, segno);

	/* update total number of valid blocks to be written in ckpt area */
	SIT_I(sbi)->written_valid_blocks += del;

	if (sbi->segs_per_sec > 1)
		get_sec_entry(sbi, segno)->valid_blocks += del;
}

void refresh_sit_entry(struct f2fs_sb_info *sbi, block_t old, block_t new)
{
	update_sit_entry(sbi, new, 1);
	if (GET_SEGNO(sbi, old) != NULL_SEGNO)
		update_sit_entry(sbi, old, -1);

	locate_dirty_segment(sbi, GET_SEGNO(sbi, old));
	locate_dirty_segment(sbi, GET_SEGNO(sbi, new));
}

void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
{
	unsigned int segno = GET_SEGNO(sbi, addr);
	struct sit_info *sit_i = SIT_I(sbi);

	f2fs_bug_on(sbi, addr == NULL_ADDR);
	if (addr == NEW_ADDR)
		return;

	/* add it into sit main buffer */
	mutex_lock(&sit_i->sentry_lock);

	update_sit_entry(sbi, addr, -1);

	/* add it into dirty seglist */
	locate_dirty_segment(sbi, segno);

	mutex_unlock(&sit_i->sentry_lock);
}

/*
 * This function must be called with the curseg_mutex held.
 */
static void __add_sum_entry(struct f2fs_sb_info *sbi, int type,
					struct f2fs_summary *sum)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	void *addr = curseg->sum_blk;
	addr += curseg->next_blkoff * sizeof(struct f2fs_summary);
	memcpy(addr, sum, sizeof(struct f2fs_summary));
}

/*
 * Calculate the number of current summary pages for writing
 */
int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
{
	int valid_sum_count = 0;
	int i, sum_in_page;

	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		if (sbi->ckpt->alloc_type[i] == SSR)
			valid_sum_count += sbi->blocks_per_seg;
		else {
			if (for_ra)
				valid_sum_count += le16_to_cpu(
					F2FS_CKPT(sbi)->cur_data_blkoff[i]);
			else
				valid_sum_count += curseg_blkoff(sbi, i);
		}
	}

	sum_in_page = (PAGE_CACHE_SIZE - 2 * SUM_JOURNAL_SIZE -
			SUM_FOOTER_SIZE) / SUMMARY_SIZE;
	if (valid_sum_count <= sum_in_page)
		return 1;
	else if ((valid_sum_count - sum_in_page) <=
		(PAGE_CACHE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
		return 2;
	return 3;
}

/*
 * Caller should put this summary page
 */
struct page *get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
{
	return get_meta_page(sbi, GET_SUM_BLOCK(sbi, segno));
}

static void write_sum_page(struct f2fs_sb_info *sbi,
			struct f2fs_summary_block *sum_blk, block_t blk_addr)
{
	struct page *page = grab_meta_page(sbi, blk_addr);
	void *kaddr = page_address(page);
	memcpy(kaddr, sum_blk, PAGE_CACHE_SIZE);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
}

static int is_next_segment_free(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int segno = curseg->segno + 1;
	struct free_segmap_info *free_i = FREE_I(sbi);

	if (segno < MAIN_SEGS(sbi) && segno % sbi->segs_per_sec)
		return !test_bit(segno, free_i->free_segmap);
	return 0;
}

/*
 * Find a new segment from the free segments bitmap in the right order.
 * This function must succeed; otherwise it is a BUG.
 */
static void get_new_segment(struct f2fs_sb_info *sbi,
			unsigned int *newseg, bool new_sec, int dir)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int segno, secno, zoneno;
	unsigned int total_zones = MAIN_SECS(sbi) / sbi->secs_per_zone;
	unsigned int hint = *newseg / sbi->segs_per_sec;
	unsigned int old_zoneno = GET_ZONENO_FROM_SEGNO(sbi, *newseg);
	unsigned int left_start = hint;
	bool init = true;
	int go_left = 0;
	int i;

	spin_lock(&free_i->segmap_lock);

	if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
		segno = find_next_zero_bit(free_i->free_segmap,
					MAIN_SEGS(sbi), *newseg + 1);
		if (segno - *newseg < sbi->segs_per_sec -
					(*newseg % sbi->segs_per_sec))
			goto got_it;
	}
find_other_zone:
	secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
	if (secno >= MAIN_SECS(sbi)) {
		if (dir == ALLOC_RIGHT) {
			secno = find_next_zero_bit(free_i->free_secmap,
							MAIN_SECS(sbi), 0);
			f2fs_bug_on(sbi, secno >= MAIN_SECS(sbi));
		} else {
			go_left = 1;
			left_start = hint - 1;
		}
	}
	if (go_left == 0)
		goto skip_left;

	while (test_bit(left_start, free_i->free_secmap)) {
		if (left_start > 0) {
			left_start--;
			continue;
		}
		left_start = find_next_zero_bit(free_i->free_secmap,
							MAIN_SECS(sbi), 0);
		f2fs_bug_on(sbi, left_start >= MAIN_SECS(sbi));
		break;
	}
	secno = left_start;
skip_left:
	hint = secno;
	segno = secno * sbi->segs_per_sec;
	zoneno = secno / sbi->secs_per_zone;

	/* give up on finding another zone */
	if (!init)
		goto got_it;
	if (sbi->secs_per_zone == 1)
		goto got_it;
	if (zoneno == old_zoneno)
		goto got_it;
	if (dir == ALLOC_LEFT) {
		if (!go_left && zoneno + 1 >= total_zones)
			goto got_it;
		if (go_left && zoneno == 0)
			goto got_it;
	}
	for (i = 0; i < NR_CURSEG_TYPE; i++)
		if (CURSEG_I(sbi, i)->zone == zoneno)
			break;

	if (i < NR_CURSEG_TYPE) {
		/* zone is in user, try another */
		if (go_left)
			hint = zoneno * sbi->secs_per_zone - 1;
		else if (zoneno + 1 >= total_zones)
			hint = 0;
		else
			hint = (zoneno + 1) * sbi->secs_per_zone;
		init = false;
		goto find_other_zone;
	}
got_it:
	/* set it as dirty segment in free segmap */
	f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap));
	__set_inuse(sbi, segno);
	*newseg = segno;
	spin_unlock(&free_i->segmap_lock);
}

static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	struct summary_footer *sum_footer;

	curseg->segno = curseg->next_segno;
	curseg->zone = GET_ZONENO_FROM_SEGNO(sbi, curseg->segno);
	curseg->next_blkoff = 0;
	curseg->next_segno = NULL_SEGNO;

	sum_footer = &(curseg->sum_blk->footer);
	memset(sum_footer, 0, sizeof(struct summary_footer));
	if (IS_DATASEG(type))
		SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
	if (IS_NODESEG(type))
		SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
	__set_sit_entry_type(sbi, type, curseg->segno, modified);
}

/*
 * Allocate a current working segment.
 * This function always allocates a free segment in LFS manner.
 */
static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int segno = curseg->segno;
	int dir = ALLOC_LEFT;

	write_sum_page(sbi, curseg->sum_blk,
				GET_SUM_BLOCK(sbi, segno));
	if (type == CURSEG_WARM_DATA || type == CURSEG_COLD_DATA)
		dir = ALLOC_RIGHT;

	if (test_opt(sbi, NOHEAP))
		dir = ALLOC_RIGHT;

	get_new_segment(sbi, &segno, new_sec, dir);
	curseg->next_segno = segno;
	reset_curseg(sbi, type, 1);
	curseg->alloc_type = LFS;
}

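/*
 * Pick the next free block offset in @seg for SSR allocation by OR-ing
 * the checkpointed and current valid-block bitmaps: a block may be
 * reused only if it is free in both views.
 */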
static void __next_free_blkoff(struct f2fs_sb_info *sbi,
			struct curseg_info *seg, block_t start)
{
	struct seg_entry *se = get_seg_entry(sbi, seg->segno);
	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
	unsigned long *target_map = SIT_I(sbi)->tmp_map;
	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
	int i, pos;

	for (i = 0; i < entries; i++)
		target_map[i] = ckpt_map[i] | cur_map[i];

	pos = __find_rev_next_zero_bit(target_map, sbi->blocks_per_seg, start);

	seg->next_blkoff = pos;
}

/*
 * If a segment is written in LFS manner, the next block offset is simply
 * obtained by increasing the current block offset. However, if a segment is
 * written in SSR manner, the next block offset is obtained by calling
 * __next_free_blkoff.
 */
static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
				struct curseg_info *seg)
{
	if (seg->alloc_type == SSR)
		__next_free_blkoff(sbi, seg, seg->next_blkoff + 1);
	else
		seg->next_blkoff++;
}

/*
 * This function always allocates a used segment (from the dirty seglist) in
 * SSR manner, so it needs to recover the existing segment information of
 * valid blocks.
 */
static void change_curseg(struct f2fs_sb_info *sbi, int type, bool reuse)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int new_segno = curseg->next_segno;
	struct f2fs_summary_block *sum_node;
	struct page *sum_page;

	write_sum_page(sbi, curseg->sum_blk,
				GET_SUM_BLOCK(sbi, curseg->segno));
	__set_test_and_inuse(sbi, new_segno);

	mutex_lock(&dirty_i->seglist_lock);
	__remove_dirty_segment(sbi, new_segno, PRE);
	__remove_dirty_segment(sbi, new_segno, DIRTY);
	mutex_unlock(&dirty_i->seglist_lock);

	reset_curseg(sbi, type, 1);
	curseg->alloc_type = SSR;
	__next_free_blkoff(sbi, curseg, 0);

	if (reuse) {
		sum_page = get_sum_page(sbi, new_segno);
		sum_node = (struct f2fs_summary_block *)page_address(sum_page);
		memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
		f2fs_put_page(sum_page, 1);
	}
}

static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;

	if (IS_NODESEG(type) || !has_not_enough_free_secs(sbi, 0))
		return v_ops->get_victim(sbi,
				&(curseg)->next_segno, BG_GC, type, SSR);

	/* For data segments, let's do SSR more intensively */
	for (; type >= CURSEG_HOT_DATA; type--)
		if (v_ops->get_victim(sbi, &(curseg)->next_segno,
						BG_GC, type, SSR))
			return 1;
	return 0;
}

/*
 * Flush out the current segment and replace it with a new segment.
 * This function must succeed; otherwise it is a BUG.
 */
static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
						int type, bool force)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);

	if (force)
		new_curseg(sbi, type, true);
	else if (type == CURSEG_WARM_NODE)
		new_curseg(sbi, type, false);
	else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type))
		new_curseg(sbi, type, false);
	else if (need_SSR(sbi) && get_ssr_segment(sbi, type))
		change_curseg(sbi, type, true);
	else
		new_curseg(sbi, type, false);

	stat_inc_seg_type(sbi, curseg);
}

static void __allocate_new_segments(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int old_segno;

	old_segno = curseg->segno;
	SIT_I(sbi)->s_ops->allocate_segment(sbi, type, true);
	locate_dirty_segment(sbi, old_segno);
}

void allocate_new_segments(struct f2fs_sb_info *sbi)
{
	int i;

	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++)
		__allocate_new_segments(sbi, i);
}

static const struct segment_allocation default_salloc_ops = {
	.allocate_segment = allocate_segment_by_default,
};

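/*
 * FITRIM entry point: discard free space within the requested byte
 * range, batching at most BATCHED_TRIM_SEGMENTS(sbi) segments per
 * checkpoint so each checkpoint issues a bounded amount of discard work.
 */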
int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
{
	__u64 start = F2FS_BYTES_TO_BLK(range->start);
	__u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1;
	unsigned int start_segno, end_segno;
	struct cp_control cpc;

	if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize)
		return -EINVAL;

	cpc.trimmed = 0;
	if (end <= MAIN_BLKADDR(sbi))
		goto out;

	/* start/end segment number in main_area */
	start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start);
	end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
						GET_SEGNO(sbi, end);
	cpc.reason = CP_DISCARD;
	cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen));

	/* do checkpoint to issue discard commands safely */
	for (; start_segno <= end_segno; start_segno = cpc.trim_end + 1) {
		cpc.trim_start = start_segno;

		if (sbi->discard_blks == 0)
			break;
		else if (sbi->discard_blks < BATCHED_TRIM_BLOCKS(sbi))
			cpc.trim_end = end_segno;
		else
			cpc.trim_end = min_t(unsigned int,
				rounddown(start_segno +
				BATCHED_TRIM_SEGMENTS(sbi),
				sbi->segs_per_sec) - 1, end_segno);

		mutex_lock(&sbi->gc_mutex);
		write_checkpoint(sbi, &cpc);
		mutex_unlock(&sbi->gc_mutex);
	}
out:
	range->len = F2FS_BLK_TO_BYTES(cpc.trimmed);
	return 0;
}

static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	if (curseg->next_blkoff < sbi->blocks_per_seg)
		return true;
	return false;
}

static int __get_segment_type_2(struct page *page, enum page_type p_type)
{
	if (p_type == DATA)
		return CURSEG_HOT_DATA;
	else
		return CURSEG_HOT_NODE;
}

static int __get_segment_type_4(struct page *page, enum page_type p_type)
{
	if (p_type == DATA) {
		struct inode *inode = page->mapping->host;

		if (S_ISDIR(inode->i_mode))
			return CURSEG_HOT_DATA;
		else
			return CURSEG_COLD_DATA;
	} else {
		if (IS_DNODE(page) && is_cold_node(page))
			return CURSEG_WARM_NODE;
		else
			return CURSEG_COLD_NODE;
	}
}

static int __get_segment_type_6(struct page *page, enum page_type p_type)
{
	if (p_type == DATA) {
		struct inode *inode = page->mapping->host;

		if (S_ISDIR(inode->i_mode))
			return CURSEG_HOT_DATA;
		else if (is_cold_data(page) || file_is_cold(inode))
			return CURSEG_COLD_DATA;
		else
			return CURSEG_WARM_DATA;
	} else {
		if (IS_DNODE(page))
			return is_cold_node(page) ? CURSEG_WARM_NODE :
						CURSEG_HOT_NODE;
		else
			return CURSEG_COLD_NODE;
	}
}

static int __get_segment_type(struct page *page, enum page_type p_type)
{
	switch (F2FS_P_SB(page)->active_logs) {
	case 2:
		return __get_segment_type_2(page, p_type);
	case 4:
		return __get_segment_type_4(page, p_type);
	}
	/* NR_CURSEG_TYPE(6) logs by default */
	f2fs_bug_on(F2FS_P_SB(page),
		F2FS_P_SB(page)->active_logs != NR_CURSEG_TYPE);
	return __get_segment_type_6(page, p_type);
}

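/*
 * The six-log temperature mapping implemented above, in brief:
 *   data: directory blocks -> HOT, cold-marked file blocks -> COLD,
 *         everything else -> WARM
 *   node: direct nodes -> HOT (or WARM when cold-marked),
 *         indirect nodes -> COLD
 */
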
void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
		block_t old_blkaddr, block_t *new_blkaddr,
		struct f2fs_summary *sum, int type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct curseg_info *curseg;
	bool direct_io = (type == CURSEG_DIRECT_IO);

	type = direct_io ? CURSEG_WARM_DATA : type;

	curseg = CURSEG_I(sbi, type);

	mutex_lock(&curseg->curseg_mutex);
	mutex_lock(&sit_i->sentry_lock);

	/* direct_io'ed data is aligned to the segment for better performance */
	if (direct_io && curseg->next_blkoff)
		__allocate_new_segments(sbi, type);

	*new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	/*
	 * __add_sum_entry should be called under the curseg_mutex because
	 * it updates a summary entry in the current summary block.
	 */
	__add_sum_entry(sbi, type, sum);

	__refresh_next_blkoff(sbi, curseg);

	stat_inc_block_count(sbi, curseg);

	if (!__has_curseg_space(sbi, type))
		sit_i->s_ops->allocate_segment(sbi, type, false);
	/*
	 * SIT information should be updated before segment allocation,
	 * since SSR needs latest valid block information.
	 */
	refresh_sit_entry(sbi, old_blkaddr, *new_blkaddr);

	mutex_unlock(&sit_i->sentry_lock);

	if (page && IS_NODESEG(type))
		fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));

	mutex_unlock(&curseg->curseg_mutex);
}

static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
{
	int type = __get_segment_type(fio->page, fio->type);

	allocate_data_block(fio->sbi, fio->page, fio->blk_addr,
					&fio->blk_addr, sum, type);

	/* writeout dirty page into bdev */
	f2fs_submit_page_mbio(fio);
}

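/*
 * Meta pages live at fixed disk addresses, so write_meta_page() writes
 * in place at page->index rather than allocating a new block through
 * do_write_page().
 */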
void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = META,
		.rw = WRITE_SYNC | REQ_META | REQ_PRIO,
		.blk_addr = page->index,
		.page = page,
		.encrypted_page = NULL,
	};

	set_page_writeback(page);
	f2fs_submit_page_mbio(&fio);
}

void write_node_page(unsigned int nid, struct f2fs_io_info *fio)
{
	struct f2fs_summary sum;

	set_summary(&sum, nid, 0, 0);
	do_write_page(&sum, fio);
}

void write_data_page(struct dnode_of_data *dn, struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	struct f2fs_summary sum;
	struct node_info ni;

	f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR);
	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
	do_write_page(&sum, fio);
	dn->data_blkaddr = fio->blk_addr;
}

void rewrite_data_page(struct f2fs_io_info *fio)
{
	stat_inc_inplace_blocks(fio->sbi);
	f2fs_submit_page_mbio(fio);
}

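/*
 * Move the summary for a block relocated from @old_blkaddr to
 * @new_blkaddr (used by roll-forward recovery); when @recover_curseg is
 * set, the current segment position is restored afterwards so normal
 * allocation continues undisturbed.
 */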
void f2fs_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
				block_t old_blkaddr, block_t new_blkaddr,
				bool recover_curseg)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct curseg_info *curseg;
	unsigned int segno, old_cursegno;
	struct seg_entry *se;
	int type;
	unsigned short old_blkoff;

	segno = GET_SEGNO(sbi, new_blkaddr);
	se = get_seg_entry(sbi, segno);
	type = se->type;

	if (!recover_curseg) {
		/* for recovery flow */
		if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
			if (old_blkaddr == NULL_ADDR)
				type = CURSEG_COLD_DATA;
			else
				type = CURSEG_WARM_DATA;
		}
	} else {
		if (!IS_CURSEG(sbi, segno))
			type = CURSEG_WARM_DATA;
	}

	curseg = CURSEG_I(sbi, type);

	mutex_lock(&curseg->curseg_mutex);
	mutex_lock(&sit_i->sentry_lock);

	old_cursegno = curseg->segno;
	old_blkoff = curseg->next_blkoff;

	/* change the current segment */
	if (segno != curseg->segno) {
		curseg->next_segno = segno;
		change_curseg(sbi, type, true);
	}

	curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr);
	__add_sum_entry(sbi, type, sum);

	refresh_sit_entry(sbi, old_blkaddr, new_blkaddr);
	locate_dirty_segment(sbi, old_cursegno);

	if (recover_curseg) {
		if (old_cursegno != curseg->segno) {
			curseg->next_segno = old_cursegno;
			change_curseg(sbi, type, true);
		}
		curseg->next_blkoff = old_blkoff;
	}

	mutex_unlock(&sit_i->sentry_lock);
	mutex_unlock(&curseg->curseg_mutex);
}

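/*
 * Check whether @page is still held in the pending merged write bio for
 * @type; for encrypted pages the bio carries the ciphertext page, so map
 * it back to its control (plaintext) page before comparing.
 */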
Chao Yudf0f8dc2014-03-22 14:57:23 +08001342static inline bool is_merged_page(struct f2fs_sb_info *sbi,
1343 struct page *page, enum page_type type)
1344{
1345 enum page_type btype = PAGE_TYPE_OF_BIO(type);
1346 struct f2fs_bio_info *io = &sbi->write_io[btype];
Chao Yudf0f8dc2014-03-22 14:57:23 +08001347 struct bio_vec *bvec;
Jaegeuk Kim4375a332015-04-23 12:04:33 -07001348 struct page *target;
Chao Yudf0f8dc2014-03-22 14:57:23 +08001349 int i;
1350
1351 down_read(&io->io_rwsem);
Jaegeuk Kim4375a332015-04-23 12:04:33 -07001352 if (!io->bio) {
1353 up_read(&io->io_rwsem);
1354 return false;
1355 }
Chao Yudf0f8dc2014-03-22 14:57:23 +08001356
Jaegeuk Kimce234472014-04-02 09:04:42 +09001357 bio_for_each_segment_all(bvec, io->bio, i) {
Jaegeuk Kim4375a332015-04-23 12:04:33 -07001358
1359 if (bvec->bv_page->mapping) {
1360 target = bvec->bv_page;
1361 } else {
1362 struct f2fs_crypto_ctx *ctx;
1363
1364 /* encrypted page */
1365 ctx = (struct f2fs_crypto_ctx *)page_private(
1366 bvec->bv_page);
1367 target = ctx->control_page;
1368 }
1369
1370 if (page == target) {
Chao Yudf0f8dc2014-03-22 14:57:23 +08001371 up_read(&io->io_rwsem);
1372 return true;
1373 }
1374 }
1375
Chao Yudf0f8dc2014-03-22 14:57:23 +08001376 up_read(&io->io_rwsem);
1377 return false;
1378}
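
/*
 * Note on the encrypted case above: for an encrypted write the bio
 * carries a bounce page whose ->mapping is NULL; its page_private points
 * at the f2fs_crypto_ctx, and ctx->control_page is the original page the
 * caller submitted. Comparing against the control page lets callers ask
 * about their own page rather than the internal bounce page.
 */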
1379
Jaegeuk Kim93dfe2a2013-11-30 12:51:14 +09001380void f2fs_wait_on_page_writeback(struct page *page,
Yuan Zhong5514f0a2014-01-10 07:26:14 +00001381 enum page_type type)
Jaegeuk Kim93dfe2a2013-11-30 12:51:14 +09001382{
Jaegeuk Kim93dfe2a2013-11-30 12:51:14 +09001383 if (PageWriteback(page)) {
Jaegeuk Kim40813632014-09-02 15:31:18 -07001384 struct f2fs_sb_info *sbi = F2FS_P_SB(page);
1385
Chao Yudf0f8dc2014-03-22 14:57:23 +08001386 if (is_merged_page(sbi, page, type))
1387 f2fs_submit_merged_bio(sbi, type, WRITE);
Jaegeuk Kim93dfe2a2013-11-30 12:51:14 +09001388 wait_on_page_writeback(page);
1389 }
1390}
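
/*
 * A page under writeback may still sit in the per-type merged bio cache,
 * not yet submitted; waiting on PG_writeback without first flushing that
 * cached bio could stall until unrelated I/O forces submission, hence
 * the is_merged_page() check. A typical call, as a sketch:
 *
 *	f2fs_wait_on_page_writeback(node_page, NODE);
 */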
1391
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001392static int read_compacted_summaries(struct f2fs_sb_info *sbi)
1393{
1394 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
1395 struct curseg_info *seg_i;
1396 unsigned char *kaddr;
1397 struct page *page;
1398 block_t start;
1399 int i, j, offset;
1400
1401 start = start_sum_block(sbi);
1402
1403 page = get_meta_page(sbi, start++);
1404 kaddr = (unsigned char *)page_address(page);
1405
1406 /* Step 1: restore nat cache */
1407 seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
1408 memcpy(&seg_i->sum_blk->n_nats, kaddr, SUM_JOURNAL_SIZE);
1409
1410 /* Step 2: restore sit cache */
1411 seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
1412 memcpy(&seg_i->sum_blk->n_sits, kaddr + SUM_JOURNAL_SIZE,
1413 SUM_JOURNAL_SIZE);
1414 offset = 2 * SUM_JOURNAL_SIZE;
1415
1416 /* Step 3: restore summary entries */
1417 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
1418 unsigned short blk_off;
1419 unsigned int segno;
1420
1421 seg_i = CURSEG_I(sbi, i);
1422 segno = le32_to_cpu(ckpt->cur_data_segno[i]);
1423 blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
1424 seg_i->next_segno = segno;
1425 reset_curseg(sbi, i, 0);
1426 seg_i->alloc_type = ckpt->alloc_type[i];
1427 seg_i->next_blkoff = blk_off;
1428
1429 if (seg_i->alloc_type == SSR)
1430 blk_off = sbi->blocks_per_seg;
1431
1432 for (j = 0; j < blk_off; j++) {
1433 struct f2fs_summary *s;
1434 s = (struct f2fs_summary *)(kaddr + offset);
1435 seg_i->sum_blk->entries[j] = *s;
1436 offset += SUMMARY_SIZE;
1437 if (offset + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
1438 SUM_FOOTER_SIZE)
1439 continue;
1440
1441 f2fs_put_page(page, 1);
1442 page = NULL;
1443
1444 page = get_meta_page(sbi, start++);
1445 kaddr = (unsigned char *)page_address(page);
1446 offset = 0;
1447 }
1448 }
1449 f2fs_put_page(page, 1);
1450 return 0;
1451}
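
/*
 * Layout of a compacted summary block, as read above: the NAT journal,
 * the SIT journal, and then the packed summary entries of the three data
 * cursegs, with SUM_FOOTER_SIZE left unused at the tail of each block:
 *
 *	+--------------------+
 *	| n_nats journal     |  SUM_JOURNAL_SIZE
 *	| n_sits journal     |  SUM_JOURNAL_SIZE
 *	| summary entries    |  SUMMARY_SIZE each; hot/warm/cold data,
 *	| ...                |  spilling into the following blocks
 *	| (footer reserved)  |  SUM_FOOTER_SIZE
 *	+--------------------+
 */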
1452
1453static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
1454{
1455 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
1456 struct f2fs_summary_block *sum;
1457 struct curseg_info *curseg;
1458 struct page *new;
1459 unsigned short blk_off;
1460 unsigned int segno = 0;
1461 block_t blk_addr = 0;
1462
1463 /* get segment number and block addr */
1464 if (IS_DATASEG(type)) {
1465 segno = le32_to_cpu(ckpt->cur_data_segno[type]);
1466 blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type -
1467 CURSEG_HOT_DATA]);
Jaegeuk Kim119ee912015-01-29 11:45:33 -08001468 if (__exist_node_summaries(sbi))
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001469 blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type);
1470 else
1471 blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
1472 } else {
1473 segno = le32_to_cpu(ckpt->cur_node_segno[type -
1474 CURSEG_HOT_NODE]);
1475 blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type -
1476 CURSEG_HOT_NODE]);
Jaegeuk Kim119ee912015-01-29 11:45:33 -08001477 if (__exist_node_summaries(sbi))
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001478 blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
1479 type - CURSEG_HOT_NODE);
1480 else
1481 blk_addr = GET_SUM_BLOCK(sbi, segno);
1482 }
1483
1484 new = get_meta_page(sbi, blk_addr);
1485 sum = (struct f2fs_summary_block *)page_address(new);
1486
1487 if (IS_NODESEG(type)) {
Jaegeuk Kim119ee912015-01-29 11:45:33 -08001488 if (__exist_node_summaries(sbi)) {
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001489 struct f2fs_summary *ns = &sum->entries[0];
1490 int i;
1491 for (i = 0; i < sbi->blocks_per_seg; i++, ns++) {
1492 ns->version = 0;
1493 ns->ofs_in_node = 0;
1494 }
1495 } else {
Gu Zhengd6537882014-03-07 18:43:36 +08001496 int err;
1497
1498 err = restore_node_summary(sbi, segno, sum);
1499 if (err) {
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001500 f2fs_put_page(new, 1);
Gu Zhengd6537882014-03-07 18:43:36 +08001501 return err;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001502 }
1503 }
1504 }
1505
1506	/* set the uncompleted segment as the current segment */
1507 curseg = CURSEG_I(sbi, type);
1508 mutex_lock(&curseg->curseg_mutex);
1509 memcpy(curseg->sum_blk, sum, PAGE_CACHE_SIZE);
1510 curseg->next_segno = segno;
1511 reset_curseg(sbi, type, 0);
1512 curseg->alloc_type = ckpt->alloc_type[type];
1513 curseg->next_blkoff = blk_off;
1514 mutex_unlock(&curseg->curseg_mutex);
1515 f2fs_put_page(new, 1);
1516 return 0;
1517}
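
/*
 * For node cursegs two cases are handled above: when the checkpoint
 * wrote node summaries (__exist_node_summaries()), the stored entries
 * only have version and ofs_in_node cleared, since node blocks do not
 * use them; otherwise restore_node_summary() rebuilds the entries by
 * scanning the node blocks of the segment itself.
 */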
1518
1519static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
1520{
1521 int type = CURSEG_HOT_DATA;
Chao Yue4fc5fb2014-03-17 16:36:24 +08001522 int err;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001523
Jaegeuk Kim25ca9232012-11-28 16:12:41 +09001524 if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) {
Chao Yu3fa06d72014-12-09 14:21:46 +08001525 int npages = npages_for_summary_flush(sbi, true);
1526
1527 if (npages >= 2)
1528 ra_meta_pages(sbi, start_sum_block(sbi), npages,
1529 META_CP);
1530
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001531 /* restore for compacted data summary */
1532 if (read_compacted_summaries(sbi))
1533 return -EINVAL;
1534 type = CURSEG_HOT_NODE;
1535 }
1536
Jaegeuk Kim119ee912015-01-29 11:45:33 -08001537 if (__exist_node_summaries(sbi))
Chao Yu3fa06d72014-12-09 14:21:46 +08001538 ra_meta_pages(sbi, sum_blk_addr(sbi, NR_CURSEG_TYPE, type),
1539 NR_CURSEG_TYPE - type, META_CP);
1540
Chao Yue4fc5fb2014-03-17 16:36:24 +08001541 for (; type <= CURSEG_COLD_NODE; type++) {
1542 err = read_normal_summaries(sbi, type);
1543 if (err)
1544 return err;
1545 }
1546
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001547 return 0;
1548}
1549
1550static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
1551{
1552 struct page *page;
1553 unsigned char *kaddr;
1554 struct f2fs_summary *summary;
1555 struct curseg_info *seg_i;
1556 int written_size = 0;
1557 int i, j;
1558
1559 page = grab_meta_page(sbi, blkaddr++);
1560 kaddr = (unsigned char *)page_address(page);
1561
1562 /* Step 1: write nat cache */
1563 seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
1564 memcpy(kaddr, &seg_i->sum_blk->n_nats, SUM_JOURNAL_SIZE);
1565 written_size += SUM_JOURNAL_SIZE;
1566
1567 /* Step 2: write sit cache */
1568 seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
1569 memcpy(kaddr + written_size, &seg_i->sum_blk->n_sits,
1570 SUM_JOURNAL_SIZE);
1571 written_size += SUM_JOURNAL_SIZE;
1572
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001573 /* Step 3: write summary entries */
1574 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
1575 unsigned short blkoff;
1576 seg_i = CURSEG_I(sbi, i);
1577 if (sbi->ckpt->alloc_type[i] == SSR)
1578 blkoff = sbi->blocks_per_seg;
1579 else
1580 blkoff = curseg_blkoff(sbi, i);
1581
1582 for (j = 0; j < blkoff; j++) {
1583 if (!page) {
1584 page = grab_meta_page(sbi, blkaddr++);
1585 kaddr = (unsigned char *)page_address(page);
1586 written_size = 0;
1587 }
1588 summary = (struct f2fs_summary *)(kaddr + written_size);
1589 *summary = seg_i->sum_blk->entries[j];
1590 written_size += SUMMARY_SIZE;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001591
1592 if (written_size + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
1593 SUM_FOOTER_SIZE)
1594 continue;
1595
Chao Yue8d61a72013-10-24 15:08:28 +08001596 set_page_dirty(page);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001597 f2fs_put_page(page, 1);
1598 page = NULL;
1599 }
1600 }
Chao Yue8d61a72013-10-24 15:08:28 +08001601 if (page) {
1602 set_page_dirty(page);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001603 f2fs_put_page(page, 1);
Chao Yue8d61a72013-10-24 15:08:28 +08001604 }
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001605}
1606
1607static void write_normal_summaries(struct f2fs_sb_info *sbi,
1608 block_t blkaddr, int type)
1609{
1610 int i, end;
1611 if (IS_DATASEG(type))
1612 end = type + NR_CURSEG_DATA_TYPE;
1613 else
1614 end = type + NR_CURSEG_NODE_TYPE;
1615
1616 for (i = type; i < end; i++) {
1617 struct curseg_info *sum = CURSEG_I(sbi, i);
1618 mutex_lock(&sum->curseg_mutex);
1619 write_sum_page(sbi, sum->sum_blk, blkaddr + (i - type));
1620 mutex_unlock(&sum->curseg_mutex);
1621 }
1622}
1623
1624void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
1625{
Jaegeuk Kim25ca9232012-11-28 16:12:41 +09001626 if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG))
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001627 write_compacted_summaries(sbi, start_blk);
1628 else
1629 write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
1630}
1631
1632void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
1633{
Jaegeuk Kim119ee912015-01-29 11:45:33 -08001634 write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001635}
1636
1637int lookup_journal_in_cursum(struct f2fs_summary_block *sum, int type,
1638 unsigned int val, int alloc)
1639{
1640 int i;
1641
1642 if (type == NAT_JOURNAL) {
1643 for (i = 0; i < nats_in_cursum(sum); i++) {
1644 if (le32_to_cpu(nid_in_journal(sum, i)) == val)
1645 return i;
1646 }
1647 if (alloc && nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES)
1648 return update_nats_in_cursum(sum, 1);
1649 } else if (type == SIT_JOURNAL) {
1650 for (i = 0; i < sits_in_cursum(sum); i++)
1651 if (le32_to_cpu(segno_in_journal(sum, i)) == val)
1652 return i;
1653 if (alloc && sits_in_cursum(sum) < SIT_JOURNAL_ENTRIES)
1654 return update_sits_in_cursum(sum, 1);
1655 }
1656 return -1;
1657}
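
/*
 * Usage sketch (illustrative of how the NAT code consults the journal
 * cached in the hot data curseg; names as in f2fs.h):
 *
 *	i = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 0);
 *	if (i >= 0)
 *		raw_ne = nat_in_journal(sum, i);
 *
 * A non-negative return is the matching slot; with alloc set, a fresh
 * slot is appended when the key is absent and its index returned, and
 * -1 means no match and no room.
 */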
1658
1659static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
1660 unsigned int segno)
1661{
Gu Zheng2cc22182014-10-20 17:45:49 +08001662 return get_meta_page(sbi, current_sit_addr(sbi, segno));
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001663}
1664
1665static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
1666 unsigned int start)
1667{
1668 struct sit_info *sit_i = SIT_I(sbi);
1669 struct page *src_page, *dst_page;
1670 pgoff_t src_off, dst_off;
1671 void *src_addr, *dst_addr;
1672
1673 src_off = current_sit_addr(sbi, start);
1674 dst_off = next_sit_addr(sbi, src_off);
1675
1676 /* get current sit block page without lock */
1677 src_page = get_meta_page(sbi, src_off);
1678 dst_page = grab_meta_page(sbi, dst_off);
Jaegeuk Kim9850cf42014-09-02 15:52:58 -07001679 f2fs_bug_on(sbi, PageDirty(src_page));
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001680
1681 src_addr = page_address(src_page);
1682 dst_addr = page_address(dst_page);
1683 memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
1684
1685 set_page_dirty(dst_page);
1686 f2fs_put_page(src_page, 1);
1687
1688 set_to_next_sit(sit_i, start);
1689
1690 return dst_page;
1691}
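
/*
 * SIT blocks are double-buffered on disk: each logical SIT block has two
 * physical locations, and current_sit_addr()/next_sit_addr() choose
 * between them via a bitmap. Dirty entries are written to the *next*
 * copy, and set_to_next_sit() flips the bit so that copy becomes current
 * once the checkpoint commits, leaving the old copy intact for recovery.
 */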
1692
Chao Yu184a5cd2014-09-04 18:13:01 +08001693static struct sit_entry_set *grab_sit_entry_set(void)
1694{
1695 struct sit_entry_set *ses =
1696 f2fs_kmem_cache_alloc(sit_entry_set_slab, GFP_ATOMIC);
1697
1698 ses->entry_cnt = 0;
1699 INIT_LIST_HEAD(&ses->set_list);
1700 return ses;
1701}
1702
1703static void release_sit_entry_set(struct sit_entry_set *ses)
1704{
1705 list_del(&ses->set_list);
1706 kmem_cache_free(sit_entry_set_slab, ses);
1707}
1708
1709static void adjust_sit_entry_set(struct sit_entry_set *ses,
1710 struct list_head *head)
1711{
1712 struct sit_entry_set *next = ses;
1713
1714 if (list_is_last(&ses->set_list, head))
1715 return;
1716
1717 list_for_each_entry_continue(next, head, set_list)
1718 if (ses->entry_cnt <= next->entry_cnt)
1719 break;
1720
1721 list_move_tail(&ses->set_list, &next->set_list);
1722}
1723
1724static void add_sit_entry(unsigned int segno, struct list_head *head)
1725{
1726 struct sit_entry_set *ses;
1727 unsigned int start_segno = START_SEGNO(segno);
1728
1729 list_for_each_entry(ses, head, set_list) {
1730 if (ses->start_segno == start_segno) {
1731 ses->entry_cnt++;
1732 adjust_sit_entry_set(ses, head);
1733 return;
1734 }
1735 }
1736
1737 ses = grab_sit_entry_set();
1738
1739 ses->start_segno = start_segno;
1740 ses->entry_cnt++;
1741 list_add(&ses->set_list, head);
1742}
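
/*
 * Sets group dirty SIT entries by the on-disk SIT block they share
 * (START_SEGNO), and adjust_sit_entry_set() keeps the list sorted by
 * rising entry_cnt. flush_sit_entries() walks the list in that order,
 * journaling the small sets while they still fit and writing the larger
 * ones to SIT pages, so as many sets as possible avoid a page write.
 */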
1743
1744static void add_sits_in_set(struct f2fs_sb_info *sbi)
1745{
1746 struct f2fs_sm_info *sm_info = SM_I(sbi);
1747 struct list_head *set_list = &sm_info->sit_entry_set;
1748 unsigned long *bitmap = SIT_I(sbi)->dirty_sentries_bitmap;
Chao Yu184a5cd2014-09-04 18:13:01 +08001749 unsigned int segno;
1750
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07001751 for_each_set_bit(segno, bitmap, MAIN_SEGS(sbi))
Chao Yu184a5cd2014-09-04 18:13:01 +08001752 add_sit_entry(segno, set_list);
1753}
1754
1755static void remove_sits_in_journal(struct f2fs_sb_info *sbi)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001756{
1757 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
1758 struct f2fs_summary_block *sum = curseg->sum_blk;
1759 int i;
1760
Chao Yu184a5cd2014-09-04 18:13:01 +08001761 for (i = sits_in_cursum(sum) - 1; i >= 0; i--) {
1762 unsigned int segno;
1763 bool dirtied;
1764
1765 segno = le32_to_cpu(segno_in_journal(sum, i));
1766 dirtied = __mark_sit_entry_dirty(sbi, segno);
1767
1768 if (!dirtied)
1769 add_sit_entry(segno, &SM_I(sbi)->sit_entry_set);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001770 }
Chao Yu184a5cd2014-09-04 18:13:01 +08001771 update_sits_in_cursum(sum, -sits_in_cursum(sum));
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001772}
1773
Jaegeuk Kim0a8165d2012-11-29 13:28:09 +09001774/*
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001775 * CP calls this function, which flushes SIT entries including sit_journal,
1776 * and moves prefree segs to free segs.
1777 */
Jaegeuk Kim4b2fecc2014-09-20 22:06:39 -07001778void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001779{
1780 struct sit_info *sit_i = SIT_I(sbi);
1781 unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
1782 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
1783 struct f2fs_summary_block *sum = curseg->sum_blk;
Chao Yu184a5cd2014-09-04 18:13:01 +08001784 struct sit_entry_set *ses, *tmp;
1785 struct list_head *head = &SM_I(sbi)->sit_entry_set;
Chao Yu184a5cd2014-09-04 18:13:01 +08001786 bool to_journal = true;
Jaegeuk Kim4b2fecc2014-09-20 22:06:39 -07001787 struct seg_entry *se;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001788
1789 mutex_lock(&curseg->curseg_mutex);
1790 mutex_lock(&sit_i->sentry_lock);
1791
Wanpeng Li2b11a742015-02-27 16:52:50 +08001792 if (!sit_i->dirty_sentries)
1793 goto out;
1794
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001795 /*
Chao Yu184a5cd2014-09-04 18:13:01 +08001796	 * add and account the sit entries from the dirty bitmap in sit
1797	 * entry sets temporarily
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001798 */
Chao Yu184a5cd2014-09-04 18:13:01 +08001799 add_sits_in_set(sbi);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001800
Chao Yu184a5cd2014-09-04 18:13:01 +08001801 /*
1802	 * if there is not enough space in the journal to store the dirty
1803	 * sit entries, remove all entries from the journal and add and
1804	 * account them in sit entry sets.
1805 */
1806 if (!__has_cursum_space(sum, sit_i->dirty_sentries, SIT_JOURNAL))
1807 remove_sits_in_journal(sbi);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001808
Chao Yu184a5cd2014-09-04 18:13:01 +08001809 /*
1810 * there are two steps to flush sit entries:
1811 * #1, flush sit entries to journal in current cold data summary block.
1812 * #2, flush sit entries to sit page.
1813 */
1814 list_for_each_entry_safe(ses, tmp, head, set_list) {
Jaegeuk Kim4a257ed2014-10-16 11:43:30 -07001815 struct page *page = NULL;
Chao Yu184a5cd2014-09-04 18:13:01 +08001816 struct f2fs_sit_block *raw_sit = NULL;
1817 unsigned int start_segno = ses->start_segno;
1818 unsigned int end = min(start_segno + SIT_ENTRY_PER_BLOCK,
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07001819 (unsigned long)MAIN_SEGS(sbi));
Chao Yu184a5cd2014-09-04 18:13:01 +08001820 unsigned int segno = start_segno;
Jaegeuk Kimb2955552013-11-12 14:49:56 +09001821
Chao Yu184a5cd2014-09-04 18:13:01 +08001822 if (to_journal &&
1823 !__has_cursum_space(sum, ses->entry_cnt, SIT_JOURNAL))
1824 to_journal = false;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001825
Chao Yu184a5cd2014-09-04 18:13:01 +08001826 if (!to_journal) {
1827 page = get_next_sit_page(sbi, start_segno);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001828 raw_sit = page_address(page);
1829 }
1830
Chao Yu184a5cd2014-09-04 18:13:01 +08001831 /* flush dirty sit entries in region of current sit set */
1832 for_each_set_bit_from(segno, bitmap, end) {
1833 int offset, sit_offset;
Jaegeuk Kim4b2fecc2014-09-20 22:06:39 -07001834
1835 se = get_seg_entry(sbi, segno);
Chao Yu184a5cd2014-09-04 18:13:01 +08001836
1837 /* add discard candidates */
Jaegeuk Kimd7bc2482014-12-12 13:53:41 -08001838 if (cpc->reason != CP_DISCARD) {
Jaegeuk Kim4b2fecc2014-09-20 22:06:39 -07001839 cpc->trim_start = segno;
1840 add_discard_addrs(sbi, cpc);
1841 }
Chao Yu184a5cd2014-09-04 18:13:01 +08001842
1843 if (to_journal) {
1844 offset = lookup_journal_in_cursum(sum,
1845 SIT_JOURNAL, segno, 1);
1846 f2fs_bug_on(sbi, offset < 0);
1847 segno_in_journal(sum, offset) =
1848 cpu_to_le32(segno);
1849 seg_info_to_raw_sit(se,
1850 &sit_in_journal(sum, offset));
1851 } else {
1852 sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);
1853 seg_info_to_raw_sit(se,
1854 &raw_sit->entries[sit_offset]);
1855 }
1856
1857 __clear_bit(segno, bitmap);
1858 sit_i->dirty_sentries--;
1859 ses->entry_cnt--;
1860 }
1861
1862 if (!to_journal)
1863 f2fs_put_page(page, 1);
1864
1865 f2fs_bug_on(sbi, ses->entry_cnt);
1866 release_sit_entry_set(ses);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001867 }
Chao Yu184a5cd2014-09-04 18:13:01 +08001868
1869 f2fs_bug_on(sbi, !list_empty(head));
1870 f2fs_bug_on(sbi, sit_i->dirty_sentries);
Chao Yu184a5cd2014-09-04 18:13:01 +08001871out:
Jaegeuk Kim4b2fecc2014-09-20 22:06:39 -07001872 if (cpc->reason == CP_DISCARD) {
1873 for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++)
1874 add_discard_addrs(sbi, cpc);
1875 }
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001876 mutex_unlock(&sit_i->sentry_lock);
1877 mutex_unlock(&curseg->curseg_mutex);
1878
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001879 set_prefree_as_free_segments(sbi);
1880}
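
/*
 * The CP_DISCARD epilogue above serves the fitrim path: even when no SIT
 * entry is dirty and the function bails out early, the requested
 * trim_start..trim_end range is still scanned so that discard candidates
 * are collected for the checkpoint that follows.
 */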
1881
1882static int build_sit_info(struct f2fs_sb_info *sbi)
1883{
1884 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
1885 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
1886 struct sit_info *sit_i;
1887 unsigned int sit_segs, start;
1888 char *src_bitmap, *dst_bitmap;
1889 unsigned int bitmap_size;
1890
1891 /* allocate memory for SIT information */
1892 sit_i = kzalloc(sizeof(struct sit_info), GFP_KERNEL);
1893 if (!sit_i)
1894 return -ENOMEM;
1895
1896 SM_I(sbi)->sit_info = sit_i;
1897
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07001898 sit_i->sentries = vzalloc(MAIN_SEGS(sbi) * sizeof(struct seg_entry));
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001899 if (!sit_i->sentries)
1900 return -ENOMEM;
1901
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07001902 bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001903 sit_i->dirty_sentries_bitmap = kzalloc(bitmap_size, GFP_KERNEL);
1904 if (!sit_i->dirty_sentries_bitmap)
1905 return -ENOMEM;
1906
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07001907 for (start = 0; start < MAIN_SEGS(sbi); start++) {
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001908 sit_i->sentries[start].cur_valid_map
1909 = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
1910 sit_i->sentries[start].ckpt_valid_map
1911 = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
Jaegeuk Kima66cdd92015-04-30 22:37:50 -07001912 sit_i->sentries[start].discard_map
1913 = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
1914 if (!sit_i->sentries[start].cur_valid_map ||
1915 !sit_i->sentries[start].ckpt_valid_map ||
1916 !sit_i->sentries[start].discard_map)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001917 return -ENOMEM;
1918 }
1919
Jaegeuk Kim60a3b782015-02-10 16:44:29 -08001920 sit_i->tmp_map = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
1921 if (!sit_i->tmp_map)
1922 return -ENOMEM;
1923
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001924 if (sbi->segs_per_sec > 1) {
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07001925 sit_i->sec_entries = vzalloc(MAIN_SECS(sbi) *
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001926 sizeof(struct sec_entry));
1927 if (!sit_i->sec_entries)
1928 return -ENOMEM;
1929 }
1930
1931 /* get information related with SIT */
1932 sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;
1933
1934	/* setup SIT bitmap from checkpoint pack */
1935 bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
1936 src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);
1937
Alexandru Gheorghiu79b57932013-03-28 02:24:53 +02001938 dst_bitmap = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001939 if (!dst_bitmap)
1940 return -ENOMEM;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001941
1942 /* init SIT information */
1943 sit_i->s_ops = &default_salloc_ops;
1944
1945 sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
1946 sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
1947 sit_i->written_valid_blocks = le64_to_cpu(ckpt->valid_block_count);
1948 sit_i->sit_bitmap = dst_bitmap;
1949 sit_i->bitmap_size = bitmap_size;
1950 sit_i->dirty_sentries = 0;
1951 sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
1952 sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
1953 sit_i->mounted_time = CURRENT_TIME_SEC.tv_sec;
1954 mutex_init(&sit_i->sentry_lock);
1955 return 0;
1956}
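
/*
 * A sizing sketch (arithmetic assumed from SIT_VBLOCK_MAP_SIZE): each
 * validity bitmap holds one bit per block, so a 2MB segment of 512
 * 4KB blocks needs 512 / 8 = 64 bytes, and build_sit_info() allocates
 * three such maps per segment (cur_valid_map, ckpt_valid_map and
 * discard_map).
 */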
1957
1958static int build_free_segmap(struct f2fs_sb_info *sbi)
1959{
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001960 struct free_segmap_info *free_i;
1961 unsigned int bitmap_size, sec_bitmap_size;
1962
1963 /* allocate memory for free segmap information */
1964 free_i = kzalloc(sizeof(struct free_segmap_info), GFP_KERNEL);
1965 if (!free_i)
1966 return -ENOMEM;
1967
1968 SM_I(sbi)->free_info = free_i;
1969
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07001970 bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001971 free_i->free_segmap = kmalloc(bitmap_size, GFP_KERNEL);
1972 if (!free_i->free_segmap)
1973 return -ENOMEM;
1974
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07001975 sec_bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001976 free_i->free_secmap = kmalloc(sec_bitmap_size, GFP_KERNEL);
1977 if (!free_i->free_secmap)
1978 return -ENOMEM;
1979
1980 /* set all segments as dirty temporarily */
1981 memset(free_i->free_segmap, 0xff, bitmap_size);
1982 memset(free_i->free_secmap, 0xff, sec_bitmap_size);
1983
1984 /* init free segmap information */
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07001985 free_i->start_segno = GET_SEGNO_FROM_SEG0(sbi, MAIN_BLKADDR(sbi));
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001986 free_i->free_segments = 0;
1987 free_i->free_sections = 0;
Chao Yu1a118cc2015-02-11 18:20:38 +08001988 spin_lock_init(&free_i->segmap_lock);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001989 return 0;
1990}
1991
1992static int build_curseg(struct f2fs_sb_info *sbi)
1993{
Namjae Jeon1042d602012-12-01 10:56:13 +09001994 struct curseg_info *array;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001995 int i;
1996
Fabian Frederickb434bab2014-06-23 18:39:15 +02001997 array = kcalloc(NR_CURSEG_TYPE, sizeof(*array), GFP_KERNEL);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09001998 if (!array)
1999 return -ENOMEM;
2000
2001 SM_I(sbi)->curseg_array = array;
2002
2003 for (i = 0; i < NR_CURSEG_TYPE; i++) {
2004 mutex_init(&array[i].curseg_mutex);
2005 array[i].sum_blk = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
2006 if (!array[i].sum_blk)
2007 return -ENOMEM;
2008 array[i].segno = NULL_SEGNO;
2009 array[i].next_blkoff = 0;
2010 }
2011 return restore_curseg_summaries(sbi);
2012}
2013
2014static void build_sit_entries(struct f2fs_sb_info *sbi)
2015{
2016 struct sit_info *sit_i = SIT_I(sbi);
2017 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
2018 struct f2fs_summary_block *sum = curseg->sum_blk;
Chao Yu74de5932013-11-22 09:09:59 +08002019 int sit_blk_cnt = SIT_BLK_CNT(sbi);
2020 unsigned int i, start, end;
2021 unsigned int readed, start_blk = 0;
Jaegeuk Kim90a893c2014-09-22 16:21:07 -07002022 int nrpages = MAX_BIO_BLOCKS(sbi);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002023
Chao Yu74de5932013-11-22 09:09:59 +08002024 do {
Chao Yu662befd2014-02-07 16:11:53 +08002025 readed = ra_meta_pages(sbi, start_blk, nrpages, META_SIT);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002026
Chao Yu74de5932013-11-22 09:09:59 +08002027 start = start_blk * sit_i->sents_per_block;
2028 end = (start_blk + readed) * sit_i->sents_per_block;
2029
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07002030 for (; start < end && start < MAIN_SEGS(sbi); start++) {
Chao Yu74de5932013-11-22 09:09:59 +08002031 struct seg_entry *se = &sit_i->sentries[start];
2032 struct f2fs_sit_block *sit_blk;
2033 struct f2fs_sit_entry sit;
2034 struct page *page;
2035
2036 mutex_lock(&curseg->curseg_mutex);
2037 for (i = 0; i < sits_in_cursum(sum); i++) {
Chris Fries6c311ec2014-01-17 14:44:39 -06002038 if (le32_to_cpu(segno_in_journal(sum, i))
2039 == start) {
Chao Yu74de5932013-11-22 09:09:59 +08002040 sit = sit_in_journal(sum, i);
2041 mutex_unlock(&curseg->curseg_mutex);
2042 goto got_it;
2043 }
2044 }
2045 mutex_unlock(&curseg->curseg_mutex);
2046
2047 page = get_current_sit_page(sbi, start);
2048 sit_blk = (struct f2fs_sit_block *)page_address(page);
2049 sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
2050 f2fs_put_page(page, 1);
2051got_it:
2052 check_block_count(sbi, start, &sit);
2053 seg_info_from_raw_sit(se, &sit);
Jaegeuk Kima66cdd92015-04-30 22:37:50 -07002054
2055 /* build discard map only one time */
2056 memcpy(se->discard_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
2057 sbi->discard_blks += sbi->blocks_per_seg - se->valid_blocks;
2058
Chao Yu74de5932013-11-22 09:09:59 +08002059 if (sbi->segs_per_sec > 1) {
2060 struct sec_entry *e = get_sec_entry(sbi, start);
2061 e->valid_blocks += se->valid_blocks;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002062 }
2063 }
Chao Yu74de5932013-11-22 09:09:59 +08002064 start_blk += readed;
2065 } while (start_blk < sit_blk_cnt);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002066}
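
/*
 * The SIT journal in the cold data curseg may hold entries newer than
 * the on-disk SIT blocks, so it is searched first above and the raw SIT
 * page is read only on a miss. The discard map is then seeded from
 * cur_valid_map, so currently valid blocks are never discard candidates,
 * and sbi->discard_blks starts out counting each segment's invalid
 * blocks.
 */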
2067
2068static void init_free_segmap(struct f2fs_sb_info *sbi)
2069{
2070 unsigned int start;
2071 int type;
2072
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07002073 for (start = 0; start < MAIN_SEGS(sbi); start++) {
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002074 struct seg_entry *sentry = get_seg_entry(sbi, start);
2075 if (!sentry->valid_blocks)
2076 __set_free(sbi, start);
2077 }
2078
2079	/* mark the current segments as in use */
2080 for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) {
2081 struct curseg_info *curseg_t = CURSEG_I(sbi, type);
2082 __set_test_and_inuse(sbi, curseg_t->segno);
2083 }
2084}
2085
2086static void init_dirty_segmap(struct f2fs_sb_info *sbi)
2087{
2088 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2089 struct free_segmap_info *free_i = FREE_I(sbi);
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07002090 unsigned int segno = 0, offset = 0;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002091 unsigned short valid_blocks;
2092
Namjae Jeon8736fbf2013-06-16 09:49:11 +09002093 while (1) {
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002094 /* find dirty segment based on free segmap */
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07002095 segno = find_next_inuse(free_i, MAIN_SEGS(sbi), offset);
2096 if (segno >= MAIN_SEGS(sbi))
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002097 break;
2098 offset = segno + 1;
2099 valid_blocks = get_valid_blocks(sbi, segno, 0);
Jaegeuk Kimec325b52014-09-02 16:24:11 -07002100 if (valid_blocks == sbi->blocks_per_seg || !valid_blocks)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002101 continue;
Jaegeuk Kimec325b52014-09-02 16:24:11 -07002102 if (valid_blocks > sbi->blocks_per_seg) {
2103 f2fs_bug_on(sbi, 1);
2104 continue;
2105 }
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002106 mutex_lock(&dirty_i->seglist_lock);
2107 __locate_dirty_segment(sbi, segno, DIRTY);
2108 mutex_unlock(&dirty_i->seglist_lock);
2109 }
2110}
2111
Jaegeuk Kim5ec4e492013-03-31 13:26:03 +09002112static int init_victim_secmap(struct f2fs_sb_info *sbi)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002113{
2114 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07002115 unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002116
Jaegeuk Kim5ec4e492013-03-31 13:26:03 +09002117 dirty_i->victim_secmap = kzalloc(bitmap_size, GFP_KERNEL);
2118 if (!dirty_i->victim_secmap)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002119 return -ENOMEM;
2120 return 0;
2121}
2122
2123static int build_dirty_segmap(struct f2fs_sb_info *sbi)
2124{
2125 struct dirty_seglist_info *dirty_i;
2126 unsigned int bitmap_size, i;
2127
2128 /* allocate memory for dirty segments list information */
2129 dirty_i = kzalloc(sizeof(struct dirty_seglist_info), GFP_KERNEL);
2130 if (!dirty_i)
2131 return -ENOMEM;
2132
2133 SM_I(sbi)->dirty_info = dirty_i;
2134 mutex_init(&dirty_i->seglist_lock);
2135
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07002136 bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002137
2138 for (i = 0; i < NR_DIRTY_TYPE; i++) {
2139 dirty_i->dirty_segmap[i] = kzalloc(bitmap_size, GFP_KERNEL);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002140 if (!dirty_i->dirty_segmap[i])
2141 return -ENOMEM;
2142 }
2143
2144 init_dirty_segmap(sbi);
Jaegeuk Kim5ec4e492013-03-31 13:26:03 +09002145 return init_victim_secmap(sbi);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002146}
2147
Jaegeuk Kim0a8165d2012-11-29 13:28:09 +09002148/*
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002149 * Update min, max modified time for cost-benefit GC algorithm
2150 */
2151static void init_min_max_mtime(struct f2fs_sb_info *sbi)
2152{
2153 struct sit_info *sit_i = SIT_I(sbi);
2154 unsigned int segno;
2155
2156 mutex_lock(&sit_i->sentry_lock);
2157
2158 sit_i->min_mtime = LLONG_MAX;
2159
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07002160 for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002161 unsigned int i;
2162 unsigned long long mtime = 0;
2163
2164 for (i = 0; i < sbi->segs_per_sec; i++)
2165 mtime += get_seg_entry(sbi, segno + i)->mtime;
2166
2167 mtime = div_u64(mtime, sbi->segs_per_sec);
2168
2169 if (sit_i->min_mtime > mtime)
2170 sit_i->min_mtime = mtime;
2171 }
2172 sit_i->max_mtime = get_mtime(sbi);
2173 mutex_unlock(&sit_i->sentry_lock);
2174}
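
/*
 * min_mtime/max_mtime bound section modification times so the
 * cost-benefit GC victim policy (in gc.c) can normalize a section's age
 * into the range 0..100. As a sketch of that formula -- treat
 * get_cb_cost() in gc.c as authoritative:
 *
 *	age  = 100 - 100 * (mtime - min_mtime) / (max_mtime - min_mtime)
 *	cost = UINT_MAX - (100 * (100 - u) * age) / (100 + u)
 *
 * where u is the section's valid-block utilization in percent.
 */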
2175
2176int build_segment_manager(struct f2fs_sb_info *sbi)
2177{
2178 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
2179 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
Namjae Jeon1042d602012-12-01 10:56:13 +09002180 struct f2fs_sm_info *sm_info;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002181 int err;
2182
2183 sm_info = kzalloc(sizeof(struct f2fs_sm_info), GFP_KERNEL);
2184 if (!sm_info)
2185 return -ENOMEM;
2186
2187 /* init sm info */
2188 sbi->sm_info = sm_info;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002189 sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
2190 sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
2191 sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
2192 sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
2193 sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
2194 sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
2195 sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
Jaegeuk Kim58c41032014-03-19 14:17:21 +09002196 sm_info->rec_prefree_segments = sm_info->main_segments *
2197 DEF_RECLAIM_PREFREE_SEGMENTS / 100;
Jaegeuk Kim9b5f1362014-09-16 18:30:54 -07002198 sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC;
Jaegeuk Kim216fbd62013-11-07 13:13:42 +09002199 sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
Jaegeuk Kimc1ce1b02014-09-10 16:53:02 -07002200 sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002201
Jaegeuk Kim7fd9e542013-11-15 13:55:58 +09002202 INIT_LIST_HEAD(&sm_info->discard_list);
2203 sm_info->nr_discards = 0;
2204 sm_info->max_discards = 0;
2205
Jaegeuk Kimbba681c2015-01-26 17:41:23 -08002206 sm_info->trim_sections = DEF_BATCHED_TRIM_SECTIONS;
2207
Chao Yu184a5cd2014-09-04 18:13:01 +08002208 INIT_LIST_HEAD(&sm_info->sit_entry_set);
2209
Gu Zhengb270ad62014-04-11 17:49:55 +08002210 if (test_opt(sbi, FLUSH_MERGE) && !f2fs_readonly(sbi->sb)) {
Gu Zheng2163d192014-04-27 14:21:33 +08002211 err = create_flush_cmd_control(sbi);
2212 if (err)
Gu Zhenga688b9d9e2014-04-27 14:21:21 +08002213 return err;
Jaegeuk Kim6b4afdd2014-04-02 15:34:36 +09002214 }
2215
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002216 err = build_sit_info(sbi);
2217 if (err)
2218 return err;
2219 err = build_free_segmap(sbi);
2220 if (err)
2221 return err;
2222 err = build_curseg(sbi);
2223 if (err)
2224 return err;
2225
2226 /* reinit free segmap based on SIT */
2227 build_sit_entries(sbi);
2228
2229 init_free_segmap(sbi);
2230 err = build_dirty_segmap(sbi);
2231 if (err)
2232 return err;
2233
2234 init_min_max_mtime(sbi);
2235 return 0;
2236}
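
/*
 * Initialization order matters here: SIT info and the free/current
 * segment structures are allocated first, build_sit_entries() then loads
 * the real per-segment state, and only afterwards are the free and dirty
 * segmaps derived from it -- hence the "reinit free segmap based on SIT"
 * step above.
 */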
2237
2238static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
2239 enum dirty_type dirty_type)
2240{
2241 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2242
2243 mutex_lock(&dirty_i->seglist_lock);
2244 kfree(dirty_i->dirty_segmap[dirty_type]);
2245 dirty_i->nr_dirty[dirty_type] = 0;
2246 mutex_unlock(&dirty_i->seglist_lock);
2247}
2248
Jaegeuk Kim5ec4e492013-03-31 13:26:03 +09002249static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002250{
2251 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
Jaegeuk Kim5ec4e492013-03-31 13:26:03 +09002252 kfree(dirty_i->victim_secmap);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002253}
2254
2255static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
2256{
2257 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2258 int i;
2259
2260 if (!dirty_i)
2261 return;
2262
2263 /* discard pre-free/dirty segments list */
2264 for (i = 0; i < NR_DIRTY_TYPE; i++)
2265 discard_dirty_segmap(sbi, i);
2266
Jaegeuk Kim5ec4e492013-03-31 13:26:03 +09002267 destroy_victim_secmap(sbi);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002268 SM_I(sbi)->dirty_info = NULL;
2269 kfree(dirty_i);
2270}
2271
2272static void destroy_curseg(struct f2fs_sb_info *sbi)
2273{
2274 struct curseg_info *array = SM_I(sbi)->curseg_array;
2275 int i;
2276
2277 if (!array)
2278 return;
2279 SM_I(sbi)->curseg_array = NULL;
2280 for (i = 0; i < NR_CURSEG_TYPE; i++)
2281 kfree(array[i].sum_blk);
2282 kfree(array);
2283}
2284
2285static void destroy_free_segmap(struct f2fs_sb_info *sbi)
2286{
2287 struct free_segmap_info *free_i = SM_I(sbi)->free_info;
2288 if (!free_i)
2289 return;
2290 SM_I(sbi)->free_info = NULL;
2291 kfree(free_i->free_segmap);
2292 kfree(free_i->free_secmap);
2293 kfree(free_i);
2294}
2295
2296static void destroy_sit_info(struct f2fs_sb_info *sbi)
2297{
2298 struct sit_info *sit_i = SIT_I(sbi);
2299 unsigned int start;
2300
2301 if (!sit_i)
2302 return;
2303
2304 if (sit_i->sentries) {
Jaegeuk Kim7cd85582014-09-23 11:23:01 -07002305 for (start = 0; start < MAIN_SEGS(sbi); start++) {
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002306 kfree(sit_i->sentries[start].cur_valid_map);
2307 kfree(sit_i->sentries[start].ckpt_valid_map);
Jaegeuk Kima66cdd92015-04-30 22:37:50 -07002308 kfree(sit_i->sentries[start].discard_map);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002309 }
2310 }
Jaegeuk Kim60a3b782015-02-10 16:44:29 -08002311 kfree(sit_i->tmp_map);
2312
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002313 vfree(sit_i->sentries);
2314 vfree(sit_i->sec_entries);
2315 kfree(sit_i->dirty_sentries_bitmap);
2316
2317 SM_I(sbi)->sit_info = NULL;
2318 kfree(sit_i->sit_bitmap);
2319 kfree(sit_i);
2320}
2321
2322void destroy_segment_manager(struct f2fs_sb_info *sbi)
2323{
2324 struct f2fs_sm_info *sm_info = SM_I(sbi);
Gu Zhenga688b9d9e2014-04-27 14:21:21 +08002325
Chao Yu3b03f722013-11-06 09:12:04 +08002326 if (!sm_info)
2327 return;
Gu Zheng2163d192014-04-27 14:21:33 +08002328 destroy_flush_cmd_control(sbi);
Jaegeuk Kim351df4b2012-11-02 17:09:16 +09002329 destroy_dirty_segmap(sbi);
2330 destroy_curseg(sbi);
2331 destroy_free_segmap(sbi);
2332 destroy_sit_info(sbi);
2333 sbi->sm_info = NULL;
2334 kfree(sm_info);
2335}
Jaegeuk Kim7fd9e542013-11-15 13:55:58 +09002336
2337int __init create_segment_manager_caches(void)
2338{
2339 discard_entry_slab = f2fs_kmem_cache_create("discard_entry",
Gu Zhenge8512d22014-03-07 18:43:28 +08002340 sizeof(struct discard_entry));
Jaegeuk Kim7fd9e542013-11-15 13:55:58 +09002341 if (!discard_entry_slab)
Chao Yu184a5cd2014-09-04 18:13:01 +08002342 goto fail;
2343
2344 sit_entry_set_slab = f2fs_kmem_cache_create("sit_entry_set",
Changman Leec9ee0082014-11-21 15:42:07 +09002345 sizeof(struct sit_entry_set));
Chao Yu184a5cd2014-09-04 18:13:01 +08002346 if (!sit_entry_set_slab)
2347		goto destroy_discard_entry;
Jaegeuk Kim88b88a62014-10-06 17:39:50 -07002348
2349 inmem_entry_slab = f2fs_kmem_cache_create("inmem_page_entry",
2350 sizeof(struct inmem_pages));
2351 if (!inmem_entry_slab)
2352 goto destroy_sit_entry_set;
Jaegeuk Kim7fd9e542013-11-15 13:55:58 +09002353 return 0;
Chao Yu184a5cd2014-09-04 18:13:01 +08002354
Jaegeuk Kim88b88a62014-10-06 17:39:50 -07002355destroy_sit_entry_set:
2356 kmem_cache_destroy(sit_entry_set_slab);
Chao Yu184a5cd2014-09-04 18:13:01 +08002357destroy_discard_entry:
2358 kmem_cache_destroy(discard_entry_slab);
2359fail:
2360 return -ENOMEM;
Jaegeuk Kim7fd9e542013-11-15 13:55:58 +09002361}
2362
2363void destroy_segment_manager_caches(void)
2364{
Chao Yu184a5cd2014-09-04 18:13:01 +08002365 kmem_cache_destroy(sit_entry_set_slab);
Jaegeuk Kim7fd9e542013-11-15 13:55:58 +09002366 kmem_cache_destroy(discard_entry_slab);
Jaegeuk Kim88b88a62014-10-06 17:39:50 -07002367 kmem_cache_destroy(inmem_entry_slab);
Jaegeuk Kim7fd9e542013-11-15 13:55:58 +09002368}