/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/blkdev.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include <trace/events/f2fs.h>

static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	long wait_ms;

	wait_ms = gc_th->min_sleep_time;

	do {
		if (try_to_freeze())
			continue;
		else
			wait_event_interruptible_timeout(*wq,
						kthread_should_stop(),
						msecs_to_jiffies(wait_ms));
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			increase_sleep_time(gc_th, &wait_ms);
			continue;
		}

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. IO subsystem is idle by checking the # of writeback pages.
		 * 3. IO subsystem is idle by checking the # of requests in
		 *    bdev's request list.
		 *
		 * Note) We have to avoid triggering GCs too frequently,
		 * because segments collected now may be invalidated soon
		 * afterwards by user updates or deletions. So it is better
		 * to wait a while and let more dirty segments accumulate.
		 */
		if (!mutex_trylock(&sbi->gc_mutex))
			continue;

		if (!is_idle(sbi)) {
			increase_sleep_time(gc_th, &wait_ms);
			mutex_unlock(&sbi->gc_mutex);
			continue;
		}

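		/*
		 * Adaptive back-off: increase_sleep_time() and
		 * decrease_sleep_time() (helpers assumed to be defined in
		 * gc.h) step wait_ms by min_sleep_time, clamped to the
		 * [min_sleep_time, max_sleep_time] range, so the thread only
		 * polls quickly while invalid blocks keep accumulating.
		 */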
		if (has_enough_invalid_blocks(sbi))
			decrease_sleep_time(gc_th, &wait_ms);
		else
			increase_sleep_time(gc_th, &wait_ms);

		stat_inc_bggc_count(sbi);

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi))
			wait_ms = gc_th->no_gc_sleep_time;

		/* balancing f2fs's metadata periodically */
		f2fs_balance_fs_bg(sbi);

	} while (!kthread_should_stop());
	return 0;
}

int start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	int err = 0;

	gc_th = kmalloc(sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th) {
		err = -ENOMEM;
		goto out;
	}

	gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

	gc_th->gc_idle = 0;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		err = PTR_ERR(gc_th->f2fs_gc_task);
		kfree(gc_th);
		sbi->gc_thread = NULL;
	}
out:
	return err;
}

void stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}

static int select_gc_type(struct f2fs_gc_kthread *gc_th, int gc_type)
{
	int gc_mode = (gc_type == BG_GC) ? GC_CB : GC_GREEDY;

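	/*
	 * gc_idle is a tunable (exposed through sysfs in this version of
	 * the code): 0 keeps the defaults above, 1 forces cost-benefit and
	 * 2 forces greedy selection while the system is idle.
	 */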
	if (gc_th && gc_th->gc_idle) {
		if (gc_th->gc_idle == 1)
			gc_mode = GC_CB;
		else if (gc_th->gc_idle == 2)
			gc_mode = GC_GREEDY;
	}
	return gc_mode;
}

static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_segmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(sbi->gc_thread, gc_type);
		p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
		p->max_search = dirty_i->nr_dirty[DIRTY];
		p->ofs_unit = sbi->segs_per_sec;
	}

	if (p->max_search > sbi->max_victim_search)
		p->max_search = sbi->max_victim_search;

	p->offset = sbi->last_victim[p->gc_mode];
}

static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return 1 << sbi->log_blocks_per_seg;
	if (p->gc_mode == GC_GREEDY)
		return (1 << sbi->log_blocks_per_seg) * p->ofs_unit;
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can reuse victim sections that were
	 * already selected by background GC; those sections are guaranteed
	 * to have only a small number of valid blocks.
	 */
	for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			continue;
		clear_bit(secno, dirty_i->victim_secmap);
		return secno * sbi->segs_per_sec;
	}
	return NULL_SEGNO;
}

static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SECNO(sbi, segno);
	unsigned int start = secno * sbi->segs_per_sec;
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;

	for (i = 0; i < sbi->segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, sbi->segs_per_sec);

	mtime = div_u64(mtime, sbi->segs_per_sec);
	vblocks = div_u64(vblocks, sbi->segs_per_sec);

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* Handle the case where the system time was changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

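	/*
	 * Cost-benefit policy: the return value below inverts the benefit
	 * so that lower is better. For example, with utilization u = 20
	 * (i.e. 20% of the section still valid) and age = 50, the benefit
	 * is 100 * (100 - 20) * 50 / (100 + 20) = 3333, so the cost becomes
	 * UINT_MAX - 3333; fuller or younger sections yield higher costs.
	 */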
	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}

static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
			unsigned int segno, struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, sbi->segs_per_sec);
	else
		return get_cb_cost(sbi, segno);
}

/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment
 * and it does not remove it from the dirty seglist.
 * When it is called from SSR segment selection, it finds a segment
 * which has minimum valid blocks and removes it from the dirty seglist.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
		unsigned int *result, int gc_type, int type, char alloc_mode)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno, max_cost;
	int nsearched = 0;

	mutex_lock(&dirty_i->seglist_lock);

	p.alloc_mode = alloc_mode;
	select_policy(sbi, gc_type, type, &p);

	p.min_segno = NULL_SEGNO;
	p.min_cost = max_cost = get_max_cost(sbi, &p);

	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

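	/*
	 * Linear scan of the dirty segmap, resuming from the last victim.
	 * Cost is lower-is-better; a candidate whose cost equals max_cost
	 * (e.g. a fully valid section under the greedy policy) can never
	 * beat the current minimum and is skipped without consuming the
	 * nsearched budget.
	 */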
	while (1) {
		unsigned long cost;
		unsigned int segno;

		segno = find_next_bit(p.dirty_segmap, MAIN_SEGS(sbi), p.offset);
		if (segno >= MAIN_SEGS(sbi)) {
			if (sbi->last_victim[p.gc_mode]) {
				sbi->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}

		p.offset = segno + p.ofs_unit;
		if (p.ofs_unit > 1)
			p.offset -= segno % p.ofs_unit;

		secno = GET_SECNO(sbi, segno);

		if (sec_usage_check(sbi, secno))
			continue;
		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			continue;

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		} else if (unlikely(cost == max_cost)) {
			continue;
		}

		if (nsearched++ >= p.max_search) {
			sbi->last_victim[p.gc_mode] = segno;
			break;
		}
	}
	if (p.min_segno != NULL_SEGNO) {
got_it:
		if (p.alloc_mode == LFS) {
			secno = GET_SECNO(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;

		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	}
	mutex_unlock(&dirty_i->seglist_lock);

	return (p.min_segno == NULL_SEGNO) ? 0 : 1;
}

static const struct victim_selection default_v_ops = {
	.get_victim = get_victim_by_default,
};

static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
	struct inode_entry *ie;

	ie = radix_tree_lookup(&gc_list->iroot, ino);
	if (ie)
		return ie->inode;
	return NULL;
}

static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
	struct inode_entry *new_ie;

	if (inode == find_gc_inode(gc_list, inode->i_ino)) {
		iput(inode);
		return;
	}
	new_ie = f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
	new_ie->inode = inode;

	f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
	list_add_tail(&new_ie->list, &gc_list->ilist);
}

static void put_gc_inode(struct gc_inode_list *gc_list)
{
	struct inode_entry *ie, *next_ie;
	list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
		radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(inode_entry_slab, ie);
	}
}

static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	mutex_lock(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	mutex_unlock(&sit_i->sentry_lock);
	return ret;
}

/*
 * This function compares the node address recorded in the summary with the
 * one in the NAT. If they match, the node is still valid and is migrated
 * with cold status; otherwise the stale node is ignored.
 */
static int gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	bool initial = true;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;

	start_addr = START_BLOCK(sbi, segno);

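	/*
	 * Two passes over the victim segment: the first pass
	 * (initial == true) only issues readahead for every valid node
	 * page; the second pass re-checks validity and dirties the pages
	 * so that they are rewritten into a new segment.
	 */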
next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;
		struct node_info ni;

		/* stop BG_GC if there are not enough free sections */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
			return 0;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (initial) {
			ra_node_page(sbi, nid);
			continue;
		}
		node_page = get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* block may become invalid during get_node_page */
		if (check_valid_map(sbi, segno, off) == 0) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		get_node_info(sbi, nid, &ni);
		if (ni.blk_addr != start_addr + off) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		/* set page dirty and write it */
		if (gc_type == FG_GC) {
			f2fs_wait_on_page_writeback(node_page, NODE);
			set_page_dirty(node_page);
		} else {
			if (!PageWriteback(node_page))
				set_page_dirty(node_page);
		}
		f2fs_put_page(node_page, 1);
		stat_inc_node_blk_count(sbi, 1, gc_type);
	}

	if (initial) {
		initial = false;
		goto next_step;
	}

	if (gc_type == FG_GC) {
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_ALL,
			.nr_to_write = LONG_MAX,
			.for_reclaim = 0,
		};
		sync_node_pages(sbi, 0, &wbc);

		/* return 1 only if FG_GC successfully reclaimed one */
		if (get_valid_blocks(sbi, segno, 1) == 0)
			return 1;
	}
	return 0;
}

/*
 * Calculate the start block index of the data addressed by the given node
 * offset. Be careful: callers must pass node offsets of direct node blocks
 * only. Passing an offset that points to any other node type, such as an
 * indirect or double indirect node block, is a caller's bug.
 */
block_t start_bidx_of_node(unsigned int node_ofs, struct f2fs_inode_info *fi)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 5 - dec;
	}
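	/*
	 * Worked example, assuming 4KB blocks where NIDS_PER_BLOCK and
	 * ADDRS_PER_BLOCK are both 1018: the two direct nodes sit at
	 * offsets 1 and 2, the first indirect node at offset 3, and its
	 * first child at offset 4, giving bidx = 4 - 2 - 0 = 2, i.e. the
	 * third direct node's worth of data blocks past the inode's own
	 * ADDRS_PER_INODE slots.
	 */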
	return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(fi);
}

static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return false;

	get_node_info(sbi, nid, dni);

	if (sum->version != dni->version) {
		f2fs_put_page(node_page, 1);
		return false;
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = datablock_addr(node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr)
		return false;
	return true;
}

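/*
 * Move one block of an encrypted regular file without decrypting it: the
 * ciphertext is read through META_MAPPING, a new block address is allocated
 * in the cold data log, the page is written back there, and finally the
 * dnode and the extent cache are updated to point at the new address.
 */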
static void move_encrypted_block(struct inode *inode, block_t bidx)
{
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.type = DATA,
		.rw = READ_SYNC,
		.encrypted_page = NULL,
	};
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	struct page *page;
	int err;

	/* do not read out */
	page = grab_cache_page(inode->i_mapping, bidx);
	if (!page)
		return;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
	if (err)
		goto out;

	if (unlikely(dn.data_blkaddr == NULL_ADDR))
		goto put_out;

	get_node_info(fio.sbi, dn.nid, &ni);
	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

	/* read page */
	fio.page = page;
	fio.blk_addr = dn.data_blkaddr;

	fio.encrypted_page = pagecache_get_page(META_MAPPING(fio.sbi),
					fio.blk_addr,
					FGP_LOCK|FGP_CREAT,
					GFP_NOFS);
	if (!fio.encrypted_page)
		goto put_out;

	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_page_out;

	/* write page */
	lock_page(fio.encrypted_page);

	if (unlikely(!PageUptodate(fio.encrypted_page)))
		goto put_page_out;
	if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi)))
		goto put_page_out;

	set_page_dirty(fio.encrypted_page);
	f2fs_wait_on_page_writeback(fio.encrypted_page, META);
	if (clear_page_dirty_for_io(fio.encrypted_page))
		dec_page_count(fio.sbi, F2FS_DIRTY_META);

	set_page_writeback(fio.encrypted_page);

	/* allocate block address */
	f2fs_wait_on_page_writeback(dn.node_page, NODE);
	allocate_data_block(fio.sbi, NULL, fio.blk_addr,
					&fio.blk_addr, &sum, CURSEG_COLD_DATA);
	fio.rw = WRITE_SYNC;
	f2fs_submit_page_mbio(&fio);

	dn.data_blkaddr = fio.blk_addr;
	set_data_blkaddr(&dn);
	f2fs_update_extent_cache(&dn);
	set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
	if (page->index == 0)
		set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
put_page_out:
	f2fs_put_page(fio.encrypted_page, 1);
put_out:
	f2fs_put_dnode(&dn);
out:
	f2fs_put_page(page, 1);
}

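/*
 * For BG_GC, it is enough to redirty the page with the cold hint and let
 * regular writeback migrate it later; for FG_GC, the page is written out
 * synchronously right here.
 */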
static void move_data_page(struct inode *inode, block_t bidx, int gc_type)
{
	struct page *page;

	page = get_lock_data_page(inode, bidx);
	if (IS_ERR(page))
		return;

	if (gc_type == BG_GC) {
		if (PageWriteback(page))
			goto out;
		set_page_dirty(page);
		set_cold_data(page);
	} else {
		struct f2fs_io_info fio = {
			.sbi = F2FS_I_SB(inode),
			.type = DATA,
			.rw = WRITE_SYNC,
			.page = page,
			.encrypted_page = NULL,
		};
		set_page_dirty(page);
		f2fs_wait_on_page_writeback(page, DATA);
		if (clear_page_dirty_for_io(page))
			inode_dec_dirty_pages(inode);
		set_cold_data(page);
		do_write_data_page(&fio);
		clear_cold_data(page);
	}
out:
	f2fs_put_page(page, 1);
}

/*
 * This function looks up the parent node of each victim data block and
 * checks the block's validity. If a block is valid, it is copied with cold
 * status and its parent node is updated.
 * If the parent node is not valid or the data block address differs, the
 * victim data block is ignored.
 */
static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;

	start_addr = START_BLOCK(sbi, segno);

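	/*
	 * Four passes over the victim segment:
	 *   phase 0: readahead of the dnode pages named by the summaries,
	 *   phase 1: readahead of the owning inodes' node pages,
	 *   phase 2: iget() the inodes and readahead their data pages,
	 *   phase 3: actually move the data blocks.
	 */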
next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;

		/* stop BG_GC if there are not enough free sections */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
			return 0;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			ra_node_page(sbi, le32_to_cpu(entry->nid));
			continue;
		}

		/* Get an inode by ino while checking validity */
		if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
			continue;

		if (phase == 1) {
			ra_node_page(sbi, dni.ino);
			continue;
		}

		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 2) {
			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode) || is_bad_inode(inode))
				continue;

			/* encrypted inodes are handled in phase 3 */
			if (f2fs_encrypted_inode(inode) &&
					S_ISREG(inode->i_mode)) {
				add_gc_inode(gc_list, inode);
				continue;
			}

			start_bidx = start_bidx_of_node(nofs, F2FS_I(inode));
			data_page = get_read_data_page(inode,
					start_bidx + ofs_in_node, READA);
			if (IS_ERR(data_page)) {
				iput(inode);
				continue;
			}

			f2fs_put_page(data_page, 0);
			add_gc_inode(gc_list, inode);
			continue;
		}

		/* phase 3 */
		inode = find_gc_inode(gc_list, dni.ino);
		if (inode) {
			start_bidx = start_bidx_of_node(nofs, F2FS_I(inode))
								+ ofs_in_node;
			if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
				move_encrypted_block(inode, start_bidx);
			else
				move_data_page(inode, start_bidx, gc_type);
			stat_inc_data_blk_count(sbi, 1, gc_type);
		}
	}

	if (++phase < 4)
		goto next_step;

	if (gc_type == FG_GC) {
		f2fs_submit_merged_bio(sbi, DATA, WRITE);

		/* return 1 only if FG_GC successfully reclaimed one */
		if (get_valid_blocks(sbi, segno, 1) == 0)
			return 1;
	}
	return 0;
}

static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
			int gc_type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;

	mutex_lock(&sit_i->sentry_lock);
	ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
					      NO_CHECK_TYPE, LFS);
	mutex_unlock(&sit_i->sentry_lock);
	return ret;
}

static int do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
			struct gc_inode_list *gc_list, int gc_type)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	struct blk_plug plug;
	int nfree = 0;

	/* read segment summary of victim */
	sum_page = get_sum_page(sbi, segno);

	blk_start_plug(&plug);

	sum = page_address(sum_page);

	/*
	 * this is to avoid deadlock:
	 * - lock_page(sum_page)         - f2fs_replace_block
	 *  - check_valid_map()            - mutex_lock(sentry_lock)
	 *   - mutex_lock(sentry_lock)     - change_curseg()
	 *                                  - lock_page(sum_page)
	 */
	unlock_page(sum_page);

	switch (GET_SUM_TYPE((&sum->footer))) {
	case SUM_TYPE_NODE:
		nfree = gc_node_segment(sbi, sum->entries, segno, gc_type);
		break;
	case SUM_TYPE_DATA:
		nfree = gc_data_segment(sbi, sum->entries, gc_list,
							segno, gc_type);
		break;
	}
	blk_finish_plug(&plug);

	stat_inc_seg_count(sbi, GET_SUM_TYPE((&sum->footer)), gc_type);
	stat_inc_call_count(sbi->stat_info);

	f2fs_put_page(sum_page, 0);
	return nfree;
}

int f2fs_gc(struct f2fs_sb_info *sbi)
{
	unsigned int segno = NULL_SEGNO;
	unsigned int i;
	int gc_type = BG_GC;
	int sec_freed = 0;
	int ret = -1;
	struct cp_control cpc;
	struct gc_inode_list gc_list = {
		.ilist = LIST_HEAD_INIT(gc_list.ilist),
		.iroot = RADIX_TREE_INIT(GFP_NOFS),
	};

	cpc.reason = __get_cp_reason(sbi);
gc_more:
	if (unlikely(!(sbi->sb->s_flags & MS_ACTIVE)))
		goto stop;
	if (unlikely(f2fs_cp_error(sbi)))
		goto stop;

	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, sec_freed)) {
		gc_type = FG_GC;
		if (__get_victim(sbi, &segno, gc_type) || prefree_segments(sbi))
			write_checkpoint(sbi, &cpc);
	}

	if (segno == NULL_SEGNO && !__get_victim(sbi, &segno, gc_type))
		goto stop;
	ret = 0;

	/* readahead the contiguous SSA blocks covering the victim section */
	if (sbi->segs_per_sec > 1)
		ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno), sbi->segs_per_sec,
								META_SSA);

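	/*
	 * A section is the unit of garbage collection: when segs_per_sec
	 * is greater than 1, every segment of the victim section must be
	 * cleaned before the section can be counted as freed below.
	 */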
	for (i = 0; i < sbi->segs_per_sec; i++) {
		/*
		 * In the FG_GC case, stop collecting the remaining segments
		 * as soon as one segment of the selected section fails, to
		 * avoid long latency.
		 */
		if (!do_garbage_collect(sbi, segno + i, &gc_list, gc_type) &&
				gc_type == FG_GC)
			break;
	}

	if (i == sbi->segs_per_sec && gc_type == FG_GC)
		sec_freed++;

	if (gc_type == FG_GC)
		sbi->cur_victim_sec = NULL_SEGNO;

	if (has_not_enough_free_secs(sbi, sec_freed))
		goto gc_more;

	if (gc_type == FG_GC)
		write_checkpoint(sbi, &cpc);
stop:
	mutex_unlock(&sbi->gc_mutex);

	put_gc_inode(&gc_list);
	return ret;
}

void build_gc_manager(struct f2fs_sb_info *sbi)
{
	DIRTY_I(sbi)->v_ops = &default_v_ops;
}