/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include <trace/events/f2fs.h>

static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	long wait_ms;

	wait_ms = gc_th->min_sleep_time;

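	/*
	 * The sleep interval adapts to load: it is lengthened while the
	 * filesystem is frozen or busy and shortened while invalid blocks
	 * pile up (the helpers in gc.h are meant to keep it within
	 * [min_sleep_time, max_sleep_time]), and it is reset to
	 * no_gc_sleep_time whenever no victim could be selected.
	 */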
	do {
		if (try_to_freeze())
			continue;
		else
			wait_event_interruptible_timeout(*wq,
				kthread_should_stop(),
				msecs_to_jiffies(wait_ms));
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			increase_sleep_time(gc_th, &wait_ms);
			continue;
		}

		/*
		 * [GC triggering conditions]
		 * 0. GC is not currently running.
		 * 1. There are enough dirty segments.
		 * 2. The IO subsystem is idle, judged by the number of
		 *    writeback pages.
		 * 3. The IO subsystem is idle, judged by the number of
		 *    requests in the bdev's request list.
		 *
		 * Note: we have to avoid triggering GCs too frequently,
		 * because some segments may be invalidated soon afterwards
		 * by user updates or deletions, so it is better to wait a
		 * while and let dirty segments accumulate.
		 */
		if (!mutex_trylock(&sbi->gc_mutex))
			continue;

		if (!is_idle(sbi)) {
			increase_sleep_time(gc_th, &wait_ms);
			mutex_unlock(&sbi->gc_mutex);
			continue;
		}

		if (has_enough_invalid_blocks(sbi))
			decrease_sleep_time(gc_th, &wait_ms);
		else
			increase_sleep_time(gc_th, &wait_ms);

		stat_inc_bggc_count(sbi);

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi, test_opt(sbi, FORCE_FG_GC)))
			wait_ms = gc_th->no_gc_sleep_time;

		trace_f2fs_background_gc(sbi->sb, wait_ms,
				prefree_segments(sbi), free_segments(sbi));

		/* balancing f2fs's metadata periodically */
		f2fs_balance_fs_bg(sbi);

	} while (!kthread_should_stop());
	return 0;
}

int start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	int err = 0;

	gc_th = f2fs_kmalloc(sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th) {
		err = -ENOMEM;
		goto out;
	}

	gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

	gc_th->gc_idle = 0;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		err = PTR_ERR(gc_th->f2fs_gc_task);
		kfree(gc_th);
		sbi->gc_thread = NULL;
	}
out:
	return err;
}

void stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}

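/*
 * gc_idle (a tunable, exposed via sysfs as I understand it) overrides the
 * default victim policy: 1 forces cost-benefit, 2 forces greedy; otherwise
 * background GC uses cost-benefit and foreground GC uses greedy.
 */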
static int select_gc_type(struct f2fs_gc_kthread *gc_th, int gc_type)
{
	int gc_mode = (gc_type == BG_GC) ? GC_CB : GC_GREEDY;

	if (gc_th && gc_th->gc_idle) {
		if (gc_th->gc_idle == 1)
			gc_mode = GC_CB;
		else if (gc_th->gc_idle == 2)
			gc_mode = GC_GREEDY;
	}
	return gc_mode;
}

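/*
 * SSR allocation always scans the per-type dirty segmap greedily at
 * segment granularity, while LFS cleaning scans all dirty segments at
 * section granularity with the mode chosen by select_gc_type(); both
 * scans are capped at max_victim_search candidates.
 */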
static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_segmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(sbi->gc_thread, gc_type);
		p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
		p->max_search = dirty_i->nr_dirty[DIRTY];
		p->ofs_unit = sbi->segs_per_sec;
	}

	if (p->max_search > sbi->max_victim_search)
		p->max_search = sbi->max_victim_search;

	p->offset = sbi->last_victim[p->gc_mode];
}

static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return sbi->blocks_per_seg;
	if (p->gc_mode == GC_GREEDY)
		return sbi->blocks_per_seg * p->ofs_unit;
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can reuse victim sections that were
	 * selected by background GC earlier.
	 * Those sections are guaranteed to have few valid blocks.
	 */
	for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			continue;
		clear_bit(secno, dirty_i->victim_secmap);
		return secno * sbi->segs_per_sec;
	}
	return NULL_SEGNO;
}

static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SECNO(sbi, segno);
	unsigned int start = secno * sbi->segs_per_sec;
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;

	for (i = 0; i < sbi->segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, sbi->segs_per_sec);

	mtime = div_u64(mtime, sbi->segs_per_sec);
	vblocks = div_u64(vblocks, sbi->segs_per_sec);

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* Handle the case where the system time was changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

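	/*
	 * This is the classic cost-benefit heuristic from log-structured
	 * file system cleaning: prefer the section maximizing
	 * benefit/cost = (1 - u) * age / (1 + u), where u is the average
	 * valid-block percentage and age the normalized modification time.
	 * Subtracting the scaled benefit from UINT_MAX turns the maximum
	 * search into the minimum-cost search done by the caller.
	 */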
	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}

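/*
 * Per-candidate cost (lower is better): SSR uses the number of blocks
 * valid as of the last checkpoint, greedy uses the current number of
 * valid blocks, and cost-benefit uses the age-weighted utilization
 * computed by get_cb_cost().
 */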
static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
			unsigned int segno, struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, sbi->segs_per_sec);
	else
		return get_cb_cost(sbi, segno);
}

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len)
{
	unsigned int end = offset + len, sum = 0;

	while (offset < end) {
		if (test_bit(offset++, addr))
			++sum;
	}
	return sum;
}

/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When called for GC, it just picks a victim segment and does not
 * remove it from the dirty seglist.
 * When called for SSR segment selection, it finds the segment with the
 * fewest valid blocks and removes it from the dirty seglist.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
		unsigned int *result, int gc_type, int type, char alloc_mode)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno, max_cost, last_victim;
	unsigned int last_segment = MAIN_SEGS(sbi);
	unsigned int nsearched = 0;

	mutex_lock(&dirty_i->seglist_lock);

	p.alloc_mode = alloc_mode;
	select_policy(sbi, gc_type, type, &p);

	p.min_segno = NULL_SEGNO;
	p.min_cost = max_cost = get_max_cost(sbi, &p);

	if (p.max_search == 0)
		goto out;

	last_victim = sbi->last_victim[p.gc_mode];
	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

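	/*
	 * Scan the dirty segmap circularly: resume from the last victim
	 * position, wrap around to segment 0 once the end is reached, and
	 * stop after p.max_search candidates have been examined.
	 */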
	while (1) {
		unsigned long cost;
		unsigned int segno;

		segno = find_next_bit(p.dirty_segmap, last_segment, p.offset);
		if (segno >= last_segment) {
			if (sbi->last_victim[p.gc_mode]) {
				last_segment = sbi->last_victim[p.gc_mode];
				sbi->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}

		p.offset = segno + p.ofs_unit;
		if (p.ofs_unit > 1) {
			p.offset -= segno % p.ofs_unit;
			nsearched += count_bits(p.dirty_segmap,
						p.offset - p.ofs_unit,
						p.ofs_unit);
		} else {
			nsearched++;
		}

		secno = GET_SECNO(sbi, segno);

		if (sec_usage_check(sbi, secno))
			goto next;
		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			goto next;

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		}
next:
		if (nsearched >= p.max_search) {
			if (!sbi->last_victim[p.gc_mode] && segno <= last_victim)
				sbi->last_victim[p.gc_mode] = last_victim + 1;
			else
				sbi->last_victim[p.gc_mode] = segno + 1;
			break;
		}
	}
	if (p.min_segno != NULL_SEGNO) {
got_it:
		if (p.alloc_mode == LFS) {
			secno = GET_SECNO(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;

		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	}
out:
	mutex_unlock(&dirty_i->seglist_lock);

	return (p.min_segno == NULL_SEGNO) ? 0 : 1;
}

static const struct victim_selection default_v_ops = {
	.get_victim = get_victim_by_default,
};

static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
	struct inode_entry *ie;

	ie = radix_tree_lookup(&gc_list->iroot, ino);
	if (ie)
		return ie->inode;
	return NULL;
}

static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
	struct inode_entry *new_ie;

	if (inode == find_gc_inode(gc_list, inode->i_ino)) {
		iput(inode);
		return;
	}
	new_ie = f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
	new_ie->inode = inode;

	f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
	list_add_tail(&new_ie->list, &gc_list->ilist);
}

static void put_gc_inode(struct gc_inode_list *gc_list)
{
	struct inode_entry *ie, *next_ie;
	list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
		radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(inode_entry_slab, ie);
	}
}

static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	mutex_lock(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	mutex_unlock(&sit_i->sentry_lock);
	return ret;
}

/*
 * This function compares the node address recorded in the summary with
 * the one in the NAT.  If the node is still valid, migrate it with cold
 * status; otherwise (an invalid node) skip it.
 */
static void gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	bool initial = true;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;

	start_addr = START_BLOCK(sbi, segno);

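	/*
	 * Two passes over the summary entries: the first (initial == true)
	 * only issues readahead for the node pages, the second actually
	 * migrates them.
	 */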
next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;
		struct node_info ni;

		/* stop BG_GC if there are not enough free sections */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (initial) {
			ra_node_page(sbi, nid);
			continue;
		}
		node_page = get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* block may become invalid during get_node_page */
		if (check_valid_map(sbi, segno, off) == 0) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		get_node_info(sbi, nid, &ni);
		if (ni.blk_addr != start_addr + off) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		move_node_page(node_page, gc_type);
		stat_inc_node_blk_count(sbi, 1, gc_type);
	}

	if (initial) {
		initial = false;
		goto next_step;
	}
}

/*
 * Calculate the start block index of the data addressed by the given
 * node offset.
 * Be careful: the caller must pass a node offset of a direct node block
 * only.  Passing an offset that points to another node block type, such
 * as an indirect or double indirect node block, is a caller's bug.
 */
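/*
 * Worked example (assuming the usual NIDS_PER_BLOCK of 1018):
 * node_ofs == 4 is the first direct node under the first indirect node,
 * so dec == (4 - 4) / 1019 == 0 and bidx == 4 - 2 - 0 == 2, i.e. the
 * direct node right after the two in-inode direct nodes, whose data
 * starts at 2 * ADDRS_PER_BLOCK + ADDRS_PER_INODE(inode).
 */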
block_t start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(inode);
}

static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return false;

	get_node_info(sbi, nid, dni);

	if (sum->version != dni->version) {
		f2fs_put_page(node_page, 1);
		return false;
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = datablock_addr(node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr)
		return false;
	return true;
}

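/*
 * Encrypted blocks are migrated as raw ciphertext, without going through
 * the decrypt/re-encrypt path: the block is read into the meta inode's
 * address space at the new block address and written back from there.
 */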
static void move_encrypted_block(struct inode *inode, block_t bidx)
{
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.type = DATA,
		.rw = READ_SYNC,
		.encrypted_page = NULL,
	};
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	struct page *page;
	block_t newaddr;
	int err;

	/* do not read out */
	page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
	if (!page)
		return;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
	if (err)
		goto out;

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		ClearPageUptodate(page);
		goto put_out;
	}

	/*
	 * don't cache encrypted data in the meta inode until the previously
	 * dirtied data has been written back, to avoid racing between GC
	 * and flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true);

	get_node_info(fio.sbi, dn.nid, &ni);
	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
							&sum, CURSEG_COLD_DATA);

	fio.encrypted_page = pagecache_get_page(META_MAPPING(fio.sbi), newaddr,
					FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		goto recover_block;
	}

	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_page_out;

	/* write page */
	lock_page(fio.encrypted_page);

	if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi))) {
		err = -EIO;
		goto put_page_out;
	}
	if (unlikely(!PageUptodate(fio.encrypted_page))) {
		err = -EIO;
		goto put_page_out;
	}

	set_page_dirty(fio.encrypted_page);
	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true);
	if (clear_page_dirty_for_io(fio.encrypted_page))
		dec_page_count(fio.sbi, F2FS_DIRTY_META);

	set_page_writeback(fio.encrypted_page);

	/* allocate block address */
	f2fs_wait_on_page_writeback(dn.node_page, NODE, true);

	fio.rw = WRITE_SYNC;
	fio.new_blkaddr = newaddr;
	f2fs_submit_page_mbio(&fio);

	f2fs_update_data_blkaddr(&dn, newaddr);
	set_inode_flag(inode, FI_APPEND_WRITE);
	if (page->index == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
put_page_out:
	f2fs_put_page(fio.encrypted_page, 1);
recover_block:
	if (err)
		__f2fs_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
								true, true);
put_out:
	f2fs_put_dnode(&dn);
out:
	f2fs_put_page(page, 1);
}

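/*
 * For background GC it is enough to mark the page dirty and cold so the
 * regular writeback path migrates it later; foreground GC writes the page
 * out synchronously, retrying on a transient -ENOMEM.
 */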
static void move_data_page(struct inode *inode, block_t bidx, int gc_type)
{
	struct page *page;

	page = get_lock_data_page(inode, bidx, true);
	if (IS_ERR(page))
		return;

	if (gc_type == BG_GC) {
		if (PageWriteback(page))
			goto out;
		set_page_dirty(page);
		set_cold_data(page);
	} else {
		struct f2fs_io_info fio = {
			.sbi = F2FS_I_SB(inode),
			.type = DATA,
			.rw = WRITE_SYNC,
			.page = page,
			.encrypted_page = NULL,
		};
		bool is_dirty = PageDirty(page);
		int err;

retry:
		set_page_dirty(page);
		f2fs_wait_on_page_writeback(page, DATA, true);
		if (clear_page_dirty_for_io(page))
			inode_dec_dirty_pages(inode);

		set_cold_data(page);

		err = do_write_data_page(&fio);
		if (err == -ENOMEM && is_dirty) {
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto retry;
		}

		clear_cold_data(page);
	}
out:
	f2fs_put_page(page, 1);
}

/*
 * This function tries to get the parent node of the victim data block
 * and checks the block's validity.  If the block is valid, copy it with
 * cold status and update the parent node.
 * If the parent node is not valid or the data block address is
 * different, the victim data block is ignored.
 */
static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;

	start_addr = START_BLOCK(sbi, segno);

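	/*
	 * Four passes over the summary entries:
	 *   phase 0: readahead the dnode pages
	 *   phase 1: readahead the inode pages of live blocks
	 *   phase 2: iget() the inodes and readahead their data pages
	 *   phase 3: move the data pages of the collected inodes
	 */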
next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;

		/* stop BG_GC if there are not enough free sections */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			ra_node_page(sbi, le32_to_cpu(entry->nid));
			continue;
		}

		/* Get an inode by ino after checking validity */
		if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
			continue;

		if (phase == 1) {
			ra_node_page(sbi, dni.ino);
			continue;
		}

		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 2) {
			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode) || is_bad_inode(inode))
				continue;

			/* if encrypted inode, defer it to phase 3 */
			if (f2fs_encrypted_inode(inode) &&
						S_ISREG(inode->i_mode)) {
				add_gc_inode(gc_list, inode);
				continue;
			}

			start_bidx = start_bidx_of_node(nofs, inode);
			data_page = get_read_data_page(inode,
					start_bidx + ofs_in_node, READA, true);
			if (IS_ERR(data_page)) {
				iput(inode);
				continue;
			}

			f2fs_put_page(data_page, 0);
			add_gc_inode(gc_list, inode);
			continue;
		}

		/* phase 3 */
		inode = find_gc_inode(gc_list, dni.ino);
		if (inode) {
			struct f2fs_inode_info *fi = F2FS_I(inode);
			bool locked = false;

			if (S_ISREG(inode->i_mode)) {
				if (!down_write_trylock(&fi->dio_rwsem[READ]))
					continue;
				if (!down_write_trylock(
						&fi->dio_rwsem[WRITE])) {
					up_write(&fi->dio_rwsem[READ]);
					continue;
				}
				locked = true;
			}

			start_bidx = start_bidx_of_node(nofs, inode)
								+ ofs_in_node;
			if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
				move_encrypted_block(inode, start_bidx);
			else
				move_data_page(inode, start_bidx, gc_type);

			if (locked) {
				up_write(&fi->dio_rwsem[WRITE]);
				up_write(&fi->dio_rwsem[READ]);
			}

			stat_inc_data_blk_count(sbi, 1, gc_type);
		}
	}

	if (++phase < 4)
		goto next_step;
}

static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
			int gc_type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;

	mutex_lock(&sit_i->sentry_lock);
	ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
					NO_CHECK_TYPE, LFS);
	mutex_unlock(&sit_i->sentry_lock);
	return ret;
}

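/*
 * Collect one section at a time: pin all the SSA pages of the section
 * first, GC every segment that still has valid blocks, and, for
 * foreground GC, count how many segments were actually freed.
 */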
static int do_garbage_collect(struct f2fs_sb_info *sbi,
				unsigned int start_segno,
				struct gc_inode_list *gc_list, int gc_type)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	struct blk_plug plug;
	unsigned int segno = start_segno;
	unsigned int end_segno = start_segno + sbi->segs_per_sec;
	int seg_freed = 0;
	unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
						SUM_TYPE_DATA : SUM_TYPE_NODE;

	/* readahead multiple SSA blocks with contiguous addresses */
	if (sbi->segs_per_sec > 1)
		ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
					sbi->segs_per_sec, META_SSA, true);

	/* reference all summary pages */
	while (segno < end_segno) {
		sum_page = get_sum_page(sbi, segno++);
		unlock_page(sum_page);
	}

	blk_start_plug(&plug);

	for (segno = start_segno; segno < end_segno; segno++) {

		if (get_valid_blocks(sbi, segno, 1) == 0)
			continue;

		/* find the segment summary of the victim */
		sum_page = find_get_page(META_MAPPING(sbi),
					GET_SUM_BLOCK(sbi, segno));
		f2fs_bug_on(sbi, !PageUptodate(sum_page));
		f2fs_put_page(sum_page, 0);

		sum = page_address(sum_page);
		f2fs_bug_on(sbi, type != GET_SUM_TYPE((&sum->footer)));

		/*
		 * this is to avoid deadlock:
		 * - lock_page(sum_page)         - f2fs_replace_block
		 *  - check_valid_map()            - mutex_lock(sentry_lock)
		 *   - mutex_lock(sentry_lock)      - change_curseg()
		 *                                    - lock_page(sum_page)
		 */

		if (type == SUM_TYPE_NODE)
			gc_node_segment(sbi, sum->entries, segno, gc_type);
		else
			gc_data_segment(sbi, sum->entries, gc_list, segno,
								gc_type);

		stat_inc_seg_count(sbi, type, gc_type);

		f2fs_put_page(sum_page, 0);
	}

	if (gc_type == FG_GC)
		f2fs_submit_merged_bio(sbi,
				(type == SUM_TYPE_NODE) ? NODE : DATA, WRITE);

	blk_finish_plug(&plug);

	if (gc_type == FG_GC) {
		while (start_segno < end_segno)
			if (get_valid_blocks(sbi, start_segno++, 1) == 0)
				seg_freed++;
	}

	stat_inc_call_count(sbi->stat_info);

	return seg_freed;
}

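/*
 * Entry point for both background and foreground GC.  With @sync the
 * caller requires foreground GC and gets -EAGAIN if no section could be
 * freed; otherwise background GC runs, escalates to foreground GC when
 * free sections run short, and loops until enough sections are free.
 */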
int f2fs_gc(struct f2fs_sb_info *sbi, bool sync)
{
	unsigned int segno;
	int gc_type = sync ? FG_GC : BG_GC;
	int sec_freed = 0, seg_freed;
	int ret = -EINVAL;
	struct cp_control cpc;
	struct gc_inode_list gc_list = {
		.ilist = LIST_HEAD_INIT(gc_list.ilist),
		.iroot = RADIX_TREE_INIT(GFP_NOFS),
	};

	cpc.reason = __get_cp_reason(sbi);
gc_more:
	segno = NULL_SEGNO;

	if (unlikely(!(sbi->sb->s_flags & MS_ACTIVE)))
		goto stop;
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto stop;
	}

	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, sec_freed)) {
		gc_type = FG_GC;
		/*
		 * If there is no victim and no prefree segment but we still
		 * do not have enough free sections, we should flush
		 * dent/node blocks and do garbage collection.
		 */
		if (__get_victim(sbi, &segno, gc_type) ||
						prefree_segments(sbi)) {
			write_checkpoint(sbi, &cpc);
			segno = NULL_SEGNO;
		} else if (has_not_enough_free_secs(sbi, 0)) {
			write_checkpoint(sbi, &cpc);
		}
	}

	if (segno == NULL_SEGNO && !__get_victim(sbi, &segno, gc_type))
		goto stop;
	ret = 0;

	seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type);

	if (gc_type == FG_GC && seg_freed == sbi->segs_per_sec)
		sec_freed++;

	if (gc_type == FG_GC)
		sbi->cur_victim_sec = NULL_SEGNO;

	if (!sync) {
		if (has_not_enough_free_secs(sbi, sec_freed))
			goto gc_more;

		if (gc_type == FG_GC)
			write_checkpoint(sbi, &cpc);
	}
stop:
	mutex_unlock(&sbi->gc_mutex);

	put_gc_inode(&gc_list);

	if (sync)
		ret = sec_freed ? 0 : -EAGAIN;
	return ret;
}

void build_gc_manager(struct f2fs_sb_info *sbi)
{
	DIRTY_I(sbi)->v_ops = &default_v_ops;
}