/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include <trace/events/f2fs.h>

static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	long wait_ms;

	wait_ms = gc_th->min_sleep_time;

	do {
		if (try_to_freeze())
			continue;
		else
			wait_event_interruptible_timeout(*wq,
						kthread_should_stop(),
						msecs_to_jiffies(wait_ms));
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			increase_sleep_time(gc_th, &wait_ms);
			continue;
		}

#ifdef CONFIG_F2FS_FAULT_INJECTION
		if (time_to_inject(sbi, FAULT_CHECKPOINT))
			f2fs_stop_checkpoint(sbi, false);
#endif

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. IO subsystem is idle by checking the # of writeback pages.
		 * 3. IO subsystem is idle by checking the # of requests in
		 *    bdev's request list.
		 *
		 * Note) We have to avoid triggering GCs too frequently,
		 * because some segments may be invalidated soon afterwards
		 * by user updates or deletions. So we wait some time to
		 * collect more dirty segments.
		 */
		if (!mutex_trylock(&sbi->gc_mutex))
			continue;

		if (!is_idle(sbi)) {
			increase_sleep_time(gc_th, &wait_ms);
			mutex_unlock(&sbi->gc_mutex);
			continue;
		}

		if (has_enough_invalid_blocks(sbi))
			decrease_sleep_time(gc_th, &wait_ms);
		else
			increase_sleep_time(gc_th, &wait_ms);

		stat_inc_bggc_count(sbi);

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi, test_opt(sbi, FORCE_FG_GC)))
			wait_ms = gc_th->no_gc_sleep_time;

		trace_f2fs_background_gc(sbi->sb, wait_ms,
				prefree_segments(sbi), free_segments(sbi));

		/* balancing f2fs's metadata periodically */
		f2fs_balance_fs_bg(sbi);

	} while (!kthread_should_stop());
	return 0;
}
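
/*
 * Rough behavior of the backoff above (a sketch of the intent, see
 * increase_sleep_time()/decrease_sleep_time() for the exact stepping):
 * wait_ms moves between gc_th->min_sleep_time and gc_th->max_sleep_time
 * depending on idleness and the amount of invalid blocks, and a pass that
 * selects no victim parks the thread for gc_th->no_gc_sleep_time. With the
 * DEF_GC_THREAD_*_SLEEP_TIME defaults from gc.h this keeps background GC
 * passes tens of seconds apart, and far rarer on a clean filesystem.
 */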

int start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	int err = 0;

	gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th) {
		err = -ENOMEM;
		goto out;
	}

	gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

	gc_th->gc_idle = 0;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		err = PTR_ERR(gc_th->f2fs_gc_task);
		kfree(gc_th);
		sbi->gc_thread = NULL;
	}
out:
	return err;
}

void stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}

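/*
 * gc_idle (a sysfs tunable) overrides the default victim policy below:
 * 0 keeps the default of cost-benefit for background GC and greedy for
 * foreground GC, 1 forces cost-benefit, and 2 forces greedy.
 */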
static int select_gc_type(struct f2fs_gc_kthread *gc_th, int gc_type)
{
	int gc_mode = (gc_type == BG_GC) ? GC_CB : GC_GREEDY;

	if (gc_th && gc_th->gc_idle) {
		if (gc_th->gc_idle == 1)
			gc_mode = GC_CB;
		else if (gc_th->gc_idle == 2)
			gc_mode = GC_GREEDY;
	}
	return gc_mode;
}

static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_segmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(sbi->gc_thread, gc_type);
		p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
		p->max_search = dirty_i->nr_dirty[DIRTY];
		p->ofs_unit = sbi->segs_per_sec;
	}

	/* we need to check every dirty segment in the FG_GC case */
	if (gc_type != FG_GC && p->max_search > sbi->max_victim_search)
		p->max_search = sbi->max_victim_search;

	p->offset = sbi->last_victim[p->gc_mode];
}

static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return sbi->blocks_per_seg;
	if (p->gc_mode == GC_GREEDY)
		return sbi->blocks_per_seg * p->ofs_unit;
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}
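
/*
 * The value above seeds p.min_cost in get_victim_by_default(), where a
 * candidate is only taken when its cost is strictly lower. Greedy cost is
 * the valid-block count, so a fully valid section can never win; the
 * cost-benefit cost counts down from UINT_MAX as the benefit grows.
 */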

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can reuse victim sections that were
	 * already selected by background GC. Those sections are expected
	 * to have only a few valid blocks left.
	 */
	for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			continue;

		if (no_fggc_candidate(sbi, secno))
			continue;

		clear_bit(secno, dirty_i->victim_secmap);
		return secno * sbi->segs_per_sec;
	}
	return NULL_SEGNO;
}

static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SECNO(sbi, segno);
	unsigned int start = secno * sbi->segs_per_sec;
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;

	for (i = 0; i < sbi->segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, sbi->segs_per_sec);

	mtime = div_u64(mtime, sbi->segs_per_sec);
	vblocks = div_u64(vblocks, sbi->segs_per_sec);

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* Handle the case where the system time was changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}
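
/*
 * The return value implements the classic LFS cost-benefit heuristic,
 * benefit/cost = age * (100 - u) / (100 + u), inverted so the caller's
 * "pick the minimum" loop favors the largest benefit. For example, a cold
 * section that is 30% valid (u = 30) with age = 80 scores
 * UINT_MAX - (100 * 70 * 80) / 130 = UINT_MAX - 4307, beating a 70%-valid
 * section of the same age, which scores only UINT_MAX - 1411.
 */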

static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
			unsigned int segno, struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, sbi->segs_per_sec);
	else
		return get_cb_cost(sbi, segno);
}

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len)
{
	unsigned int end = offset + len, sum = 0;

	while (offset < end) {
		if (test_bit(offset++, addr))
			++sum;
	}
	return sum;
}

/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment
 * and it does not remove it from dirty seglist.
 * When it is called from SSR segment selection, it finds a segment
 * which has minimum valid blocks and removes it from dirty seglist.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
		unsigned int *result, int gc_type, int type, char alloc_mode)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno, last_victim;
	unsigned int last_segment = MAIN_SEGS(sbi);
	unsigned int nsearched = 0;

	mutex_lock(&dirty_i->seglist_lock);

	p.alloc_mode = alloc_mode;
	select_policy(sbi, gc_type, type, &p);

	p.min_segno = NULL_SEGNO;
	p.min_cost = get_max_cost(sbi, &p);

	if (p.max_search == 0)
		goto out;

	last_victim = sbi->last_victim[p.gc_mode];
	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

	while (1) {
		unsigned long cost;
		unsigned int segno;

		segno = find_next_bit(p.dirty_segmap, last_segment, p.offset);
		if (segno >= last_segment) {
			if (sbi->last_victim[p.gc_mode]) {
				last_segment = sbi->last_victim[p.gc_mode];
				sbi->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}

		p.offset = segno + p.ofs_unit;
		if (p.ofs_unit > 1) {
			p.offset -= segno % p.ofs_unit;
			nsearched += count_bits(p.dirty_segmap,
						p.offset - p.ofs_unit,
						p.ofs_unit);
		} else {
			nsearched++;
		}

		secno = GET_SECNO(sbi, segno);

		if (sec_usage_check(sbi, secno))
			goto next;
		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			goto next;
		if (gc_type == FG_GC && p.alloc_mode == LFS &&
					no_fggc_candidate(sbi, secno))
			goto next;

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		}
next:
		if (nsearched >= p.max_search) {
			if (!sbi->last_victim[p.gc_mode] && segno <= last_victim)
				sbi->last_victim[p.gc_mode] = last_victim + 1;
			else
				sbi->last_victim[p.gc_mode] = segno + 1;
			break;
		}
	}
	if (p.min_segno != NULL_SEGNO) {
got_it:
		if (p.alloc_mode == LFS) {
			secno = GET_SECNO(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;

		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	}
out:
	mutex_unlock(&dirty_i->seglist_lock);

	return (p.min_segno == NULL_SEGNO) ? 0 : 1;
}
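
/*
 * Note that the scan is bounded and resumable: it visits at most
 * p.max_search dirty segments per call, starting from
 * sbi->last_victim[p.gc_mode] and wrapping around to segment 0 once,
 * so repeated calls cover the whole main area instead of rescanning
 * the same leading candidates every time.
 */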

static const struct victim_selection default_v_ops = {
	.get_victim = get_victim_by_default,
};

static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
	struct inode_entry *ie;

	ie = radix_tree_lookup(&gc_list->iroot, ino);
	if (ie)
		return ie->inode;
	return NULL;
}

static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
	struct inode_entry *new_ie;

	if (inode == find_gc_inode(gc_list, inode->i_ino)) {
		iput(inode);
		return;
	}
	new_ie = f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
	new_ie->inode = inode;

	f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
	list_add_tail(&new_ie->list, &gc_list->ilist);
}

static void put_gc_inode(struct gc_inode_list *gc_list)
{
	struct inode_entry *ie, *next_ie;
	list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
		radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(inode_entry_slab, ie);
	}
}

static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	mutex_lock(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	mutex_unlock(&sit_i->sentry_lock);
	return ret;
}

/*
 * This function compares the node address recorded in the summary block
 * with the one in the NAT. If it is valid, the node is migrated with cold
 * status; otherwise (an invalid node) it is ignored.
 */
static void gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;
		struct node_info ni;

		/* stop BG_GC if there are not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			ra_node_page(sbi, nid);
			continue;
		}

		/* phase == 2 */
		node_page = get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* block may become invalid during get_node_page */
		if (check_valid_map(sbi, segno, off) == 0) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		get_node_info(sbi, nid, &ni);
		if (ni.blk_addr != start_addr + off) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		move_node_page(node_page, gc_type);
		stat_inc_node_blk_count(sbi, 1, gc_type);
	}

	if (++phase < 3)
		goto next_step;
}
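
/*
 * The segment is swept in three phases to batch I/O: phase 0 reads ahead
 * the NAT blocks for every live node in the segment, phase 1 reads ahead
 * the node pages themselves, and phase 2 re-verifies each block and moves
 * it via move_node_page().
 */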

/*
 * Calculate the start block index that the given node offset points to.
 * Be careful: the caller must pass a node offset of a direct node block
 * only. Passing an offset of any other node block type, such as an
 * indirect or double indirect node block, is a caller's bug.
 */
block_t start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(inode);
}
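
/*
 * A sketch of the mapping, assuming the usual 4KB-block layout where
 * NIDS_PER_BLOCK and ADDRS_PER_BLOCK are both 1018: node offsets 1 and 2
 * are the two direct nodes, so node_ofs = 2 gives bidx = 1 and a start
 * index of ADDRS_PER_INODE(inode) + 1018. The "dec" terms subtract the
 * indirect node blocks themselves, which carry node IDs rather than data
 * addresses and therefore contribute no data blocks.
 */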

static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return false;

	get_node_info(sbi, nid, dni);

	if (sum->version != dni->version) {
		f2fs_msg(sbi->sb, KERN_WARNING,
				"%s: valid data with mismatched node version.",
				__func__);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = datablock_addr(node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr)
		return false;
	return true;
}

static void move_encrypted_block(struct inode *inode, block_t bidx)
{
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.type = DATA,
		.op = REQ_OP_READ,
		.op_flags = READ_SYNC,
		.encrypted_page = NULL,
	};
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	struct page *page;
	block_t newaddr;
	int err;

	/* do not read out */
	page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
	if (!page)
		return;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
	if (err)
		goto out;

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		ClearPageUptodate(page);
		goto put_out;
	}

	/*
	 * don't cache encrypted data into the meta inode until the previous
	 * dirty data has been written back, to avoid racing between GC
	 * and flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true);

	get_node_info(fio.sbi, dn.nid, &ni);
	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
							&sum, CURSEG_COLD_DATA);

	fio.encrypted_page = pagecache_get_page(META_MAPPING(fio.sbi), newaddr,
					FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		goto recover_block;
	}

	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_page_out;

	/* write page */
	lock_page(fio.encrypted_page);

	if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi))) {
		err = -EIO;
		goto put_page_out;
	}
	if (unlikely(!PageUptodate(fio.encrypted_page))) {
		err = -EIO;
		goto put_page_out;
	}

	set_page_dirty(fio.encrypted_page);
	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true);
	if (clear_page_dirty_for_io(fio.encrypted_page))
		dec_page_count(fio.sbi, F2FS_DIRTY_META);

	set_page_writeback(fio.encrypted_page);

	/* allocate block address */
	f2fs_wait_on_page_writeback(dn.node_page, NODE, true);

	fio.op = REQ_OP_WRITE;
	fio.op_flags = WRITE_SYNC;
	fio.new_blkaddr = newaddr;
	f2fs_submit_page_mbio(&fio);

	f2fs_update_data_blkaddr(&dn, newaddr);
	set_inode_flag(inode, FI_APPEND_WRITE);
	if (page->index == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
put_page_out:
	f2fs_put_page(fio.encrypted_page, 1);
recover_block:
	if (err)
		__f2fs_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
								true, true);
put_out:
	f2fs_put_dnode(&dn);
out:
	f2fs_put_page(page, 1);
}
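
/*
 * Design note: the block is bounced through the meta inode's address space
 * as raw ciphertext, so GC never needs the file's encryption key. The read
 * targets the old address and the write targets the newly allocated one;
 * if anything fails after allocate_data_block(), __f2fs_replace_block()
 * rolls the allocation back to the old address.
 */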

static void move_data_page(struct inode *inode, block_t bidx, int gc_type)
{
	struct page *page;

	page = get_lock_data_page(inode, bidx, true);
	if (IS_ERR(page))
		return;

	if (gc_type == BG_GC) {
		if (PageWriteback(page))
			goto out;
		set_page_dirty(page);
		set_cold_data(page);
	} else {
		struct f2fs_io_info fio = {
			.sbi = F2FS_I_SB(inode),
			.type = DATA,
			.op = REQ_OP_WRITE,
			.op_flags = WRITE_SYNC,
			.page = page,
			.encrypted_page = NULL,
		};
		bool is_dirty = PageDirty(page);
		int err;

retry:
		set_page_dirty(page);
		f2fs_wait_on_page_writeback(page, DATA, true);
		if (clear_page_dirty_for_io(page))
			inode_dec_dirty_pages(inode);

		set_cold_data(page);

		err = do_write_data_page(&fio);
		if (err == -ENOMEM && is_dirty) {
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto retry;
		}

		clear_cold_data(page);
	}
out:
	f2fs_put_page(page, 1);
}

/*
 * This function tries to get the parent node of the victim data block, and
 * checks the data block's validity. If the block is valid, it is copied
 * with cold status and the parent node is updated.
 * If the parent node is not valid or the data block address differs,
 * the victim data block is ignored.
 */
static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;
		nid_t nid = le32_to_cpu(entry->nid);

		/* stop BG_GC if there are not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			ra_node_page(sbi, nid);
			continue;
		}

		/* Get an inode by ino with checking validity */
		if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
			continue;

		if (phase == 2) {
			ra_node_page(sbi, dni.ino);
			continue;
		}

		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 3) {
			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode) || is_bad_inode(inode))
				continue;

			/* if encrypted inode, defer the move to phase 4 */
			if (f2fs_encrypted_inode(inode) &&
					S_ISREG(inode->i_mode)) {
				add_gc_inode(gc_list, inode);
				continue;
			}

			start_bidx = start_bidx_of_node(nofs, inode);
			data_page = get_read_data_page(inode,
					start_bidx + ofs_in_node, REQ_RAHEAD,
					true);
			if (IS_ERR(data_page)) {
				iput(inode);
				continue;
			}

			f2fs_put_page(data_page, 0);
			add_gc_inode(gc_list, inode);
			continue;
		}

		/* phase 4 */
		inode = find_gc_inode(gc_list, dni.ino);
		if (inode) {
			struct f2fs_inode_info *fi = F2FS_I(inode);
			bool locked = false;

			if (S_ISREG(inode->i_mode)) {
				if (!down_write_trylock(&fi->dio_rwsem[READ]))
					continue;
				if (!down_write_trylock(
						&fi->dio_rwsem[WRITE])) {
					up_write(&fi->dio_rwsem[READ]);
					continue;
				}
				locked = true;
			}

			start_bidx = start_bidx_of_node(nofs, inode)
								+ ofs_in_node;
			if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
				move_encrypted_block(inode, start_bidx);
			else
				move_data_page(inode, start_bidx, gc_type);

			if (locked) {
				up_write(&fi->dio_rwsem[WRITE]);
				up_write(&fi->dio_rwsem[READ]);
			}

			stat_inc_data_blk_count(sbi, 1, gc_type);
		}
	}

	if (++phase < 5)
		goto next_step;
}
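
/*
 * As in gc_node_segment(), the five phases exist to batch I/O: phases 0-2
 * read ahead the NAT entries, the summary's node pages, and the owning
 * inodes' node pages; phase 3 pins each inode and reads ahead its data
 * page; phase 4 performs the actual moves under the inode's dio_rwsem to
 * keep direct I/O from racing with the relocation.
 */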

static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
			int gc_type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;

	mutex_lock(&sit_i->sentry_lock);
	ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
					      NO_CHECK_TYPE, LFS);
	mutex_unlock(&sit_i->sentry_lock);
	return ret;
}

static int do_garbage_collect(struct f2fs_sb_info *sbi,
				unsigned int start_segno,
				struct gc_inode_list *gc_list, int gc_type)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	struct blk_plug plug;
	unsigned int segno = start_segno;
	unsigned int end_segno = start_segno + sbi->segs_per_sec;
	int sec_freed = 0;
	unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
						SUM_TYPE_DATA : SUM_TYPE_NODE;

	/* readahead multiple SSA blocks, since they have contiguous addresses */
	if (sbi->segs_per_sec > 1)
		ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
					sbi->segs_per_sec, META_SSA, true);

	/* reference all summary pages */
	while (segno < end_segno) {
		sum_page = get_sum_page(sbi, segno++);
		unlock_page(sum_page);
	}

	blk_start_plug(&plug);

	for (segno = start_segno; segno < end_segno; segno++) {

		/* find segment summary of victim */
		sum_page = find_get_page(META_MAPPING(sbi),
					GET_SUM_BLOCK(sbi, segno));
		f2fs_put_page(sum_page, 0);

		if (get_valid_blocks(sbi, segno, 1) == 0 ||
				!PageUptodate(sum_page) ||
				unlikely(f2fs_cp_error(sbi)))
			goto next;

		sum = page_address(sum_page);
		f2fs_bug_on(sbi, type != GET_SUM_TYPE((&sum->footer)));

		/*
		 * this is to avoid deadlock:
		 * - lock_page(sum_page)         - f2fs_replace_block
		 *  - check_valid_map()            - mutex_lock(sentry_lock)
		 *   - mutex_lock(sentry_lock)     - change_curseg()
		 *                                  - lock_page(sum_page)
		 */

		if (type == SUM_TYPE_NODE)
			gc_node_segment(sbi, sum->entries, segno, gc_type);
		else
			gc_data_segment(sbi, sum->entries, gc_list, segno,
								gc_type);

		stat_inc_seg_count(sbi, type, gc_type);
next:
		f2fs_put_page(sum_page, 0);
	}

	if (gc_type == FG_GC)
		f2fs_submit_merged_bio(sbi,
				(type == SUM_TYPE_NODE) ? NODE : DATA, WRITE);

	blk_finish_plug(&plug);

	if (gc_type == FG_GC &&
		get_valid_blocks(sbi, start_segno, sbi->segs_per_sec) == 0)
		sec_freed = 1;

	stat_inc_call_count(sbi->stat_info);

	return sec_freed;
}
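
/*
 * Note that GC always works on a whole section (segs_per_sec segments),
 * and a foreground pass only reports success once every valid block in
 * the section has been migrated; the caller then issues a checkpoint to
 * turn the resulting prefree segments into free space (see f2fs_gc()).
 */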

int f2fs_gc(struct f2fs_sb_info *sbi, bool sync)
{
	unsigned int segno;
	int gc_type = sync ? FG_GC : BG_GC;
	int sec_freed = 0;
	int ret = -EINVAL;
	struct cp_control cpc;
	struct gc_inode_list gc_list = {
		.ilist = LIST_HEAD_INIT(gc_list.ilist),
		.iroot = RADIX_TREE_INIT(GFP_NOFS),
	};

	cpc.reason = __get_cp_reason(sbi);
gc_more:
	segno = NULL_SEGNO;

	if (unlikely(!(sbi->sb->s_flags & MS_ACTIVE)))
		goto stop;
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto stop;
	}

	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, sec_freed, 0)) {
		gc_type = FG_GC;
		/*
		 * If there is no victim and no prefree segment but there
		 * are still not enough free sections, we should flush
		 * dent/node blocks and do garbage collections.
		 */
		if (__get_victim(sbi, &segno, gc_type) ||
						prefree_segments(sbi)) {
			ret = write_checkpoint(sbi, &cpc);
			if (ret)
				goto stop;
			segno = NULL_SEGNO;
		} else if (has_not_enough_free_secs(sbi, 0, 0)) {
			ret = write_checkpoint(sbi, &cpc);
			if (ret)
				goto stop;
		}
	}

	if (segno == NULL_SEGNO && !__get_victim(sbi, &segno, gc_type))
		goto stop;
	ret = 0;

	if (do_garbage_collect(sbi, segno, &gc_list, gc_type) &&
			gc_type == FG_GC)
		sec_freed++;

	if (gc_type == FG_GC)
		sbi->cur_victim_sec = NULL_SEGNO;

	if (!sync) {
		if (has_not_enough_free_secs(sbi, sec_freed, 0))
			goto gc_more;

		if (gc_type == FG_GC)
			ret = write_checkpoint(sbi, &cpc);
	}
stop:
	mutex_unlock(&sbi->gc_mutex);

	put_gc_inode(&gc_list);

	if (sync)
		ret = sec_freed ? 0 : -EAGAIN;
	return ret;
}

void build_gc_manager(struct f2fs_sb_info *sbi)
{
	u64 main_count, resv_count, ovp_count, blocks_per_sec;

	DIRTY_I(sbi)->v_ops = &default_v_ops;

	/* threshold of # of valid blocks in a section for victims of FG_GC */
	main_count = SM_I(sbi)->main_segments << sbi->log_blocks_per_seg;
	resv_count = SM_I(sbi)->reserved_segments << sbi->log_blocks_per_seg;
	ovp_count = SM_I(sbi)->ovp_segments << sbi->log_blocks_per_seg;
	blocks_per_sec = sbi->blocks_per_seg * sbi->segs_per_sec;

	sbi->fggc_threshold = div_u64((main_count - ovp_count) * blocks_per_sec,
					(main_count - resv_count));
}
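
/*
 * In effect, fggc_threshold = blocks_per_sec * (main - ovp) / (main - resv):
 * a section whose valid-block count reaches this fraction of its capacity
 * is skipped by no_fggc_candidate() during foreground victim selection,
 * on the reasoning that migrating it would likely cost more writes than
 * the space it frees. As a hypothetical example, with 512 blocks per
 * section, a 10% overprovision area, and 5% reserved, the threshold is
 * about 512 * 0.90 / 0.95 = 485 blocks.
 */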