/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include <trace/events/f2fs.h>

static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	long wait_ms;

	wait_ms = gc_th->min_sleep_time;

	do {
		if (try_to_freeze())
			continue;
		else
			wait_event_interruptible_timeout(*wq,
				kthread_should_stop(),
				msecs_to_jiffies(wait_ms));
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			increase_sleep_time(gc_th, &wait_ms);
			continue;
		}

#ifdef CONFIG_F2FS_FAULT_INJECTION
		if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
			f2fs_show_injection_info(FAULT_CHECKPOINT);
			f2fs_stop_checkpoint(sbi, false);
		}
#endif

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. IO subsystem is idle by checking the # of writeback pages.
		 * 3. IO subsystem is idle by checking the # of requests in
		 *    bdev's request list.
		 *
		 * Note) We have to avoid triggering GCs too frequently,
		 * because some segments may be invalidated soon afterwards
		 * by user updates or deletions, so it is better to wait a
		 * while and let dirty segments accumulate.
		 */
		if (!mutex_trylock(&sbi->gc_mutex))
			continue;

		if (!is_idle(sbi)) {
			increase_sleep_time(gc_th, &wait_ms);
			mutex_unlock(&sbi->gc_mutex);
			continue;
		}

		if (has_enough_invalid_blocks(sbi))
			decrease_sleep_time(gc_th, &wait_ms);
		else
			increase_sleep_time(gc_th, &wait_ms);

		stat_inc_bggc_count(sbi);

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi, test_opt(sbi, FORCE_FG_GC), true, NULL_SEGNO))
			wait_ms = gc_th->no_gc_sleep_time;

		trace_f2fs_background_gc(sbi->sb, wait_ms,
				prefree_segments(sbi), free_segments(sbi));

		/* balancing f2fs's metadata periodically */
		f2fs_balance_fs_bg(sbi);

	} while (!kthread_should_stop());
	return 0;
}

int start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	int err = 0;

	gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th) {
		err = -ENOMEM;
		goto out;
	}

	gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

	gc_th->gc_idle = 0;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		err = PTR_ERR(gc_th->f2fs_gc_task);
		kfree(gc_th);
		sbi->gc_thread = NULL;
	}
out:
	return err;
}

void stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}

static int select_gc_type(struct f2fs_gc_kthread *gc_th, int gc_type)
{
	int gc_mode = (gc_type == BG_GC) ? GC_CB : GC_GREEDY;

	if (gc_th && gc_th->gc_idle) {
		if (gc_th->gc_idle == 1)
			gc_mode = GC_CB;
		else if (gc_th->gc_idle == 2)
			gc_mode = GC_GREEDY;
	}
	return gc_mode;
}

static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_segmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(sbi->gc_thread, gc_type);
		p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
		p->max_search = dirty_i->nr_dirty[DIRTY];
		p->ofs_unit = sbi->segs_per_sec;
	}
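
	/*
	 * In LFS mode, victims are selected in section units (ofs_unit =
	 * segs_per_sec) so that a whole section can be freed at once,
	 * whereas SSR reuses invalid blocks one segment at a time.
	 */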

	/* we need to check every dirty segment in the FG_GC case */
	if (gc_type != FG_GC && p->max_search > sbi->max_victim_search)
		p->max_search = sbi->max_victim_search;

	/* let's select the beginning hot/small space first */
	if (type == CURSEG_HOT_DATA || IS_NODESEG(type))
		p->offset = 0;
	else
		p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
}

static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return sbi->blocks_per_seg;
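
	/*
	 * For LFS, the worst-case greedy cost is twice the blocks in a
	 * section, matching get_greedy_cost(), which weights data segments
	 * at valid_blocks * 2; GC_CB costs already top out at UINT_MAX.
	 */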
	if (p->gc_mode == GC_GREEDY)
		return 2 * sbi->blocks_per_seg * p->ofs_unit;
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can reuse the victim segments
	 * selected by background GC earlier.
	 * Those segments are guaranteed to have few valid blocks.
	 */
	for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			continue;

		if (no_fggc_candidate(sbi, secno))
			continue;

		clear_bit(secno, dirty_i->victim_secmap);
		return GET_SEG_FROM_SEC(sbi, secno);
	}
	return NULL_SEGNO;
}

static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;

	for (i = 0; i < sbi->segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, true);

	mtime = div_u64(mtime, sbi->segs_per_sec);
	vblocks = div_u64(vblocks, sbi->segs_per_sec);

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* Handle the case where the system time was changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

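	/*
	 * Classic LFS cost-benefit: the benefit of cleaning this section is
	 * age * (100 - u), the reclaimable space weighted by how long it
	 * has stayed unchanged, while the cost is 100 + u for reading the
	 * valid blocks and writing them back. The ratio is subtracted from
	 * UINT_MAX so that the smallest cost wins the victim search.
	 */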
	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}

static unsigned int get_greedy_cost(struct f2fs_sb_info *sbi,
						unsigned int segno)
{
	unsigned int valid_blocks =
			get_valid_blocks(sbi, segno, true);

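	/*
	 * Data segments are weighted double, presumably because migrating
	 * a data block also dirties its direct node block; for the same
	 * valid-block count, node segments are thus the cheaper victims.
	 */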
	return IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
				valid_blocks * 2 : valid_blocks;
}

static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
			unsigned int segno, struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_greedy_cost(sbi, segno);
	else
		return get_cb_cost(sbi, segno);
}

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len)
{
	unsigned int end = offset + len, sum = 0;

	while (offset < end) {
		if (test_bit(offset++, addr))
			++sum;
	}
	return sum;
}

/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment
 * and does not remove it from the dirty seglist.
 * When it is called from SSR segment selection, it finds a segment
 * with the minimum number of valid blocks and removes it from the
 * dirty seglist.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
		unsigned int *result, int gc_type, int type, char alloc_mode)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct sit_info *sm = SIT_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno, last_victim;
	unsigned int last_segment = MAIN_SEGS(sbi);
	unsigned int nsearched = 0;

	mutex_lock(&dirty_i->seglist_lock);

	p.alloc_mode = alloc_mode;
	select_policy(sbi, gc_type, type, &p);

	p.min_segno = NULL_SEGNO;
	p.min_cost = get_max_cost(sbi, &p);

	if (*result != NULL_SEGNO) {
		if (IS_DATASEG(get_seg_entry(sbi, *result)->type) &&
			get_valid_blocks(sbi, *result, false) &&
			!sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
			p.min_segno = *result;
		goto out;
	}

	if (p.max_search == 0)
		goto out;

	last_victim = sm->last_victim[p.gc_mode];
	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

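	/*
	 * Scan the dirty segmap circularly starting from p.offset: when the
	 * search reaches the end of the main area, it wraps to segment 0
	 * and continues up to the previous last_victim, so every dirty
	 * segment is eventually visited.
	 */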
	while (1) {
		unsigned long cost;
		unsigned int segno;

		segno = find_next_bit(p.dirty_segmap, last_segment, p.offset);
		if (segno >= last_segment) {
			if (sm->last_victim[p.gc_mode]) {
				last_segment =
					sm->last_victim[p.gc_mode];
				sm->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}

		p.offset = segno + p.ofs_unit;
		if (p.ofs_unit > 1) {
			p.offset -= segno % p.ofs_unit;
			nsearched += count_bits(p.dirty_segmap,
						p.offset - p.ofs_unit,
						p.ofs_unit);
		} else {
			nsearched++;
		}

		secno = GET_SEC_FROM_SEG(sbi, segno);

		if (sec_usage_check(sbi, secno))
			goto next;
		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			goto next;
		if (gc_type == FG_GC && p.alloc_mode == LFS &&
					no_fggc_candidate(sbi, secno))
			goto next;

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		}
next:
		if (nsearched >= p.max_search) {
			if (!sm->last_victim[p.gc_mode] && segno <= last_victim)
				sm->last_victim[p.gc_mode] = last_victim + 1;
			else
				sm->last_victim[p.gc_mode] = segno + 1;
			sm->last_victim[p.gc_mode] %= MAIN_SEGS(sbi);
			break;
		}
	}
	if (p.min_segno != NULL_SEGNO) {
got_it:
		if (p.alloc_mode == LFS) {
			secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;

		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	}
out:
	mutex_unlock(&dirty_i->seglist_lock);

	return (p.min_segno == NULL_SEGNO) ? 0 : 1;
}

static const struct victim_selection default_v_ops = {
	.get_victim = get_victim_by_default,
};

static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
	struct inode_entry *ie;

	ie = radix_tree_lookup(&gc_list->iroot, ino);
	if (ie)
		return ie->inode;
	return NULL;
}

static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
	struct inode_entry *new_ie;

	if (inode == find_gc_inode(gc_list, inode->i_ino)) {
		iput(inode);
		return;
	}
	new_ie = f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
	new_ie->inode = inode;

	f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
	list_add_tail(&new_ie->list, &gc_list->ilist);
}

static void put_gc_inode(struct gc_inode_list *gc_list)
{
	struct inode_entry *ie, *next_ie;
	list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
		radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(inode_entry_slab, ie);
	}
}

static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	mutex_lock(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	mutex_unlock(&sit_i->sentry_lock);
	return ret;
}

/*
 * This function compares the node address found in the summary block with
 * the one recorded in the NAT. If they match (the node is still valid),
 * the node is migrated with cold status; otherwise the stale entry is
 * ignored.
 */
static void gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;

	start_addr = START_BLOCK(sbi, segno);

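	/*
	 * Three passes over the segment: phase 0 reads ahead the NAT
	 * blocks, phase 1 reads ahead the node pages themselves, and
	 * phase 2 migrates the still-valid nodes.
	 */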
next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;
		struct node_info ni;

		/* stop BG_GC if there are not enough free sections */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			ra_node_page(sbi, nid);
			continue;
		}

		/* phase == 2 */
		node_page = get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* block may become invalid during get_node_page */
		if (check_valid_map(sbi, segno, off) == 0) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		get_node_info(sbi, nid, &ni);
		if (ni.blk_addr != start_addr + off) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		move_node_page(node_page, gc_type);
		stat_inc_node_blk_count(sbi, 1, gc_type);
	}

	if (++phase < 3)
		goto next_step;
}

/*
 * Calculate the start block index that the given node offset maps to.
 * Be careful: the caller must pass a node offset of a direct node block
 * only. Passing an offset that points to any other type of node block,
 * such as an indirect or double indirect node block, is a caller bug.
 */
block_t start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 5 - dec;
	}
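
	/*
	 * bidx counts the direct node blocks preceding this one in the
	 * inode's node tree; the 'dec' terms skip offsets occupied by
	 * indirect and double indirect index blocks. For instance,
	 * node_ofs 1 and 2 are the first two direct nodes and map to
	 * bidx 0 and 1.
	 */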
	return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(inode);
}

static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return false;

	get_node_info(sbi, nid, dni);

	if (sum->version != dni->version) {
		f2fs_msg(sbi->sb, KERN_WARNING,
				"%s: valid data with mismatched node version.",
				__func__);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = datablock_addr(node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr)
		return false;
	return true;
}

static void move_encrypted_block(struct inode *inode, block_t bidx,
					unsigned int segno, int off)
{
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
	};
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	struct page *page;
	block_t newaddr;
	int err;

	/* do not read out */
	page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
	if (!page)
		return;

	if (!check_valid_map(F2FS_I_SB(inode), segno, off))
		goto out;

	if (f2fs_is_atomic_file(inode))
		goto out;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
	if (err)
		goto out;

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		ClearPageUptodate(page);
		goto put_out;
	}

	/*
	 * don't cache encrypted data into the meta inode until the previous
	 * dirty data has been written back, to avoid racing between GC and
	 * flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true);

	get_node_info(fio.sbi, dn.nid, &ni);
	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
							&sum, CURSEG_COLD_DATA);

	fio.encrypted_page = pagecache_get_page(META_MAPPING(fio.sbi), newaddr,
					FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		goto recover_block;
	}
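
	/*
	 * The raw ciphertext is staged in the meta inode's page cache,
	 * indexed by the new block address, so the block can be copied
	 * verbatim without being decrypted and re-encrypted.
	 */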

	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_page_out;

	/* write page */
	lock_page(fio.encrypted_page);

	if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi))) {
		err = -EIO;
		goto put_page_out;
	}
	if (unlikely(!PageUptodate(fio.encrypted_page))) {
		err = -EIO;
		goto put_page_out;
	}

	set_page_dirty(fio.encrypted_page);
	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true);
	if (clear_page_dirty_for_io(fio.encrypted_page))
		dec_page_count(fio.sbi, F2FS_DIRTY_META);

	set_page_writeback(fio.encrypted_page);

	/* wait for the dnode to be stable before updating its block address */
	f2fs_wait_on_page_writeback(dn.node_page, NODE, true);

	fio.op = REQ_OP_WRITE;
	fio.op_flags = REQ_SYNC;
	fio.new_blkaddr = newaddr;
	f2fs_submit_page_write(&fio);

	f2fs_update_data_blkaddr(&dn, newaddr);
	set_inode_flag(inode, FI_APPEND_WRITE);
	if (page->index == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
put_page_out:
	f2fs_put_page(fio.encrypted_page, 1);
recover_block:
	if (err)
		__f2fs_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
								true, true);
put_out:
	f2fs_put_dnode(&dn);
out:
	f2fs_put_page(page, 1);
}

static void move_data_page(struct inode *inode, block_t bidx, int gc_type,
					unsigned int segno, int off)
{
	struct page *page;

	page = get_lock_data_page(inode, bidx, true);
	if (IS_ERR(page))
		return;

	if (!check_valid_map(F2FS_I_SB(inode), segno, off))
		goto out;

	if (f2fs_is_atomic_file(inode))
		goto out;

	if (gc_type == BG_GC) {
		if (PageWriteback(page))
			goto out;
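
		/*
		 * For background GC it is enough to mark the page cold and
		 * dirty; the regular writeback path will migrate it later.
		 */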
		set_page_dirty(page);
		set_cold_data(page);
	} else {
		struct f2fs_io_info fio = {
			.sbi = F2FS_I_SB(inode),
			.type = DATA,
			.temp = COLD,
			.op = REQ_OP_WRITE,
			.op_flags = REQ_SYNC,
			.old_blkaddr = NULL_ADDR,
			.page = page,
			.encrypted_page = NULL,
			.need_lock = true,
		};
		bool is_dirty = PageDirty(page);
		int err;

retry:
		set_page_dirty(page);
		f2fs_wait_on_page_writeback(page, DATA, true);
		if (clear_page_dirty_for_io(page)) {
			inode_dec_dirty_pages(inode);
			remove_dirty_inode(inode);
		}

		set_cold_data(page);

		err = do_write_data_page(&fio);
		if (err == -ENOMEM && is_dirty) {
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto retry;
		}
	}
out:
	f2fs_put_page(page, 1);
}

/*
 * This function tries to get the parent node of the victim data block and
 * checks the block's validity. If the block is valid, it is copied with
 * cold status and the parent node is updated.
 * If the parent node is not valid, or the data block address differs, the
 * victim data block is ignored.
 */
static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;

	start_addr = START_BLOCK(sbi, segno);

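	/*
	 * Five passes over the segment: phases 0 and 1 read ahead the NAT
	 * and node pages, phase 2 reads ahead the owning inodes' node
	 * pages, phase 3 igets each inode and reads ahead its data page,
	 * and phase 4 actually moves the data blocks.
	 */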
next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;
		nid_t nid = le32_to_cpu(entry->nid);

		/* stop BG_GC if there are not enough free sections */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			ra_node_page(sbi, nid);
			continue;
		}

		/* Get an inode by ino with checking validity */
		if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
			continue;

		if (phase == 2) {
			ra_node_page(sbi, dni.ino);
			continue;
		}

		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 3) {
			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode) || is_bad_inode(inode))
				continue;

			/* if encrypted inode, defer the move to phase 4 */
			if (f2fs_encrypted_inode(inode) &&
					S_ISREG(inode->i_mode)) {
				add_gc_inode(gc_list, inode);
				continue;
			}

			start_bidx = start_bidx_of_node(nofs, inode);
			data_page = get_read_data_page(inode,
					start_bidx + ofs_in_node, REQ_RAHEAD,
					true);
			if (IS_ERR(data_page)) {
				iput(inode);
				continue;
			}

			f2fs_put_page(data_page, 0);
			add_gc_inode(gc_list, inode);
			continue;
		}

		/* phase 4 */
		inode = find_gc_inode(gc_list, dni.ino);
		if (inode) {
			struct f2fs_inode_info *fi = F2FS_I(inode);
			bool locked = false;

			if (S_ISREG(inode->i_mode)) {
				if (!down_write_trylock(&fi->dio_rwsem[READ]))
					continue;
				if (!down_write_trylock(
						&fi->dio_rwsem[WRITE])) {
					up_write(&fi->dio_rwsem[READ]);
					continue;
				}
				locked = true;
			}

			start_bidx = start_bidx_of_node(nofs, inode)
								+ ofs_in_node;
			if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
				move_encrypted_block(inode, start_bidx, segno, off);
			else
				move_data_page(inode, start_bidx, gc_type, segno, off);

			if (locked) {
				up_write(&fi->dio_rwsem[WRITE]);
				up_write(&fi->dio_rwsem[READ]);
			}

			stat_inc_data_blk_count(sbi, 1, gc_type);
		}
	}

	if (++phase < 5)
		goto next_step;
}

static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
					int gc_type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;

	mutex_lock(&sit_i->sentry_lock);
	ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
					      NO_CHECK_TYPE, LFS);
	mutex_unlock(&sit_i->sentry_lock);
	return ret;
}

static int do_garbage_collect(struct f2fs_sb_info *sbi,
				unsigned int start_segno,
				struct gc_inode_list *gc_list, int gc_type)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	struct blk_plug plug;
	unsigned int segno = start_segno;
	unsigned int end_segno = start_segno + sbi->segs_per_sec;
	int sec_freed = 0;
	unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
						SUM_TYPE_DATA : SUM_TYPE_NODE;

	/* read ahead multiple SSA blocks that have contiguous addresses */
	if (sbi->segs_per_sec > 1)
		ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
					sbi->segs_per_sec, META_SSA, true);

	/* reference all summary pages */
	while (segno < end_segno) {
		sum_page = get_sum_page(sbi, segno++);
		unlock_page(sum_page);
	}

	blk_start_plug(&plug);

	for (segno = start_segno; segno < end_segno; segno++) {

		/* find segment summary of victim */
		sum_page = find_get_page(META_MAPPING(sbi),
					GET_SUM_BLOCK(sbi, segno));
		f2fs_put_page(sum_page, 0);

		if (get_valid_blocks(sbi, segno, false) == 0 ||
				!PageUptodate(sum_page) ||
				unlikely(f2fs_cp_error(sbi)))
			goto next;

		sum = page_address(sum_page);
		f2fs_bug_on(sbi, type != GET_SUM_TYPE((&sum->footer)));

		/*
		 * this is to avoid deadlock:
		 * - lock_page(sum_page)         - f2fs_replace_block
		 *  - check_valid_map()            - mutex_lock(sentry_lock)
		 *   - mutex_lock(sentry_lock)     - change_curseg()
		 *                                  - lock_page(sum_page)
		 */
		if (type == SUM_TYPE_NODE)
			gc_node_segment(sbi, sum->entries, segno, gc_type);
		else
			gc_data_segment(sbi, sum->entries, gc_list, segno,
								gc_type);

		stat_inc_seg_count(sbi, type, gc_type);
next:
		f2fs_put_page(sum_page, 0);
	}

	if (gc_type == FG_GC)
		f2fs_submit_merged_write(sbi,
				(type == SUM_TYPE_NODE) ? NODE : DATA);

	blk_finish_plug(&plug);

	if (gc_type == FG_GC &&
		get_valid_blocks(sbi, start_segno, true) == 0)
		sec_freed = 1;

	stat_inc_call_count(sbi->stat_info);

	return sec_freed;
}

int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
			bool background, unsigned int segno)
{
	int gc_type = sync ? FG_GC : BG_GC;
	int sec_freed = 0;
	int ret;
	struct cp_control cpc;
	unsigned int init_segno = segno;
	struct gc_inode_list gc_list = {
		.ilist = LIST_HEAD_INIT(gc_list.ilist),
		.iroot = RADIX_TREE_INIT(GFP_NOFS),
	};

	cpc.reason = __get_cp_reason(sbi);
gc_more:
	if (unlikely(!(sbi->sb->s_flags & MS_ACTIVE))) {
		ret = -EINVAL;
		goto stop;
	}
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto stop;
	}

	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) {
		/*
		 * For example, if there are many prefree_segments below the
		 * given threshold, we can make them free by checkpoint. Then,
		 * we secure free segments which don't need fggc any more.
		 */
		if (prefree_segments(sbi)) {
			ret = write_checkpoint(sbi, &cpc);
			if (ret)
				goto stop;
		}
		if (has_not_enough_free_secs(sbi, 0, 0))
			gc_type = FG_GC;
	}

	ret = -EINVAL;
	/* f2fs_balance_fs doesn't need to do BG_GC in the critical path */
	if (gc_type == BG_GC && !background)
		goto stop;
	if (!__get_victim(sbi, &segno, gc_type))
		goto stop;
	ret = 0;

	if (do_garbage_collect(sbi, segno, &gc_list, gc_type) &&
			gc_type == FG_GC)
		sec_freed++;

	if (gc_type == FG_GC)
		sbi->cur_victim_sec = NULL_SEGNO;

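	/*
	 * In non-sync mode, keep collecting until enough free sections have
	 * been secured; the final checkpoint after FG_GC then turns prefree
	 * segments into truly free ones.
	 */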
	if (!sync) {
		if (has_not_enough_free_secs(sbi, sec_freed, 0)) {
			segno = NULL_SEGNO;
			goto gc_more;
		}

		if (gc_type == FG_GC)
			ret = write_checkpoint(sbi, &cpc);
	}
stop:
	SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
	SIT_I(sbi)->last_victim[FLUSH_DEVICE] = init_segno;
	mutex_unlock(&sbi->gc_mutex);

	put_gc_inode(&gc_list);

	if (sync)
		ret = sec_freed ? 0 : -EAGAIN;
	return ret;
}

void build_gc_manager(struct f2fs_sb_info *sbi)
{
	u64 main_count, resv_count, ovp_count;

	DIRTY_I(sbi)->v_ops = &default_v_ops;

	/* threshold of # of valid blocks in a section for victims of FG_GC */
	main_count = SM_I(sbi)->main_segments << sbi->log_blocks_per_seg;
	resv_count = SM_I(sbi)->reserved_segments << sbi->log_blocks_per_seg;
	ovp_count = SM_I(sbi)->ovp_segments << sbi->log_blocks_per_seg;

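	/*
	 * (main - ovp) / (main - resv) roughly approximates the average
	 * utilization of a section once the user-visible space is full;
	 * sections with more valid blocks than this threshold are skipped
	 * by FG_GC as not worth the migration cost.
	 */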
	sbi->fggc_threshold = div64_u64((main_count - ovp_count) *
				BLKS_PER_SEC(sbi), (main_count - resv_count));

	/* give the warm/cold data area to the slower device */
	if (sbi->s_ndevs && sbi->segs_per_sec == 1)
		SIT_I(sbi)->last_victim[ALLOC_NEXT] =
				GET_SEGNO(sbi, FDEV(0).end_blk) + 1;
}