/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include <trace/events/f2fs.h>

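/*
 * Background GC kernel thread: it sleeps for an adaptive interval and
 * then attempts one round of garbage collection when the filesystem
 * looks idle, or immediately when gc_wake/GC_URGENT asks for it.
 */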
static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	unsigned int wait_ms;

	wait_ms = gc_th->min_sleep_time;

	set_freezable();
	do {
		wait_event_interruptible_timeout(*wq,
				kthread_should_stop() || freezing(current) ||
				gc_th->gc_wake,
				msecs_to_jiffies(wait_ms));

		/* give GC one try when explicitly woken up */
		if (gc_th->gc_wake)
			gc_th->gc_wake = 0;

		if (try_to_freeze())
			continue;
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			increase_sleep_time(gc_th, &wait_ms);
			continue;
		}

#ifdef CONFIG_F2FS_FAULT_INJECTION
		if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
			f2fs_show_injection_info(FAULT_CHECKPOINT);
			f2fs_stop_checkpoint(sbi, false);
		}
#endif

		if (!sb_start_write_trylock(sbi->sb))
			continue;

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. IO subsystem is idle by checking the # of writeback pages.
		 * 3. IO subsystem is idle by checking the # of requests in
		 *    bdev's request list.
		 *
		 * Note: we have to avoid triggering GCs too frequently,
		 * because some segments may be invalidated soon afterwards
		 * by user updates or deletions, so wait some time to let
		 * dirty segments accumulate.
		 */
		if (sbi->gc_mode == GC_URGENT) {
			wait_ms = gc_th->urgent_sleep_time;
			mutex_lock(&sbi->gc_mutex);
			goto do_gc;
		}

		if (!mutex_trylock(&sbi->gc_mutex))
			goto next;

		if (!is_idle(sbi)) {
			increase_sleep_time(gc_th, &wait_ms);
			mutex_unlock(&sbi->gc_mutex);
			goto next;
		}

		if (has_enough_invalid_blocks(sbi))
			decrease_sleep_time(gc_th, &wait_ms);
		else
			increase_sleep_time(gc_th, &wait_ms);
do_gc:
		stat_inc_bggc_count(sbi);

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi, test_opt(sbi, FORCE_FG_GC), true, NULL_SEGNO))
			wait_ms = gc_th->no_gc_sleep_time;

		trace_f2fs_background_gc(sbi->sb, wait_ms,
				prefree_segments(sbi), free_segments(sbi));

		/* balancing f2fs's metadata periodically */
		f2fs_balance_fs_bg(sbi);
next:
		sb_end_write(sbi->sb);

	} while (!kthread_should_stop());
	return 0;
}

int start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	int err = 0;

	gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th) {
		err = -ENOMEM;
		goto out;
	}

	gc_th->urgent_sleep_time = DEF_GC_THREAD_URGENT_SLEEP_TIME;
	gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

	gc_th->gc_wake = 0;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		err = PTR_ERR(gc_th->f2fs_gc_task);
		kfree(gc_th);
		sbi->gc_thread = NULL;
	}
out:
	return err;
}

void stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}

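/*
 * Map the user-selected sbi->gc_mode onto a victim selection policy:
 * cost-benefit (GC_CB) by default for background GC, greedy (GC_GREEDY)
 * by default for foreground GC, overridden by the idle/urgent modes.
 */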
static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type)
{
	int gc_mode = (gc_type == BG_GC) ? GC_CB : GC_GREEDY;

	switch (sbi->gc_mode) {
	case GC_IDLE_CB:
		gc_mode = GC_CB;
		break;
	case GC_IDLE_GREEDY:
	case GC_URGENT:
		gc_mode = GC_GREEDY;
		break;
	}
	return gc_mode;
}

static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_segmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(sbi, gc_type);
		p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
		p->max_search = dirty_i->nr_dirty[DIRTY];
		p->ofs_unit = sbi->segs_per_sec;
	}

	/* we need to check every dirty segment in the FG_GC case */
	if (gc_type != FG_GC &&
			(sbi->gc_mode != GC_URGENT) &&
			p->max_search > sbi->max_victim_search)
		p->max_search = sbi->max_victim_search;

	/* let's select the beginning hot/small space first in no_heap mode */
	if (test_opt(sbi, NOHEAP) &&
		(type == CURSEG_HOT_DATA || IS_NODESEG(type)))
		p->offset = 0;
	else
		p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
}

static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return sbi->blocks_per_seg;
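	/*
	 * LFS greedy worst case: presumably every valid block in the
	 * section costs one data-block move plus one node-block update,
	 * hence 2 * blocks_per_seg * ofs_unit as a rough upper bound.
	 */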
	if (p->gc_mode == GC_GREEDY)
		return 2 * sbi->blocks_per_seg * p->ofs_unit;
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can select the victim segments
	 * that were selected by background GC before.
	 * Those segments are guaranteed to have few valid blocks.
	 */
	for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			continue;
		clear_bit(secno, dirty_i->victim_secmap);
		return GET_SEG_FROM_SEC(sbi, secno);
	}
	return NULL_SEGNO;
}

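/*
 * Cost-benefit victim cost (a sketch of the formula below): with
 * utilization u = valid_blocks * 100 / blocks_per_seg, and age scaled
 * to 0..100 from the section's mtime relative to [min_mtime, max_mtime],
 * the returned value is UINT_MAX - (100 * (100 - u) * age) / (100 + u),
 * so older and emptier sections get a lower cost and win selection.
 */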
static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;

	for (i = 0; i < sbi->segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, true);

	mtime = div_u64(mtime, sbi->segs_per_sec);
	vblocks = div_u64(vblocks, sbi->segs_per_sec);

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* Handle the case where the system time was changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}

static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
			unsigned int segno, struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, true);
	else
		return get_cb_cost(sbi, segno);
}

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len)
{
	unsigned int end = offset + len, sum = 0;

	while (offset < end) {
		if (test_bit(offset++, addr))
			++sum;
	}
	return sum;
}

/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment
 * and does not remove it from the dirty seglist.
 * When it is called from SSR segment selection, it finds a segment
 * which has the minimum number of valid blocks and removes it from
 * the dirty seglist.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
		unsigned int *result, int gc_type, int type, char alloc_mode)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct sit_info *sm = SIT_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno, last_victim;
	unsigned int last_segment = MAIN_SEGS(sbi);
	unsigned int nsearched = 0;

	mutex_lock(&dirty_i->seglist_lock);

	p.alloc_mode = alloc_mode;
	select_policy(sbi, gc_type, type, &p);

	p.min_segno = NULL_SEGNO;
	p.min_cost = get_max_cost(sbi, &p);

	if (*result != NULL_SEGNO) {
		if (IS_DATASEG(get_seg_entry(sbi, *result)->type) &&
			get_valid_blocks(sbi, *result, false) &&
			!sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
			p.min_segno = *result;
		goto out;
	}

	if (p.max_search == 0)
		goto out;

	last_victim = sm->last_victim[p.gc_mode];
	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

	while (1) {
		unsigned long cost;
		unsigned int segno;

		segno = find_next_bit(p.dirty_segmap, last_segment, p.offset);
		if (segno >= last_segment) {
			if (sm->last_victim[p.gc_mode]) {
				last_segment =
					sm->last_victim[p.gc_mode];
				sm->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}

		p.offset = segno + p.ofs_unit;
		if (p.ofs_unit > 1) {
			p.offset -= segno % p.ofs_unit;
			nsearched += count_bits(p.dirty_segmap,
						p.offset - p.ofs_unit,
						p.ofs_unit);
		} else {
			nsearched++;
		}

		secno = GET_SEC_FROM_SEG(sbi, segno);

		if (sec_usage_check(sbi, secno))
			goto next;
		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			goto next;

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		}
next:
		if (nsearched >= p.max_search) {
			if (!sm->last_victim[p.gc_mode] && segno <= last_victim)
				sm->last_victim[p.gc_mode] = last_victim + 1;
			else
				sm->last_victim[p.gc_mode] = segno + 1;
			sm->last_victim[p.gc_mode] %= MAIN_SEGS(sbi);
			break;
		}
	}
	if (p.min_segno != NULL_SEGNO) {
got_it:
		if (p.alloc_mode == LFS) {
			secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;

		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	}
out:
	mutex_unlock(&dirty_i->seglist_lock);

	return (p.min_segno == NULL_SEGNO) ? 0 : 1;
}

static const struct victim_selection default_v_ops = {
	.get_victim = get_victim_by_default,
};

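/*
 * gc_list keeps every inode referenced while a section is being GCed:
 * the radix tree gives fast lookup by inode number, and the list is
 * walked by put_gc_inode() to drop all the references afterwards.
 */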
static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
	struct inode_entry *ie;

	ie = radix_tree_lookup(&gc_list->iroot, ino);
	if (ie)
		return ie->inode;
	return NULL;
}

static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
	struct inode_entry *new_ie;

	if (inode == find_gc_inode(gc_list, inode->i_ino)) {
		iput(inode);
		return;
	}
	new_ie = f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
	new_ie->inode = inode;

	f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
	list_add_tail(&new_ie->list, &gc_list->ilist);
}

static void put_gc_inode(struct gc_inode_list *gc_list)
{
	struct inode_entry *ie, *next_ie;
	list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
		radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(inode_entry_slab, ie);
	}
}

static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	down_read(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	up_read(&sit_i->sentry_lock);
	return ret;
}

/*
 * This function compares the node address stored in the summary with the
 * one in the NAT. If the node is still valid, it is migrated with cold
 * status; otherwise (an invalid node) it is ignored.
 */
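/*
 * Three phases per pass (see the phase counter below): phase 0 reads
 * ahead the NAT blocks, phase 1 reads ahead the node pages themselves,
 * and phase 2 actually moves the still-valid node pages.
 */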
static void gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;
		struct node_info ni;

		/* stop BG_GC if there are not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			ra_node_page(sbi, nid);
			continue;
		}

		/* phase == 2 */
		node_page = get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* block may become invalid during get_node_page */
		if (check_valid_map(sbi, segno, off) == 0) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		get_node_info(sbi, nid, &ni);
		if (ni.blk_addr != start_addr + off) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		move_node_page(node_page, gc_type);
		stat_inc_node_blk_count(sbi, 1, gc_type);
	}

	if (++phase < 3)
		goto next_step;
}

/*
 * Calculate the start block index that the given node offset maps to.
 * Be careful: callers must pass only node offsets of direct node blocks.
 * Passing an offset that points to any other type of node block, such as
 * an indirect or double indirect node block, is a caller's bug.
 */
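/*
 * A worked example (a sketch, assuming the common 4KB-block layout where
 * NIDS_PER_BLOCK == ADDRS_PER_BLOCK == 1018): node_ofs 1 and 2 are the
 * two direct node blocks, giving bidx 0 and 1; node_ofs 3 is the first
 * indirect node, which holds no data addresses itself, so for node_ofs 4
 * (the first direct node beneath it) dec == 0 and bidx == 4 - 2 - 0 == 2.
 * The result is then scaled by ADDRS_PER_BLOCK and shifted past the
 * addresses stored in the inode itself, ADDRS_PER_INODE(inode).
 */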
block_t start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(inode);
}

static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return false;

	get_node_info(sbi, nid, dni);

	if (sum->version != dni->version) {
		f2fs_msg(sbi->sb, KERN_WARNING,
				"%s: valid data with mismatched node version.",
				__func__);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = datablock_addr(NULL, node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr)
		return false;
	return true;
}

/*
 * Move data block via META_MAPPING while keeping locked data page.
 * This can be used to move blocks, aka LBAs, directly on disk.
 */
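/*
 * Rough sequence (as implemented below): allocate a new block address,
 * read the on-disk block into a META_MAPPING page keyed by that new
 * address, write it back there, then update the dnode entry; on any
 * failure the newly allocated block is rolled back via
 * __f2fs_replace_block().
 */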
static void move_data_block(struct inode *inode, block_t bidx,
					unsigned int segno, int off)
{
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.ino = inode->i_ino,
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
		.in_list = false,
	};
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	struct page *page;
	block_t newaddr;
	int err;
	bool lfs_mode = test_opt(fio.sbi, LFS);

	/* do not read out */
	page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
	if (!page)
		return;

	if (!check_valid_map(F2FS_I_SB(inode), segno, off))
		goto out;

	if (f2fs_is_atomic_file(inode))
		goto out;

	if (f2fs_is_pinned_file(inode)) {
		f2fs_pin_file_control(inode, true);
		goto out;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
	if (err)
		goto out;

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		ClearPageUptodate(page);
		goto put_out;
	}

	/*
	 * don't cache encrypted data into the meta inode until the previous
	 * dirty data has been written back, to avoid racing between GC and
	 * flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true);

	get_node_info(fio.sbi, dn.nid, &ni);
	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	if (lfs_mode)
		down_write(&fio.sbi->io_order_lock);

	allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
					&sum, CURSEG_COLD_DATA, NULL, false);

	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
				newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		goto recover_block;
	}

	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_page_out;

	/* write page */
	lock_page(fio.encrypted_page);

	if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi))) {
		err = -EIO;
		goto put_page_out;
	}
	if (unlikely(!PageUptodate(fio.encrypted_page))) {
		err = -EIO;
		goto put_page_out;
	}

	set_page_dirty(fio.encrypted_page);
	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true);
	if (clear_page_dirty_for_io(fio.encrypted_page))
		dec_page_count(fio.sbi, F2FS_DIRTY_META);

	set_page_writeback(fio.encrypted_page);
	ClearPageError(page);

	/* allocate block address */
	f2fs_wait_on_page_writeback(dn.node_page, NODE, true);

	fio.op = REQ_OP_WRITE;
	fio.op_flags = REQ_SYNC;
	fio.new_blkaddr = newaddr;
	err = f2fs_submit_page_write(&fio);
	if (err) {
		if (PageWriteback(fio.encrypted_page))
			end_page_writeback(fio.encrypted_page);
		goto put_page_out;
	}

	f2fs_update_iostat(fio.sbi, FS_GC_DATA_IO, F2FS_BLKSIZE);

	f2fs_update_data_blkaddr(&dn, newaddr);
	set_inode_flag(inode, FI_APPEND_WRITE);
	if (page->index == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
put_page_out:
	f2fs_put_page(fio.encrypted_page, 1);
recover_block:
	if (lfs_mode)
		up_write(&fio.sbi->io_order_lock);
	if (err)
		__f2fs_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
								true, true);
put_out:
	f2fs_put_dnode(&dn);
out:
	f2fs_put_page(page, 1);
}

static void move_data_page(struct inode *inode, block_t bidx, int gc_type,
						unsigned int segno, int off)
{
	struct page *page;

	page = get_lock_data_page(inode, bidx, true);
	if (IS_ERR(page))
		return;

	if (!check_valid_map(F2FS_I_SB(inode), segno, off))
		goto out;

	if (f2fs_is_atomic_file(inode))
		goto out;
	if (f2fs_is_pinned_file(inode)) {
		if (gc_type == FG_GC)
			f2fs_pin_file_control(inode, true);
		goto out;
	}

	if (gc_type == BG_GC) {
		if (PageWriteback(page))
			goto out;
		set_page_dirty(page);
		set_cold_data(page);
	} else {
		struct f2fs_io_info fio = {
			.sbi = F2FS_I_SB(inode),
			.ino = inode->i_ino,
			.type = DATA,
			.temp = COLD,
			.op = REQ_OP_WRITE,
			.op_flags = REQ_SYNC,
			.old_blkaddr = NULL_ADDR,
			.page = page,
			.encrypted_page = NULL,
			.need_lock = LOCK_REQ,
			.io_type = FS_GC_DATA_IO,
		};
		bool is_dirty = PageDirty(page);
		int err;

retry:
		set_page_dirty(page);
		f2fs_wait_on_page_writeback(page, DATA, true);
		if (clear_page_dirty_for_io(page)) {
			inode_dec_dirty_pages(inode);
			remove_dirty_inode(inode);
		}

		set_cold_data(page);

		err = do_write_data_page(&fio);
		if (err == -ENOMEM && is_dirty) {
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto retry;
		}
	}
out:
	f2fs_put_page(page, 1);
}

/*
 * This function tries to get the parent node of a victim data block and
 * checks the block's validity. If the block is valid, it is copied with
 * cold status and the parent node is updated.
 * If the parent node is not valid, or the data block address differs,
 * the victim data block is ignored.
 */
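/*
 * Five phases per pass (see the phase counter below): phase 0 reads
 * ahead the NAT blocks, phase 1 reads ahead the dnode pages, phase 2
 * reads ahead the owning inodes' node pages, phase 3 igets the inodes
 * and reads ahead the data pages, and phase 4 actually moves the data.
 */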
static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;
		nid_t nid = le32_to_cpu(entry->nid);

		/* stop BG_GC if there are not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			ra_node_page(sbi, nid);
			continue;
		}

		/* Get an inode by ino while checking validity */
		if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
			continue;

		if (phase == 2) {
			ra_node_page(sbi, dni.ino);
			continue;
		}

		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 3) {
			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode) || is_bad_inode(inode))
				continue;

			/* if the inode uses a special I/O path, defer it to phase 4 */
			if (f2fs_post_read_required(inode)) {
				add_gc_inode(gc_list, inode);
				continue;
			}

			if (!down_write_trylock(
				&F2FS_I(inode)->i_gc_rwsem[WRITE])) {
				iput(inode);
				continue;
			}

			start_bidx = start_bidx_of_node(nofs, inode);
			data_page = get_read_data_page(inode,
					start_bidx + ofs_in_node, REQ_RAHEAD,
					true);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			if (IS_ERR(data_page)) {
				iput(inode);
				continue;
			}

			f2fs_put_page(data_page, 0);
			add_gc_inode(gc_list, inode);
			continue;
		}

		/* phase 4 */
		inode = find_gc_inode(gc_list, dni.ino);
		if (inode) {
			struct f2fs_inode_info *fi = F2FS_I(inode);
			bool locked = false;

			if (S_ISREG(inode->i_mode)) {
				if (!down_write_trylock(&fi->i_gc_rwsem[READ]))
					continue;
				if (!down_write_trylock(
						&fi->i_gc_rwsem[WRITE])) {
					up_write(&fi->i_gc_rwsem[READ]);
					continue;
				}
				locked = true;

				/* wait for all inflight aio data */
				inode_dio_wait(inode);
			}

			start_bidx = start_bidx_of_node(nofs, inode)
								+ ofs_in_node;
			if (f2fs_post_read_required(inode))
				move_data_block(inode, start_bidx, segno, off);
			else
				move_data_page(inode, start_bidx, gc_type,
								segno, off);

			if (locked) {
				up_write(&fi->i_gc_rwsem[WRITE]);
				up_write(&fi->i_gc_rwsem[READ]);
			}

			stat_inc_data_blk_count(sbi, 1, gc_type);
		}
	}

	if (++phase < 5)
		goto next_step;
}

static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
			int gc_type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;

	down_write(&sit_i->sentry_lock);
	ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
					      NO_CHECK_TYPE, LFS);
	up_write(&sit_i->sentry_lock);
	return ret;
}

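/*
 * GC one section: walk every segment from start_segno through the whole
 * section, migrating valid node or data blocks via the segment summary
 * blocks. Returns the number of segments that ended up fully freed
 * (only counted for FG_GC).
 */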
static int do_garbage_collect(struct f2fs_sb_info *sbi,
				unsigned int start_segno,
				struct gc_inode_list *gc_list, int gc_type)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	struct blk_plug plug;
	unsigned int segno = start_segno;
	unsigned int end_segno = start_segno + sbi->segs_per_sec;
	int seg_freed = 0;
	unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
						SUM_TYPE_DATA : SUM_TYPE_NODE;

	/* readahead multiple SSA blocks that have contiguous addresses */
	if (sbi->segs_per_sec > 1)
		ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
					sbi->segs_per_sec, META_SSA, true);

	/* reference all summary pages */
	while (segno < end_segno) {
		sum_page = get_sum_page(sbi, segno++);
		unlock_page(sum_page);
	}

	blk_start_plug(&plug);

	for (segno = start_segno; segno < end_segno; segno++) {

		/* find segment summary of victim */
		sum_page = find_get_page(META_MAPPING(sbi),
					GET_SUM_BLOCK(sbi, segno));
		f2fs_put_page(sum_page, 0);

		if (get_valid_blocks(sbi, segno, false) == 0 ||
				!PageUptodate(sum_page) ||
				unlikely(f2fs_cp_error(sbi)))
			goto next;

		sum = page_address(sum_page);
		f2fs_bug_on(sbi, type != GET_SUM_TYPE((&sum->footer)));

		/*
		 * this is to avoid deadlock:
		 * - lock_page(sum_page)         - f2fs_replace_block
		 *  - check_valid_map()            - down_write(sentry_lock)
		 *   - down_read(sentry_lock)     - change_curseg()
		 *                                  - lock_page(sum_page)
		 */
		if (type == SUM_TYPE_NODE)
			gc_node_segment(sbi, sum->entries, segno, gc_type);
		else
			gc_data_segment(sbi, sum->entries, gc_list, segno,
								gc_type);

		stat_inc_seg_count(sbi, type, gc_type);

		if (gc_type == FG_GC &&
				get_valid_blocks(sbi, segno, false) == 0)
			seg_freed++;
next:
		f2fs_put_page(sum_page, 0);
	}

	if (gc_type == FG_GC)
		f2fs_submit_merged_write(sbi,
				(type == SUM_TYPE_NODE) ? NODE : DATA);

	blk_finish_plug(&plug);

	stat_inc_call_count(sbi->stat_info);

	return seg_freed;
}

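/*
 * Entry point for both background and foreground GC. @sync selects
 * foreground (FG_GC) and makes the call return -EAGAIN unless a section
 * was actually freed; @background permits a BG_GC pass; @segno can name
 * a victim explicitly or be NULL_SEGNO to let the policy pick one.
 */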
int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
			bool background, unsigned int segno)
{
	int gc_type = sync ? FG_GC : BG_GC;
	int sec_freed = 0, seg_freed = 0, total_freed = 0;
	int ret = 0;
	struct cp_control cpc;
	unsigned int init_segno = segno;
	struct gc_inode_list gc_list = {
		.ilist = LIST_HEAD_INIT(gc_list.ilist),
		.iroot = RADIX_TREE_INIT(GFP_NOFS),
	};

	trace_f2fs_gc_begin(sbi->sb, sync, background,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	cpc.reason = __get_cp_reason(sbi);
gc_more:
	if (unlikely(!(sbi->sb->s_flags & MS_ACTIVE))) {
		ret = -EINVAL;
		goto stop;
	}
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto stop;
	}

	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) {
		/*
		 * For example, if there are many prefree segments below the
		 * given threshold, we can make them free by checkpoint;
		 * then we secure free segments that no longer need FG_GC.
		 */
		if (prefree_segments(sbi)) {
			ret = write_checkpoint(sbi, &cpc);
			if (ret)
				goto stop;
		}
		if (has_not_enough_free_secs(sbi, 0, 0))
			gc_type = FG_GC;
	}

	/* f2fs_balance_fs doesn't need to do BG_GC in critical path. */
	if (gc_type == BG_GC && !background) {
		ret = -EINVAL;
		goto stop;
	}
	if (!__get_victim(sbi, &segno, gc_type)) {
		ret = -ENODATA;
		goto stop;
	}

	seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type);
	if (gc_type == FG_GC && seg_freed == sbi->segs_per_sec)
		sec_freed++;
	total_freed += seg_freed;

	if (gc_type == FG_GC)
		sbi->cur_victim_sec = NULL_SEGNO;

	if (!sync) {
		if (has_not_enough_free_secs(sbi, sec_freed, 0)) {
			segno = NULL_SEGNO;
			goto gc_more;
		}

		if (gc_type == FG_GC)
			ret = write_checkpoint(sbi, &cpc);
	}
stop:
	SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
	SIT_I(sbi)->last_victim[FLUSH_DEVICE] = init_segno;

	trace_f2fs_gc_end(sbi->sb, ret, total_freed, sec_freed,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	mutex_unlock(&sbi->gc_mutex);

	put_gc_inode(&gc_list);

	if (sync)
		ret = sec_freed ? 0 : -EAGAIN;
	return ret;
}

void build_gc_manager(struct f2fs_sb_info *sbi)
{
	DIRTY_I(sbi)->v_ops = &default_v_ops;

	sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;

	/* give the warm/cold data area to the slower device */
	if (sbi->s_ndevs && sbi->segs_per_sec == 1)
		SIT_I(sbi)->last_victim[ALLOC_NEXT] =
				GET_SEGNO(sbi, FDEV(0).end_blk) + 1;
}