/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include <trace/events/f2fs.h>

static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	long wait_ms;

	wait_ms = gc_th->min_sleep_time;

	do {
		if (try_to_freeze())
			continue;
		else
			wait_event_interruptible_timeout(*wq,
					kthread_should_stop(),
					msecs_to_jiffies(wait_ms));
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			increase_sleep_time(gc_th, &wait_ms);
			continue;
		}

#ifdef CONFIG_F2FS_FAULT_INJECTION
		if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
			f2fs_show_injection_info(FAULT_CHECKPOINT);
			f2fs_stop_checkpoint(sbi, false);
		}
#endif

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. IO subsystem is idle by checking the # of writeback pages.
		 * 3. IO subsystem is idle by checking the # of requests in
		 *    bdev's request list.
		 *
		 * Note) We have to avoid triggering GCs too frequently,
		 * because some segments may be invalidated soon afterwards
		 * by user updates or deletions. So we wait a while to
		 * collect more dirty segments.
		 */
		if (!mutex_trylock(&sbi->gc_mutex))
			continue;

		if (!is_idle(sbi)) {
			increase_sleep_time(gc_th, &wait_ms);
			mutex_unlock(&sbi->gc_mutex);
			continue;
		}

		if (has_enough_invalid_blocks(sbi))
			decrease_sleep_time(gc_th, &wait_ms);
		else
			increase_sleep_time(gc_th, &wait_ms);

		stat_inc_bggc_count(sbi);

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi, test_opt(sbi, FORCE_FG_GC), true))
			wait_ms = gc_th->no_gc_sleep_time;

		trace_f2fs_background_gc(sbi->sb, wait_ms,
				prefree_segments(sbi), free_segments(sbi));

		/* balancing f2fs's metadata periodically */
		f2fs_balance_fs_bg(sbi);

	} while (!kthread_should_stop());
	return 0;
}

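/*
 * Sketch of the adaptive back-off driven above: wait_ms starts at
 * min_sleep_time, is pushed toward max_sleep_time while the system is
 * busy or invalid blocks are scarce, and falls back toward
 * min_sleep_time once enough invalid blocks accumulate. In this tree
 * the DEF_GC_THREAD_*_SLEEP_TIME defaults seeded below come from gc.h
 * (30s/60s/300s for min/max/no-GC sleep, if memory serves) and are
 * runtime tunables via sysfs, not part of any interface contract.
 */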
int start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	int err = 0;

	gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th) {
		err = -ENOMEM;
		goto out;
	}

	gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

	gc_th->gc_idle = 0;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		err = PTR_ERR(gc_th->f2fs_gc_task);
		kfree(gc_th);
		sbi->gc_thread = NULL;
	}
out:
	return err;
}

void stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}

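/*
 * gc_idle is a runtime knob (exposed through f2fs's sysfs entries, as I
 * read the tree): 0 keeps the per-gc_type default below, 1 forces
 * cost-benefit selection, 2 forces greedy selection.
 */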
static int select_gc_type(struct f2fs_gc_kthread *gc_th, int gc_type)
{
	int gc_mode = (gc_type == BG_GC) ? GC_CB : GC_GREEDY;

	if (gc_th && gc_th->gc_idle) {
		if (gc_th->gc_idle == 1)
			gc_mode = GC_CB;
		else if (gc_th->gc_idle == 2)
			gc_mode = GC_GREEDY;
	}
	return gc_mode;
}

static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_segmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(sbi->gc_thread, gc_type);
		p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
		p->max_search = dirty_i->nr_dirty[DIRTY];
		p->ofs_unit = sbi->segs_per_sec;
	}

	/* we need to check every dirty segment in the FG_GC case */
	if (gc_type != FG_GC && p->max_search > sbi->max_victim_search)
		p->max_search = sbi->max_victim_search;

	p->offset = sbi->last_victim[p->gc_mode];
}

static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return sbi->blocks_per_seg;
	if (p->gc_mode == GC_GREEDY)
		return sbi->blocks_per_seg * p->ofs_unit;
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can reuse victim sections that were
	 * already selected by background GC, since those are guaranteed
	 * to contain few valid blocks.
	 */
	for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			continue;

		if (no_fggc_candidate(sbi, secno))
			continue;

		clear_bit(secno, dirty_i->victim_secmap);
		return secno * sbi->segs_per_sec;
	}
	return NULL_SEGNO;
}

static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SECNO(sbi, segno);
	unsigned int start = secno * sbi->segs_per_sec;
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;

	for (i = 0; i < sbi->segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, sbi->segs_per_sec);

	mtime = div_u64(mtime, sbi->segs_per_sec);
	vblocks = div_u64(vblocks, sbi->segs_per_sec);

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* Handle the case where the user has changed the system time */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}

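/*
 * Worked example for the cost-benefit score above (a scaled form of the
 * classic LFS benefit = (1 - u) * age / (1 + u), inverted so the victim
 * search can minimize): with u = 20 (20% of the section still valid)
 * and age = 50, the benefit term is 100 * (100 - 20) * 50 / (100 + 20)
 * = 3333, i.e. a cost of UINT_MAX - 3333; with u = 80 it drops to
 * 100 * 20 * 50 / 180 = 555, a higher cost, so fuller sections lose.
 * The numbers are illustrative only.
 */
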
static unsigned int get_greedy_cost(struct f2fs_sb_info *sbi,
						unsigned int segno)
{
	unsigned int valid_blocks =
			get_valid_blocks(sbi, segno, sbi->segs_per_sec);

	return IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
				valid_blocks * 2 : valid_blocks;
}

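/*
 * The 2x weighting above biases greedy selection toward node segments.
 * One reading of the intent: migrating a data block also dirties its
 * parent node block, so a data section costs roughly twice the writes
 * of a node section with the same valid-block count. The constant is a
 * heuristic, not something derived here.
 */
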
static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
			unsigned int segno, struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_greedy_cost(sbi, segno);
	else
		return get_cb_cost(sbi, segno);
}

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len)
{
	unsigned int end = offset + len, sum = 0;

	while (offset < end) {
		if (test_bit(offset++, addr))
			++sum;
	}
	return sum;
}

/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When called during GC, it just picks a victim segment and does not
 * remove it from the dirty seglist.
 * When called from SSR segment selection, it finds the segment with the
 * minimum number of valid blocks and removes it from the dirty seglist.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
		unsigned int *result, int gc_type, int type, char alloc_mode)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno, last_victim;
	unsigned int last_segment = MAIN_SEGS(sbi);
	unsigned int nsearched = 0;

	mutex_lock(&dirty_i->seglist_lock);

	p.alloc_mode = alloc_mode;
	select_policy(sbi, gc_type, type, &p);

	p.min_segno = NULL_SEGNO;
	p.min_cost = get_max_cost(sbi, &p);

	if (p.max_search == 0)
		goto out;

	last_victim = sbi->last_victim[p.gc_mode];
	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

	while (1) {
		unsigned long cost;
		unsigned int segno;

		segno = find_next_bit(p.dirty_segmap, last_segment, p.offset);
		if (segno >= last_segment) {
			if (sbi->last_victim[p.gc_mode]) {
				last_segment = sbi->last_victim[p.gc_mode];
				sbi->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}

		p.offset = segno + p.ofs_unit;
		if (p.ofs_unit > 1) {
			p.offset -= segno % p.ofs_unit;
			nsearched += count_bits(p.dirty_segmap,
						p.offset - p.ofs_unit,
						p.ofs_unit);
		} else {
			nsearched++;
		}

		secno = GET_SECNO(sbi, segno);

		if (sec_usage_check(sbi, secno))
			goto next;
		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			goto next;
		if (gc_type == FG_GC && p.alloc_mode == LFS &&
					no_fggc_candidate(sbi, secno))
			goto next;

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		}
next:
		if (nsearched >= p.max_search) {
			if (!sbi->last_victim[p.gc_mode] && segno <= last_victim)
				sbi->last_victim[p.gc_mode] = last_victim + 1;
			else
				sbi->last_victim[p.gc_mode] = segno + 1;
			sbi->last_victim[p.gc_mode] %= MAIN_SEGS(sbi);
			break;
		}
	}
	if (p.min_segno != NULL_SEGNO) {
got_it:
		if (p.alloc_mode == LFS) {
			secno = GET_SECNO(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;

		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	}
out:
	mutex_unlock(&dirty_i->seglist_lock);

	return (p.min_segno == NULL_SEGNO) ? 0 : 1;
}

static const struct victim_selection default_v_ops = {
	.get_victim = get_victim_by_default,
};

static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
	struct inode_entry *ie;

	ie = radix_tree_lookup(&gc_list->iroot, ino);
	if (ie)
		return ie->inode;
	return NULL;
}

static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
	struct inode_entry *new_ie;

	if (inode == find_gc_inode(gc_list, inode->i_ino)) {
		iput(inode);
		return;
	}
	new_ie = f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
	new_ie->inode = inode;

	f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
	list_add_tail(&new_ie->list, &gc_list->ilist);
}

static void put_gc_inode(struct gc_inode_list *gc_list)
{
	struct inode_entry *ie, *next_ie;
	list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
		radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(inode_entry_slab, ie);
	}
}

static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	mutex_lock(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	mutex_unlock(&sit_i->sentry_lock);
	return ret;
}

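/*
 * gc_node_segment() below walks the segment's summary entries in three
 * passes: phase 0 issues readahead for the NAT blocks, phase 1 for the
 * node pages themselves, and phase 2 performs the actual migration, so
 * the synchronous get_node_page() calls in the last pass are mostly
 * cache-hot.
 */
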
/*
 * This function compares the node address recorded in the summary with
 * the one in the NAT. If they match, the node is valid and is migrated
 * with cold status; otherwise the invalid node is ignored.
 */
static void gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;
		struct node_info ni;

		/* stop BG_GC if there are not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			ra_node_page(sbi, nid);
			continue;
		}

		/* phase == 2 */
		node_page = get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* block may become invalid during get_node_page */
		if (check_valid_map(sbi, segno, off) == 0) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		get_node_info(sbi, nid, &ni);
		if (ni.blk_addr != start_addr + off) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		move_node_page(node_page, gc_type);
		stat_inc_node_blk_count(sbi, 1, gc_type);
	}

	if (++phase < 3)
		goto next_step;
}

/*
 * Calculate the start block index corresponding to the given node offset.
 * Be careful: callers must pass node offsets of direct node blocks only.
 * Passing a node offset that points to any other node type, such as an
 * indirect or double indirect node block, is a caller's bug.
 */
block_t start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(inode);
}

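/*
 * Worked example for the mapping above: node offset 0 is the inode
 * itself, offsets 1 and 2 are the two direct nodes, offset 3 is the
 * first indirect node, and offset 4 is the first direct node hanging
 * off that indirect node. So node_ofs = 1 gives bidx = 0, node_ofs = 2
 * gives bidx = 1, and node_ofs = 4 gives dec = 0 and bidx = 2. The
 * returned file-block index is bidx * ADDRS_PER_BLOCK plus the
 * ADDRS_PER_INODE() pointers held in the inode block itself.
 */
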
static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return false;

	get_node_info(sbi, nid, dni);

	if (sum->version != dni->version) {
		f2fs_put_page(node_page, 1);
		return false;
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = datablock_addr(node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr)
		return false;
	return true;
}

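/*
 * move_encrypted_block() below migrates one encrypted data block
 * without decrypting it: the ciphertext is read through the meta
 * inode's address space into a page indexed by the newly allocated
 * block address, written back out, and the dnode is then updated to
 * point at the new location. If anything fails after allocation, the
 * new block is rolled back via __f2fs_replace_block().
 */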
static void move_encrypted_block(struct inode *inode, block_t bidx,
							unsigned int segno, int off)
{
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.type = DATA,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
	};
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	struct page *page;
	block_t newaddr;
	int err;

	/* do not read out */
	page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
	if (!page)
		return;

	if (!check_valid_map(F2FS_I_SB(inode), segno, off))
		goto out;

	if (f2fs_is_atomic_file(inode))
		goto out;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
	if (err)
		goto out;

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		ClearPageUptodate(page);
		goto put_out;
	}

	/*
	 * don't cache encrypted data in the meta inode until the previous
	 * dirty data has been written back, to avoid racing between GC
	 * and flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true);

	get_node_info(fio.sbi, dn.nid, &ni);
	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
							&sum, CURSEG_COLD_DATA);

	fio.encrypted_page = pagecache_get_page(META_MAPPING(fio.sbi), newaddr,
					FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		goto recover_block;
	}

	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_page_out;

	/* write page */
	lock_page(fio.encrypted_page);

	if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi))) {
		err = -EIO;
		goto put_page_out;
	}
	if (unlikely(!PageUptodate(fio.encrypted_page))) {
		err = -EIO;
		goto put_page_out;
	}

	set_page_dirty(fio.encrypted_page);
	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true);
	if (clear_page_dirty_for_io(fio.encrypted_page))
		dec_page_count(fio.sbi, F2FS_DIRTY_META);

	set_page_writeback(fio.encrypted_page);

	/* allocate block address */
	f2fs_wait_on_page_writeback(dn.node_page, NODE, true);

	fio.op = REQ_OP_WRITE;
	fio.op_flags = REQ_SYNC;
	fio.new_blkaddr = newaddr;
	f2fs_submit_page_mbio(&fio);

	f2fs_update_data_blkaddr(&dn, newaddr);
	set_inode_flag(inode, FI_APPEND_WRITE);
	if (page->index == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
put_page_out:
	f2fs_put_page(fio.encrypted_page, 1);
recover_block:
	if (err)
		__f2fs_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
								true, true);
put_out:
	f2fs_put_dnode(&dn);
out:
	f2fs_put_page(page, 1);
}

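/*
 * For plaintext data the two GC flavors differ: background GC merely
 * redirties the page and tags it cold, leaving the move to ordinary
 * writeback, while foreground GC writes the page out synchronously,
 * retrying on transient -ENOMEM.
 */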
static void move_data_page(struct inode *inode, block_t bidx, int gc_type,
							unsigned int segno, int off)
{
	struct page *page;

	page = get_lock_data_page(inode, bidx, true);
	if (IS_ERR(page))
		return;

	if (!check_valid_map(F2FS_I_SB(inode), segno, off))
		goto out;

	if (f2fs_is_atomic_file(inode))
		goto out;

	if (gc_type == BG_GC) {
		if (PageWriteback(page))
			goto out;
		set_page_dirty(page);
		set_cold_data(page);
	} else {
		struct f2fs_io_info fio = {
			.sbi = F2FS_I_SB(inode),
			.type = DATA,
			.op = REQ_OP_WRITE,
			.op_flags = REQ_SYNC,
			.page = page,
			.encrypted_page = NULL,
		};
		bool is_dirty = PageDirty(page);
		int err;

retry:
		set_page_dirty(page);
		f2fs_wait_on_page_writeback(page, DATA, true);
		if (clear_page_dirty_for_io(page)) {
			inode_dec_dirty_pages(inode);
			remove_dirty_inode(inode);
		}

		set_cold_data(page);

		err = do_write_data_page(&fio);
		if (err == -ENOMEM && is_dirty) {
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto retry;
		}
	}
out:
	f2fs_put_page(page, 1);
}

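/*
 * gc_data_segment() below mirrors the phased structure of
 * gc_node_segment() with two extra passes: phase 0 reads ahead NAT
 * blocks, phase 1 the dnode pages, phase 2 the inode pages, phase 3
 * pins each victim's inode and reads ahead its data page, and phase 4
 * performs the moves while holding the inode's dio_rwsem pair so that
 * direct I/O cannot race with the migration.
 */
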
/*
 * This function tries to get the parent node of a victim data block and
 * checks the block's validity. If the block is valid, it is copied with
 * cold status and the parent node is updated.
 * If the parent node is not valid or the recorded data block address
 * differs, the victim data block is ignored.
 */
static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;
		nid_t nid = le32_to_cpu(entry->nid);

		/* stop BG_GC if there are not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			ra_node_page(sbi, nid);
			continue;
		}

		/* Get an inode by ino with checking validity */
		if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
			continue;

		if (phase == 2) {
			ra_node_page(sbi, dni.ino);
			continue;
		}

		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 3) {
			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode) || is_bad_inode(inode))
				continue;

			/* if inode is encrypted, defer the move to phase 4 */
			if (f2fs_encrypted_inode(inode) &&
					S_ISREG(inode->i_mode)) {
				add_gc_inode(gc_list, inode);
				continue;
			}

			start_bidx = start_bidx_of_node(nofs, inode);
			data_page = get_read_data_page(inode,
					start_bidx + ofs_in_node, REQ_RAHEAD,
					true);
			if (IS_ERR(data_page)) {
				iput(inode);
				continue;
			}

			f2fs_put_page(data_page, 0);
			add_gc_inode(gc_list, inode);
			continue;
		}

		/* phase 4 */
		inode = find_gc_inode(gc_list, dni.ino);
		if (inode) {
			struct f2fs_inode_info *fi = F2FS_I(inode);
			bool locked = false;

			if (S_ISREG(inode->i_mode)) {
				if (!down_write_trylock(&fi->dio_rwsem[READ]))
					continue;
				if (!down_write_trylock(
						&fi->dio_rwsem[WRITE])) {
					up_write(&fi->dio_rwsem[READ]);
					continue;
				}
				locked = true;
			}

			start_bidx = start_bidx_of_node(nofs, inode)
								+ ofs_in_node;
			if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
				move_encrypted_block(inode, start_bidx, segno, off);
			else
				move_data_page(inode, start_bidx, gc_type, segno, off);

			if (locked) {
				up_write(&fi->dio_rwsem[WRITE]);
				up_write(&fi->dio_rwsem[READ]);
			}

			stat_inc_data_blk_count(sbi, 1, gc_type);
		}
	}

	if (++phase < 5)
		goto next_step;
}

static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
			int gc_type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;

	mutex_lock(&sit_i->sentry_lock);
	ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
					      NO_CHECK_TYPE, LFS);
	mutex_unlock(&sit_i->sentry_lock);
	return ret;
}

static int do_garbage_collect(struct f2fs_sb_info *sbi,
				unsigned int start_segno,
				struct gc_inode_list *gc_list, int gc_type)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	struct blk_plug plug;
	unsigned int segno = start_segno;
	unsigned int end_segno = start_segno + sbi->segs_per_sec;
	int sec_freed = 0;
	unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
						SUM_TYPE_DATA : SUM_TYPE_NODE;

	/* read ahead multiple SSA blocks, which have contiguous addresses */
	if (sbi->segs_per_sec > 1)
		ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
					sbi->segs_per_sec, META_SSA, true);

	/* reference all summary pages */
	while (segno < end_segno) {
		sum_page = get_sum_page(sbi, segno++);
		unlock_page(sum_page);
	}

	blk_start_plug(&plug);

	for (segno = start_segno; segno < end_segno; segno++) {

		/* find the segment summary of the victim */
		sum_page = find_get_page(META_MAPPING(sbi),
					GET_SUM_BLOCK(sbi, segno));
		f2fs_put_page(sum_page, 0);

		if (get_valid_blocks(sbi, segno, 1) == 0 ||
				!PageUptodate(sum_page) ||
				unlikely(f2fs_cp_error(sbi)))
			goto next;

		sum = page_address(sum_page);
		f2fs_bug_on(sbi, type != GET_SUM_TYPE((&sum->footer)));

		/*
		 * this is to avoid deadlock:
		 * - lock_page(sum_page)         - f2fs_replace_block
		 *  - check_valid_map()            - mutex_lock(sentry_lock)
		 *   - mutex_lock(sentry_lock)     - change_curseg()
		 *                                  - lock_page(sum_page)
		 */

		if (type == SUM_TYPE_NODE)
			gc_node_segment(sbi, sum->entries, segno, gc_type);
		else
			gc_data_segment(sbi, sum->entries, gc_list, segno,
								gc_type);

		stat_inc_seg_count(sbi, type, gc_type);
next:
		f2fs_put_page(sum_page, 0);
	}

	if (gc_type == FG_GC)
		f2fs_submit_merged_bio(sbi,
				(type == SUM_TYPE_NODE) ? NODE : DATA, WRITE);

	blk_finish_plug(&plug);

	if (gc_type == FG_GC &&
		get_valid_blocks(sbi, start_segno, sbi->segs_per_sec) == 0)
		sec_freed = 1;

	stat_inc_call_count(sbi->stat_info);

	return sec_freed;
}

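/*
 * f2fs_gc() is the entry point for both GC flavors. The caller is
 * expected to hold sbi->gc_mutex, which is only released here on the
 * stop: path. With sync == true it runs foreground GC and reports
 * -EAGAIN when no section could be freed; with sync == false it may
 * loop via gc_more until enough free sections exist, writing a
 * checkpoint at the end of a foreground pass.
 */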
int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background)
{
	unsigned int segno;
	int gc_type = sync ? FG_GC : BG_GC;
	int sec_freed = 0;
	int ret = -EINVAL;
	struct cp_control cpc;
	struct gc_inode_list gc_list = {
		.ilist = LIST_HEAD_INIT(gc_list.ilist),
		.iroot = RADIX_TREE_INIT(GFP_NOFS),
	};

	cpc.reason = __get_cp_reason(sbi);
gc_more:
	if (unlikely(!(sbi->sb->s_flags & MS_ACTIVE)))
		goto stop;
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto stop;
	}

	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) {
		/*
		 * For example, if there are many prefree_segments below the
		 * given threshold, we can make them free by checkpoint.
		 * Then we secure free segments, which no longer need FG_GC.
		 */
		ret = write_checkpoint(sbi, &cpc);
		if (ret)
			goto stop;
		if (has_not_enough_free_secs(sbi, 0, 0))
			gc_type = FG_GC;
	}

	/* f2fs_balance_fs doesn't need to do BG_GC on the critical path. */
	if (gc_type == BG_GC && !background)
		goto stop;
	if (!__get_victim(sbi, &segno, gc_type))
		goto stop;
	ret = 0;

	if (do_garbage_collect(sbi, segno, &gc_list, gc_type) &&
			gc_type == FG_GC)
		sec_freed++;

	if (gc_type == FG_GC)
		sbi->cur_victim_sec = NULL_SEGNO;

	if (!sync) {
		if (has_not_enough_free_secs(sbi, sec_freed, 0))
			goto gc_more;

		if (gc_type == FG_GC)
			ret = write_checkpoint(sbi, &cpc);
	}
stop:
	mutex_unlock(&sbi->gc_mutex);

	put_gc_inode(&gc_list);

	if (sync)
		ret = sec_freed ? 0 : -EAGAIN;
	return ret;
}

void build_gc_manager(struct f2fs_sb_info *sbi)
{
	u64 main_count, resv_count, ovp_count, blocks_per_sec;

	DIRTY_I(sbi)->v_ops = &default_v_ops;

	/* threshold of # of valid blocks in a section for victims of FG_GC */
	main_count = SM_I(sbi)->main_segments << sbi->log_blocks_per_seg;
	resv_count = SM_I(sbi)->reserved_segments << sbi->log_blocks_per_seg;
	ovp_count = SM_I(sbi)->ovp_segments << sbi->log_blocks_per_seg;
	blocks_per_sec = sbi->blocks_per_seg * sbi->segs_per_sec;

	sbi->fggc_threshold = div64_u64((main_count - ovp_count) *
					blocks_per_sec, (main_count - resv_count));
}
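
/*
 * Illustrative numbers for the threshold above, under a hypothetical
 * geometry: with 512 blocks per segment, one segment per section,
 * main_count = 100000 blocks, ovp_count = 5000 and resv_count = 3000,
 * fggc_threshold = (100000 - 5000) * 512 / (100000 - 3000) ~= 501.
 * Sections keeping more valid blocks than this are skipped as FG_GC
 * victims (see no_fggc_candidate()).
 */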