/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/blkdev.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *winode_slab;

static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	long wait_ms;

	wait_ms = gc_th->min_sleep_time;

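	/*
	 * The loop below adapts wait_ms between GC passes: it backs off
	 * while the filesystem is frozen or busy, and shortens the sleep
	 * once enough invalid blocks have accumulated to make cleaning
	 * worthwhile.
	 */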
	do {
		if (try_to_freeze())
			continue;
		else
			wait_event_interruptible_timeout(*wq,
					kthread_should_stop(),
					msecs_to_jiffies(wait_ms));
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			wait_ms = increase_sleep_time(gc_th, wait_ms);
			continue;
		}

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. IO subsystem is idle, judged by the # of writeback pages.
		 * 3. IO subsystem is idle, judged by the # of requests in the
		 *    bdev's request list.
		 *
		 * Note) We have to avoid triggering GCs too frequently,
		 * because some segments may be invalidated soon afterwards
		 * by user updates or deletions. So we wait a while to let
		 * more dirty segments accumulate.
		 */
		if (!mutex_trylock(&sbi->gc_mutex))
			continue;

		if (!is_idle(sbi)) {
			wait_ms = increase_sleep_time(gc_th, wait_ms);
			mutex_unlock(&sbi->gc_mutex);
			continue;
		}

		if (has_enough_invalid_blocks(sbi))
			wait_ms = decrease_sleep_time(gc_th, wait_ms);
		else
			wait_ms = increase_sleep_time(gc_th, wait_ms);

		stat_inc_bggc_count(sbi);

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi))
			wait_ms = gc_th->no_gc_sleep_time;

		/* balancing prefree segments */
		if (excess_prefree_segs(sbi))
			f2fs_sync_fs(sbi->sb, true);

	} while (!kthread_should_stop());
	return 0;
}

int start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	int err = 0;

	if (!test_opt(sbi, BG_GC))
		goto out;
	gc_th = kmalloc(sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th) {
		err = -ENOMEM;
		goto out;
	}

	gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

	gc_th->gc_idle = 0;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		err = PTR_ERR(gc_th->f2fs_gc_task);
		kfree(gc_th);
		sbi->gc_thread = NULL;
	}

out:
	return err;
}

void stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}

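/*
 * Map the gc_idle tunable onto a victim selection mode: 0 keeps the
 * per-gc_type default (cost-benefit for BG_GC, greedy otherwise),
 * 1 forces cost-benefit, and 2 forces greedy selection.
 */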
static int select_gc_type(struct f2fs_gc_kthread *gc_th, int gc_type)
{
	int gc_mode = (gc_type == BG_GC) ? GC_CB : GC_GREEDY;

	if (gc_th && gc_th->gc_idle) {
		if (gc_th->gc_idle == 1)
			gc_mode = GC_CB;
		else if (gc_th->gc_idle == 2)
			gc_mode = GC_GREEDY;
	}
	return gc_mode;
}

static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_segmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(sbi->gc_thread, gc_type);
		p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
		p->max_search = dirty_i->nr_dirty[DIRTY];
		p->ofs_unit = sbi->segs_per_sec;
	}

	if (p->max_search > MAX_VICTIM_SEARCH)
		p->max_search = MAX_VICTIM_SEARCH;

	p->offset = sbi->last_victim[p->gc_mode];
}

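/*
 * Upper bound used to seed the victim search: one segment's worth of
 * blocks for SSR, one section's worth for greedy GC, and UINT_MAX for
 * cost-benefit, whose scores are expressed as offsets below UINT_MAX.
 */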
static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return 1 << sbi->log_blocks_per_seg;
	if (p->gc_mode == GC_GREEDY)
		return (1 << sbi->log_blocks_per_seg) * p->ofs_unit;
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int hint = 0;
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can reuse victim sections that were
	 * selected by background GC earlier; those sections are guaranteed
	 * to contain only a few valid blocks.
	 */
next:
	secno = find_next_bit(dirty_i->victim_secmap, TOTAL_SECS(sbi), hint++);
	if (secno < TOTAL_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			goto next;
		clear_bit(secno, dirty_i->victim_secmap);
		return secno * sbi->segs_per_sec;
	}
	return NULL_SEGNO;
}

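/*
 * Cost-benefit scoring in the spirit of the classic LFS cleaner: u is
 * the section's utilization in percent and age its normalized age. The
 * benefit term (100 - u) * age / (100 + u) grows for old, mostly
 * invalid sections and is subtracted from UINT_MAX, so a lower return
 * value marks a better victim.
 */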
static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SECNO(sbi, segno);
	unsigned int start = secno * sbi->segs_per_sec;
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;

	for (i = 0; i < sbi->segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, sbi->segs_per_sec);

	mtime = div_u64(mtime, sbi->segs_per_sec);
	vblocks = div_u64(vblocks, sbi->segs_per_sec);

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* Handle the case where the system time was changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}

static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
			unsigned int segno, struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, sbi->segs_per_sec);
	else
		return get_cb_cost(sbi, segno);
}

/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When called for GC, it just picks a victim segment and does not
 * remove it from the dirty seglist.
 * When called for SSR segment selection, it finds the segment with the
 * fewest valid blocks and removes it from the dirty seglist.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
		unsigned int *result, int gc_type, int type, char alloc_mode)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno, max_cost;
	int nsearched = 0;

	p.alloc_mode = alloc_mode;
	select_policy(sbi, gc_type, type, &p);

	p.min_segno = NULL_SEGNO;
	p.min_cost = max_cost = get_max_cost(sbi, &p);

	mutex_lock(&dirty_i->seglist_lock);

	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

	while (1) {
		unsigned long cost;
		unsigned int segno;

		segno = find_next_bit(p.dirty_segmap,
						TOTAL_SEGS(sbi), p.offset);
		if (segno >= TOTAL_SEGS(sbi)) {
			if (sbi->last_victim[p.gc_mode]) {
				sbi->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}

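		/*
		 * Advance the search cursor to the first segment of the
		 * next section, so a multi-segment section is examined
		 * only once per pass.
		 */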
		p.offset = segno + p.ofs_unit;
		if (p.ofs_unit > 1)
			p.offset -= segno % p.ofs_unit;

		secno = GET_SECNO(sbi, segno);

		if (sec_usage_check(sbi, secno))
			continue;
		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			continue;

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		} else if (unlikely(cost == max_cost)) {
			continue;
		}

		if (nsearched++ >= p.max_search) {
			sbi->last_victim[p.gc_mode] = segno;
			break;
		}
	}
	if (p.min_segno != NULL_SEGNO) {
got_it:
		if (p.alloc_mode == LFS) {
			secno = GET_SECNO(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;

		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	}
	mutex_unlock(&dirty_i->seglist_lock);

	return (p.min_segno == NULL_SEGNO) ? 0 : 1;
}

static const struct victim_selection default_v_ops = {
	.get_victim = get_victim_by_default,
};

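/*
 * The ilist assembled while collecting a data segment holds one
 * reference per inode that owns victim data blocks; put_gc_inode()
 * drops those references once the whole section has been migrated.
 */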
static struct inode *find_gc_inode(nid_t ino, struct list_head *ilist)
{
	struct inode_entry *ie;

	list_for_each_entry(ie, ilist, list)
		if (ie->inode->i_ino == ino)
			return ie->inode;
	return NULL;
}

static void add_gc_inode(struct inode *inode, struct list_head *ilist)
{
	struct inode_entry *new_ie;

	if (inode == find_gc_inode(inode->i_ino, ilist)) {
		iput(inode);
		return;
	}

	new_ie = f2fs_kmem_cache_alloc(winode_slab, GFP_NOFS);
	new_ie->inode = inode;
	list_add_tail(&new_ie->list, ilist);
}

static void put_gc_inode(struct list_head *ilist)
{
	struct inode_entry *ie, *next_ie;
	list_for_each_entry_safe(ie, next_ie, ilist, list) {
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(winode_slab, ie);
	}
}

static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	mutex_lock(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	mutex_unlock(&sit_i->sentry_lock);
	return ret;
}

/*
 * This function compares the node address recorded in the summary with
 * the one in the NAT. If they match, the node is still valid and is
 * rewritten with cold status; otherwise the stale node is ignored.
 */
static void gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	bool initial = true;
	struct f2fs_summary *entry;
	int off;

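	/*
	 * Two passes over the summary entries: the first issues readahead
	 * for every valid node block, the second redirties the node pages
	 * so they are rewritten into a new segment.
	 */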
next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;

		/* stop BG_GC if there are not enough free sections */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (initial) {
			ra_node_page(sbi, nid);
			continue;
		}
		node_page = get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* set page dirty and write it */
		if (gc_type == FG_GC) {
			f2fs_wait_on_page_writeback(node_page, NODE, true);
			set_page_dirty(node_page);
		} else {
			if (!PageWriteback(node_page))
				set_page_dirty(node_page);
		}
		f2fs_put_page(node_page, 1);
		stat_inc_node_blk_count(sbi, 1);
	}

	if (initial) {
		initial = false;
		goto next_step;
	}

	if (gc_type == FG_GC) {
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_ALL,
			.nr_to_write = LONG_MAX,
			.for_reclaim = 0,
		};
		sync_node_pages(sbi, 0, &wbc);

		/*
		 * In the case of FG_GC, it'd be better to reclaim this victim
		 * completely.
		 */
		if (get_valid_blocks(sbi, segno, 1) != 0)
			goto next_step;
	}
}

/*
 * Calculate the start block index of the data addressed by the given
 * node offset. Be careful: callers must pass only node offsets that
 * refer to direct node blocks. Passing an offset that points to another
 * node type, such as an indirect or double indirect node block, is a
 * caller's bug.
 */
block_t start_bidx_of_node(unsigned int node_ofs, struct f2fs_inode_info *fi)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

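	/*
	 * Node offsets within an inode run: 1-2 are direct node blocks,
	 * then each indirect node block is followed by its NIDS_PER_BLOCK
	 * direct children, with the double indirect tree beyond
	 * indirect_blks. 'dec' counts the indirect node blocks passed so
	 * far, which carry no data addresses themselves.
	 */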
	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(fi);
}

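/*
 * Validate a summary entry against the node it names: fail if the node
 * page cannot be read, if the summary's version no longer matches the
 * NAT entry (the node was reused), or if the node no longer points at
 * the block being collected.
 */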
static int check_dnode(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return 0;

	get_node_info(sbi, nid, dni);

	if (sum->version != dni->version) {
		f2fs_put_page(node_page, 1);
		return 0;
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = datablock_addr(node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr)
		return 0;
	return 1;
}

static void move_data_page(struct inode *inode, struct page *page, int gc_type)
{
	if (gc_type == BG_GC) {
		if (PageWriteback(page))
			goto out;
		set_page_dirty(page);
		set_cold_data(page);
	} else {
		struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);

		f2fs_wait_on_page_writeback(page, DATA, true);

		if (clear_page_dirty_for_io(page) &&
			S_ISDIR(inode->i_mode)) {
			dec_page_count(sbi, F2FS_DIRTY_DENTS);
			inode_dec_dirty_dents(inode);
		}
		set_cold_data(page);
		do_write_data_page(page);
		clear_cold_data(page);
	}
out:
	f2fs_put_page(page, 1);
}

/*
 * This function finds the parent node of each victim data block and
 * checks the block's validity. If the block is valid, it is copied with
 * cold status and the parent node is updated.
 * If the parent node is not valid, or the data block's address differs,
 * the victim data block is ignored.
 */
static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct list_head *ilist, unsigned int segno, int gc_type)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;

	start_addr = START_BLOCK(sbi, segno);

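	/*
	 * The segment is scanned in four phases: 0 issues readahead for
	 * the node blocks named in the summary, 1 issues readahead for
	 * the owning inodes, 2 grabs each inode via f2fs_iget() and
	 * touches its data page, and 3 actually moves every valid data
	 * page.
	 */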
next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;

		/* stop BG_GC if there are not enough free sections */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			ra_node_page(sbi, le32_to_cpu(entry->nid));
			continue;
		}

		/* Get an inode by ino, after checking the dnode's validity */
		if (check_dnode(sbi, entry, &dni, start_addr + off, &nofs) == 0)
			continue;

		if (phase == 1) {
			ra_node_page(sbi, dni.ino);
			continue;
		}

		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 2) {
			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode))
				continue;

			start_bidx = start_bidx_of_node(nofs, F2FS_I(inode));

			data_page = find_data_page(inode,
					start_bidx + ofs_in_node, false);
			if (IS_ERR(data_page))
				goto next_iput;

			f2fs_put_page(data_page, 0);
			add_gc_inode(inode, ilist);
		} else {
			inode = find_gc_inode(dni.ino, ilist);
			if (inode) {
				start_bidx = start_bidx_of_node(nofs,
								F2FS_I(inode));
				data_page = get_lock_data_page(inode,
						start_bidx + ofs_in_node);
				if (IS_ERR(data_page))
					continue;
				move_data_page(inode, data_page, gc_type);
				stat_inc_data_blk_count(sbi, 1);
			}
		}
		continue;
next_iput:
		iput(inode);
	}

	if (++phase < 4)
		goto next_step;

	if (gc_type == FG_GC) {
		f2fs_submit_bio(sbi, DATA, true);

		/*
		 * In the case of FG_GC, it'd be better to reclaim this victim
		 * completely.
		 */
		if (get_valid_blocks(sbi, segno, 1) != 0) {
			phase = 2;
			goto next_step;
		}
	}
}

static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
					int gc_type, int type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;
	mutex_lock(&sit_i->sentry_lock);
	ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type, type, LFS);
	mutex_unlock(&sit_i->sentry_lock);
	return ret;
}

static void do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
				struct list_head *ilist, int gc_type)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	struct blk_plug plug;

	/* read segment summary of victim */
	sum_page = get_sum_page(sbi, segno);
	if (IS_ERR(sum_page))
		return;

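	/*
	 * Plugging lets the many per-page reads and writes issued while
	 * migrating this segment merge before they are dispatched.
	 */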
	blk_start_plug(&plug);

	sum = page_address(sum_page);

	switch (GET_SUM_TYPE((&sum->footer))) {
	case SUM_TYPE_NODE:
		gc_node_segment(sbi, sum->entries, segno, gc_type);
		break;
	case SUM_TYPE_DATA:
		gc_data_segment(sbi, sum->entries, ilist, segno, gc_type);
		break;
	}
	blk_finish_plug(&plug);

	stat_inc_seg_count(sbi, GET_SUM_TYPE((&sum->footer)));
	stat_inc_call_count(sbi->stat_info);

	f2fs_put_page(sum_page, 1);
}

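/*
 * Called with sbi->gc_mutex held; the lock is released at stop:.
 * Background GC is upgraded to FG_GC when free sections run short, and
 * collection repeats (gc_more) until enough sections have been freed.
 * Returns 0 once a victim has been collected, -1 if none was selected.
 */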
int f2fs_gc(struct f2fs_sb_info *sbi)
{
	struct list_head ilist;
	unsigned int segno, i;
	int gc_type = BG_GC;
	int nfree = 0;
	int ret = -1;

	INIT_LIST_HEAD(&ilist);
gc_more:
	if (!(sbi->sb->s_flags & MS_ACTIVE))
		goto stop;

	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, nfree)) {
		gc_type = FG_GC;
		write_checkpoint(sbi, false);
	}

	if (!__get_victim(sbi, &segno, gc_type, NO_CHECK_TYPE))
		goto stop;
	ret = 0;

	for (i = 0; i < sbi->segs_per_sec; i++)
		do_garbage_collect(sbi, segno + i, &ilist, gc_type);

	if (gc_type == FG_GC) {
		sbi->cur_victim_sec = NULL_SEGNO;
		nfree++;
		WARN_ON(get_valid_blocks(sbi, segno, sbi->segs_per_sec));
	}

	if (has_not_enough_free_secs(sbi, nfree))
		goto gc_more;

	if (gc_type == FG_GC)
		write_checkpoint(sbi, false);
stop:
	mutex_unlock(&sbi->gc_mutex);

	put_gc_inode(&ilist);
	return ret;
}

void build_gc_manager(struct f2fs_sb_info *sbi)
{
	DIRTY_I(sbi)->v_ops = &default_v_ops;
}

int __init create_gc_caches(void)
{
	winode_slab = f2fs_kmem_cache_create("f2fs_gc_inodes",
			sizeof(struct inode_entry), NULL);
	if (!winode_slab)
		return -ENOMEM;
	return 0;
}

void destroy_gc_caches(void)
{
	kmem_cache_destroy(winode_slab);
}