/*
 * fs/f2fs/gc.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define GC_THREAD_MIN_WB_PAGES		1	/*
						 * a threshold to determine
						 * whether IO subsystem is idle
						 * or not
						 */
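/*
 * Default sleep intervals for the background GC thread; it adapts its
 * polling period between the min and max values below and backs off to
 * the no-GC interval when nothing is worth cleaning.
 */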
#define DEF_GC_THREAD_MIN_SLEEP_TIME	30000	/* milliseconds */
#define DEF_GC_THREAD_MAX_SLEEP_TIME	60000
#define DEF_GC_THREAD_NOGC_SLEEP_TIME	300000	/* wait 5 min */
#define LIMIT_INVALID_BLOCK	40 /* percentage over total user space */
#define LIMIT_FREE_BLOCK	40 /* percentage over invalid + free space */

/* Search max. number of dirty segments to select a victim segment */
#define DEF_MAX_VICTIM_SEARCH	4096 /* covers 8GB */
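/*
 * With the default 2MB segment size, scanning up to 4096 dirty segments
 * per victim selection covers 4096 * 2MB = 8GB of space.
 */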

struct f2fs_gc_kthread {
	struct task_struct *f2fs_gc_task;
	wait_queue_head_t gc_wait_queue_head;

	/* for gc sleep time */
	unsigned int min_sleep_time;
	unsigned int max_sleep_time;
	unsigned int no_gc_sleep_time;

	/* for changing gc mode */
	unsigned int gc_idle;
};

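/*
 * Inodes referenced during a GC pass are kept on a list for iteration
 * and in a radix tree for fast duplicate lookup.
 */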
struct gc_inode_list {
	struct list_head ilist;
	struct radix_tree_root iroot;
};

/*
 * inline functions
 */
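/* blocks available for user data, excluding overprovisioned segments */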
static inline block_t free_user_blocks(struct f2fs_sb_info *sbi)
{
	if (free_segments(sbi) < overprovision_segments(sbi))
		return 0;
	else
		return (free_segments(sbi) - overprovision_segments(sbi))
			<< sbi->log_blocks_per_seg;
}

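/* invalid-block threshold: LIMIT_INVALID_BLOCK percent of all user blocks */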
static inline block_t limit_invalid_user_blocks(struct f2fs_sb_info *sbi)
{
	return (long)(sbi->user_block_count * LIMIT_INVALID_BLOCK) / 100;
}

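/*
 * free-space threshold: LIMIT_FREE_BLOCK percent of the blocks that are
 * not currently written (i.e. invalid + free space)
 */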
static inline block_t limit_free_user_blocks(struct f2fs_sb_info *sbi)
{
	block_t reclaimable_user_blocks = sbi->user_block_count -
						written_block_count(sbi);
	return (long)(reclaimable_user_blocks * LIMIT_FREE_BLOCK) / 100;
}

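/*
 * Lengthen the GC thread's wait by one min_sleep_time step, capped at
 * max_sleep_time; a wait already parked at no_gc_sleep_time is left alone.
 */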
static inline void increase_sleep_time(struct f2fs_gc_kthread *gc_th,
								long *wait)
{
	if (*wait == gc_th->no_gc_sleep_time)
		return;

	*wait += gc_th->min_sleep_time;
	if (*wait > gc_th->max_sleep_time)
		*wait = gc_th->max_sleep_time;
}

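/*
 * Shorten the GC thread's wait by one min_sleep_time step, never going
 * below min_sleep_time; a wait parked at no_gc_sleep_time is first pulled
 * back to max_sleep_time.
 */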
static inline void decrease_sleep_time(struct f2fs_gc_kthread *gc_th,
								long *wait)
{
	if (*wait == gc_th->no_gc_sleep_time)
		*wait = gc_th->max_sleep_time;

	*wait -= gc_th->min_sleep_time;
	if (*wait <= gc_th->min_sleep_time)
		*wait = gc_th->min_sleep_time;
}

static inline bool has_enough_invalid_blocks(struct f2fs_sb_info *sbi)
{
	block_t invalid_user_blocks = sbi->user_block_count -
					written_block_count(sbi);
	/*
	 * Background GC is triggered with the following conditions.
	 * 1. There are a number of invalid blocks.
	 * 2. There is not enough free space.
	 */
	if (invalid_user_blocks > limit_invalid_user_blocks(sbi) &&
			free_user_blocks(sbi) < limit_free_user_blocks(sbi))
		return true;
	return false;
}

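/*
 * The IO subsystem is considered idle when the block device's request
 * queue holds no pending sync or async requests.
 */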
static inline int is_idle(struct f2fs_sb_info *sbi)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	struct request_queue *q = bdev_get_queue(bdev);
	struct request_list *rl = &q->root_rl;
	return !(rl->count[BLK_RW_SYNC]) && !(rl->count[BLK_RW_ASYNC]);
}