/*
 * fs/f2fs/gc.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define GC_THREAD_MIN_WB_PAGES		1	/*
						 * a threshold to determine
						 * whether IO subsystem is idle
						 * or not
						 */
#define DEF_GC_THREAD_MIN_SLEEP_TIME	30000	/* milliseconds */
#define DEF_GC_THREAD_MAX_SLEEP_TIME	60000
#define DEF_GC_THREAD_NOGC_SLEEP_TIME	300000	/* wait 5 min */
#define LIMIT_INVALID_BLOCK	40 /* percentage over total user space */
#define LIMIT_FREE_BLOCK	40 /* percentage over invalid + free space */

/* Search max. number of dirty segments to select a victim segment */
#define DEF_MAX_VICTIM_SEARCH	4096 /* covers 8GB */

struct f2fs_gc_kthread {
	struct task_struct *f2fs_gc_task;
	wait_queue_head_t gc_wait_queue_head;

	/* for gc sleep time */
	unsigned int min_sleep_time;
	unsigned int max_sleep_time;
	unsigned int no_gc_sleep_time;

	/* for changing gc mode */
	unsigned int gc_idle;
};

struct inode_entry {
	struct list_head list;
	struct inode *inode;
};

/*
 * inline functions
 */
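/*
 * Free blocks available to user data: free segments minus the
 * overprovisioned segments, converted to a block count.
 */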
static inline block_t free_user_blocks(struct f2fs_sb_info *sbi)
{
	if (free_segments(sbi) < overprovision_segments(sbi))
		return 0;
	else
		return (free_segments(sbi) - overprovision_segments(sbi))
			<< sbi->log_blocks_per_seg;
}

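/* LIMIT_INVALID_BLOCK percent of the total user block count */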
static inline block_t limit_invalid_user_blocks(struct f2fs_sb_info *sbi)
{
	return (long)(sbi->user_block_count * LIMIT_INVALID_BLOCK) / 100;
}

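/* LIMIT_FREE_BLOCK percent of the reclaimable (not currently valid) user blocks */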
static inline block_t limit_free_user_blocks(struct f2fs_sb_info *sbi)
{
	block_t reclaimable_user_blocks = sbi->user_block_count -
						written_block_count(sbi);
	return (long)(reclaimable_user_blocks * LIMIT_FREE_BLOCK) / 100;
}

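/*
 * Back off the GC thread: add min_sleep_time to the current wait,
 * capped at max_sleep_time.  A wait of no_gc_sleep_time is left as is.
 */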
static inline long increase_sleep_time(struct f2fs_gc_kthread *gc_th, long wait)
{
	if (wait == gc_th->no_gc_sleep_time)
		return wait;

	wait += gc_th->min_sleep_time;
	if (wait > gc_th->max_sleep_time)
		wait = gc_th->max_sleep_time;
	return wait;
}

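/*
 * Make the GC thread run more often: subtract min_sleep_time from the
 * current wait, but never go below min_sleep_time.  A wait of
 * no_gc_sleep_time is first pulled back to max_sleep_time.
 */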
static inline long decrease_sleep_time(struct f2fs_gc_kthread *gc_th, long wait)
{
	if (wait == gc_th->no_gc_sleep_time)
		wait = gc_th->max_sleep_time;

	wait -= gc_th->min_sleep_time;
	if (wait <= gc_th->min_sleep_time)
		wait = gc_th->min_sleep_time;
	return wait;
}

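/* Decide whether background GC is worthwhile (see the conditions below). */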
static inline bool has_enough_invalid_blocks(struct f2fs_sb_info *sbi)
{
	block_t invalid_user_blocks = sbi->user_block_count -
					written_block_count(sbi);
	/*
	 * Background GC is triggered with the following conditions.
	 * 1. There are a number of invalid blocks.
	 * 2. There is not enough free space.
	 */
	if (invalid_user_blocks > limit_invalid_user_blocks(sbi) &&
			free_user_blocks(sbi) < limit_free_user_blocks(sbi))
		return true;
	return false;
}

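/*
 * Treat the underlying device as idle when its request queue holds no
 * pending sync or async requests.
 */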
static inline int is_idle(struct f2fs_sb_info *sbi)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	struct request_queue *q = bdev_get_queue(bdev);
	struct request_list *rl = &q->root_rl;
	return !(rl->count[BLK_RW_SYNC]) && !(rl->count[BLK_RW_ASYNC]);
}