/*
 * fs/f2fs/gc.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define GC_THREAD_MIN_WB_PAGES		1	/*
						 * a threshold to determine
						 * whether the IO subsystem is
						 * idle or not
						 */
#define DEF_GC_THREAD_MIN_SLEEP_TIME	30000	/* milliseconds */
#define DEF_GC_THREAD_MAX_SLEEP_TIME	60000
#define DEF_GC_THREAD_NOGC_SLEEP_TIME	300000	/* wait 5 min */
#define LIMIT_INVALID_BLOCK	40 /* percentage over total user space */
#define LIMIT_FREE_BLOCK	40 /* percentage over invalid + free space */

/* Search max. number of dirty segments to select a victim segment */
#define MAX_VICTIM_SEARCH	20

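/* background GC thread state: task handle, wait queue and sleep-time tuning */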
struct f2fs_gc_kthread {
	struct task_struct *f2fs_gc_task;
	wait_queue_head_t gc_wait_queue_head;

	/* for gc sleep time */
	unsigned int min_sleep_time;
	unsigned int max_sleep_time;
	unsigned int no_gc_sleep_time;
};

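/* list entry for inodes whose data blocks are moved during GC */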
struct inode_entry {
	struct list_head list;
	struct inode *inode;
};

/*
 * inline functions
 */
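/* user blocks held in free segments, excluding the overprovisioned segments */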
static inline block_t free_user_blocks(struct f2fs_sb_info *sbi)
{
	if (free_segments(sbi) < overprovision_segments(sbi))
		return 0;
	else
		return (free_segments(sbi) - overprovision_segments(sbi))
			<< sbi->log_blocks_per_seg;
}

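/* background GC threshold: LIMIT_INVALID_BLOCK percent of total user blocks */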
static inline block_t limit_invalid_user_blocks(struct f2fs_sb_info *sbi)
{
	return (long)(sbi->user_block_count * LIMIT_INVALID_BLOCK) / 100;
}

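/* background GC threshold: LIMIT_FREE_BLOCK percent of the invalid + free user blocks */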
static inline block_t limit_free_user_blocks(struct f2fs_sb_info *sbi)
{
	block_t reclaimable_user_blocks = sbi->user_block_count -
						written_block_count(sbi);
	return (long)(reclaimable_user_blocks * LIMIT_FREE_BLOCK) / 100;
}

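/*
 * Lengthen the GC sleep interval by min_sleep_time, capped at max_sleep_time.
 * The special no_gc_sleep_time interval is left unchanged.
 */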
static inline long increase_sleep_time(struct f2fs_gc_kthread *gc_th, long wait)
{
	if (wait == gc_th->no_gc_sleep_time)
		return wait;

	wait += gc_th->min_sleep_time;
	if (wait > gc_th->max_sleep_time)
		wait = gc_th->max_sleep_time;
	return wait;
}

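/*
 * Shorten the GC sleep interval by min_sleep_time, but not below
 * min_sleep_time. When leaving the no-GC interval, restart from
 * max_sleep_time.
 */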
static inline long decrease_sleep_time(struct f2fs_gc_kthread *gc_th, long wait)
{
	if (wait == gc_th->no_gc_sleep_time)
		wait = gc_th->max_sleep_time;

	wait -= gc_th->min_sleep_time;
	if (wait <= gc_th->min_sleep_time)
		wait = gc_th->min_sleep_time;
	return wait;
}

static inline bool has_enough_invalid_blocks(struct f2fs_sb_info *sbi)
{
	block_t invalid_user_blocks = sbi->user_block_count -
					written_block_count(sbi);
	/*
	 * Background GC is triggered under the following conditions:
	 * 1. There are a number of invalid blocks.
	 * 2. There is not enough free space.
	 */
	if (invalid_user_blocks > limit_invalid_user_blocks(sbi) &&
			free_user_blocks(sbi) < limit_free_user_blocks(sbi))
		return true;
	return false;
}

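/* treat the block device as idle when its request queue holds no sync or async requests */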
static inline int is_idle(struct f2fs_sb_info *sbi)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	struct request_queue *q = bdev_get_queue(bdev);
	struct request_list *rl = &q->root_rl;
	return !(rl->count[BLK_RW_SYNC]) && !(rl->count[BLK_RW_ASYNC]);
}