/*
 * f2fs shrinker support
 * the basic infra was copied from fs/ubifs/shrinker.c
 *
 * Copyright (c) 2015 Motorola Mobility
 * Copyright (c) 2015 Jaegeuk Kim <jaegeuk@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>

#include "f2fs.h"
#include "node.h"

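/*
 * Every mounted f2fs instance is linked on f2fs_list so the global shrinker
 * can walk all of them; f2fs_list_lock protects the list.
 */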
static LIST_HEAD(f2fs_list);
static DEFINE_SPINLOCK(f2fs_list_lock);
static unsigned int shrinker_run_no;

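/*
 * Clean NAT entries (cached but not dirtied) are the only NAT entries the
 * shrinker may drop, so report the total minus the dirty ones.
 */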
static unsigned long __count_nat_entries(struct f2fs_sb_info *sbi)
{
	long count = NM_I(sbi)->nat_cnt - NM_I(sbi)->dirty_nat_cnt;

	return count > 0 ? count : 0;
}

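/*
 * Free nids are kept around to make node allocation cheap; only the part of
 * the free list beyond MAX_FREE_NIDS is reported as reclaimable.
 */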
static unsigned long __count_free_nids(struct f2fs_sb_info *sbi)
{
	long count = NM_I(sbi)->nid_cnt[FREE_NID_LIST] - MAX_FREE_NIDS;

	return count > 0 ? count : 0;
}

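/*
 * Zombie extent trees (trees whose in-memory inode is gone) and all cached
 * extent nodes are counted as reclaimable.
 */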
static unsigned long __count_extent_cache(struct f2fs_sb_info *sbi)
{
	return atomic_read(&sbi->total_zombie_tree) +
				atomic_read(&sbi->total_ext_node);
}

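/*
 * ->count_objects() callback: report how many cache entries could be freed
 * across all mounted f2fs instances.  umount_mutex is only trylock'ed so a
 * concurrent f2fs_put_super() never has to wait for the shrinker.
 */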
unsigned long f2fs_shrink_count(struct shrinker *shrink,
				struct shrink_control *sc)
{
	struct f2fs_sb_info *sbi;
	struct list_head *p;
	unsigned long count = 0;

	spin_lock(&f2fs_list_lock);
	p = f2fs_list.next;
	while (p != &f2fs_list) {
		sbi = list_entry(p, struct f2fs_sb_info, s_list);

		/* stop f2fs_put_super */
		if (!mutex_trylock(&sbi->umount_mutex)) {
			p = p->next;
			continue;
		}
		spin_unlock(&f2fs_list_lock);

		/* count extent cache entries */
		count += __count_extent_cache(sbi);

		/* count clean nat cache entries */
		count += __count_nat_entries(sbi);

		/* count free nids cache entries */
		count += __count_free_nids(sbi);

		spin_lock(&f2fs_list_lock);
		p = p->next;
		mutex_unlock(&sbi->umount_mutex);
	}
	spin_unlock(&f2fs_list_lock);
	return count;
}

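/*
 * ->scan_objects() callback: free up to sc->nr_to_scan entries.  Each call
 * draws a fresh non-zero run number and tags every sb it visits with it, so
 * the list rotation below cannot make one pass scan the same sb twice.
 */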
unsigned long f2fs_shrink_scan(struct shrinker *shrink,
				struct shrink_control *sc)
{
	unsigned long nr = sc->nr_to_scan;
	struct f2fs_sb_info *sbi;
	struct list_head *p;
	unsigned int run_no;
	unsigned long freed = 0;

	spin_lock(&f2fs_list_lock);
	/* pick a non-zero run number for this pass */
	do {
		run_no = ++shrinker_run_no;
	} while (run_no == 0);
	p = f2fs_list.next;
	while (p != &f2fs_list) {
		sbi = list_entry(p, struct f2fs_sb_info, s_list);

		/* stop once we wrap around to an sb already scanned in this pass */
		if (sbi->shrinker_run_no == run_no)
			break;

		/* stop f2fs_put_super */
		if (!mutex_trylock(&sbi->umount_mutex)) {
			p = p->next;
			continue;
		}
		spin_unlock(&f2fs_list_lock);

		sbi->shrinker_run_no = run_no;

		/* shrink extent cache entries */
		freed += f2fs_shrink_extent_tree(sbi, nr >> 1);

		/* shrink clean nat cache entries */
		if (freed < nr)
			freed += try_to_free_nats(sbi, nr - freed);

		/* shrink free nids cache entries */
		if (freed < nr)
			freed += try_to_free_nids(sbi, nr - freed);

		spin_lock(&f2fs_list_lock);
		p = p->next;
		/* move to the tail so the next pass starts from a different sb */
		list_move_tail(&sbi->s_list, &f2fs_list);
		mutex_unlock(&sbi->umount_mutex);
		if (freed >= nr)
			break;
	}
	spin_unlock(&f2fs_list_lock);
	return freed;
}

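/* add this sb to the list walked by the shrinker, done at mount time */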
void f2fs_join_shrinker(struct f2fs_sb_info *sbi)
{
	spin_lock(&f2fs_list_lock);
	list_add_tail(&sbi->s_list, &f2fs_list);
	spin_unlock(&f2fs_list_lock);
}

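/*
 * Drop every extent cache entry and unlink the sb from the shrinker list;
 * called on the umount path before the caches go away.
 */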
void f2fs_leave_shrinker(struct f2fs_sb_info *sbi)
{
	f2fs_shrink_extent_tree(sbi, __count_extent_cache(sbi));

	spin_lock(&f2fs_list_lock);
	list_del(&sbi->s_list);
	spin_unlock(&f2fs_list_lock);
}