#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/list_bl.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/mbcache.h>

/*
 * Mbcache is a simple key-value store. Keys need not be unique, however
 * key-value pairs are expected to be unique (we use this fact in
 * mb_cache_entry_delete_block()).
 *
 * Ext2 and ext4 use this cache for deduplication of extended attribute blocks.
 * They use a hash of the block contents as a key and the block number as a
 * value. That's why keys need not be unique (different xattr blocks may end
 * up having the same hash). However, the block number always uniquely
 * identifies a cache entry.
 *
 * We provide functions for creation and removal of entries, search by key,
 * and a special "delete entry with given key-value pair" operation. A
 * fixed-size hash table is used for fast key lookups.
 */
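
/*
 * A minimal usage sketch (illustrative only; the bucket_bits value 10 and
 * the hash and bdev_block variables are placeholders, not part of this
 * file):
 *
 *	struct mb_cache *cache = mb_cache_create(10);
 *	struct mb_cache_entry *entry;
 *
 *	if (cache) {
 *		mb_cache_entry_create(cache, GFP_NOFS, hash, bdev_block);
 *		entry = mb_cache_entry_find_first(cache, hash);
 *		if (entry) {
 *			mb_cache_entry_touch(cache, entry);
 *			mb_cache_entry_put(cache, entry);
 *		}
 *		mb_cache_destroy(cache);
 *	}
 */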

struct mb_cache {
	/* Hash table of entries */
	struct hlist_bl_head	*c_hash;
	/* log2 of hash table size */
	int			c_bucket_bits;
	/* Maximum entries in cache to avoid degrading hash too much */
	int			c_max_entries;
	/* Protects c_list, c_entry_count */
	spinlock_t		c_list_lock;
	struct list_head	c_list;
	/* Number of entries in cache */
	unsigned long		c_entry_count;
	struct shrinker		c_shrink;
	/* Work for shrinking when the cache has too many entries */
	struct work_struct	c_shrink_work;
};

static struct kmem_cache *mb_entry_cache;

static unsigned long mb_cache_shrink(struct mb_cache *cache,
				     unsigned int nr_to_scan);

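/*
 * The "referenced" bit is packed into bit 0 of _e_hash_list_head together
 * with the pointer to the hash chain head the entry hangs on. A struct
 * hlist_bl_head is pointer-aligned, so bit 0 of its address is always zero
 * and can be borrowed as a flag; mb_cache_entry_head() masks the bit off
 * to recover the pointer.
 */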
static inline bool mb_cache_entry_referenced(struct mb_cache_entry *entry)
{
	return entry->_e_hash_list_head & 1;
}

static inline void mb_cache_entry_set_referenced(struct mb_cache_entry *entry)
{
	entry->_e_hash_list_head |= 1;
}

static inline void mb_cache_entry_clear_referenced(
					struct mb_cache_entry *entry)
{
	entry->_e_hash_list_head &= ~1;
}

static inline struct hlist_bl_head *mb_cache_entry_head(
					struct mb_cache_entry *entry)
{
	return (struct hlist_bl_head *)
			(entry->_e_hash_list_head & ~1);
}

/*
 * Number of entries to reclaim synchronously when there are too many entries
 * in cache
 */
#define SYNC_SHRINK_BATCH 64

/*
 * mb_cache_entry_create - create entry in cache
 * @cache - cache where the entry should be created
 * @mask - gfp mask with which the entry should be allocated
 * @key - key of the entry
 * @block - block that contains data
 *
 * Creates entry in @cache with key @key and records that data is stored in
 * block @block. The function returns -EBUSY if entry with the same key
 * and for the same block already exists in cache. Otherwise 0 is returned.
 */
int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
			  sector_t block)
{
	struct mb_cache_entry *entry, *dup;
	struct hlist_bl_node *dup_node;
	struct hlist_bl_head *head;

	/* Schedule background reclaim if there are too many entries */
	if (cache->c_entry_count >= cache->c_max_entries)
		schedule_work(&cache->c_shrink_work);
	/* Do some sync reclaim if background reclaim cannot keep up */
	if (cache->c_entry_count >= 2*cache->c_max_entries)
		mb_cache_shrink(cache, SYNC_SHRINK_BATCH);

	entry = kmem_cache_alloc(mb_entry_cache, mask);
	if (!entry)
		return -ENOMEM;

	INIT_LIST_HEAD(&entry->e_list);
	/* One ref for hash list */
	atomic_set(&entry->e_refcnt, 1);
	entry->e_key = key;
	entry->e_block = block;
	head = &cache->c_hash[hash_32(key, cache->c_bucket_bits)];
	entry->_e_hash_list_head = (unsigned long)head;
	hlist_bl_lock(head);
	hlist_bl_for_each_entry(dup, dup_node, head, e_hash_list) {
		if (dup->e_key == key && dup->e_block == block) {
			hlist_bl_unlock(head);
			kmem_cache_free(mb_entry_cache, entry);
			return -EBUSY;
		}
	}
	hlist_bl_add_head(&entry->e_hash_list, head);
	hlist_bl_unlock(head);

	spin_lock(&cache->c_list_lock);
	list_add_tail(&entry->e_list, &cache->c_list);
	/* Grab ref for LRU list */
	atomic_inc(&entry->e_refcnt);
	cache->c_entry_count++;
	spin_unlock(&cache->c_list_lock);

	return 0;
}
EXPORT_SYMBOL(mb_cache_entry_create);
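
/*
 * A sketch of the create/-EBUSY contract from a caller's side (err, ce_hash
 * and ce_block are illustrative names, not part of this API):
 *
 *	err = mb_cache_entry_create(cache, GFP_NOFS, ce_hash, ce_block);
 *	if (err && err != -EBUSY)
 *		return err;
 *	// -EBUSY just means an identical key+block pair is already cached
 */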

void __mb_cache_entry_free(struct mb_cache_entry *entry)
{
	kmem_cache_free(mb_entry_cache, entry);
}
EXPORT_SYMBOL(__mb_cache_entry_free);

static struct mb_cache_entry *__entry_find(struct mb_cache *cache,
					   struct mb_cache_entry *entry,
					   u32 key)
{
	struct mb_cache_entry *old_entry = entry;
	struct hlist_bl_node *node;
	struct hlist_bl_head *head;

	if (entry)
		head = mb_cache_entry_head(entry);
	else
		head = &cache->c_hash[hash_32(key, cache->c_bucket_bits)];
	hlist_bl_lock(head);
	if (entry && !hlist_bl_unhashed(&entry->e_hash_list))
		node = entry->e_hash_list.next;
	else
		node = hlist_bl_first(head);
	while (node) {
		entry = hlist_bl_entry(node, struct mb_cache_entry,
				       e_hash_list);
		if (entry->e_key == key) {
			atomic_inc(&entry->e_refcnt);
			goto out;
		}
		node = node->next;
	}
	entry = NULL;
out:
	hlist_bl_unlock(head);
	if (old_entry)
		mb_cache_entry_put(cache, old_entry);

	return entry;
}

/*
 * mb_cache_entry_find_first - find the first entry in cache with given key
 * @cache: cache where we should search
 * @key: key to look for
 *
 * Search in @cache for entry with key @key. Grabs reference to the first
 * entry found and returns the entry.
 */
struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache,
						 u32 key)
{
	return __entry_find(cache, NULL, key);
}
EXPORT_SYMBOL(mb_cache_entry_find_first);

/*
 * mb_cache_entry_find_next - find next entry in cache with the same key
 * @cache: cache where we should search
 * @entry: entry to start search from
 *
 * Finds next entry in the hash chain which has the same key as @entry.
 * If @entry is unhashed (which can happen when deletion of entry races
 * with the search), finds the first entry in the hash chain. The function
 * drops reference to @entry and returns with a reference to the found entry.
 */
struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache *cache,
						struct mb_cache_entry *entry)
{
	return __entry_find(cache, entry, entry->e_key);
}
EXPORT_SYMBOL(mb_cache_entry_find_next);
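
/*
 * Together the two lookup functions support the usual iteration over all
 * entries sharing a key. A minimal sketch (handle_block() is hypothetical):
 *
 *	struct mb_cache_entry *entry;
 *
 *	entry = mb_cache_entry_find_first(cache, key);
 *	while (entry) {
 *		handle_block(entry->e_block);
 *		entry = mb_cache_entry_find_next(cache, entry);
 *	}
 *
 * Each call drops the reference to the entry passed in and returns with a
 * reference to the next one, so a caller that exits the loop early must put
 * the entry it still holds.
 */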

/* mb_cache_entry_delete_block - remove information about block from cache
 * @cache - cache we work with
 * @key - key of the entry to remove
 * @block - block containing data for @key
 *
 * Remove entry with key @key and data stored in @block from cache @cache.
 */
void mb_cache_entry_delete_block(struct mb_cache *cache, u32 key,
				 sector_t block)
{
	struct hlist_bl_node *node;
	struct hlist_bl_head *head;
	struct mb_cache_entry *entry;

	head = &cache->c_hash[hash_32(key, cache->c_bucket_bits)];
	hlist_bl_lock(head);
	hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
		if (entry->e_key == key && entry->e_block == block) {
			/* We keep hash list reference to keep entry alive */
			hlist_bl_del_init(&entry->e_hash_list);
			hlist_bl_unlock(head);
			spin_lock(&cache->c_list_lock);
			if (!list_empty(&entry->e_list)) {
				list_del_init(&entry->e_list);
				cache->c_entry_count--;
				atomic_dec(&entry->e_refcnt);
			}
			spin_unlock(&cache->c_list_lock);
			mb_cache_entry_put(cache, entry);
			return;
		}
	}
	hlist_bl_unlock(head);
}
EXPORT_SYMBOL(mb_cache_entry_delete_block);

/* mb_cache_entry_touch - cache entry got used
 * @cache - cache the entry belongs to
 * @entry - entry that got used
 *
 * Marks entry as used to give it a higher chance of surviving in the cache.
 */
void mb_cache_entry_touch(struct mb_cache *cache,
			  struct mb_cache_entry *entry)
{
	mb_cache_entry_set_referenced(entry);
}
EXPORT_SYMBOL(mb_cache_entry_touch);

static unsigned long mb_cache_count(struct shrinker *shrink,
				    struct shrink_control *sc)
{
	struct mb_cache *cache = container_of(shrink, struct mb_cache,
					      c_shrink);

	return cache->c_entry_count;
}

/* Shrink number of entries in cache */
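/*
 * Eviction is a second-chance scan over c_list: an entry whose referenced
 * bit is set has the bit cleared and is moved to the list tail instead of
 * being reclaimed, so entries touched since the last scan survive one more
 * pass.
 */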
static unsigned long mb_cache_shrink(struct mb_cache *cache,
				     unsigned int nr_to_scan)
{
	struct mb_cache_entry *entry;
	struct hlist_bl_head *head;
	unsigned int shrunk = 0;

	spin_lock(&cache->c_list_lock);
	while (nr_to_scan-- && !list_empty(&cache->c_list)) {
		entry = list_first_entry(&cache->c_list,
					 struct mb_cache_entry, e_list);
		if (mb_cache_entry_referenced(entry)) {
			mb_cache_entry_clear_referenced(entry);
			list_move_tail(&entry->e_list, &cache->c_list);
			continue;
		}
		list_del_init(&entry->e_list);
		cache->c_entry_count--;
		/*
		 * We keep LRU list reference so that entry doesn't go away
		 * from under us.
		 */
		spin_unlock(&cache->c_list_lock);
		head = mb_cache_entry_head(entry);
		hlist_bl_lock(head);
		if (!hlist_bl_unhashed(&entry->e_hash_list)) {
			hlist_bl_del_init(&entry->e_hash_list);
			atomic_dec(&entry->e_refcnt);
		}
		hlist_bl_unlock(head);
		if (mb_cache_entry_put(cache, entry))
			shrunk++;
		cond_resched();
		spin_lock(&cache->c_list_lock);
	}
	spin_unlock(&cache->c_list_lock);

	return shrunk;
}

static unsigned long mb_cache_scan(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	int nr_to_scan = sc->nr_to_scan;
	struct mb_cache *cache = container_of(shrink, struct mb_cache,
					      c_shrink);
	return mb_cache_shrink(cache, nr_to_scan);
}

/* We shrink 1/X of the cache when we have too many entries in it */
#define SHRINK_DIVISOR 16

static void mb_cache_shrink_worker(struct work_struct *work)
{
	struct mb_cache *cache = container_of(work, struct mb_cache,
					      c_shrink_work);
	mb_cache_shrink(cache, cache->c_max_entries / SHRINK_DIVISOR);
}

/*
 * mb_cache_create - create cache
 * @bucket_bits: log2 of the hash table size
 *
 * Create cache for keys with a hash table of 2^bucket_bits buckets.
 */
struct mb_cache *mb_cache_create(int bucket_bits)
{
	struct mb_cache *cache;
	int bucket_count = 1 << bucket_bits;
	int i;

	if (!try_module_get(THIS_MODULE))
		return NULL;

	cache = kzalloc(sizeof(struct mb_cache), GFP_KERNEL);
	if (!cache)
		goto err_out;
	cache->c_bucket_bits = bucket_bits;
	cache->c_max_entries = bucket_count << 4;
	INIT_LIST_HEAD(&cache->c_list);
	spin_lock_init(&cache->c_list_lock);
	cache->c_hash = kmalloc(bucket_count * sizeof(struct hlist_bl_head),
				GFP_KERNEL);
	if (!cache->c_hash) {
		kfree(cache);
		goto err_out;
	}
	for (i = 0; i < bucket_count; i++)
		INIT_HLIST_BL_HEAD(&cache->c_hash[i]);

	cache->c_shrink.count_objects = mb_cache_count;
	cache->c_shrink.scan_objects = mb_cache_scan;
	cache->c_shrink.seeks = DEFAULT_SEEKS;
	register_shrinker(&cache->c_shrink);

	INIT_WORK(&cache->c_shrink_work, mb_cache_shrink_worker);

	return cache;

err_out:
	module_put(THIS_MODULE);
	return NULL;
}
EXPORT_SYMBOL(mb_cache_create);
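
/*
 * A sketch of instantiation, e.g. at filesystem mount time (bucket_bits of
 * 10 is only an example; it yields 1024 buckets and, via the << 4 above, a
 * soft cap of 16384 entries):
 *
 *	struct mb_cache *cache = mb_cache_create(10);
 *	if (!cache)
 *		return -ENOMEM;
 */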

/*
 * mb_cache_destroy - destroy cache
 * @cache: the cache to destroy
 *
 * Free all entries in cache and cache itself. Caller must make sure nobody
 * (except shrinker) can reach @cache when calling this.
 */
void mb_cache_destroy(struct mb_cache *cache)
{
	struct mb_cache_entry *entry, *next;

	unregister_shrinker(&cache->c_shrink);

	/*
	 * We don't bother with any locking. Cache must not be used at this
	 * point.
	 */
	list_for_each_entry_safe(entry, next, &cache->c_list, e_list) {
		if (!hlist_bl_unhashed(&entry->e_hash_list)) {
			hlist_bl_del_init(&entry->e_hash_list);
			atomic_dec(&entry->e_refcnt);
		} else
			WARN_ON(1);
		list_del(&entry->e_list);
		WARN_ON(atomic_read(&entry->e_refcnt) != 1);
		mb_cache_entry_put(cache, entry);
	}
	kfree(cache->c_hash);
	kfree(cache);
	module_put(THIS_MODULE);
}
EXPORT_SYMBOL(mb_cache_destroy);

static int __init mbcache_init(void)
{
	mb_entry_cache = kmem_cache_create("mbcache",
				sizeof(struct mb_cache_entry), 0,
				SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
	BUG_ON(!mb_entry_cache);
	return 0;
}

static void __exit mbcache_exit(void)
{
	kmem_cache_destroy(mb_entry_cache);
}

module_init(mbcache_init)
module_exit(mbcache_exit)

MODULE_AUTHOR("Jan Kara <jack@suse.cz>");
MODULE_DESCRIPTION("Meta block cache (for extended attributes)");
MODULE_LICENSE("GPL");