/*
 * linux/fs/mbcache.c
 * (C) 2001-2002 Andreas Gruenbacher, <a.gruenbacher@computer.org>
 */

/*
 * Filesystem Meta Information Block Cache (mbcache)
 *
 * The mbcache caches blocks of block devices that need to be located
 * by their device/block number, as well as by other criteria (such
 * as the block's contents).
 *
 * There can only be one cache entry in a cache per device and block number.
 * Additional indexes need not be unique in this sense. The number of
 * additional indexes (=other criteria) can be hardwired at compile time
 * or specified at cache create time.
 *
 * Each cache entry is of fixed size. An entry may be `valid' or `invalid'
 * in the cache. A valid entry is in the main hash tables of the cache,
 * and may also be in the lru list. An invalid entry is not in any hashes
 * or lists.
 *
 * A valid cache entry is only in the lru list if no handles refer to it.
 * Invalid cache entries will be freed when the last handle to the cache
 * entry is released. Entries that cannot be freed immediately are put
 * back on the lru list.
 */

/*
 * Lock descriptions and usage:
 *
 * Each hash chain of both the block and index hash tables now contains
 * a built-in lock used to serialize accesses to the hash chain.
 *
 * Accesses to global data structures mb_cache_list and mb_cache_lru_list
 * are serialized via the global spinlock mb_cache_spinlock.
 *
 * Each mb_cache_entry contains a spinlock, e_entry_lock, to serialize
 * accesses to its local data, such as e_used and e_queued.
 *
 * Lock ordering:
 *
 * Each block hash chain's lock has the highest lock order, followed by an
 * index hash chain's lock, mb_cache_bg_lock (used to implement
 * mb_cache_entry's lock), and mb_cache_spinlock, with the lowest order
 * (see the sketch following this comment). While holding either a block
 * or index hash chain lock, a thread can acquire an mb_cache_bg_lock,
 * which in turn can also acquire mb_cache_spinlock.
 *
 * Synchronization:
 *
 * Since both mb_cache_entry_get and mb_cache_entry_find scan the block
 * and index hash chains, they need to lock the corresponding hash chain.
 * For each mb_cache_entry within the chain, they need to lock the
 * mb_cache_entry to prevent any simultaneous release or free on the entry
 * and also to serialize accesses to either the e_used or the e_queued
 * member of the entry.
 *
 * To avoid having a dangling reference to an already freed
 * mb_cache_entry, an mb_cache_entry is only freed when it is not on a
 * block hash chain and also no longer being referenced, i.e. when both
 * e_used and e_queued are 0. When an mb_cache_entry is explicitly freed
 * it is first removed from a block hash chain.
 */

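/*
 * The ordering above, as an illustrative sketch (not actual code from
 * this file; "ce" stands for any entry reachable through the chains, and
 * no single code path below takes all four locks at once):
 */
#if 0
	hlist_bl_lock(ce->e_block_hash_p);	/* 1: block hash chain lock */
	hlist_bl_lock(ce->e_index_hash_p);	/* 2: index hash chain lock */
	__spin_lock_mb_cache_entry(ce);		/* 3: entry lock (mb_cache_bg_lock) */
	spin_lock(&mb_cache_spinlock);		/* 4: global list/lru lock */
	spin_unlock(&mb_cache_spinlock);
	__spin_unlock_mb_cache_entry(ce);
	hlist_bl_unlock(ce->e_index_hash_p);
	hlist_bl_unlock(ce->e_block_hash_p);
#endif
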
#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/hash.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list_bl.h>
#include <linux/mbcache.h>
#include <linux/init.h>
#include <linux/blockgroup_lock.h>

#ifdef MB_CACHE_DEBUG
# define mb_debug(f...) do { \
		printk(KERN_DEBUG f); \
		printk("\n"); \
	} while (0)
# define mb_assert(c) do { if (!(c)) \
		printk(KERN_ERR "assertion " #c " failed\n"); \
	} while (0)
#else
# define mb_debug(f...) do { } while (0)
# define mb_assert(c) do { } while (0)
#endif
#define mb_error(f...) do { \
		printk(KERN_ERR f); \
		printk("\n"); \
	} while (0)

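/*
 * Handle accounting, as implemented by the alloc/get/find/release
 * functions below: each reader handle adds 1 to e_used, and a writer
 * handle adds 1 + MB_CACHE_WRITER. Thus e_used >= MB_CACHE_WRITER means
 * a writer holds the entry, and e_used == 0 means no handles are
 * outstanding.
 */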
#define MB_CACHE_WRITER ((unsigned short)~0U >> 1)

#define MB_CACHE_ENTRY_LOCK_BITS	__builtin_log2(NR_BG_LOCKS)
#define MB_CACHE_ENTRY_LOCK_INDEX(ce)	\
	(hash_long((unsigned long)ce, MB_CACHE_ENTRY_LOCK_BITS))

static DECLARE_WAIT_QUEUE_HEAD(mb_cache_queue);
static struct blockgroup_lock *mb_cache_bg_lock;

MODULE_AUTHOR("Andreas Gruenbacher <a.gruenbacher@computer.org>");
MODULE_DESCRIPTION("Meta block cache (for extended attributes)");
MODULE_LICENSE("GPL");

EXPORT_SYMBOL(mb_cache_create);
EXPORT_SYMBOL(mb_cache_shrink);
EXPORT_SYMBOL(mb_cache_destroy);
EXPORT_SYMBOL(mb_cache_entry_alloc);
EXPORT_SYMBOL(mb_cache_entry_insert);
EXPORT_SYMBOL(mb_cache_entry_release);
EXPORT_SYMBOL(mb_cache_entry_free);
EXPORT_SYMBOL(mb_cache_entry_get);
#if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)
EXPORT_SYMBOL(mb_cache_entry_find_first);
EXPORT_SYMBOL(mb_cache_entry_find_next);
#endif

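/*
 * Typical lifecycle of a cache entry, as a usage sketch (this block is
 * illustrative and not part of the original file; the cache name "demo",
 * the bucket count of 2^10, and the my_bdev/my_block/my_key parameters
 * are hypothetical):
 */
#if 0
static void mbcache_usage_sketch(struct block_device *my_bdev,
				 sector_t my_block, unsigned int my_key)
{
	struct mb_cache *cache;
	struct mb_cache_entry *ce;

	cache = mb_cache_create("demo", 10);
	if (!cache)
		return;

	ce = mb_cache_entry_alloc(cache, GFP_KERNEL);
	if (ce) {
		/* -EBUSY means another thread inserted device+block first. */
		int err = mb_cache_entry_insert(ce, my_bdev, my_block,
						my_key);
		/*
		 * Dropping the handle frees the entry if the insert failed
		 * (it is still invalid), else parks it on the lru list.
		 */
		mb_cache_entry_release(ce);
		(void)err;
	}

	/* Look the entry up again by device and block number. */
	ce = mb_cache_entry_get(cache, my_bdev, my_block);
	if (ce)
		mb_cache_entry_release(ce);

	mb_cache_destroy(cache);
}
#endif
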
/*
 * Global data: list of all mbcaches, the lru list, and a spinlock for
 * accessing cache data structures on SMP machines. The lru list is
 * global across all mbcaches.
 */

static LIST_HEAD(mb_cache_list);
static LIST_HEAD(mb_cache_lru_list);
static DEFINE_SPINLOCK(mb_cache_spinlock);

static inline void
__spin_lock_mb_cache_entry(struct mb_cache_entry *ce)
{
	spin_lock(bgl_lock_ptr(mb_cache_bg_lock,
			       MB_CACHE_ENTRY_LOCK_INDEX(ce)));
}

static inline void
__spin_unlock_mb_cache_entry(struct mb_cache_entry *ce)
{
	spin_unlock(bgl_lock_ptr(mb_cache_bg_lock,
				 MB_CACHE_ENTRY_LOCK_INDEX(ce)));
}

static inline int
__mb_cache_entry_is_block_hashed(struct mb_cache_entry *ce)
{
	return !hlist_bl_unhashed(&ce->e_block_list);
}

static inline void
__mb_cache_entry_unhash_block(struct mb_cache_entry *ce)
{
	if (__mb_cache_entry_is_block_hashed(ce))
		hlist_bl_del_init(&ce->e_block_list);
}

static inline int
__mb_cache_entry_is_index_hashed(struct mb_cache_entry *ce)
{
	return !hlist_bl_unhashed(&ce->e_index.o_list);
}

static inline void
__mb_cache_entry_unhash_index(struct mb_cache_entry *ce)
{
	if (__mb_cache_entry_is_index_hashed(ce))
		hlist_bl_del_init(&ce->e_index.o_list);
}

/*
 * __mb_cache_entry_unhash_unlock()
 *
 * This function is called to remove the entry from both the block and
 * the index hash chains.
 * It assumes both the block and index hash chains are locked upon entry.
 * It also unlocks both hash chains upon exit.
 */
static inline void
__mb_cache_entry_unhash_unlock(struct mb_cache_entry *ce)
{
	__mb_cache_entry_unhash_index(ce);
	hlist_bl_unlock(ce->e_index_hash_p);
	__mb_cache_entry_unhash_block(ce);
	hlist_bl_unlock(ce->e_block_hash_p);
}

static void
__mb_cache_entry_forget(struct mb_cache_entry *ce, gfp_t gfp_mask)
{
	struct mb_cache *cache = ce->e_cache;

	mb_assert(!(ce->e_used || ce->e_queued || atomic_read(&ce->e_refcnt)));
	kmem_cache_free(cache->c_entry_cache, ce);
	atomic_dec(&cache->c_entry_count);
}

static void
__mb_cache_entry_release(struct mb_cache_entry *ce)
{
	/* First lock the entry to serialize access to its local data. */
	__spin_lock_mb_cache_entry(ce);
	/* Wake up all processes queuing for this cache entry. */
	if (ce->e_queued)
		wake_up_all(&mb_cache_queue);
	if (ce->e_used >= MB_CACHE_WRITER)
		ce->e_used -= MB_CACHE_WRITER;
	/*
	 * Make sure that all cache entries on lru_list have
	 * both e_used and e_queued of 0.
	 */
	ce->e_used--;
	if (!(ce->e_used || ce->e_queued || atomic_read(&ce->e_refcnt))) {
		if (!__mb_cache_entry_is_block_hashed(ce)) {
			__spin_unlock_mb_cache_entry(ce);
			goto forget;
		}
		/*
		 * Need access to the lru list: mb_cache_spinlock has the
		 * lowest lock order, so it may be taken while still
		 * holding the entry lock.
		 */
		spin_lock(&mb_cache_spinlock);
		if (list_empty(&ce->e_lru_list))
			list_add_tail(&ce->e_lru_list, &mb_cache_lru_list);
		spin_unlock(&mb_cache_spinlock);
	}
	__spin_unlock_mb_cache_entry(ce);
	return;
forget:
	mb_assert(list_empty(&ce->e_lru_list));
	__mb_cache_entry_forget(ce, GFP_KERNEL);
}

/*
 * mb_cache_shrink_scan()  memory pressure callback
 *
 * This function is called by the kernel memory management when memory
 * gets low.
 *
 * @shrink: (ignored)
 * @sc: shrink_control passed from reclaim
 *
 * Returns the number of objects freed.
 */
static unsigned long
mb_cache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	LIST_HEAD(free_list);
	struct mb_cache_entry *entry, *tmp;
	int nr_to_scan = sc->nr_to_scan;
	gfp_t gfp_mask = sc->gfp_mask;
	unsigned long freed = 0;

	mb_debug("trying to free %d entries", nr_to_scan);
	spin_lock(&mb_cache_spinlock);
	while ((nr_to_scan-- > 0) && !list_empty(&mb_cache_lru_list)) {
		struct mb_cache_entry *ce =
			list_entry(mb_cache_lru_list.next,
				   struct mb_cache_entry, e_lru_list);
		list_del_init(&ce->e_lru_list);
		if (ce->e_used || ce->e_queued || atomic_read(&ce->e_refcnt))
			continue;
		spin_unlock(&mb_cache_spinlock);
		/* Prevent any find or get operation on the entry */
		hlist_bl_lock(ce->e_block_hash_p);
		hlist_bl_lock(ce->e_index_hash_p);
		/* Ignore if it is touched by a find/get */
		if (ce->e_used || ce->e_queued || atomic_read(&ce->e_refcnt) ||
		    !list_empty(&ce->e_lru_list)) {
			hlist_bl_unlock(ce->e_index_hash_p);
			hlist_bl_unlock(ce->e_block_hash_p);
			spin_lock(&mb_cache_spinlock);
			continue;
		}
		__mb_cache_entry_unhash_unlock(ce);
		list_add_tail(&ce->e_lru_list, &free_list);
		spin_lock(&mb_cache_spinlock);
	}
	spin_unlock(&mb_cache_spinlock);

	list_for_each_entry_safe(entry, tmp, &free_list, e_lru_list) {
		__mb_cache_entry_forget(entry, gfp_mask);
		freed++;
	}
	return freed;
}

static unsigned long
mb_cache_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	struct mb_cache *cache;
	unsigned long count = 0;

	spin_lock(&mb_cache_spinlock);
	list_for_each_entry(cache, &mb_cache_list, c_cache_list) {
		mb_debug("cache %s (%d)", cache->c_name,
			 atomic_read(&cache->c_entry_count));
		count += atomic_read(&cache->c_entry_count);
	}
	spin_unlock(&mb_cache_spinlock);

	return vfs_pressure_ratio(count);
}

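/*
 * The shrinker interface splits memory pressure handling in two:
 * mb_cache_shrink_count() reports the total entry count across all
 * caches (scaled through vfs_pressure_ratio()), and
 * mb_cache_shrink_scan() does the actual freeing of up to
 * sc->nr_to_scan unused entries from the lru list.
 */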
static struct shrinker mb_cache_shrinker = {
	.count_objects = mb_cache_shrink_count,
	.scan_objects = mb_cache_shrink_scan,
	.seeks = DEFAULT_SEEKS,
};

/*
 * mb_cache_create()  create a new cache
 *
 * All entries in one cache are equal size. Cache entries may be from
 * multiple devices. Memory pressure on all caches is handled by the
 * single shrinker registered when the module is initialized. Returns
 * NULL if no more memory was available.
 *
 * @name: name of the cache (informal)
 * @bucket_bits: log2(number of hash buckets)
 */
struct mb_cache *
mb_cache_create(const char *name, int bucket_bits)
{
	int n, bucket_count = 1 << bucket_bits;
	struct mb_cache *cache = NULL;

	if (!mb_cache_bg_lock) {
		mb_cache_bg_lock = kmalloc(sizeof(struct blockgroup_lock),
					   GFP_KERNEL);
		if (!mb_cache_bg_lock)
			return NULL;
		bgl_lock_init(mb_cache_bg_lock);
	}

	cache = kmalloc(sizeof(struct mb_cache), GFP_KERNEL);
	if (!cache)
		return NULL;
	cache->c_name = name;
	atomic_set(&cache->c_entry_count, 0);
	cache->c_bucket_bits = bucket_bits;
	cache->c_block_hash = kmalloc(bucket_count *
		sizeof(struct hlist_bl_head), GFP_KERNEL);
	if (!cache->c_block_hash)
		goto fail;
	for (n = 0; n < bucket_count; n++)
		INIT_HLIST_BL_HEAD(&cache->c_block_hash[n]);
	cache->c_index_hash = kmalloc(bucket_count *
		sizeof(struct hlist_bl_head), GFP_KERNEL);
	if (!cache->c_index_hash)
		goto fail;
	for (n = 0; n < bucket_count; n++)
		INIT_HLIST_BL_HEAD(&cache->c_index_hash[n]);
	cache->c_entry_cache = kmem_cache_create(name,
		sizeof(struct mb_cache_entry), 0,
		SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
	if (!cache->c_entry_cache)
		goto fail2;

	/*
	 * Set an upper limit on the number of cache entries so that the hash
	 * chains won't grow too long.
	 */
	cache->c_max_entries = bucket_count << 4;

	spin_lock(&mb_cache_spinlock);
	list_add(&cache->c_cache_list, &mb_cache_list);
	spin_unlock(&mb_cache_spinlock);
	return cache;

fail2:
	kfree(cache->c_index_hash);

fail:
	kfree(cache->c_block_hash);
	kfree(cache);
	return NULL;
}


/*
 * mb_cache_shrink()
 *
 * Removes all cache entries of a device from the cache. All cache entries
 * currently in use cannot be freed, and thus remain in the cache. All others
 * are freed.
 *
 * @bdev: which device's cache entries to shrink
 */
void
mb_cache_shrink(struct block_device *bdev)
{
	LIST_HEAD(free_list);
	struct list_head *l;
	struct mb_cache_entry *ce, *tmp;

	l = &mb_cache_lru_list;
	spin_lock(&mb_cache_spinlock);
	while (!list_is_last(l, &mb_cache_lru_list)) {
		l = l->next;
		ce = list_entry(l, struct mb_cache_entry, e_lru_list);
		if (ce->e_bdev == bdev) {
			list_del_init(&ce->e_lru_list);
			if (ce->e_used || ce->e_queued ||
			    atomic_read(&ce->e_refcnt))
				continue;
			spin_unlock(&mb_cache_spinlock);
			/*
			 * Prevent any find or get operation on the entry.
			 */
			hlist_bl_lock(ce->e_block_hash_p);
			hlist_bl_lock(ce->e_index_hash_p);
			/* Ignore if it is touched by a find/get */
			if (ce->e_used || ce->e_queued ||
			    atomic_read(&ce->e_refcnt) ||
			    !list_empty(&ce->e_lru_list)) {
				hlist_bl_unlock(ce->e_index_hash_p);
				hlist_bl_unlock(ce->e_block_hash_p);
				l = &mb_cache_lru_list;
				spin_lock(&mb_cache_spinlock);
				continue;
			}
			__mb_cache_entry_unhash_unlock(ce);
			mb_assert(!(ce->e_used || ce->e_queued ||
				    atomic_read(&ce->e_refcnt)));
			list_add_tail(&ce->e_lru_list, &free_list);
			l = &mb_cache_lru_list;
			spin_lock(&mb_cache_spinlock);
		}
	}
	spin_unlock(&mb_cache_spinlock);

	list_for_each_entry_safe(ce, tmp, &free_list, e_lru_list) {
		__mb_cache_entry_forget(ce, GFP_KERNEL);
	}
}


/*
 * mb_cache_destroy()
 *
 * Shrinks the cache to its minimum possible size (hopefully 0 entries),
 * and then destroys it. The shrinker that serves all mbcaches stays
 * registered until the module is unloaded.
 */
void
mb_cache_destroy(struct mb_cache *cache)
{
	LIST_HEAD(free_list);
	struct mb_cache_entry *ce, *tmp;

	spin_lock(&mb_cache_spinlock);
	list_for_each_entry_safe(ce, tmp, &mb_cache_lru_list, e_lru_list) {
		if (ce->e_cache == cache)
			list_move_tail(&ce->e_lru_list, &free_list);
	}
	list_del(&cache->c_cache_list);
	spin_unlock(&mb_cache_spinlock);

	list_for_each_entry_safe(ce, tmp, &free_list, e_lru_list) {
		list_del_init(&ce->e_lru_list);
		/*
		 * Prevent any find or get operation on the entry.
		 */
		hlist_bl_lock(ce->e_block_hash_p);
		hlist_bl_lock(ce->e_index_hash_p);
		mb_assert(!(ce->e_used || ce->e_queued ||
			    atomic_read(&ce->e_refcnt)));
		__mb_cache_entry_unhash_unlock(ce);
		__mb_cache_entry_forget(ce, GFP_KERNEL);
	}

	if (atomic_read(&cache->c_entry_count) > 0) {
		mb_error("cache %s: %d orphaned entries",
			 cache->c_name,
			 atomic_read(&cache->c_entry_count));
	}

	kfree(cache->c_index_hash);
	kfree(cache->c_block_hash);
	kfree(cache);
}

/*
 * mb_cache_entry_alloc()
 *
 * Allocates a new cache entry. The new entry will not be valid initially,
 * and thus cannot be looked up yet. It should be filled with data, and
 * then inserted into the cache using mb_cache_entry_insert(). Returns NULL
 * if no more memory was available.
 */
struct mb_cache_entry *
mb_cache_entry_alloc(struct mb_cache *cache, gfp_t gfp_flags)
{
	struct mb_cache_entry *ce;

	if (atomic_read(&cache->c_entry_count) >= cache->c_max_entries) {
		struct list_head *l;

		l = &mb_cache_lru_list;
		spin_lock(&mb_cache_spinlock);
		while (!list_is_last(l, &mb_cache_lru_list)) {
			l = l->next;
			ce = list_entry(l, struct mb_cache_entry, e_lru_list);
			if (ce->e_cache == cache) {
				list_del_init(&ce->e_lru_list);
				if (ce->e_used || ce->e_queued ||
				    atomic_read(&ce->e_refcnt))
					continue;
				spin_unlock(&mb_cache_spinlock);
				/*
				 * Prevent any find or get operation on the
				 * entry.
				 */
				hlist_bl_lock(ce->e_block_hash_p);
				hlist_bl_lock(ce->e_index_hash_p);
				/* Ignore if it is touched by a find/get */
				if (ce->e_used || ce->e_queued ||
				    atomic_read(&ce->e_refcnt) ||
				    !list_empty(&ce->e_lru_list)) {
					hlist_bl_unlock(ce->e_index_hash_p);
					hlist_bl_unlock(ce->e_block_hash_p);
					l = &mb_cache_lru_list;
					spin_lock(&mb_cache_spinlock);
					continue;
				}
				mb_assert(list_empty(&ce->e_lru_list));
				mb_assert(!(ce->e_used || ce->e_queued ||
					    atomic_read(&ce->e_refcnt)));
				__mb_cache_entry_unhash_unlock(ce);
				goto found;
			}
		}
		spin_unlock(&mb_cache_spinlock);
	}

	ce = kmem_cache_alloc(cache->c_entry_cache, gfp_flags);
	if (!ce)
		return NULL;
	atomic_inc(&cache->c_entry_count);
	INIT_LIST_HEAD(&ce->e_lru_list);
	INIT_HLIST_BL_NODE(&ce->e_block_list);
	INIT_HLIST_BL_NODE(&ce->e_index.o_list);
	ce->e_cache = cache;
	ce->e_queued = 0;
	atomic_set(&ce->e_refcnt, 0);
found:
	ce->e_block_hash_p = &cache->c_block_hash[0];
	ce->e_index_hash_p = &cache->c_index_hash[0];
	ce->e_used = 1 + MB_CACHE_WRITER;
	return ce;
}


/*
 * mb_cache_entry_insert()
 *
 * Inserts an entry that was allocated using mb_cache_entry_alloc() into
 * the cache. After this, the cache entry can be looked up, but is not yet
 * in the lru list as the caller still holds a handle to it. Returns 0 on
 * success, or -EBUSY if a cache entry for that device + block exists
 * already (this may happen after a failed lookup, but when another process
 * has inserted the same cache entry in the meantime).
 *
 * @bdev: device the cache entry belongs to
 * @block: block number
 * @key: lookup key
 */
int
mb_cache_entry_insert(struct mb_cache_entry *ce, struct block_device *bdev,
		      sector_t block, unsigned int key)
{
	struct mb_cache *cache = ce->e_cache;
	unsigned int bucket;
	struct hlist_bl_node *l;
	struct hlist_bl_head *block_hash_p;
	struct hlist_bl_head *index_hash_p;
	struct mb_cache_entry *lce;

	mb_assert(ce);
	bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
			   cache->c_bucket_bits);
	block_hash_p = &cache->c_block_hash[bucket];
	hlist_bl_lock(block_hash_p);
	hlist_bl_for_each_entry(lce, l, block_hash_p, e_block_list) {
		if (lce->e_bdev == bdev && lce->e_block == block) {
			hlist_bl_unlock(block_hash_p);
			return -EBUSY;
		}
	}
	mb_assert(!__mb_cache_entry_is_block_hashed(ce));
	__mb_cache_entry_unhash_block(ce);
	__mb_cache_entry_unhash_index(ce);
	ce->e_bdev = bdev;
	ce->e_block = block;
	ce->e_block_hash_p = block_hash_p;
	ce->e_index.o_key = key;
	hlist_bl_add_head(&ce->e_block_list, block_hash_p);
	hlist_bl_unlock(block_hash_p);
	bucket = hash_long(key, cache->c_bucket_bits);
	index_hash_p = &cache->c_index_hash[bucket];
	hlist_bl_lock(index_hash_p);
	ce->e_index_hash_p = index_hash_p;
	hlist_bl_add_head(&ce->e_index.o_list, index_hash_p);
	hlist_bl_unlock(index_hash_p);
	return 0;
}

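/*
 * Note on the two-phase hashing in mb_cache_entry_insert() above: the
 * entry is published on the block hash chain before it is added to the
 * index hash chain, so a concurrent mb_cache_entry_get() can already see
 * it while an mb_cache_entry_find_first() on the key may still briefly
 * miss it.
 */
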
/*
 * mb_cache_entry_release()
 *
 * Release a handle to a cache entry. When the last handle to a cache entry
 * is released it is either freed (if it is invalid) or otherwise inserted
 * into the lru list.
 */
void
mb_cache_entry_release(struct mb_cache_entry *ce)
{
	__mb_cache_entry_release(ce);
}


/*
 * mb_cache_entry_free()
 *
 * Invalidate a cache entry (remove it from both hash chains) and release
 * the handle to it. The entry can no longer be looked up and is freed as
 * soon as its last handle is released.
 */
void
mb_cache_entry_free(struct mb_cache_entry *ce)
{
	mb_assert(ce);
	mb_assert(list_empty(&ce->e_lru_list));
	hlist_bl_lock(ce->e_index_hash_p);
	__mb_cache_entry_unhash_index(ce);
	hlist_bl_unlock(ce->e_index_hash_p);
	hlist_bl_lock(ce->e_block_hash_p);
	__mb_cache_entry_unhash_block(ce);
	hlist_bl_unlock(ce->e_block_hash_p);
	__mb_cache_entry_release(ce);
}


/*
 * mb_cache_entry_get()
 *
 * Get a cache entry by device / block number. (There can only be one entry
 * in the cache per device and block.) Returns NULL if no such cache entry
 * exists. The returned cache entry is locked for exclusive access ("single
 * writer").
 */
struct mb_cache_entry *
mb_cache_entry_get(struct mb_cache *cache, struct block_device *bdev,
		   sector_t block)
{
	unsigned int bucket;
	struct hlist_bl_node *l;
	struct mb_cache_entry *ce;
	struct hlist_bl_head *block_hash_p;

	bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
			   cache->c_bucket_bits);
	block_hash_p = &cache->c_block_hash[bucket];
	/* First serialize access to the hash chain corresponding to the
	 * block. */
	hlist_bl_lock(block_hash_p);
	hlist_bl_for_each_entry(ce, l, block_hash_p, e_block_list) {
		mb_assert(ce->e_block_hash_p == block_hash_p);
		if (ce->e_bdev == bdev && ce->e_block == block) {
			/*
			 * Prevent a free from removing the entry.
			 */
			atomic_inc(&ce->e_refcnt);
			hlist_bl_unlock(block_hash_p);
			__spin_lock_mb_cache_entry(ce);
			atomic_dec(&ce->e_refcnt);
			if (ce->e_used > 0) {
				DEFINE_WAIT(wait);
				while (ce->e_used > 0) {
					ce->e_queued++;
					prepare_to_wait(&mb_cache_queue, &wait,
							TASK_UNINTERRUPTIBLE);
					__spin_unlock_mb_cache_entry(ce);
					schedule();
					__spin_lock_mb_cache_entry(ce);
					ce->e_queued--;
				}
				finish_wait(&mb_cache_queue, &wait);
			}
			ce->e_used += 1 + MB_CACHE_WRITER;
			__spin_unlock_mb_cache_entry(ce);

			if (!list_empty(&ce->e_lru_list)) {
				spin_lock(&mb_cache_spinlock);
				list_del_init(&ce->e_lru_list);
				spin_unlock(&mb_cache_spinlock);
			}
			if (!__mb_cache_entry_is_block_hashed(ce)) {
				__mb_cache_entry_release(ce);
				return NULL;
			}
			return ce;
		}
	}
	hlist_bl_unlock(block_hash_p);
	return NULL;
}

#if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)

static struct mb_cache_entry *
__mb_cache_entry_find(struct hlist_bl_node *l, struct hlist_bl_head *head,
		      struct block_device *bdev, unsigned int key)
{
	/* The index hash chain is already locked by the caller. */
	while (l != NULL) {
		struct mb_cache_entry *ce =
			hlist_bl_entry(l, struct mb_cache_entry,
				       e_index.o_list);
		mb_assert(ce->e_index_hash_p == head);
		if (ce->e_bdev == bdev && ce->e_index.o_key == key) {
			/*
			 * Prevent a free from removing the entry.
			 */
			atomic_inc(&ce->e_refcnt);
			hlist_bl_unlock(head);
			__spin_lock_mb_cache_entry(ce);
			atomic_dec(&ce->e_refcnt);
			ce->e_used++;
			/* Incrementing before holding the lock gives readers
			   priority over writers. */
			if (ce->e_used >= MB_CACHE_WRITER) {
				DEFINE_WAIT(wait);

				while (ce->e_used >= MB_CACHE_WRITER) {
					ce->e_queued++;
					prepare_to_wait(&mb_cache_queue, &wait,
							TASK_UNINTERRUPTIBLE);
					__spin_unlock_mb_cache_entry(ce);
					schedule();
					__spin_lock_mb_cache_entry(ce);
					ce->e_queued--;
				}
				finish_wait(&mb_cache_queue, &wait);
			}
			__spin_unlock_mb_cache_entry(ce);
			if (!list_empty(&ce->e_lru_list)) {
				spin_lock(&mb_cache_spinlock);
				list_del_init(&ce->e_lru_list);
				spin_unlock(&mb_cache_spinlock);
			}
			if (!__mb_cache_entry_is_block_hashed(ce)) {
				__mb_cache_entry_release(ce);
				return ERR_PTR(-EAGAIN);
			}
			return ce;
		}
		l = l->next;
	}
	hlist_bl_unlock(head);
	return NULL;
}


/*
 * mb_cache_entry_find_first()
 *
 * Find the first cache entry on a given device with a certain key in
 * an additional index. Additional matches can be found with
 * mb_cache_entry_find_next(). Returns NULL if no match was found. The
 * returned cache entry is locked for shared access ("multiple readers").
 *
 * @cache: the cache to search
 * @bdev: the device the cache entry should belong to
 * @key: the key in the index
 */
struct mb_cache_entry *
mb_cache_entry_find_first(struct mb_cache *cache, struct block_device *bdev,
			  unsigned int key)
{
	unsigned int bucket = hash_long(key, cache->c_bucket_bits);
	struct hlist_bl_node *l;
	struct mb_cache_entry *ce = NULL;
	struct hlist_bl_head *index_hash_p;

	index_hash_p = &cache->c_index_hash[bucket];
	hlist_bl_lock(index_hash_p);
	if (!hlist_bl_empty(index_hash_p)) {
		l = hlist_bl_first(index_hash_p);
		ce = __mb_cache_entry_find(l, index_hash_p, bdev, key);
	} else
		hlist_bl_unlock(index_hash_p);
	return ce;
}


/*
 * mb_cache_entry_find_next()
 *
 * Find the next cache entry on a given device with a certain key in an
 * additional index. Returns NULL if no match could be found. The previous
 * entry is automatically released, so that mb_cache_entry_find_next() can
 * be called like this:
 *
 * entry = mb_cache_entry_find_first();
 * while (entry) {
 *	...
 *	entry = mb_cache_entry_find_next(entry, ...);
 * }
 *
 * @prev: The previous match
 * @bdev: the device the cache entry should belong to
 * @key: the key in the index
 */
struct mb_cache_entry *
mb_cache_entry_find_next(struct mb_cache_entry *prev,
			 struct block_device *bdev, unsigned int key)
{
	struct mb_cache *cache = prev->e_cache;
	unsigned int bucket = hash_long(key, cache->c_bucket_bits);
	struct hlist_bl_node *l;
	struct mb_cache_entry *ce;
	struct hlist_bl_head *index_hash_p;

	index_hash_p = &cache->c_index_hash[bucket];
	mb_assert(prev->e_index_hash_p == index_hash_p);
	hlist_bl_lock(index_hash_p);
	mb_assert(!hlist_bl_empty(index_hash_p));
	l = prev->e_index.o_list.next;
	ce = __mb_cache_entry_find(l, index_hash_p, bdev, key);
	__mb_cache_entry_release(prev);
	return ce;
}

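/*
 * An illustrative reader loop over all entries that share a key (a hedged
 * sketch, not part of the original file; the entry_matches() predicate
 * and the my_bdev/my_key parameters are hypothetical).
 * mb_cache_entry_find_next() releases the previous entry itself, so only
 * a handle that the loop exits with must be released by the caller:
 */
#if 0
static struct mb_cache_entry *
mbcache_find_sketch(struct mb_cache *cache, struct block_device *my_bdev,
		    unsigned int my_key)
{
	struct mb_cache_entry *ce;

	ce = mb_cache_entry_find_first(cache, my_bdev, my_key);
	while (ce && !IS_ERR(ce)) {
		if (entry_matches(ce))
			return ce;	/* caller must mb_cache_entry_release() */
		ce = mb_cache_entry_find_next(ce, my_bdev, my_key);
	}
	/* ERR_PTR(-EAGAIN) means the entry went away; callers may retry. */
	return NULL;
}
#endif
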
#endif /* !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0) */

static int __init init_mbcache(void)
{
	register_shrinker(&mb_cache_shrinker);
	return 0;
}

static void __exit exit_mbcache(void)
{
	unregister_shrinker(&mb_cache_shrinker);
}

module_init(init_mbcache)
module_exit(exit_mbcache)