/*
 * linux/fs/mbcache.c
 * (C) 2001-2002 Andreas Gruenbacher, <a.gruenbacher@computer.org>
 */

/*
 * Filesystem Meta Information Block Cache (mbcache)
 *
 * The mbcache caches blocks of block devices that need to be located
 * by their device/block number, as well as by other criteria (such
 * as the block's contents).
 *
 * There can only be one cache entry in a cache per device and block number.
 * Additional indexes need not be unique in this sense. The number of
 * additional indexes (=other criteria) is hardwired at compile time.
 *
 * Each cache entry is of fixed size. An entry may be `valid' or `invalid'
 * in the cache. A valid entry is in the main hash tables of the cache,
 * and may also be in the lru list. An invalid entry is not in any hashes
 * or lists.
 *
 * A valid cache entry is only in the lru list if no handles refer to it.
 * Invalid cache entries will be freed when the last handle to the cache
 * entry is released. Entries that cannot be freed immediately are put
 * back on the lru list.
 */
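
/*
 * Typical writer-side usage (an illustrative sketch, not lifted from a
 * real caller; error handling is omitted, and "cache", "bdev", "block"
 * and "key" stand in for the caller's own data):
 *
 *	ce = mb_cache_entry_alloc(cache, GFP_NOFS);	(take a handle)
 *	if (ce) {
 *		mb_cache_entry_insert(ce, bdev, block, key); (make it valid)
 *		mb_cache_entry_release(ce);	(drop the handle; the entry
 *						 goes onto the lru list)
 *	}
 */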

#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/hash.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/mbcache.h>


#ifdef MB_CACHE_DEBUG
# define mb_debug(f...) do { \
		printk(KERN_DEBUG f); \
		printk("\n"); \
	} while (0)
# define mb_assert(c) do { if (!(c)) \
		printk(KERN_ERR "assertion " #c " failed\n"); \
	} while (0)
#else
# define mb_debug(f...) do { } while (0)
# define mb_assert(c) do { } while (0)
#endif
#define mb_error(f...) do { \
		printk(KERN_ERR f); \
		printk("\n"); \
	} while (0)

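/*
 * An entry's usage count e_used counts readers. A writer instead holds
 * the entry with e_used == 1 + MB_CACHE_WRITER, so any value >=
 * MB_CACHE_WRITER means "locked for exclusive access".
 */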
#define MB_CACHE_WRITER ((unsigned short)~0U >> 1)

static DECLARE_WAIT_QUEUE_HEAD(mb_cache_queue);

MODULE_AUTHOR("Andreas Gruenbacher <a.gruenbacher@computer.org>");
MODULE_DESCRIPTION("Meta block cache (for extended attributes)");
MODULE_LICENSE("GPL");

EXPORT_SYMBOL(mb_cache_create);
EXPORT_SYMBOL(mb_cache_shrink);
EXPORT_SYMBOL(mb_cache_destroy);
EXPORT_SYMBOL(mb_cache_entry_alloc);
EXPORT_SYMBOL(mb_cache_entry_insert);
EXPORT_SYMBOL(mb_cache_entry_release);
EXPORT_SYMBOL(mb_cache_entry_free);
EXPORT_SYMBOL(mb_cache_entry_get);
#if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)
EXPORT_SYMBOL(mb_cache_entry_find_first);
EXPORT_SYMBOL(mb_cache_entry_find_next);
#endif

struct mb_cache {
	struct list_head		c_cache_list;
	const char			*c_name;
	atomic_t			c_entry_count;
	int				c_max_entries;
	int				c_bucket_bits;
	struct kmem_cache		*c_entry_cache;
	struct list_head		*c_block_hash;
	struct list_head		*c_index_hash;
};


/*
 * Global data: list of all mbcaches, the lru list, and a spinlock for
 * accessing cache data structures on SMP machines. The lru list is
 * global across all mbcaches.
 */

static LIST_HEAD(mb_cache_list);
static LIST_HEAD(mb_cache_lru_list);
static DEFINE_SPINLOCK(mb_cache_spinlock);

/*
 * The shrinker which mbcache registers with the kernel so that the
 * cache gets shrunk dynamically under memory pressure.
 */

static int mb_cache_shrink_fn(struct shrinker *shrink, int nr_to_scan,
			      gfp_t gfp_mask);

static struct shrinker mb_cache_shrinker = {
	.shrink = mb_cache_shrink_fn,
	.seeks = DEFAULT_SEEKS,
};

static inline int
__mb_cache_entry_is_hashed(struct mb_cache_entry *ce)
{
	return !list_empty(&ce->e_block_list);
}


static void
__mb_cache_entry_unhash(struct mb_cache_entry *ce)
{
	if (__mb_cache_entry_is_hashed(ce)) {
		list_del_init(&ce->e_block_list);
		list_del(&ce->e_index.o_list);
	}
}


static void
__mb_cache_entry_forget(struct mb_cache_entry *ce, gfp_t gfp_mask)
{
	struct mb_cache *cache = ce->e_cache;

	mb_assert(!(ce->e_used || ce->e_queued));
	kmem_cache_free(cache->c_entry_cache, ce);
	atomic_dec(&cache->c_entry_count);
}


static void
__mb_cache_entry_release_unlock(struct mb_cache_entry *ce)
	__releases(mb_cache_spinlock)
{
	/* Wake up all processes queuing for this cache entry. */
	if (ce->e_queued)
		wake_up_all(&mb_cache_queue);
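	/* Drop the writer lock, if we hold it, and then our reference. */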
	if (ce->e_used >= MB_CACHE_WRITER)
		ce->e_used -= MB_CACHE_WRITER;
	ce->e_used--;
	if (!(ce->e_used || ce->e_queued)) {
		if (!__mb_cache_entry_is_hashed(ce))
			goto forget;
		mb_assert(list_empty(&ce->e_lru_list));
		list_add_tail(&ce->e_lru_list, &mb_cache_lru_list);
	}
	spin_unlock(&mb_cache_spinlock);
	return;
forget:
	spin_unlock(&mb_cache_spinlock);
	__mb_cache_entry_forget(ce, GFP_KERNEL);
}


/*
 * mb_cache_shrink_fn()  memory pressure callback
 *
 * This function is called by the kernel memory management when memory
 * gets low.
 *
 * @shrink: (ignored)
 * @nr_to_scan: Number of objects to scan
 * @gfp_mask: (ignored)
 *
 * Returns the total number of cache entries, scaled by
 * sysctl_vfs_cache_pressure.
 */
static int
mb_cache_shrink_fn(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
{
	LIST_HEAD(free_list);
	struct mb_cache *cache;
	struct mb_cache_entry *entry, *tmp;
	int count = 0;

	mb_debug("trying to free %d entries", nr_to_scan);
	spin_lock(&mb_cache_spinlock);
	while (nr_to_scan-- && !list_empty(&mb_cache_lru_list)) {
		struct mb_cache_entry *ce =
			list_entry(mb_cache_lru_list.next,
				   struct mb_cache_entry, e_lru_list);
		list_move_tail(&ce->e_lru_list, &free_list);
		__mb_cache_entry_unhash(ce);
	}
	list_for_each_entry(cache, &mb_cache_list, c_cache_list) {
		mb_debug("cache %s (%d)", cache->c_name,
			 atomic_read(&cache->c_entry_count));
		count += atomic_read(&cache->c_entry_count);
	}
	spin_unlock(&mb_cache_spinlock);
	list_for_each_entry_safe(entry, tmp, &free_list, e_lru_list) {
		__mb_cache_entry_forget(entry, gfp_mask);
	}
	return (count / 100) * sysctl_vfs_cache_pressure;
}


/*
 * mb_cache_create()  create a new cache
 *
 * All entries in one cache are of equal size. Cache entries may be from
 * multiple devices. Returns NULL if no more memory was available.
 *
 * @name: name of the cache (informal)
 * @bucket_bits: log2(number of hash buckets)
 */
struct mb_cache *
mb_cache_create(const char *name, int bucket_bits)
{
	int n, bucket_count = 1 << bucket_bits;
	struct mb_cache *cache = NULL;

	cache = kmalloc(sizeof(struct mb_cache), GFP_KERNEL);
	if (!cache)
		return NULL;
	cache->c_name = name;
	atomic_set(&cache->c_entry_count, 0);
	cache->c_bucket_bits = bucket_bits;
	cache->c_block_hash = kmalloc(bucket_count * sizeof(struct list_head),
				      GFP_KERNEL);
	if (!cache->c_block_hash)
		goto fail;
	for (n = 0; n < bucket_count; n++)
		INIT_LIST_HEAD(&cache->c_block_hash[n]);
	cache->c_index_hash = kmalloc(bucket_count * sizeof(struct list_head),
				      GFP_KERNEL);
	if (!cache->c_index_hash)
		goto fail;
	for (n = 0; n < bucket_count; n++)
		INIT_LIST_HEAD(&cache->c_index_hash[n]);
	cache->c_entry_cache = kmem_cache_create(name,
		sizeof(struct mb_cache_entry), 0,
		SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
	if (!cache->c_entry_cache)
		goto fail2;

	/*
	 * Set an upper limit on the number of cache entries so that the hash
	 * chains won't grow too long.
	 */
	cache->c_max_entries = bucket_count << 4;

	spin_lock(&mb_cache_spinlock);
	list_add(&cache->c_cache_list, &mb_cache_list);
	spin_unlock(&mb_cache_spinlock);
	return cache;

fail2:
	kfree(cache->c_index_hash);

fail:
	kfree(cache->c_block_hash);
	kfree(cache);
	return NULL;
}
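
/*
 * Example (hypothetical caller; the cache name and bucket count are
 * made up for illustration):
 *
 *	struct mb_cache *cache = mb_cache_create("demo_cache", 6);
 *	if (!cache)
 *		return -ENOMEM;
 */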


/*
 * mb_cache_shrink()
 *
 * Removes all cache entries of a device from the cache. Cache entries
 * currently in use cannot be freed and thus remain in the cache; all
 * others are freed.
 *
 * @bdev: which device's cache entries to shrink
 */
void
mb_cache_shrink(struct block_device *bdev)
{
	LIST_HEAD(free_list);
	struct list_head *l, *ltmp;

	spin_lock(&mb_cache_spinlock);
	list_for_each_safe(l, ltmp, &mb_cache_lru_list) {
		struct mb_cache_entry *ce =
			list_entry(l, struct mb_cache_entry, e_lru_list);
		if (ce->e_bdev == bdev) {
			list_move_tail(&ce->e_lru_list, &free_list);
			__mb_cache_entry_unhash(ce);
		}
	}
	spin_unlock(&mb_cache_spinlock);
	list_for_each_safe(l, ltmp, &free_list) {
		__mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
						   e_lru_list), GFP_KERNEL);
	}
}


/*
 * mb_cache_destroy()
 *
 * Shrinks the cache to its minimum possible size (hopefully 0 entries),
 * and then destroys it.
 */
void
mb_cache_destroy(struct mb_cache *cache)
{
	LIST_HEAD(free_list);
	struct list_head *l, *ltmp;

	spin_lock(&mb_cache_spinlock);
	list_for_each_safe(l, ltmp, &mb_cache_lru_list) {
		struct mb_cache_entry *ce =
			list_entry(l, struct mb_cache_entry, e_lru_list);
		if (ce->e_cache == cache) {
			list_move_tail(&ce->e_lru_list, &free_list);
			__mb_cache_entry_unhash(ce);
		}
	}
	list_del(&cache->c_cache_list);
	spin_unlock(&mb_cache_spinlock);

	list_for_each_safe(l, ltmp, &free_list) {
		__mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
						   e_lru_list), GFP_KERNEL);
	}

	if (atomic_read(&cache->c_entry_count) > 0) {
		mb_error("cache %s: %d orphaned entries",
			 cache->c_name,
			 atomic_read(&cache->c_entry_count));
	}

	kmem_cache_destroy(cache->c_entry_cache);

	kfree(cache->c_index_hash);
	kfree(cache->c_block_hash);
	kfree(cache);
}

/*
 * mb_cache_entry_alloc()
 *
 * Allocates a new cache entry. The new entry will not be valid initially,
 * and thus cannot be looked up yet. It should be filled with data, and
 * then inserted into the cache using mb_cache_entry_insert(). Returns NULL
 * if no more memory was available.
 */
struct mb_cache_entry *
mb_cache_entry_alloc(struct mb_cache *cache, gfp_t gfp_flags)
{
	struct mb_cache_entry *ce = NULL;

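	/*
	 * If the cache is full, try to reclaim the oldest entry from the
	 * lru list and reuse it instead of growing the cache further.
	 */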
	if (atomic_read(&cache->c_entry_count) >= cache->c_max_entries) {
		spin_lock(&mb_cache_spinlock);
		if (!list_empty(&mb_cache_lru_list)) {
			ce = list_entry(mb_cache_lru_list.next,
					struct mb_cache_entry, e_lru_list);
			list_del_init(&ce->e_lru_list);
			__mb_cache_entry_unhash(ce);
		}
		spin_unlock(&mb_cache_spinlock);
	}
	if (!ce) {
		ce = kmem_cache_alloc(cache->c_entry_cache, gfp_flags);
		if (!ce)
			return NULL;
		atomic_inc(&cache->c_entry_count);
		INIT_LIST_HEAD(&ce->e_lru_list);
		INIT_LIST_HEAD(&ce->e_block_list);
		ce->e_cache = cache;
		ce->e_queued = 0;
	}
	ce->e_used = 1 + MB_CACHE_WRITER;
	return ce;
}


/*
 * mb_cache_entry_insert()
 *
 * Inserts an entry that was allocated using mb_cache_entry_alloc() into
 * the cache. After this, the cache entry can be looked up, but is not yet
 * in the lru list as the caller still holds a handle to it. Returns 0 on
 * success, or -EBUSY if a cache entry for that device + block exists
 * already (this may happen after a failed lookup, if another process
 * has inserted the same cache entry in the meantime).
 *
 * @bdev: device the cache entry belongs to
 * @block: block number
 * @key: lookup key
 */
int
mb_cache_entry_insert(struct mb_cache_entry *ce, struct block_device *bdev,
		      sector_t block, unsigned int key)
{
	struct mb_cache *cache = ce->e_cache;
	unsigned int bucket;
	struct list_head *l;
	int error = -EBUSY;

	bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
			   cache->c_bucket_bits);
	spin_lock(&mb_cache_spinlock);
	list_for_each_prev(l, &cache->c_block_hash[bucket]) {
		struct mb_cache_entry *lce =
			list_entry(l, struct mb_cache_entry, e_block_list);
		if (lce->e_bdev == bdev && lce->e_block == block)
			goto out;
	}
	__mb_cache_entry_unhash(ce);
	ce->e_bdev = bdev;
	ce->e_block = block;
	list_add(&ce->e_block_list, &cache->c_block_hash[bucket]);
	ce->e_index.o_key = key;
	bucket = hash_long(key, cache->c_bucket_bits);
	list_add(&ce->e_index.o_list, &cache->c_index_hash[bucket]);
	error = 0;
out:
	spin_unlock(&mb_cache_spinlock);
	return error;
}
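
/*
 * Sketch of how a caller might handle the -EBUSY case (hypothetical;
 * "key" would typically be derived from the block's contents, e.g. a
 * hash of an xattr block):
 *
 *	error = mb_cache_entry_insert(ce, bdev, block, key);
 *	if (error == -EBUSY)
 *		mb_cache_entry_free(ce);	(someone else inserted an
 *						 entry for this block first)
 *	else
 *		mb_cache_entry_release(ce);
 */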


/*
 * mb_cache_entry_release()
 *
 * Release a handle to a cache entry. When the last handle to a cache entry
 * is released it is either freed (if it is invalid) or otherwise inserted
 * into the lru list.
 */
void
mb_cache_entry_release(struct mb_cache_entry *ce)
{
	spin_lock(&mb_cache_spinlock);
	__mb_cache_entry_release_unlock(ce);
}


/*
 * mb_cache_entry_free()
 *
 * Unhashes the cache entry (making it invalid) and releases the handle to
 * it. The entry is freed as soon as the last handle is released.
 */
void
mb_cache_entry_free(struct mb_cache_entry *ce)
{
	spin_lock(&mb_cache_spinlock);
	mb_assert(list_empty(&ce->e_lru_list));
	__mb_cache_entry_unhash(ce);
	__mb_cache_entry_release_unlock(ce);
}


/*
 * mb_cache_entry_get()
 *
 * Get a cache entry by device / block number. (There can only be one entry
 * in the cache per device and block.) Returns NULL if no such cache entry
 * exists. The returned cache entry is locked for exclusive access ("single
 * writer").
 */
struct mb_cache_entry *
mb_cache_entry_get(struct mb_cache *cache, struct block_device *bdev,
		   sector_t block)
{
	unsigned int bucket;
	struct list_head *l;
	struct mb_cache_entry *ce;

	bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
			   cache->c_bucket_bits);
	spin_lock(&mb_cache_spinlock);
	list_for_each(l, &cache->c_block_hash[bucket]) {
		ce = list_entry(l, struct mb_cache_entry, e_block_list);
		if (ce->e_bdev == bdev && ce->e_block == block) {
			DEFINE_WAIT(wait);

			if (!list_empty(&ce->e_lru_list))
				list_del_init(&ce->e_lru_list);

			while (ce->e_used > 0) {
				ce->e_queued++;
				prepare_to_wait(&mb_cache_queue, &wait,
						TASK_UNINTERRUPTIBLE);
				spin_unlock(&mb_cache_spinlock);
				schedule();
				spin_lock(&mb_cache_spinlock);
				ce->e_queued--;
			}
			finish_wait(&mb_cache_queue, &wait);
			ce->e_used += 1 + MB_CACHE_WRITER;

			if (!__mb_cache_entry_is_hashed(ce)) {
				__mb_cache_entry_release_unlock(ce);
				return NULL;
			}
			goto cleanup;
		}
	}
	ce = NULL;

cleanup:
	spin_unlock(&mb_cache_spinlock);
	return ce;
}
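
/*
 * Sketch of a lookup-and-invalidate pattern built on mb_cache_entry_get()
 * (illustrative only; a caller would do this when the cached block is
 * about to be modified):
 *
 *	ce = mb_cache_entry_get(cache, bdev, block);
 *	if (ce)
 *		mb_cache_entry_free(ce);	(unhash and drop the handle)
 */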

#if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)

static struct mb_cache_entry *
__mb_cache_entry_find(struct list_head *l, struct list_head *head,
		      struct block_device *bdev, unsigned int key)
{
	while (l != head) {
		struct mb_cache_entry *ce =
			list_entry(l, struct mb_cache_entry, e_index.o_list);
		if (ce->e_bdev == bdev && ce->e_index.o_key == key) {
			DEFINE_WAIT(wait);

			if (!list_empty(&ce->e_lru_list))
				list_del_init(&ce->e_lru_list);

			/* Incrementing before holding the lock gives readers
			   priority over writers. */
			ce->e_used++;
			while (ce->e_used >= MB_CACHE_WRITER) {
				ce->e_queued++;
				prepare_to_wait(&mb_cache_queue, &wait,
						TASK_UNINTERRUPTIBLE);
				spin_unlock(&mb_cache_spinlock);
				schedule();
				spin_lock(&mb_cache_spinlock);
				ce->e_queued--;
			}
			finish_wait(&mb_cache_queue, &wait);

			if (!__mb_cache_entry_is_hashed(ce)) {
				__mb_cache_entry_release_unlock(ce);
				spin_lock(&mb_cache_spinlock);
				return ERR_PTR(-EAGAIN);
			}
			return ce;
		}
		l = l->next;
	}
	return NULL;
}


/*
 * mb_cache_entry_find_first()
 *
 * Find the first cache entry on a given device with a certain key in
 * an additional index. Additional matches can be found with
 * mb_cache_entry_find_next(). Returns NULL if no match was found. The
 * returned cache entry is locked for shared access ("multiple readers").
 *
 * @cache: the cache to search
 * @bdev: the device the cache entry should belong to
 * @key: the key in the index
 */
struct mb_cache_entry *
mb_cache_entry_find_first(struct mb_cache *cache, struct block_device *bdev,
			  unsigned int key)
{
	unsigned int bucket = hash_long(key, cache->c_bucket_bits);
	struct list_head *l;
	struct mb_cache_entry *ce;

	spin_lock(&mb_cache_spinlock);
	l = cache->c_index_hash[bucket].next;
	ce = __mb_cache_entry_find(l, &cache->c_index_hash[bucket], bdev, key);
	spin_unlock(&mb_cache_spinlock);
	return ce;
}


/*
 * mb_cache_entry_find_next()
 *
 * Find the next cache entry on a given device with a certain key in an
 * additional index. Returns NULL if no match could be found. The previous
 * entry is automatically released, so that mb_cache_entry_find_next() can
 * be called like this:
 *
 * entry = mb_cache_entry_find_first();
 * while (entry) {
 *	...
 *	entry = mb_cache_entry_find_next(entry, ...);
 * }
 *
 * @prev: The previous match
 * @bdev: the device the cache entry should belong to
 * @key: the key in the index
 */
struct mb_cache_entry *
mb_cache_entry_find_next(struct mb_cache_entry *prev,
			 struct block_device *bdev, unsigned int key)
{
	struct mb_cache *cache = prev->e_cache;
	unsigned int bucket = hash_long(key, cache->c_bucket_bits);
	struct list_head *l;
	struct mb_cache_entry *ce;

	spin_lock(&mb_cache_spinlock);
	l = prev->e_index.o_list.next;
	ce = __mb_cache_entry_find(l, &cache->c_index_hash[bucket], bdev, key);
	__mb_cache_entry_release_unlock(prev);
	return ce;
}

#endif	/* !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0) */

static int __init init_mbcache(void)
{
	register_shrinker(&mb_cache_shrinker);
	return 0;
}

static void __exit exit_mbcache(void)
{
	unregister_shrinker(&mb_cache_shrinker);
}

module_init(init_mbcache)
module_exit(exit_mbcache)