#ifndef _LINUX_SHRINKER_H
#define _LINUX_SHRINKER_H

/*
 * This struct is used to pass information from page reclaim to the shrinkers.
 * We consolidate the values for easier extension later.
 */
struct shrink_control {
	gfp_t gfp_mask;

	/* How many slab objects shrinker() should scan and try to reclaim */
	unsigned long nr_to_scan;
};

/*
 * A callback you can register to apply pressure to ageable caches.
 *
 * The callback is passed 'sc', a shrink_control which includes a count
 * 'nr_to_scan' and a 'gfp_mask'.  It should look through the
 * least-recently-used 'nr_to_scan' entries and attempt to free them up.
 * It should return the number of objects which remain in the cache.  If it
 * returns -1, it means it cannot do any scanning at this time (e.g. there
 * is a risk of deadlock).
 *
 * The 'gfp_mask' refers to the allocation we are currently trying to
 * fulfil.
 *
 * Note that 'shrink' will be passed nr_to_scan == 0 when the VM is
 * querying the cache size, so a fastpath for that case is appropriate.
 */
struct shrinker {
	int (*shrink)(struct shrinker *, struct shrink_control *sc);
	int seeks;	/* seeks to recreate an obj */
	long batch;	/* reclaim batch size, 0 = default */

	/* These are for internal use */
	struct list_head list;
	atomic_long_t nr_in_batch;	/* objs pending delete */
};
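
/*
 * A minimal sketch of a callback honouring the contract above: report the
 * cache size when nr_to_scan is 0, bail out with -1 when the lock cannot be
 * taken, otherwise free the oldest entries and return what remains.  The
 * cache, its lock and all my_* names are hypothetical, not part of this
 * interface:
 *
 *	static int my_cache_shrink(struct shrinker *s, struct shrink_control *sc)
 *	{
 *		if (sc->nr_to_scan == 0)
 *			return my_cache_count();
 *		if (!mutex_trylock(&my_cache_lock))
 *			return -1;
 *		my_cache_free_lru(sc->nr_to_scan);
 *		mutex_unlock(&my_cache_lock);
 *		return my_cache_count();
 *	}
 *
 *	static struct shrinker my_shrinker = {
 *		.shrink	= my_cache_shrink,
 *		.seeks	= DEFAULT_SEEKS,
 *	};
 */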
#define DEFAULT_SEEKS 2 /* A good number if you don't know better. */
extern void register_shrinker(struct shrinker *);
extern void unregister_shrinker(struct shrinker *);
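
/*
 * Registration is typically done once the cache is set up and undone before
 * it is torn down; a sketch using the hypothetical my_shrinker from above:
 *
 *	static int __init my_module_init(void)
 *	{
 *		register_shrinker(&my_shrinker);
 *		return 0;
 *	}
 *
 *	static void __exit my_module_exit(void)
 *	{
 *		unregister_shrinker(&my_shrinker);
 *	}
 */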
#endif