blob: f4aee75f00b19449e63c2a48b864e36f11b10ce7 [file] [log] [blame]
#ifndef _LINUX_SHRINKER_H
#define _LINUX_SHRINKER_H

/*
 * This struct is used to pass information from page reclaim to the shrinkers.
 * We consolidate the values for easier extension later.
 *
 * The 'gfp_mask' refers to the allocation we are currently trying to
 * fulfil.
 */
struct shrink_control {
	/* allocation context of the reclaim attempt */
	gfp_t gfp_mask;

	/*
	 * How many objects scan_objects should scan and try to reclaim.
	 * This is reset before every call, so it is safe for callees
	 * to modify.
	 */
	unsigned long nr_to_scan;

	/* current node being shrunk (for NUMA aware shrinkers) */
	int nid;
};
24
/* Sentinel return value from ->scan_objects: stop scanning, no progress
 * possible in this reclaim context (see @scan_objects below). */
#define SHRINK_STOP (~0UL)
/*
 * A callback you can register to apply pressure to ageable caches.
 *
 * @count_objects should return the number of freeable items in the cache. If
 * there are no objects to free or the number of freeable items cannot be
 * determined, it should return 0. No deadlock checks should be done during the
 * count callback - the shrinker relies on aggregating scan counts that couldn't
 * be executed due to potential deadlocks to be run at a later call when the
 * deadlock condition is no longer pending.
 *
 * @scan_objects will only be called if @count_objects returned a non-zero
 * value for the number of freeable objects. The callout should scan the cache
 * and attempt to free items from the cache. It should then return the number
 * of objects freed during the scan, or SHRINK_STOP if progress cannot be made
 * due to potential deadlocks. If SHRINK_STOP is returned, then no further
 * attempts to call the @scan_objects will be made from the current reclaim
 * context.
 *
 * @flags determine the shrinker abilities, like numa awareness
 */
struct shrinker {
	unsigned long (*count_objects)(struct shrinker *,
				       struct shrink_control *sc);
	unsigned long (*scan_objects)(struct shrinker *,
				      struct shrink_control *sc);

	int seeks; /* seeks to recreate an obj */
	long batch; /* reclaim batch size, 0 = default */
	unsigned long flags; /* SHRINKER_* ability flags, e.g. SHRINKER_NUMA_AWARE */

	/* These are for internal use */
	struct list_head list;
	/* objs pending delete, per node */
	atomic_long_t *nr_deferred;
};
#define DEFAULT_SEEKS 2 /* A good number if you don't know better. */

/* Flags */
#define SHRINKER_NUMA_AWARE (1 << 0) /* per-node aware: ->nid in shrink_control is meaningful */

/*
 * Add/remove a shrinker so page reclaim can invoke its callbacks.
 * NOTE(review): implementations live elsewhere (mm/); exact locking and
 * failure semantics of register_shrinker are not visible in this header.
 */
extern int register_shrinker(struct shrinker *);
extern void unregister_shrinker(struct shrinker *);
#endif