Dave Chinner | b0d40c9 | 2011-07-08 14:14:42 +1000 | [diff] [blame] | 1 | #ifndef _LINUX_SHRINKER_H |
| 2 | #define _LINUX_SHRINKER_H |
| 3 | |
| 4 | /* |
| 5 | * This struct is used to pass information from page reclaim to the shrinkers. |
 * We consolidate the values for easier extension later.
Dave Chinner | 24f7c6b | 2013-08-28 10:17:56 +1000 | [diff] [blame] | 7 | * |
| 8 | * The 'gfpmask' refers to the allocation we are currently trying to |
| 9 | * fulfil. |
Dave Chinner | b0d40c9 | 2011-07-08 14:14:42 +1000 | [diff] [blame] | 10 | */ |
| 11 | struct shrink_control { |
| 12 | gfp_t gfp_mask; |
| 13 | |
Dave Chinner | a0b0213 | 2013-08-28 10:18:16 +1000 | [diff] [blame] | 14 | /* |
| 15 | * How many objects scan_objects should scan and try to reclaim. |
| 16 | * This is reset before every call, so it is safe for callees |
| 17 | * to modify. |
| 18 | */ |
Dave Chinner | b0d40c9 | 2011-07-08 14:14:42 +1000 | [diff] [blame] | 19 | unsigned long nr_to_scan; |
Dave Chinner | 0ce3d74 | 2013-08-28 10:18:03 +1000 | [diff] [blame] | 20 | |
Glauber Costa | 1d3d443 | 2013-08-28 10:18:04 +1000 | [diff] [blame] | 21 | /* current node being shrunk (for NUMA aware shrinkers) */ |
| 22 | int nid; |
Dave Chinner | b0d40c9 | 2011-07-08 14:14:42 +1000 | [diff] [blame] | 23 | }; |
| 24 | |
Dave Chinner | 24f7c6b | 2013-08-28 10:17:56 +1000 | [diff] [blame] | 25 | #define SHRINK_STOP (~0UL) |
Dave Chinner | b0d40c9 | 2011-07-08 14:14:42 +1000 | [diff] [blame] | 26 | /* |
| 27 | * A callback you can register to apply pressure to ageable caches. |
| 28 | * |
Dave Chinner | 24f7c6b | 2013-08-28 10:17:56 +1000 | [diff] [blame] | 29 | * @count_objects should return the number of freeable items in the cache. If |
| 30 | * there are no objects to free or the number of freeable items cannot be |
| 31 | * determined, it should return 0. No deadlock checks should be done during the |
| 32 | * count callback - the shrinker relies on aggregating scan counts that couldn't |
| 33 | * be executed due to potential deadlocks to be run at a later call when the |
| 34 | * deadlock condition is no longer pending. |
Dave Chinner | b0d40c9 | 2011-07-08 14:14:42 +1000 | [diff] [blame] | 35 | * |
Dave Chinner | 24f7c6b | 2013-08-28 10:17:56 +1000 | [diff] [blame] | 36 | * @scan_objects will only be called if @count_objects returned a non-zero |
| 37 | * value for the number of freeable objects. The callout should scan the cache |
| 38 | * and attempt to free items from the cache. It should then return the number |
| 39 | * of objects freed during the scan, or SHRINK_STOP if progress cannot be made |
| 40 | * due to potential deadlocks. If SHRINK_STOP is returned, then no further |
| 41 | * attempts to call the @scan_objects will be made from the current reclaim |
| 42 | * context. |
Glauber Costa | 1d3d443 | 2013-08-28 10:18:04 +1000 | [diff] [blame] | 43 | * |
| 44 | * @flags determine the shrinker abilities, like numa awareness |
Dave Chinner | b0d40c9 | 2011-07-08 14:14:42 +1000 | [diff] [blame] | 45 | */ |
| 46 | struct shrinker { |
Dave Chinner | 24f7c6b | 2013-08-28 10:17:56 +1000 | [diff] [blame] | 47 | unsigned long (*count_objects)(struct shrinker *, |
| 48 | struct shrink_control *sc); |
| 49 | unsigned long (*scan_objects)(struct shrinker *, |
| 50 | struct shrink_control *sc); |
| 51 | |
Dave Chinner | b0d40c9 | 2011-07-08 14:14:42 +1000 | [diff] [blame] | 52 | int seeks; /* seeks to recreate an obj */ |
| 53 | long batch; /* reclaim batch size, 0 = default */ |
Glauber Costa | 1d3d443 | 2013-08-28 10:18:04 +1000 | [diff] [blame] | 54 | unsigned long flags; |
Dave Chinner | b0d40c9 | 2011-07-08 14:14:42 +1000 | [diff] [blame] | 55 | |
| 56 | /* These are for internal use */ |
| 57 | struct list_head list; |
Glauber Costa | 1d3d443 | 2013-08-28 10:18:04 +1000 | [diff] [blame] | 58 | /* objs pending delete, per node */ |
| 59 | atomic_long_t *nr_deferred; |
Dave Chinner | b0d40c9 | 2011-07-08 14:14:42 +1000 | [diff] [blame] | 60 | }; |
| 61 | #define DEFAULT_SEEKS 2 /* A good number if you don't know better. */ |
Glauber Costa | 1d3d443 | 2013-08-28 10:18:04 +1000 | [diff] [blame] | 62 | |
| 63 | /* Flags */ |
| 64 | #define SHRINKER_NUMA_AWARE (1 << 0) |
| 65 | |
| 66 | extern int register_shrinker(struct shrinker *); |
Dave Chinner | b0d40c9 | 2011-07-08 14:14:42 +1000 | [diff] [blame] | 67 | extern void unregister_shrinker(struct shrinker *); |
| 68 | #endif |