#ifndef _LINUX_SHRINKER_H
#define _LINUX_SHRINKER_H

/*
 * This struct is used to pass information from page reclaim to the shrinkers.
 * We consolidate the values for easier extension later.
 *
 * The 'gfp_mask' refers to the allocation we are currently trying to
 * fulfil.
 */
struct shrink_control {
	gfp_t gfp_mask;

	/*
	 * How many objects scan_objects should scan and try to reclaim.
	 * This is reset before every call, so it is safe for callees
	 * to modify.
	 */
	unsigned long nr_to_scan;

	/* current node being shrunk (for NUMA aware shrinkers) */
	int nid;

	/* current memcg being shrunk (for memcg aware shrinkers) */
	struct mem_cgroup *memcg;
};
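
/*
 * Illustrative sketch (not part of this header): page reclaim fills in
 * a shrink_control and hands it to the callbacks declared below. A
 * filesystem cache, for instance, must not recurse into filesystem
 * code while servicing a GFP_NOFS allocation, so its scan callback
 * typically checks sc->gfp_mask first. fs_cache_scan and fs_cache_trim
 * are hypothetical names; SHRINK_STOP is defined just below.
 *
 *	static unsigned long fs_cache_scan(struct shrinker *s,
 *					   struct shrink_control *sc)
 *	{
 *		if (!(sc->gfp_mask & __GFP_FS))
 *			return SHRINK_STOP;
 *		return fs_cache_trim(sc->nid, sc->nr_to_scan);
 *	}
 */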

#define SHRINK_STOP (~0UL)
/*
 * A callback you can register to apply pressure to ageable caches.
 *
 * @count_objects should return the number of freeable items in the cache. If
 * there are no objects to free or the number of freeable items cannot be
 * determined, it should return 0. No deadlock checks should be done during the
 * count callback - the shrinker relies on scan counts that could not be acted
 * on due to potential deadlocks being aggregated and retried on a later call,
 * once the deadlock condition is no longer pending.
 *
 * @scan_objects will only be called if @count_objects returned a non-zero
 * value for the number of freeable objects. The callout should scan the cache
 * and attempt to free items from the cache. It should then return the number
 * of objects freed during the scan, or SHRINK_STOP if progress cannot be made
 * due to potential deadlocks. If SHRINK_STOP is returned, then no further
 * attempts to call @scan_objects will be made from the current reclaim
 * context.
 *
 * @flags determines the shrinker's abilities, like NUMA awareness.
 */
struct shrinker {
	unsigned long (*count_objects)(struct shrinker *,
				       struct shrink_control *sc);
	unsigned long (*scan_objects)(struct shrinker *,
				      struct shrink_control *sc);

	int seeks;	/* seeks to recreate an obj */
	long batch;	/* reclaim batch size, 0 = default */
	unsigned long flags;

	/* These are for internal use */
	struct list_head list;
	/* objs pending delete, per node */
	atomic_long_t *nr_deferred;
};
#define DEFAULT_SEEKS 2 /* A good number if you don't know better. */
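
/*
 * Illustrative sketch (not part of this header): a minimal shrinker for
 * a hypothetical global object cache. my_cache_objects and
 * my_cache_free are assumed names, not real kernel symbols. Returning
 * the current object count from my_count (or 0 when there is nothing
 * to do) is what drives whether my_scan gets called at all.
 *
 *	static unsigned long my_count(struct shrinker *s,
 *				      struct shrink_control *sc)
 *	{
 *		return my_cache_objects;
 *	}
 *
 *	static unsigned long my_scan(struct shrinker *s,
 *				     struct shrink_control *sc)
 *	{
 *		return my_cache_free(sc->nr_to_scan);
 *	}
 *
 *	static struct shrinker my_shrinker = {
 *		.count_objects	= my_count,
 *		.scan_objects	= my_scan,
 *		.seeks		= DEFAULT_SEEKS,
 *	};
 */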

/* Flags */
#define SHRINKER_NUMA_AWARE	(1 << 0)
#define SHRINKER_MEMCG_AWARE	(1 << 1)
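
/*
 * Illustrative sketch (not part of this header): a NUMA aware shrinker
 * opts in via flags, and then interprets sc->nid in both callbacks as
 * the node currently being shrunk (my_count/my_scan continue the
 * hypothetical sketch above):
 *
 *	static struct shrinker my_numa_shrinker = {
 *		.count_objects	= my_count,
 *		.scan_objects	= my_scan,
 *		.seeks		= DEFAULT_SEEKS,
 *		.flags		= SHRINKER_NUMA_AWARE,
 *	};
 */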

extern int register_shrinker(struct shrinker *);
extern void unregister_shrinker(struct shrinker *);
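
/*
 * Illustrative sketch (not part of this header): registering the
 * hypothetical shrinker from the examples above, typically from a
 * module's init/exit paths:
 *
 *	static int __init my_init(void)
 *	{
 *		return register_shrinker(&my_shrinker);
 *	}
 *
 *	static void __exit my_exit(void)
 *	{
 *		unregister_shrinker(&my_shrinker);
 *	}
 */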
#endif