/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SHRINKER_H
#define _LINUX_SHRINKER_H

/*
 * This struct is used to pass information from page reclaim to the shrinkers.
 * We consolidate the values for easier extension later.
 *
 * The 'gfp_mask' refers to the allocation we are currently trying to
 * fulfil.
 */
struct shrink_control {
	gfp_t gfp_mask;

	/*
	 * How many objects scan_objects should scan and try to reclaim.
	 * This is reset before every call, so it is safe for callees
	 * to modify.
	 */
	unsigned long nr_to_scan;

	/*
	 * How many objects did scan_objects process?
	 * This defaults to nr_to_scan before every call, but the callee
	 * should track its actual progress.
	 */
	unsigned long nr_scanned;

	/* current node being shrunk (for NUMA aware shrinkers) */
	int nid;

	/* current memcg being shrunk (for memcg aware shrinkers) */
	struct mem_cgroup *memcg;
};

#define SHRINK_STOP (~0UL)
#define SHRINK_EMPTY (~0UL - 1)
/*
 * A callback you can register to apply pressure to ageable caches.
 *
 * @count_objects should return the number of freeable items in the cache. If
 * there are no objects to free, it should return SHRINK_EMPTY; 0 should be
 * returned when the number of freeable items cannot be determined, or when
 * the shrinker should skip this cache for now (e.g., the number of objects
 * is below the shrinkable limit). No deadlock checks should be done during
 * the count callback - the shrinker relies on aggregating scan counts that
 * could not be executed due to potential deadlocks, to be run at a later
 * call when the deadlock condition is no longer pending.
 *
 * @scan_objects will only be called if @count_objects returned a non-zero
 * value for the number of freeable objects. The callout should scan the cache
 * and attempt to free items from the cache. It should then return the number
 * of objects freed during the scan, or SHRINK_STOP if progress cannot be made
 * due to potential deadlocks. If SHRINK_STOP is returned, then no further
 * attempts to call @scan_objects will be made from the current reclaim
 * context.
 *
 * @flags determine the shrinker abilities, like NUMA awareness.
 */
struct shrinker {
	unsigned long (*count_objects)(struct shrinker *,
				       struct shrink_control *sc);
	unsigned long (*scan_objects)(struct shrinker *,
				      struct shrink_control *sc);

	int seeks;	/* seeks to recreate an obj */
	long batch;	/* reclaim batch size, 0 = default */
	unsigned long flags;

	/* These are for internal use */
	struct list_head list;
#ifdef CONFIG_MEMCG_KMEM
	/* ID in shrinker_idr */
	int id;
#endif
	/* objs pending delete, per node */
	atomic_long_t *nr_deferred;
};
#define DEFAULT_SEEKS 2 /* A good number if you don't know better. */

/* Flags */
#define SHRINKER_NUMA_AWARE	(1 << 0)
#define SHRINKER_MEMCG_AWARE	(1 << 1)

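/*
 * Illustrative sketch only -- not part of the original header. A minimal
 * shrinker for a hypothetical object cache, showing the contract described
 * above: @count_objects returns SHRINK_EMPTY when the cache is empty, and
 * @scan_objects honours sc->nr_to_scan, records its actual progress in
 * sc->nr_scanned, and returns SHRINK_STOP rather than risking a deadlock.
 * All example_* names are assumptions made up for this sketch, and the
 * code assumes <linux/spinlock.h> has been included.
 */
static DEFINE_SPINLOCK(example_lock);
static unsigned long example_nr_objects;	/* objects in the fake cache */

static unsigned long example_cache_count(struct shrinker *shrink,
					 struct shrink_control *sc)
{
	unsigned long count = READ_ONCE(example_nr_objects);

	/* 0 would mean "skip me this time"; an empty cache says so explicitly. */
	return count ? count : SHRINK_EMPTY;
}

static unsigned long example_cache_scan(struct shrinker *shrink,
					struct shrink_control *sc)
{
	unsigned long freed = 0;

	/* Refuse to wait on the lock from reclaim context. */
	if (!spin_trylock(&example_lock))
		return SHRINK_STOP;

	while (freed < sc->nr_to_scan && example_nr_objects) {
		example_nr_objects--;	/* stands in for freeing one object */
		freed++;
	}
	sc->nr_scanned = freed;		/* report actual progress */

	spin_unlock(&example_lock);
	return freed;
}

static struct shrinker example_shrinker = {
	.count_objects	= example_cache_count,
	.scan_objects	= example_cache_scan,
	.seeks		= DEFAULT_SEEKS,
};
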
extern int prealloc_shrinker(struct shrinker *shrinker);
extern void register_shrinker_prepared(struct shrinker *shrinker);
extern int register_shrinker(struct shrinker *shrinker);
extern void unregister_shrinker(struct shrinker *shrinker);
extern void free_prealloced_shrinker(struct shrinker *shrinker);
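
/*
 * Illustrative sketch only: registering the hypothetical example_shrinker
 * above from module init/exit (assumes <linux/init.h>). register_shrinker()
 * can fail, e.g. when it cannot allocate the per-node nr_deferred counters,
 * so its return value must be checked. Callers that need to publish state
 * before the shrinker may run can instead call prealloc_shrinker() first
 * and register_shrinker_prepared() once setup is complete, or undo with
 * free_prealloced_shrinker() on an error path.
 */
static int __init example_init(void)
{
	return register_shrinker(&example_shrinker);
}

static void __exit example_exit(void)
{
	unregister_shrinker(&example_shrinker);
}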
#endif	/* _LINUX_SHRINKER_H */