#ifndef _LINUX_COMPACTION_H
#define _LINUX_COMPACTION_H

/*
 * Determines how hard direct compaction should try to succeed.
 * Lower value means higher priority, analogously to reclaim priority.
 */
enum compact_priority {
	COMPACT_PRIO_SYNC_FULL,
	MIN_COMPACT_PRIORITY = COMPACT_PRIO_SYNC_FULL,
	COMPACT_PRIO_SYNC_LIGHT,
	MIN_COMPACT_COSTLY_PRIORITY = COMPACT_PRIO_SYNC_LIGHT,
	DEF_COMPACT_PRIORITY = COMPACT_PRIO_SYNC_LIGHT,
	COMPACT_PRIO_ASYNC,
	INIT_COMPACT_PRIORITY = COMPACT_PRIO_ASYNC
};
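
/*
 * Illustrative sketch, not part of this header: a direct compactor is
 * expected to start at INIT_COMPACT_PRIORITY and, while compaction keeps
 * failing, raise the priority by decrementing the value (lower value
 * means trying harder), stopping at MIN_COMPACT_COSTLY_PRIORITY for
 * costly orders. The retry driver below is hypothetical, loosely
 * modelled on the allocator's retry logic (costly_order and
 * compaction_attempt_failed() are assumed names):
 *
 *	enum compact_priority prio = INIT_COMPACT_PRIORITY;
 *	const enum compact_priority floor = costly_order ?
 *			MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY;
 *
 *	while (compaction_attempt_failed(prio) && prio > floor)
 *		prio--;
 */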

/* Return values for compact_zone() and try_to_compact_pages() */
/* When adding new states, please adjust include/trace/events/compaction.h */
enum compact_result {
	/* For more detailed tracepoint output - internal to compaction */
	COMPACT_NOT_SUITABLE_ZONE,
	/*
	 * compaction didn't start as it was not possible or direct reclaim
	 * was more suitable
	 */
	COMPACT_SKIPPED,
	/* compaction didn't start as it was deferred due to past failures */
	COMPACT_DEFERRED,

	/* compaction not active last round */
	COMPACT_INACTIVE = COMPACT_DEFERRED,

	/* For more detailed tracepoint output - internal to compaction */
	COMPACT_NO_SUITABLE_PAGE,
	/* compaction should continue to another pageblock */
	COMPACT_CONTINUE,

	/*
	 * The full zone was scanned but compaction wasn't successful in
	 * compacting suitable pages.
	 */
	COMPACT_COMPLETE,
	/*
	 * direct compaction has scanned part of the zone but wasn't successful
	 * in compacting suitable pages.
	 */
	COMPACT_PARTIAL_SKIPPED,

	/* compaction terminated prematurely due to lock contention */
	COMPACT_CONTENDED,

	/*
	 * direct compaction terminated after concluding that the allocation
	 * should now succeed
	 */
	COMPACT_SUCCESS,
};
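
/*
 * Illustrative sketch, not part of this header: only a subset of these
 * values reaches callers; the *_SUITABLE_* states exist for tracepoints
 * and stay internal to mm/compaction.c. Values up to COMPACT_INACTIVE
 * mean compaction never ran. Simplified, in the spirit of
 * __alloc_pages_direct_compact() in mm/page_alloc.c:
 *
 *	struct page *page;
 *	enum compact_result result;
 *
 *	result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac, prio);
 *	if (result <= COMPACT_INACTIVE)
 *		return NULL;
 *
 *	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
 */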

struct alloc_context; /* in mm/internal.h */

/*
 * Number of free order-0 pages that should be available above given watermark
 * to make sure compaction has a reasonable chance of not running out of free
 * pages that it needs to isolate as migration targets during its work.
 */
static inline unsigned long compact_gap(unsigned int order)
{
	/*
	 * Although all the isolations for migration are temporary, compaction
	 * free scanner may have up to 1 << order pages on its list and then
	 * try to split an (order - 1) free page. At that point, a gap of
	 * 1 << order might not be enough, so it's safer to require twice that
	 * amount. Note that the number of pages on the list is also
	 * effectively limited by COMPACT_CLUSTER_MAX, as that's the maximum
	 * that the migrate scanner can have isolated on the migrate list, and
	 * the free scanner is only invoked when the number of isolated free
	 * pages is lower than that. But it's not worth complicating the
	 * formula here, as a bigger gap than strictly necessary for higher
	 * orders can also improve the chances of compaction success.
	 */
	return 2UL << order;
}
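
/*
 * For illustration, a simplified sketch of how the order-0 watermark
 * check in mm/compaction.c can apply this gap on top of the zone's low
 * watermark (details omitted):
 *
 *	unsigned long watermark = low_wmark_pages(zone) + compact_gap(order);
 *
 *	if (!zone_watermark_ok(zone, 0, watermark, classzone_idx, alloc_flags))
 *		return COMPACT_SKIPPED;
 *
 * E.g. for an order-3 allocation this requires 2UL << 3 = 16 extra free
 * pages above the watermark before compaction is attempted.
 */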

#ifdef CONFIG_COMPACTION
extern int sysctl_compact_memory;
extern int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos);
extern int sysctl_extfrag_threshold;
extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos);
extern int sysctl_compact_unevictable_allowed;
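
/*
 * These handlers back the /proc/sys/vm/compact_memory, extfrag_threshold
 * and compact_unevictable_allowed tunables. A minimal userspace sketch
 * (assumes root) that triggers compaction of all zones on all nodes:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/proc/sys/vm/compact_memory", O_WRONLY);
 *
 *	if (fd >= 0) {
 *		write(fd, "1", 1);
 *		close(fd);
 *	}
 */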

extern int fragmentation_index(struct zone *zone, unsigned int order);
extern enum compact_result try_to_compact_pages(gfp_t gfp_mask,
		unsigned int order, unsigned int alloc_flags,
		const struct alloc_context *ac, enum compact_priority prio);
extern void reset_isolation_suitable(pg_data_t *pgdat);
extern enum compact_result compaction_suitable(struct zone *zone, int order,
		unsigned int alloc_flags, int classzone_idx);

extern void defer_compaction(struct zone *zone, int order);
extern bool compaction_deferred(struct zone *zone, int order);
extern void compaction_defer_reset(struct zone *zone, int order,
				bool alloc_success);
extern bool compaction_restarting(struct zone *zone, int order);
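
/*
 * Illustrative sketch, not part of this header: the deferral API is
 * meant to bracket compaction attempts roughly as below (simplified
 * from how mm/compaction.c uses it) - skip zones where compaction
 * recently failed, back off further on a fresh full-scan failure, and
 * reset the backoff once compaction succeeds:
 *
 *	if (compaction_deferred(zone, order))
 *		continue;
 *
 *	status = compact_zone_order(zone, order, ...);
 *	if (status == COMPACT_SUCCESS)
 *		compaction_defer_reset(zone, order, false);
 *	else if (status == COMPACT_COMPLETE)
 *		defer_compaction(zone, order);
 */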

/* Compaction has made some progress and retrying makes sense */
static inline bool compaction_made_progress(enum compact_result result)
{
	/*
	 * Even though this might sound confusing, this in fact tells us
	 * that the compaction successfully isolated and migrated some
	 * pageblocks.
	 */
	if (result == COMPACT_SUCCESS)
		return true;

	return false;
}

/* Compaction has failed and it doesn't make much sense to keep retrying. */
static inline bool compaction_failed(enum compact_result result)
{
	/* All zones were scanned completely and there was still no result. */
	if (result == COMPACT_COMPLETE)
		return true;

	return false;
}

/*
 * Compaction has backed off for some reason. It might be throttling or
 * lock contention. Retrying is still worthwhile.
 */
static inline bool compaction_withdrawn(enum compact_result result)
{
	/*
	 * Compaction backed off due to watermark checks for order-0,
	 * so the regular reclaim has to try harder and reclaim something.
	 */
	if (result == COMPACT_SKIPPED)
		return true;

	/*
	 * If compaction is deferred for high-order allocations, it is
	 * because sync compaction recently failed. If this is the case
	 * and the caller requested a THP allocation, we do not want
	 * to heavily disrupt the system, so we fail the allocation
	 * instead of entering direct reclaim.
	 */
	if (result == COMPACT_DEFERRED)
		return true;

	/*
	 * If compaction in async mode encounters contention or would block
	 * a higher-priority task, we back off early rather than cause stalls.
	 */
	if (result == COMPACT_CONTENDED)
		return true;

	/*
	 * The page scanners have met but the full zone wasn't scanned,
	 * so this is in fact a back off.
	 */
	if (result == COMPACT_PARTIAL_SKIPPED)
		return true;

	return false;
}
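
/*
 * Illustrative sketch, not part of this header: an allocator retry loop
 * can combine the three predicates above roughly in the spirit of
 * should_compact_retry() in mm/page_alloc.c. The helper below is
 * hypothetical: give up once the full zone scan failed, otherwise keep
 * trying while compaction either made progress or merely backed off.
 *
 *	static bool worth_retrying(enum compact_result result)
 *	{
 *		if (compaction_failed(result))
 *			return false;
 *
 *		return compaction_made_progress(result) ||
 *			compaction_withdrawn(result);
 *	}
 */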

bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
					int alloc_flags);

extern int kcompactd_run(int nid);
extern void kcompactd_stop(int nid);
extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx);
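
/*
 * Expected lifecycle, for illustration: kcompactd is a per-node kernel
 * thread - kcompactd_run() is called when a node comes online (and for
 * each node at boot), kcompactd_stop() when it goes offline, and
 * wakeup_kcompactd() from kswapd when it reclaims on behalf of a
 * high-order allocation. A hypothetical caller:
 *
 *	if (kcompactd_run(nid))
 *		pr_warn("kcompactd failed to start on node %d\n", nid);
 */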

#else
static inline void reset_isolation_suitable(pg_data_t *pgdat)
{
}

static inline enum compact_result compaction_suitable(struct zone *zone, int order,
					int alloc_flags, int classzone_idx)
{
	return COMPACT_SKIPPED;
}

static inline void defer_compaction(struct zone *zone, int order)
{
}

static inline bool compaction_deferred(struct zone *zone, int order)
{
	return true;
}

static inline bool compaction_made_progress(enum compact_result result)
{
	return false;
}

static inline bool compaction_failed(enum compact_result result)
{
	return false;
}

static inline bool compaction_withdrawn(enum compact_result result)
{
	return true;
}

static inline int kcompactd_run(int nid)
{
	return 0;
}

static inline void kcompactd_stop(int nid)
{
}

static inline void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx)
{
}

#endif /* CONFIG_COMPACTION */

#if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
struct node;
extern int compaction_register_node(struct node *node);
extern void compaction_unregister_node(struct node *node);

#else

static inline int compaction_register_node(struct node *node)
{
	return 0;
}

static inline void compaction_unregister_node(struct node *node)
{
}
#endif /* CONFIG_COMPACTION && CONFIG_SYSFS && CONFIG_NUMA */

#endif /* _LINUX_COMPACTION_H */