/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting.
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/cpu.h>
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/sched/signal.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include <linux/page-isolation.h>
#include <linux/kasan.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/page_owner.h>
#include "internal.h"

#ifdef CONFIG_COMPACTION
static inline void count_compact_event(enum vm_event_item item)
{
	count_vm_event(item);
}

static inline void count_compact_events(enum vm_event_item item, long delta)
{
	count_vm_events(item, delta);
}
#else
#define count_compact_event(item) do { } while (0)
#define count_compact_events(item, delta) do { } while (0)
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>

#define block_start_pfn(pfn, order)	round_down(pfn, 1UL << (order))
#define block_end_pfn(pfn, order)	ALIGN((pfn) + 1, 1UL << (order))
#define pageblock_start_pfn(pfn)	block_start_pfn(pfn, pageblock_order)
#define pageblock_end_pfn(pfn)		block_end_pfn(pfn, pageblock_order)

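/*
 * Editorial example (not part of the original source, assumes a typical
 * pageblock_order of 9, i.e. 512 base pages): block_start_pfn(1234, 9)
 * rounds down to 1024 and block_end_pfn(1234, 9) aligns up to 1536, giving
 * the [start, end) PFN range of the pageblock containing PFN 1234.
 */
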
static unsigned long release_freepages(struct list_head *freelist)
{
	struct page *page, *next;
	unsigned long high_pfn = 0;

	list_for_each_entry_safe(page, next, freelist, lru) {
		unsigned long pfn = page_to_pfn(page);
		list_del(&page->lru);
		__free_page(page);
		if (pfn > high_pfn)
			high_pfn = pfn;
	}

	return high_pfn;
}

static void map_pages(struct list_head *list)
{
	unsigned int i, order, nr_pages;
	struct page *page, *next;
	LIST_HEAD(tmp_list);

	list_for_each_entry_safe(page, next, list, lru) {
		list_del(&page->lru);

		order = page_private(page);
		nr_pages = 1 << order;

		post_alloc_hook(page, order, __GFP_MOVABLE);
		if (order)
			split_page(page, order);

		for (i = 0; i < nr_pages; i++) {
			list_add(&page->lru, &tmp_list);
			page++;
		}
	}

	list_splice(&tmp_list, list);
}

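/*
 * Editorial note (illustrative, not in the original file): map_pages()
 * post-processes pages captured by __isolate_free_page(). A single order-2
 * entry on the list, for example, is prepared via post_alloc_hook() and
 * split into four order-0 pages, all of which are spliced back onto the
 * caller's list.
 */
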
#ifdef CONFIG_COMPACTION

int PageMovable(struct page *page)
{
	struct address_space *mapping;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	if (!__PageMovable(page))
		return 0;

	mapping = page_mapping(page);
	if (mapping && mapping->a_ops && mapping->a_ops->isolate_page)
		return 1;

	return 0;
}
EXPORT_SYMBOL(PageMovable);

void __SetPageMovable(struct page *page, struct address_space *mapping)
{
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE((unsigned long)mapping & PAGE_MAPPING_MOVABLE, page);
	page->mapping = (void *)((unsigned long)mapping | PAGE_MAPPING_MOVABLE);
}
EXPORT_SYMBOL(__SetPageMovable);

void __ClearPageMovable(struct page *page)
{
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageMovable(page), page);
	/*
	 * Clear the registered address_space value while keeping the
	 * PAGE_MAPPING_MOVABLE flag, so that the VM can recognize a page
	 * that the driver has released after isolation. With it, VM
	 * migration doesn't try to put the page back.
	 */
	page->mapping = (void *)((unsigned long)page->mapping &
				PAGE_MAPPING_MOVABLE);
}
EXPORT_SYMBOL(__ClearPageMovable);

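/*
 * Editorial note (sketch based on Documentation/vm/page_migration, not in
 * the original file): a driver that wants its non-LRU pages migrated is
 * expected to lock each page and call __SetPageMovable(page, mapping) with
 * an address_space whose a_ops supply isolate_page()/putback_page(), and to
 * call __ClearPageMovable() under page lock once the page stops being
 * movable.
 */
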
/* Do not skip compaction more than 64 times */
#define COMPACT_MAX_DEFER_SHIFT 6

/*
 * Compaction is deferred when compaction fails to result in a page
 * allocation success. 1 << compact_defer_shift compactions are skipped up
 * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT.
 */
void defer_compaction(struct zone *zone, int order)
{
	zone->compact_considered = 0;
	zone->compact_defer_shift++;

	if (order < zone->compact_order_failed)
		zone->compact_order_failed = order;

	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;

	trace_mm_compaction_defer_compaction(zone, order);
}

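/*
 * Editorial example (illustrative): each defer_compaction() call bumps
 * compact_defer_shift, so successive failures roughly double the number of
 * subsequent compaction attempts that compaction_deferred() skips, capped
 * at 1 << COMPACT_MAX_DEFER_SHIFT (64).
 */
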
/* Returns true if compaction should be skipped this time */
bool compaction_deferred(struct zone *zone, int order)
{
	unsigned long defer_limit = 1UL << zone->compact_defer_shift;

	if (order < zone->compact_order_failed)
		return false;

	/* Avoid possible overflow */
	if (++zone->compact_considered > defer_limit)
		zone->compact_considered = defer_limit;

	if (zone->compact_considered >= defer_limit)
		return false;

	trace_mm_compaction_deferred(zone, order);

	return true;
}

/*
 * Update defer tracking counters after successful compaction of given order,
 * which means an allocation either succeeded (alloc_success == true) or is
 * expected to succeed.
 */
void compaction_defer_reset(struct zone *zone, int order,
		bool alloc_success)
{
	if (alloc_success) {
		zone->compact_considered = 0;
		zone->compact_defer_shift = 0;
	}
	if (order >= zone->compact_order_failed)
		zone->compact_order_failed = order + 1;

	trace_mm_compaction_defer_reset(zone, order);
}

/* Returns true if restarting compaction after many failures */
bool compaction_restarting(struct zone *zone, int order)
{
	if (order < zone->compact_order_failed)
		return false;

	return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
		zone->compact_considered >= 1UL << zone->compact_defer_shift;
}

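/*
 * Editorial sketch (not in the original file) of how a direct compaction
 * caller such as try_to_compact_pages() roughly uses the deferral API above:
 *
 *	if (compaction_deferred(zone, order))
 *		continue;			// skip this zone for now
 *	status = compact_zone_order(zone, order, ...);
 *	if (status == COMPACT_SUCCESS)
 *		compaction_defer_reset(zone, order, false);
 *	else if (status == COMPACT_COMPLETE)
 *		defer_compaction(zone, order);	// whole zone scanned, no luck
 */
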
/* Returns true if the pageblock should be scanned for pages to isolate. */
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	if (cc->ignore_skip_hint)
		return true;

	return !get_pageblock_skip(page);
}

static void reset_cached_positions(struct zone *zone)
{
	zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
	zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
	zone->compact_cached_free_pfn =
				pageblock_start_pfn(zone_end_pfn(zone) - 1);
}

/*
 * This function is called to clear all cached information on pageblocks that
 * should be skipped for page isolation when the migrate and free page
 * scanners meet.
 */
static void __reset_isolation_suitable(struct zone *zone)
{
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long pfn;

	zone->compact_blockskip_flush = false;

	/* Walk the zone and mark every pageblock as suitable for isolation */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		struct page *page;

		cond_resched();

		page = pfn_to_online_page(pfn);
		if (!page)
			continue;
		if (zone != page_zone(page))
			continue;

		clear_pageblock_skip(page);
	}

	reset_cached_positions(zone);
}

void reset_isolation_suitable(pg_data_t *pgdat)
{
	int zoneid;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
		struct zone *zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		/* Only flush if a full compaction finished recently */
		if (zone->compact_blockskip_flush)
			__reset_isolation_suitable(zone);
	}
}

/*
 * If no pages were isolated then mark this pageblock to be skipped in the
 * future. The information is later cleared by __reset_isolation_suitable().
 */
static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long nr_isolated,
			bool migrate_scanner)
{
	struct zone *zone = cc->zone;
	unsigned long pfn;

	if (cc->ignore_skip_hint)
		return;

	if (!page)
		return;

	if (nr_isolated)
		return;

	set_pageblock_skip(page);

	pfn = page_to_pfn(page);

	/* Update where async and sync compaction should restart */
	if (migrate_scanner) {
		if (pfn > zone->compact_cached_migrate_pfn[0])
			zone->compact_cached_migrate_pfn[0] = pfn;
		if (cc->mode != MIGRATE_ASYNC &&
		    pfn > zone->compact_cached_migrate_pfn[1])
			zone->compact_cached_migrate_pfn[1] = pfn;
	} else {
		if (pfn < zone->compact_cached_free_pfn)
			zone->compact_cached_free_pfn = pfn;
	}
}
#else
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	return true;
}

static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long nr_isolated,
			bool migrate_scanner)
{
}
#endif /* CONFIG_COMPACTION */

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. For async compaction, back out if the lock cannot
 * be taken immediately. For sync compaction, spin on the lock if needed.
 *
 * Returns true if the lock is held
 * Returns false if the lock is not held and compaction should abort
 */
static bool compact_trylock_irqsave(spinlock_t *lock, unsigned long *flags,
						struct compact_control *cc)
{
	if (cc->mode == MIGRATE_ASYNC) {
		if (!spin_trylock_irqsave(lock, *flags)) {
			cc->contended = true;
			return false;
		}
	} else {
		spin_lock_irqsave(lock, *flags);
	}

	return true;
}

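/*
 * Editorial note: callers below (isolate_freepages_block() and
 * isolate_migratepages_block()) take zone->lock or the zone LRU lock lazily
 * through this helper and treat a false return as "abort this pageblock"
 * rather than spinning in async mode.
 */
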
/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. The lock should be periodically unlocked to avoid
 * having disabled IRQs for a long time, even when there is nobody waiting on
 * the lock. It might also be that allowing the IRQs will result in
 * need_resched() becoming true. If scheduling is needed, async compaction
 * aborts. Sync compaction schedules.
 * Either compaction type will also abort if a fatal signal is pending.
 * In either case if the lock was locked, it is dropped and not regained.
 *
 * Returns true if compaction should abort due to fatal signal pending, or
 * async compaction due to need_resched()
 * Returns false when compaction can continue (sync compaction might have
 * scheduled)
 */
static bool compact_unlock_should_abort(spinlock_t *lock,
		unsigned long flags, bool *locked, struct compact_control *cc)
{
	if (*locked) {
		spin_unlock_irqrestore(lock, flags);
		*locked = false;
	}

	if (fatal_signal_pending(current)) {
		cc->contended = true;
		return true;
	}

	if (need_resched()) {
		if (cc->mode == MIGRATE_ASYNC) {
			cc->contended = true;
			return true;
		}
		cond_resched();
	}

	return false;
}

/*
 * Aside from avoiding lock contention, compaction also periodically checks
 * need_resched() and either schedules in sync compaction or aborts async
 * compaction. This is similar to what compact_unlock_should_abort() does, but
 * is used where no lock is concerned.
 *
 * Returns false when no scheduling was needed, or sync compaction scheduled.
 * Returns true when async compaction should abort.
 */
static inline bool compact_should_abort(struct compact_control *cc)
{
	/* async compaction aborts if contended */
	if (need_resched()) {
		if (cc->mode == MIGRATE_ASYNC) {
			cc->contended = true;
			return true;
		}

		cond_resched();
	}

	return false;
}

/*
 * Isolate free pages onto a private freelist. If @strict is true, will abort
 * returning 0 on any invalid PFNs or non-free pages inside of the pageblock
 * (even though it may still end up isolating some pages).
 */
static unsigned long isolate_freepages_block(struct compact_control *cc,
				unsigned long *start_pfn,
				unsigned long end_pfn,
				struct list_head *freelist,
				bool strict)
{
	int nr_scanned = 0, total_isolated = 0;
	struct page *cursor, *valid_page = NULL;
	unsigned long flags = 0;
	bool locked = false;
	unsigned long blockpfn = *start_pfn;
	unsigned int order;

	cursor = pfn_to_page(blockpfn);

	/* Isolate free pages. */
	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
		int isolated;
		struct page *page = cursor;

		/*
		 * Periodically drop the lock (if held) regardless of its
		 * contention, to give chance to IRQs. Abort if fatal signal
		 * pending or async compaction detects need_resched()
		 */
		if (!(blockpfn % SWAP_CLUSTER_MAX)
		    && compact_unlock_should_abort(&cc->zone->lock, flags,
								&locked, cc))
			break;

		nr_scanned++;
		if (!pfn_valid_within(blockpfn))
			goto isolate_fail;

		if (!valid_page)
			valid_page = page;

		/*
		 * For compound pages such as THP and hugetlbfs, we can save
		 * potentially a lot of iterations if we skip them at once.
		 * The check is racy, but we can consider only valid values
		 * and the only danger is skipping too much.
		 */
		if (PageCompound(page)) {
			unsigned int comp_order = compound_order(page);

			if (likely(comp_order < MAX_ORDER)) {
				blockpfn += (1UL << comp_order) - 1;
				cursor += (1UL << comp_order) - 1;
			}

			goto isolate_fail;
		}

		if (!PageBuddy(page))
			goto isolate_fail;

		/*
		 * If we already hold the lock, we can skip some rechecking.
		 * Note that if we hold the lock now, checked_pageblock was
		 * already set in some previous iteration (or strict is true),
		 * so it is correct to skip the suitable migration target
		 * recheck as well.
		 */
		if (!locked) {
			/*
			 * The zone lock must be held to isolate freepages.
			 * Unfortunately this is a very coarse lock and can be
			 * heavily contended if there are parallel allocations
			 * or parallel compactions. For async compaction do not
			 * spin on the lock and we acquire the lock as late as
			 * possible.
			 */
			locked = compact_trylock_irqsave(&cc->zone->lock,
								&flags, cc);
			if (!locked)
				break;

			/* Recheck this is a buddy page under lock */
			if (!PageBuddy(page))
				goto isolate_fail;
		}

		/* Found a free page, will break it into order-0 pages */
		order = page_order(page);
		isolated = __isolate_free_page(page, order);
		if (!isolated)
			break;
		set_page_private(page, order);

		total_isolated += isolated;
		cc->nr_freepages += isolated;
		list_add_tail(&page->lru, freelist);

		if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
			blockpfn += isolated;
			break;
		}
		/* Advance to the end of split page */
		blockpfn += isolated - 1;
		cursor += isolated - 1;
		continue;

isolate_fail:
		if (strict)
			break;
		else
			continue;

	}

	if (locked)
		spin_unlock_irqrestore(&cc->zone->lock, flags);

	/*
	 * There is a tiny chance that we have read bogus compound_order(),
	 * so be careful to not go outside of the pageblock.
	 */
	if (unlikely(blockpfn > end_pfn))
		blockpfn = end_pfn;

	trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn,
					nr_scanned, total_isolated);

	/* Record how far we have got within the block */
	*start_pfn = blockpfn;

	/*
	 * If strict isolation is requested by CMA then check that all the
	 * pages requested were isolated. If there were any failures, 0 is
	 * returned and CMA will fail.
	 */
	if (strict && blockpfn < end_pfn)
		total_isolated = 0;

	/* Update the pageblock-skip if the whole pageblock was scanned */
	if (blockpfn == end_pfn)
		update_pageblock_skip(cc, valid_page, total_isolated, false);

	cc->total_free_scanned += nr_scanned;
	if (total_isolated)
		count_compact_events(COMPACTISOLATED, total_isolated);
	return total_isolated;
}

/**
 * isolate_freepages_range() - isolate free pages.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Non-free pages, invalid PFNs, or zone boundaries within the
 * [start_pfn, end_pfn) range are considered errors, cause function to
 * undo its actions and return zero.
 *
 * Otherwise, function returns one-past-the-last PFN of isolated page
 * (which may be greater than end_pfn if end fell in the middle of
 * a free page).
 */
unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long isolated, pfn, block_start_pfn, block_end_pfn;
	LIST_HEAD(freelist);

	pfn = start_pfn;
	block_start_pfn = pageblock_start_pfn(pfn);
	if (block_start_pfn < cc->zone->zone_start_pfn)
		block_start_pfn = cc->zone->zone_start_pfn;
	block_end_pfn = pageblock_end_pfn(pfn);

	for (; pfn < end_pfn; pfn += isolated,
				block_start_pfn = block_end_pfn,
				block_end_pfn += pageblock_nr_pages) {
		/* Protect pfn from changing by isolate_freepages_block */
		unsigned long isolate_start_pfn = pfn;

		block_end_pfn = min(block_end_pfn, end_pfn);

		/*
		 * pfn could pass block_end_pfn if the isolated free page
		 * is larger than pageblock order. In this case, adjust the
		 * scanning range to the proper pageblock.
		 */
		if (pfn >= block_end_pfn) {
			block_start_pfn = pageblock_start_pfn(pfn);
			block_end_pfn = pageblock_end_pfn(pfn);
			block_end_pfn = min(block_end_pfn, end_pfn);
		}

		if (!pageblock_pfn_to_page(block_start_pfn,
					block_end_pfn, cc->zone))
			break;

		isolated = isolate_freepages_block(cc, &isolate_start_pfn,
						block_end_pfn, &freelist, true);

		/*
		 * In strict mode, isolate_freepages_block() returns 0 if
		 * there are any holes in the block (ie. invalid PFNs or
		 * non-free pages).
		 */
		if (!isolated)
			break;

		/*
		 * If we managed to isolate pages, it is always (1 << n) *
		 * pageblock_nr_pages for some non-negative n. (Max order
		 * page may span two pageblocks).
		 */
	}

	/* __isolate_free_page() does not map the pages */
	map_pages(&freelist);

	if (pfn < end_pfn) {
		/* Loop terminated early, cleanup. */
		release_freepages(&freelist);
		return 0;
	}

	/* We don't use freelists for anything. */
	return pfn;
}

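/*
 * Editorial note: the strict variant above is used by the contiguous
 * allocation path (alloc_contig_range(), which backs CMA); it must isolate
 * every free page in the requested range or give up entirely.
 */
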
/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(struct zone *zone)
{
	unsigned long active, inactive, isolated;

	inactive = node_page_state(zone->zone_pgdat, NR_INACTIVE_FILE) +
			node_page_state(zone->zone_pgdat, NR_INACTIVE_ANON);
	active = node_page_state(zone->zone_pgdat, NR_ACTIVE_FILE) +
			node_page_state(zone->zone_pgdat, NR_ACTIVE_ANON);
	isolated = node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE) +
			node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON);

	return isolated > (inactive + active) / 2;
}

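/*
 * Editorial example (illustrative): on a node whose active + inactive LRU
 * lists hold 200000 pages, the check above throttles compaction once more
 * than 100000 pages sit on the NR_ISOLATED_* counters, giving parallel
 * reclaimers and compactors time to put pages back.
 */
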
/**
 * isolate_migratepages_block() - isolate all migrate-able pages within
 *				  a single pageblock
 * @cc:		Compaction control structure.
 * @low_pfn:	The first PFN to isolate
 * @end_pfn:	The one-past-the-last PFN to isolate, within same pageblock
 * @isolate_mode: Isolation mode to be used.
 *
 * Isolate all pages that can be migrated from the range specified by
 * [low_pfn, end_pfn). The range is expected to be within same pageblock.
 * Returns zero if there is a fatal signal pending, otherwise PFN of the
 * first page that was not scanned (which may be less than, equal to or
 * greater than end_pfn).
 *
 * The pages are isolated on cc->migratepages list (not required to be empty),
 * and cc->nr_migratepages is updated accordingly. The cc->migrate_pfn field
 * is neither read nor updated.
 */
static unsigned long
isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
			unsigned long end_pfn, isolate_mode_t isolate_mode)
{
	struct zone *zone = cc->zone;
	unsigned long nr_scanned = 0, nr_isolated = 0;
	struct lruvec *lruvec;
	unsigned long flags = 0;
	bool locked = false;
	struct page *page = NULL, *valid_page = NULL;
	unsigned long start_pfn = low_pfn;
	bool skip_on_failure = false;
	unsigned long next_skip_pfn = 0;

	/*
	 * Ensure that there are not too many pages isolated from the LRU
	 * list by either parallel reclaimers or compaction. If there are,
	 * delay for some time until fewer pages are isolated
	 */
	while (unlikely(too_many_isolated(zone))) {
		/* async migration should just abort */
		if (cc->mode == MIGRATE_ASYNC)
			return 0;

		congestion_wait(BLK_RW_ASYNC, HZ/10);

		if (fatal_signal_pending(current))
			return 0;
	}

	if (compact_should_abort(cc))
		return 0;

	if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) {
		skip_on_failure = true;
		next_skip_pfn = block_end_pfn(low_pfn, cc->order);
	}

	/* Time to isolate some pages for migration */
	for (; low_pfn < end_pfn; low_pfn++) {

		if (skip_on_failure && low_pfn >= next_skip_pfn) {
			/*
			 * We have isolated all migration candidates in the
			 * previous order-aligned block, and did not skip it due
			 * to failure. We should migrate the pages now and
			 * hopefully succeed compaction.
			 */
			if (nr_isolated)
				break;

			/*
			 * We failed to isolate in the previous order-aligned
			 * block. Set the new boundary to the end of the
			 * current block. Note we can't simply increase
			 * next_skip_pfn by 1 << order, as low_pfn might have
			 * been incremented by a higher number due to skipping
			 * a compound or a high-order buddy page in the
			 * previous loop iteration.
			 */
			next_skip_pfn = block_end_pfn(low_pfn, cc->order);
		}

		/*
		 * Periodically drop the lock (if held) regardless of its
		 * contention, to give chance to IRQs. Abort async compaction
		 * if contended.
		 */
		if (!(low_pfn % SWAP_CLUSTER_MAX)
		    && compact_unlock_should_abort(zone_lru_lock(zone), flags,
								&locked, cc))
			break;

		if (!pfn_valid_within(low_pfn))
			goto isolate_fail;
		nr_scanned++;

		page = pfn_to_page(low_pfn);

		if (!valid_page)
			valid_page = page;

		/*
		 * Skip if free. We read page order here without zone lock
		 * which is generally unsafe, but the race window is small and
		 * the worst thing that can happen is that we skip some
		 * potential isolation targets.
		 */
		if (PageBuddy(page)) {
			unsigned long freepage_order = page_order_unsafe(page);

			/*
			 * Without lock, we cannot be sure that what we got is
			 * a valid page order. Consider only values in the
			 * valid order range to prevent low_pfn overflow.
			 */
			if (freepage_order > 0 && freepage_order < MAX_ORDER)
				low_pfn += (1UL << freepage_order) - 1;
			continue;
		}

		/*
		 * Regardless of being on LRU, compound pages such as THP and
		 * hugetlbfs are not to be compacted. We can potentially save
		 * a lot of iterations if we skip them at once. The check is
		 * racy, but we can consider only valid values and the only
		 * danger is skipping too much.
		 */
		if (PageCompound(page)) {
			unsigned int comp_order = compound_order(page);

			if (likely(comp_order < MAX_ORDER))
				low_pfn += (1UL << comp_order) - 1;

			goto isolate_fail;
		}

		/*
		 * Check may be lockless but that's ok as we recheck later.
		 * It's possible to migrate LRU and non-LRU movable pages.
		 * Skip any other type of page.
		 */
		if (!PageLRU(page)) {
			/*
			 * __PageMovable can return false positive so we need
			 * to verify it under page_lock.
			 */
			if (unlikely(__PageMovable(page)) &&
					!PageIsolated(page)) {
				if (locked) {
					spin_unlock_irqrestore(zone_lru_lock(zone),
									flags);
					locked = false;
				}

				if (!isolate_movable_page(page, isolate_mode))
					goto isolate_success;
			}

			goto isolate_fail;
		}

		/*
		 * Migration will fail if an anonymous page is pinned in memory,
		 * so avoid taking lru_lock and isolating it unnecessarily in an
		 * admittedly racy check.
		 */
		if (!page_mapping(page) &&
				page_count(page) > page_mapcount(page))
			goto isolate_fail;

		/*
		 * Only allow to migrate anonymous pages in GFP_NOFS context
		 * because those do not depend on fs locks.
		 */
		if (!(cc->gfp_mask & __GFP_FS) && page_mapping(page))
			goto isolate_fail;

		/* If we already hold the lock, we can skip some rechecking */
		if (!locked) {
			locked = compact_trylock_irqsave(zone_lru_lock(zone),
								&flags, cc);
			if (!locked)
				break;

			/* Recheck PageLRU and PageCompound under lock */
			if (!PageLRU(page))
				goto isolate_fail;

			/*
			 * The page became compound since the non-locked check,
			 * and it's on LRU. It can only be a THP so the order
			 * is safe to read and it's 0 for tail pages.
			 */
			if (unlikely(PageCompound(page))) {
				low_pfn += (1UL << compound_order(page)) - 1;
				goto isolate_fail;
			}
		}

		lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);

		/* Try isolate the page */
		if (__isolate_lru_page(page, isolate_mode) != 0)
			goto isolate_fail;

		VM_BUG_ON_PAGE(PageCompound(page), page);

		/* Successfully isolated */
		del_page_from_lru_list(page, lruvec, page_lru(page));
		inc_node_page_state(page,
				NR_ISOLATED_ANON + page_is_file_cache(page));

isolate_success:
		list_add(&page->lru, &cc->migratepages);
		cc->nr_migratepages++;
		nr_isolated++;

		/*
		 * Record where we could have freed pages by migration and not
		 * yet flushed them to buddy allocator.
		 * - this is the lowest page that was isolated and likely be
		 * then freed by migration.
		 */
		if (!cc->last_migrated_pfn)
			cc->last_migrated_pfn = low_pfn;

		/* Avoid isolating too much */
		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
			++low_pfn;
			break;
		}

		continue;
isolate_fail:
		if (!skip_on_failure)
			continue;

		/*
		 * We have isolated some pages, but then failed. Release them
		 * instead of migrating, as we cannot form the cc->order buddy
		 * page anyway.
		 */
		if (nr_isolated) {
			if (locked) {
				spin_unlock_irqrestore(zone_lru_lock(zone), flags);
				locked = false;
			}
			putback_movable_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			cc->last_migrated_pfn = 0;
			nr_isolated = 0;
		}

		if (low_pfn < next_skip_pfn) {
			low_pfn = next_skip_pfn - 1;
			/*
			 * The check near the loop beginning would have updated
			 * next_skip_pfn too, but this is a bit simpler.
			 */
			next_skip_pfn += 1UL << cc->order;
		}
	}

	/*
	 * The PageBuddy() check could have potentially brought us outside
	 * the range to be scanned.
	 */
	if (unlikely(low_pfn > end_pfn))
		low_pfn = end_pfn;

	if (locked)
		spin_unlock_irqrestore(zone_lru_lock(zone), flags);

	/*
	 * Update the pageblock-skip information and cached scanner pfn,
	 * if the whole pageblock was scanned without isolating any page.
	 */
	if (low_pfn == end_pfn)
		update_pageblock_skip(cc, valid_page, nr_isolated, true);

	trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
						nr_scanned, nr_isolated);

	cc->total_migrate_scanned += nr_scanned;
	if (nr_isolated)
		count_compact_events(COMPACTISOLATED, nr_isolated);

	return low_pfn;
}

/**
 * isolate_migratepages_range() - isolate migrate-able pages in a PFN range
 * @cc:        Compaction control structure.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Returns zero if isolation fails fatally due to e.g. pending signal.
 * Otherwise, function returns one-past-the-last PFN of isolated page
 * (which may be greater than end_pfn if end fell in the middle of a THP page).
 */
| 946 | unsigned long |
| 947 | isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn, |
| 948 | unsigned long end_pfn) |
| 949 | { |
Joonsoo Kim | e1409c3 | 2016-03-15 14:57:48 -0700 | [diff] [blame] | 950 | unsigned long pfn, block_start_pfn, block_end_pfn; |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 951 | |
| 952 | /* Scan block by block. First and last block may be incomplete */ |
| 953 | pfn = start_pfn; |
Vlastimil Babka | 06b6640 | 2016-05-19 17:11:48 -0700 | [diff] [blame] | 954 | block_start_pfn = pageblock_start_pfn(pfn); |
Joonsoo Kim | e1409c3 | 2016-03-15 14:57:48 -0700 | [diff] [blame] | 955 | if (block_start_pfn < cc->zone->zone_start_pfn) |
| 956 | block_start_pfn = cc->zone->zone_start_pfn; |
Vlastimil Babka | 06b6640 | 2016-05-19 17:11:48 -0700 | [diff] [blame] | 957 | block_end_pfn = pageblock_end_pfn(pfn); |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 958 | |
| 959 | for (; pfn < end_pfn; pfn = block_end_pfn, |
Joonsoo Kim | e1409c3 | 2016-03-15 14:57:48 -0700 | [diff] [blame] | 960 | block_start_pfn = block_end_pfn, |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 961 | block_end_pfn += pageblock_nr_pages) { |
| 962 | |
| 963 | block_end_pfn = min(block_end_pfn, end_pfn); |
| 964 | |
Joonsoo Kim | e1409c3 | 2016-03-15 14:57:48 -0700 | [diff] [blame] | 965 | if (!pageblock_pfn_to_page(block_start_pfn, |
| 966 | block_end_pfn, cc->zone)) |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 967 | continue; |
| 968 | |
| 969 | pfn = isolate_migratepages_block(cc, pfn, block_end_pfn, |
| 970 | ISOLATE_UNEVICTABLE); |
| 971 | |
Hugh Dickins | 14af4a5 | 2016-05-05 16:22:15 -0700 | [diff] [blame] | 972 | if (!pfn) |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 973 | break; |
Joonsoo Kim | 6ea41c0 | 2014-10-29 14:50:20 -0700 | [diff] [blame] | 974 | |
| 975 | if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) |
| 976 | break; |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 977 | } |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 978 | |
| 979 | return pfn; |
| 980 | } |
| 981 | |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 982 | #endif /* CONFIG_COMPACTION || CONFIG_CMA */ |
| 983 | #ifdef CONFIG_COMPACTION |
Andrew Morton | 018e9a4 | 2015-04-15 16:15:20 -0700 | [diff] [blame] | 984 | |
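| | /* |
| | * Returns true if the page is within a block suitable for migration from. |
| | * Anything other than async direct compaction accepts any pageblock; async |
| | * direct compaction only uses blocks matching the allocation's migratetype |
| | * (any movable block for MIGRATE_MOVABLE requests). |
| | */ |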
Vlastimil Babka | b682deb | 2017-05-08 15:54:43 -0700 | [diff] [blame] | 985 | static bool suitable_migration_source(struct compact_control *cc, |
| 986 | struct page *page) |
| 987 | { |
Vlastimil Babka | 282722b | 2017-05-08 15:54:49 -0700 | [diff] [blame] | 988 | int block_mt; |
| 989 | |
| 990 | if ((cc->mode != MIGRATE_ASYNC) || !cc->direct_compaction) |
Vlastimil Babka | b682deb | 2017-05-08 15:54:43 -0700 | [diff] [blame] | 991 | return true; |
| 992 | |
Vlastimil Babka | 282722b | 2017-05-08 15:54:49 -0700 | [diff] [blame] | 993 | block_mt = get_pageblock_migratetype(page); |
| 994 | |
| 995 | if (cc->migratetype == MIGRATE_MOVABLE) |
| 996 | return is_migrate_movable(block_mt); |
| 997 | else |
| 998 | return block_mt == cc->migratetype; |
Vlastimil Babka | b682deb | 2017-05-08 15:54:43 -0700 | [diff] [blame] | 999 | } |
| 1000 | |
Andrew Morton | 018e9a4 | 2015-04-15 16:15:20 -0700 | [diff] [blame] | 1001 | /* Returns true if the page is within a block suitable for migration to */ |
Vlastimil Babka | 9f7e338 | 2016-10-07 17:00:37 -0700 | [diff] [blame] | 1002 | static bool suitable_migration_target(struct compact_control *cc, |
| 1003 | struct page *page) |
Andrew Morton | 018e9a4 | 2015-04-15 16:15:20 -0700 | [diff] [blame] | 1004 | { |
| 1005 | /* If the page is a large free page, then disallow migration */ |
| 1006 | if (PageBuddy(page)) { |
| 1007 | /* |
| 1008 | * We are checking page_order without zone->lock taken. But |
| 1009 | * the only small danger is that we skip a potentially suitable |
| 1010 | * pageblock, so it's not worth checking that the order is within a valid range. |
| 1011 | */ |
| 1012 | if (page_order_unsafe(page) >= pageblock_order) |
| 1013 | return false; |
| 1014 | } |
| 1015 | |
Yisheng Xie | 1ef36db | 2017-05-03 14:53:54 -0700 | [diff] [blame] | 1016 | if (cc->ignore_block_suitable) |
| 1017 | return true; |
| 1018 | |
Andrew Morton | 018e9a4 | 2015-04-15 16:15:20 -0700 | [diff] [blame] | 1019 | /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */ |
Vlastimil Babka | b682deb | 2017-05-08 15:54:43 -0700 | [diff] [blame] | 1020 | if (is_migrate_movable(get_pageblock_migratetype(page))) |
Andrew Morton | 018e9a4 | 2015-04-15 16:15:20 -0700 | [diff] [blame] | 1021 | return true; |
| 1022 | |
| 1023 | /* Otherwise skip the block */ |
| 1024 | return false; |
| 1025 | } |
| 1026 | |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 1027 | /* |
Vlastimil Babka | f2849aa | 2015-09-08 15:02:36 -0700 | [diff] [blame] | 1028 | * Test whether the free scanner has reached the same or lower pageblock than |
| 1029 | * the migration scanner, and compaction should thus terminate. |
| 1030 | */ |
| 1031 | static inline bool compact_scanners_met(struct compact_control *cc) |
| 1032 | { |
| 1033 | return (cc->free_pfn >> pageblock_order) |
| 1034 | <= (cc->migrate_pfn >> pageblock_order); |
| 1035 | } |
| 1036 | |
| 1037 | /* |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 1038 | * Based on information in the current compact_control, find blocks |
| 1039 | * suitable for isolating free pages from and then isolate them. |
| 1040 | */ |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 1041 | static void isolate_freepages(struct compact_control *cc) |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 1042 | { |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 1043 | struct zone *zone = cc->zone; |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 1044 | struct page *page; |
Vlastimil Babka | c96b9e5 | 2014-06-04 16:07:26 -0700 | [diff] [blame] | 1045 | unsigned long block_start_pfn; /* start of current pageblock */ |
Vlastimil Babka | e14c720 | 2014-10-09 15:27:20 -0700 | [diff] [blame] | 1046 | unsigned long isolate_start_pfn; /* exact pfn we start at */ |
Vlastimil Babka | c96b9e5 | 2014-06-04 16:07:26 -0700 | [diff] [blame] | 1047 | unsigned long block_end_pfn; /* end of current pageblock */ |
| 1048 | unsigned long low_pfn; /* lowest pfn scanner is able to scan */ |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 1049 | struct list_head *freelist = &cc->freepages; |
| 1050 | |
| 1051 | /* |
| 1052 | * Initialise the free scanner. The starting point is where we last |
Vlastimil Babka | 49e068f | 2014-05-06 12:50:03 -0700 | [diff] [blame] | 1053 | * successfully isolated from, zone-cached value, or the end of the |
Vlastimil Babka | e14c720 | 2014-10-09 15:27:20 -0700 | [diff] [blame] | 1054 | * zone when isolating for the first time. For looping we also need |
| 1055 | * this pfn aligned down to the pageblock boundary, because we do |
Vlastimil Babka | c96b9e5 | 2014-06-04 16:07:26 -0700 | [diff] [blame] | 1056 | * block_start_pfn -= pageblock_nr_pages in the for loop. |
| 1057 | * For the ending point, take care when isolating in the last pageblock of |
| 1058 | * a zone which ends in the middle of a pageblock. |
Vlastimil Babka | 49e068f | 2014-05-06 12:50:03 -0700 | [diff] [blame] | 1059 | * The low boundary is the end of the pageblock the migration scanner |
| 1060 | * is using. |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 1061 | */ |
Vlastimil Babka | e14c720 | 2014-10-09 15:27:20 -0700 | [diff] [blame] | 1062 | isolate_start_pfn = cc->free_pfn; |
Vlastimil Babka | 06b6640 | 2016-05-19 17:11:48 -0700 | [diff] [blame] | 1063 | block_start_pfn = pageblock_start_pfn(cc->free_pfn); |
Vlastimil Babka | c96b9e5 | 2014-06-04 16:07:26 -0700 | [diff] [blame] | 1064 | block_end_pfn = min(block_start_pfn + pageblock_nr_pages, |
| 1065 | zone_end_pfn(zone)); |
Vlastimil Babka | 06b6640 | 2016-05-19 17:11:48 -0700 | [diff] [blame] | 1066 | low_pfn = pageblock_end_pfn(cc->migrate_pfn); |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 1067 | |
| 1068 | /* |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 1069 | * Isolate free pages until enough are available to migrate the |
| 1070 | * pages on cc->migratepages. We stop searching if the migrate |
| 1071 | * and free page scanners meet or enough free pages are isolated. |
| 1072 | */ |
Vlastimil Babka | f5f61a3 | 2015-09-08 15:02:39 -0700 | [diff] [blame] | 1073 | for (; block_start_pfn >= low_pfn; |
Vlastimil Babka | c96b9e5 | 2014-06-04 16:07:26 -0700 | [diff] [blame] | 1074 | block_end_pfn = block_start_pfn, |
Vlastimil Babka | e14c720 | 2014-10-09 15:27:20 -0700 | [diff] [blame] | 1075 | block_start_pfn -= pageblock_nr_pages, |
| 1076 | isolate_start_pfn = block_start_pfn) { |
David Rientjes | f6ea3ad | 2013-09-30 13:45:03 -0700 | [diff] [blame] | 1077 | /* |
| 1078 | * This can iterate a massively long zone without finding any |
| 1079 | * suitable migration targets, so periodically check if we need |
Vlastimil Babka | be97657 | 2014-06-04 16:10:41 -0700 | [diff] [blame] | 1080 | * to schedule, or even abort async compaction. |
David Rientjes | f6ea3ad | 2013-09-30 13:45:03 -0700 | [diff] [blame] | 1081 | */ |
Vlastimil Babka | be97657 | 2014-06-04 16:10:41 -0700 | [diff] [blame] | 1082 | if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages)) |
| 1083 | && compact_should_abort(cc)) |
| 1084 | break; |
David Rientjes | f6ea3ad | 2013-09-30 13:45:03 -0700 | [diff] [blame] | 1085 | |
Vlastimil Babka | 7d49d88 | 2014-10-09 15:27:11 -0700 | [diff] [blame] | 1086 | page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn, |
| 1087 | zone); |
| 1088 | if (!page) |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 1089 | continue; |
| 1090 | |
| 1091 | /* Check the block is suitable for migration */ |
Vlastimil Babka | 9f7e338 | 2016-10-07 17:00:37 -0700 | [diff] [blame] | 1092 | if (!suitable_migration_target(cc, page)) |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 1093 | continue; |
Linus Torvalds | 68e3e92 | 2012-06-03 20:05:57 -0700 | [diff] [blame] | 1094 | |
Mel Gorman | bb13ffe | 2012-10-08 16:32:41 -0700 | [diff] [blame] | 1095 | /* If isolation recently failed, do not retry */ |
| 1096 | if (!isolation_suitable(cc, page)) |
| 1097 | continue; |
| 1098 | |
Vlastimil Babka | e14c720 | 2014-10-09 15:27:20 -0700 | [diff] [blame] | 1099 | /* Found a block suitable for isolating free pages from. */ |
David Rientjes | a46cbf3 | 2016-07-14 12:06:50 -0700 | [diff] [blame] | 1100 | isolate_freepages_block(cc, &isolate_start_pfn, block_end_pfn, |
| 1101 | freelist, false); |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 1102 | |
| 1103 | /* |
David Rientjes | a46cbf3 | 2016-07-14 12:06:50 -0700 | [diff] [blame] | 1104 | * If we isolated enough freepages, or aborted due to lock |
| 1105 | * contention, terminate. |
Vlastimil Babka | e14c720 | 2014-10-09 15:27:20 -0700 | [diff] [blame] | 1106 | */ |
Vlastimil Babka | f5f61a3 | 2015-09-08 15:02:39 -0700 | [diff] [blame] | 1107 | if ((cc->nr_freepages >= cc->nr_migratepages) |
| 1108 | || cc->contended) { |
David Rientjes | a46cbf3 | 2016-07-14 12:06:50 -0700 | [diff] [blame] | 1109 | if (isolate_start_pfn >= block_end_pfn) { |
| 1110 | /* |
| 1111 | * Restart at previous pageblock if more |
| 1112 | * freepages can be isolated next time. |
| 1113 | */ |
Vlastimil Babka | f5f61a3 | 2015-09-08 15:02:39 -0700 | [diff] [blame] | 1114 | isolate_start_pfn = |
| 1115 | block_start_pfn - pageblock_nr_pages; |
David Rientjes | a46cbf3 | 2016-07-14 12:06:50 -0700 | [diff] [blame] | 1116 | } |
Vlastimil Babka | be97657 | 2014-06-04 16:10:41 -0700 | [diff] [blame] | 1117 | break; |
David Rientjes | a46cbf3 | 2016-07-14 12:06:50 -0700 | [diff] [blame] | 1118 | } else if (isolate_start_pfn < block_end_pfn) { |
Vlastimil Babka | f5f61a3 | 2015-09-08 15:02:39 -0700 | [diff] [blame] | 1119 | /* |
David Rientjes | a46cbf3 | 2016-07-14 12:06:50 -0700 | [diff] [blame] | 1120 | * If isolation failed early, do not continue |
| 1121 | * needlessly. |
Vlastimil Babka | f5f61a3 | 2015-09-08 15:02:39 -0700 | [diff] [blame] | 1122 | */ |
David Rientjes | a46cbf3 | 2016-07-14 12:06:50 -0700 | [diff] [blame] | 1123 | break; |
Vlastimil Babka | f5f61a3 | 2015-09-08 15:02:39 -0700 | [diff] [blame] | 1124 | } |
Michal Nazarewicz | 2fe86e0 | 2012-01-30 13:16:26 +0100 | [diff] [blame] | 1125 | } |
| 1126 | |
Joonsoo Kim | 66c6422 | 2016-07-26 15:23:40 -0700 | [diff] [blame] | 1127 | /* __isolate_free_page() does not map the pages */ |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 1128 | map_pages(freelist); |
Michal Nazarewicz | 2fe86e0 | 2012-01-30 13:16:26 +0100 | [diff] [blame] | 1129 | |
Vlastimil Babka | 7ed695e | 2014-01-21 15:51:09 -0800 | [diff] [blame] | 1130 | /* |
Vlastimil Babka | f5f61a3 | 2015-09-08 15:02:39 -0700 | [diff] [blame] | 1131 | * Record where the free scanner will restart next time. Either we |
| 1132 | * broke from the loop and set isolate_start_pfn based on the last |
| 1133 | * call to isolate_freepages_block(), or we met the migration scanner |
| 1134 | * and the loop terminated because isolate_start_pfn < low_pfn. |
Vlastimil Babka | 7ed695e | 2014-01-21 15:51:09 -0800 | [diff] [blame] | 1135 | */ |
Vlastimil Babka | f5f61a3 | 2015-09-08 15:02:39 -0700 | [diff] [blame] | 1136 | cc->free_pfn = isolate_start_pfn; |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 1137 | } |
| 1138 | |
| 1139 | /* |
| 1140 | * This is a migrate-callback that "allocates" freepages by taking pages |
| 1141 | * from the isolated freelists in the block we are migrating to. |
| 1142 | */ |
| 1143 | static struct page *compaction_alloc(struct page *migratepage, |
| 1144 | unsigned long data, |
| 1145 | int **result) |
| 1146 | { |
| 1147 | struct compact_control *cc = (struct compact_control *)data; |
| 1148 | struct page *freepage; |
| 1149 | |
Vlastimil Babka | be97657 | 2014-06-04 16:10:41 -0700 | [diff] [blame] | 1150 | /* |
| 1151 | * Isolate free pages if necessary, and if we are not aborting due to |
| 1152 | * contention. |
| 1153 | */ |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 1154 | if (list_empty(&cc->freepages)) { |
Vlastimil Babka | be97657 | 2014-06-04 16:10:41 -0700 | [diff] [blame] | 1155 | if (!cc->contended) |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 1156 | isolate_freepages(cc); |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 1157 | |
| 1158 | if (list_empty(&cc->freepages)) |
| 1159 | return NULL; |
| 1160 | } |
| 1161 | |
| 1162 | freepage = list_entry(cc->freepages.next, struct page, lru); |
| 1163 | list_del(&freepage->lru); |
| 1164 | cc->nr_freepages--; |
| 1165 | |
| 1166 | return freepage; |
| 1167 | } |
| 1168 | |
| 1169 | /* |
David Rientjes | d53aea3 | 2014-06-04 16:08:26 -0700 | [diff] [blame] | 1170 | * This is a migrate-callback that "frees" freepages back to the isolated |
| 1171 | * freelist. All pages on the freelist are from the same zone, so there is no |
| 1172 | * special handling needed for NUMA. |
| 1173 | */ |
| 1174 | static void compaction_free(struct page *page, unsigned long data) |
| 1175 | { |
| 1176 | struct compact_control *cc = (struct compact_control *)data; |
| 1177 | |
| 1178 | list_add(&page->lru, &cc->freepages); |
| 1179 | cc->nr_freepages++; |
| 1180 | } |
| 1181 | |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 1182 | /* possible outcome of isolate_migratepages */ |
| 1183 | typedef enum { |
| 1184 | ISOLATE_ABORT, /* Abort compaction now */ |
| 1185 | ISOLATE_NONE, /* No pages isolated, continue scanning */ |
| 1186 | ISOLATE_SUCCESS, /* Pages isolated, migrate */ |
| 1187 | } isolate_migrate_t; |
| 1188 | |
| 1189 | /* |
Eric B Munson | 5bbe354 | 2015-04-15 16:13:20 -0700 | [diff] [blame] | 1190 | * Allow userspace to control policy on scanning the unevictable LRU for |
| 1191 | * compactable pages. |
| 1192 | */ |
| 1193 | int sysctl_compact_unevictable_allowed __read_mostly = 1; |
| 1194 | |
| 1195 | /* |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 1196 | * Isolate all pages that can be migrated from the first suitable block, |
| 1197 | * starting at the block pointed to by the migrate scanner pfn within |
| 1198 | * compact_control. |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 1199 | */ |
| 1200 | static isolate_migrate_t isolate_migratepages(struct zone *zone, |
| 1201 | struct compact_control *cc) |
| 1202 | { |
Joonsoo Kim | e1409c3 | 2016-03-15 14:57:48 -0700 | [diff] [blame] | 1203 | unsigned long block_start_pfn; |
| 1204 | unsigned long block_end_pfn; |
| 1205 | unsigned long low_pfn; |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 1206 | struct page *page; |
| 1207 | const isolate_mode_t isolate_mode = |
Eric B Munson | 5bbe354 | 2015-04-15 16:13:20 -0700 | [diff] [blame] | 1208 | (sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) | |
Hugh Dickins | 1d2047f | 2016-07-28 15:48:41 -0700 | [diff] [blame] | 1209 | (cc->mode != MIGRATE_SYNC ? ISOLATE_ASYNC_MIGRATE : 0); |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 1210 | |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 1211 | /* |
| 1212 | * Start where we last stopped, or at the beginning of the zone as |
| 1213 | * initialized by compact_zone() |
| 1214 | */ |
| 1215 | low_pfn = cc->migrate_pfn; |
Vlastimil Babka | 06b6640 | 2016-05-19 17:11:48 -0700 | [diff] [blame] | 1216 | block_start_pfn = pageblock_start_pfn(low_pfn); |
Joonsoo Kim | e1409c3 | 2016-03-15 14:57:48 -0700 | [diff] [blame] | 1217 | if (block_start_pfn < zone->zone_start_pfn) |
| 1218 | block_start_pfn = zone->zone_start_pfn; |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 1219 | |
| 1220 | /* Only scan within a pageblock boundary */ |
Vlastimil Babka | 06b6640 | 2016-05-19 17:11:48 -0700 | [diff] [blame] | 1221 | block_end_pfn = pageblock_end_pfn(low_pfn); |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 1222 | |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 1223 | /* |
| 1224 | * Iterate over whole pageblocks until we find the first suitable. |
| 1225 | * Do not cross the free scanner. |
| 1226 | */ |
Joonsoo Kim | e1409c3 | 2016-03-15 14:57:48 -0700 | [diff] [blame] | 1227 | for (; block_end_pfn <= cc->free_pfn; |
| 1228 | low_pfn = block_end_pfn, |
| 1229 | block_start_pfn = block_end_pfn, |
| 1230 | block_end_pfn += pageblock_nr_pages) { |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 1231 | |
| 1232 | /* |
| 1233 | * This can potentially iterate a massively long zone with |
| 1234 | * many pageblocks unsuitable, so periodically check if we |
| 1235 | * need to schedule, or even abort async compaction. |
| 1236 | */ |
| 1237 | if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages)) |
| 1238 | && compact_should_abort(cc)) |
| 1239 | break; |
| 1240 | |
Joonsoo Kim | e1409c3 | 2016-03-15 14:57:48 -0700 | [diff] [blame] | 1241 | page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn, |
| 1242 | zone); |
Vlastimil Babka | 7d49d88 | 2014-10-09 15:27:11 -0700 | [diff] [blame] | 1243 | if (!page) |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 1244 | continue; |
| 1245 | |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 1246 | /* If isolation recently failed, do not retry */ |
| 1247 | if (!isolation_suitable(cc, page)) |
| 1248 | continue; |
| 1249 | |
| 1250 | /* |
| 1251 | * For async direct compaction, only scan pageblocks of the same |
| 1252 | * migratetype as the allocation. Async compaction is optimistic and |
| 1253 | * tries to see if the minimum amount of work satisfies the allocation. |
| 1254 | */ |
Vlastimil Babka | b682deb | 2017-05-08 15:54:43 -0700 | [diff] [blame] | 1255 | if (!suitable_migration_source(cc, page)) |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 1256 | continue; |
| 1257 | |
| 1258 | /* Perform the isolation */ |
Joonsoo Kim | e1409c3 | 2016-03-15 14:57:48 -0700 | [diff] [blame] | 1259 | low_pfn = isolate_migratepages_block(cc, low_pfn, |
| 1260 | block_end_pfn, isolate_mode); |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 1261 | |
Ming Ling | 6afcf8e | 2016-12-12 16:42:26 -0800 | [diff] [blame] | 1262 | if (!low_pfn || cc->contended) |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 1263 | return ISOLATE_ABORT; |
| 1264 | |
| 1265 | /* |
| 1266 | * Either we isolated something and proceed with migration. Or |
| 1267 | * we failed and compact_zone should decide if we should |
| 1268 | * continue or not. |
| 1269 | */ |
| 1270 | break; |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 1271 | } |
| 1272 | |
Vlastimil Babka | f2849aa | 2015-09-08 15:02:36 -0700 | [diff] [blame] | 1273 | /* Record where migration scanner will be restarted. */ |
| 1274 | cc->migrate_pfn = low_pfn; |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 1275 | |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 1276 | return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE; |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 1277 | } |
| 1278 | |
Yaowei Bai | 21c527a | 2015-11-05 18:47:20 -0800 | [diff] [blame] | 1279 | /* |
| 1280 | * order == -1 is expected when compacting via |
| 1281 | * /proc/sys/vm/compact_memory |
| 1282 | */ |
| 1283 | static inline bool is_via_compact_memory(int order) |
| 1284 | { |
| 1285 | return order == -1; |
| 1286 | } |
| 1287 | |
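| | /* Decide whether compaction of this zone is finished, and with which result */ |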
Vlastimil Babka | d39773a | 2017-05-08 15:54:46 -0700 | [diff] [blame] | 1288 | static enum compact_result __compact_finished(struct zone *zone, |
| 1289 | struct compact_control *cc) |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 1290 | { |
Mel Gorman | 8fb74b9 | 2013-01-11 14:32:16 -0800 | [diff] [blame] | 1291 | unsigned int order; |
Vlastimil Babka | d39773a | 2017-05-08 15:54:46 -0700 | [diff] [blame] | 1292 | const int migratetype = cc->migratetype; |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 1293 | |
Vlastimil Babka | be97657 | 2014-06-04 16:10:41 -0700 | [diff] [blame] | 1294 | if (cc->contended || fatal_signal_pending(current)) |
Vlastimil Babka | 2d1e104 | 2015-11-05 18:48:02 -0800 | [diff] [blame] | 1295 | return COMPACT_CONTENDED; |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 1296 | |
Mel Gorman | 753341a | 2012-10-08 16:32:40 -0700 | [diff] [blame] | 1297 | /* Compaction run completes if the migrate and free scanner meet */ |
Vlastimil Babka | f2849aa | 2015-09-08 15:02:36 -0700 | [diff] [blame] | 1298 | if (compact_scanners_met(cc)) { |
Vlastimil Babka | 55b7c4c | 2014-01-21 15:51:11 -0800 | [diff] [blame] | 1299 | /* Let the next compaction start anew. */ |
Vlastimil Babka | 02333641 | 2015-09-08 15:02:42 -0700 | [diff] [blame] | 1300 | reset_cached_positions(zone); |
Vlastimil Babka | 55b7c4c | 2014-01-21 15:51:11 -0800 | [diff] [blame] | 1301 | |
Mel Gorman | 6299702 | 2012-10-08 16:32:47 -0700 | [diff] [blame] | 1302 | /* |
| 1303 | * Mark that the PG_migrate_skip information should be cleared |
Vlastimil Babka | accf624 | 2016-03-17 14:18:15 -0700 | [diff] [blame] | 1304 | * by kswapd when it goes to sleep. kcompactd does not set the |
Mel Gorman | 6299702 | 2012-10-08 16:32:47 -0700 | [diff] [blame] | 1305 | * flag itself as the decision to clear it should be directly |
| 1306 | * based on an allocation request. |
| 1307 | */ |
Vlastimil Babka | accf624 | 2016-03-17 14:18:15 -0700 | [diff] [blame] | 1308 | if (cc->direct_compaction) |
Mel Gorman | 6299702 | 2012-10-08 16:32:47 -0700 | [diff] [blame] | 1309 | zone->compact_blockskip_flush = true; |
| 1310 | |
Michal Hocko | c8f7de0 | 2016-05-20 16:56:47 -0700 | [diff] [blame] | 1311 | if (cc->whole_zone) |
| 1312 | return COMPACT_COMPLETE; |
| 1313 | else |
| 1314 | return COMPACT_PARTIAL_SKIPPED; |
Mel Gorman | bb13ffe | 2012-10-08 16:32:41 -0700 | [diff] [blame] | 1315 | } |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 1316 | |
Yaowei Bai | 21c527a | 2015-11-05 18:47:20 -0800 | [diff] [blame] | 1317 | if (is_via_compact_memory(cc->order)) |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 1318 | return COMPACT_CONTINUE; |
| 1319 | |
Vlastimil Babka | baf6a9a | 2017-05-08 15:54:52 -0700 | [diff] [blame] | 1320 | if (cc->finishing_block) { |
| 1321 | /* |
| 1322 | * We have finished the pageblock, but better check again that |
| 1323 | * we really succeeded. |
| 1324 | */ |
| 1325 | if (IS_ALIGNED(cc->migrate_pfn, pageblock_nr_pages)) |
| 1326 | cc->finishing_block = false; |
| 1327 | else |
| 1328 | return COMPACT_CONTINUE; |
| 1329 | } |
| 1330 | |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 1331 | /* Direct compactor: Is a suitable page free? */ |
Mel Gorman | 8fb74b9 | 2013-01-11 14:32:16 -0800 | [diff] [blame] | 1332 | for (order = cc->order; order < MAX_ORDER; order++) { |
| 1333 | struct free_area *area = &zone->free_area[order]; |
Joonsoo Kim | 2149cda | 2015-04-14 15:45:21 -0700 | [diff] [blame] | 1334 | bool can_steal; |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 1335 | |
Mel Gorman | 8fb74b9 | 2013-01-11 14:32:16 -0800 | [diff] [blame] | 1336 | /* Job done if page is free of the right migratetype */ |
David Rientjes | 6d7ce55 | 2014-10-09 15:27:27 -0700 | [diff] [blame] | 1337 | if (!list_empty(&area->free_list[migratetype])) |
Vlastimil Babka | cf37831 | 2016-10-07 16:57:41 -0700 | [diff] [blame] | 1338 | return COMPACT_SUCCESS; |
Mel Gorman | 8fb74b9 | 2013-01-11 14:32:16 -0800 | [diff] [blame] | 1339 | |
Joonsoo Kim | 2149cda | 2015-04-14 15:45:21 -0700 | [diff] [blame] | 1340 | #ifdef CONFIG_CMA |
| 1341 | /* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */ |
| 1342 | if (migratetype == MIGRATE_MOVABLE && |
| 1343 | !list_empty(&area->free_list[MIGRATE_CMA])) |
Vlastimil Babka | cf37831 | 2016-10-07 16:57:41 -0700 | [diff] [blame] | 1344 | return COMPACT_SUCCESS; |
Joonsoo Kim | 2149cda | 2015-04-14 15:45:21 -0700 | [diff] [blame] | 1345 | #endif |
| 1346 | /* |
| 1347 | * Job done if allocation would steal freepages from |
| 1348 | * other migratetype buddy lists. |
| 1349 | */ |
| 1350 | if (find_suitable_fallback(area, order, migratetype, |
Vlastimil Babka | baf6a9a | 2017-05-08 15:54:52 -0700 | [diff] [blame] | 1351 | true, &can_steal) != -1) { |
| 1352 | |
| 1353 | /* movable pages are OK in any pageblock */ |
| 1354 | if (migratetype == MIGRATE_MOVABLE) |
| 1355 | return COMPACT_SUCCESS; |
| 1356 | |
| 1357 | /* |
| 1358 | * We are stealing for a non-movable allocation. Make |
| 1359 | * sure we finish compacting the current pageblock |
| 1360 | * first so it is as free as possible and we won't |
| 1361 | * have to steal another one soon. This only applies |
| 1362 | * to sync compaction, as async compaction operates |
| 1363 | * on pageblocks of the same migratetype. |
| 1364 | */ |
| 1365 | if (cc->mode == MIGRATE_ASYNC || |
| 1366 | IS_ALIGNED(cc->migrate_pfn, |
| 1367 | pageblock_nr_pages)) { |
| 1368 | return COMPACT_SUCCESS; |
| 1369 | } |
| 1370 | |
| 1371 | cc->finishing_block = true; |
| 1372 | return COMPACT_CONTINUE; |
| 1373 | } |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 1374 | } |
| 1375 | |
Joonsoo Kim | 837d026 | 2015-02-11 15:27:06 -0800 | [diff] [blame] | 1376 | return COMPACT_NO_SUITABLE_PAGE; |
| 1377 | } |
| 1378 | |
Michal Hocko | ea7ab98 | 2016-05-20 16:56:38 -0700 | [diff] [blame] | 1379 | static enum compact_result compact_finished(struct zone *zone, |
Vlastimil Babka | d39773a | 2017-05-08 15:54:46 -0700 | [diff] [blame] | 1380 | struct compact_control *cc) |
Joonsoo Kim | 837d026 | 2015-02-11 15:27:06 -0800 | [diff] [blame] | 1381 | { |
| 1382 | int ret; |
| 1383 | |
Vlastimil Babka | d39773a | 2017-05-08 15:54:46 -0700 | [diff] [blame] | 1384 | ret = __compact_finished(zone, cc); |
Joonsoo Kim | 837d026 | 2015-02-11 15:27:06 -0800 | [diff] [blame] | 1385 | trace_mm_compaction_finished(zone, cc->order, ret); |
| 1386 | if (ret == COMPACT_NO_SUITABLE_PAGE) |
| 1387 | ret = COMPACT_CONTINUE; |
| 1388 | |
| 1389 | return ret; |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 1390 | } |
| 1391 | |
Mel Gorman | 3e7d344 | 2011-01-13 15:45:56 -0800 | [diff] [blame] | 1392 | /* |
| 1393 | * compaction_suitable: Is this suitable to run compaction on this zone now? |
| 1394 | * Returns |
| 1395 | * COMPACT_SKIPPED - If there are too few free pages for compaction |
Vlastimil Babka | cf37831 | 2016-10-07 16:57:41 -0700 | [diff] [blame] | 1396 | * COMPACT_SUCCESS - If the allocation would succeed without compaction |
Mel Gorman | 3e7d344 | 2011-01-13 15:45:56 -0800 | [diff] [blame] | 1397 | * COMPACT_CONTINUE - If compaction should run now |
| 1398 | */ |
Michal Hocko | ea7ab98 | 2016-05-20 16:56:38 -0700 | [diff] [blame] | 1399 | static enum compact_result __compaction_suitable(struct zone *zone, int order, |
Mel Gorman | c603844 | 2016-05-19 17:13:38 -0700 | [diff] [blame] | 1400 | unsigned int alloc_flags, |
Michal Hocko | 86a294a | 2016-05-20 16:57:12 -0700 | [diff] [blame] | 1401 | int classzone_idx, |
| 1402 | unsigned long wmark_target) |
Mel Gorman | 3e7d344 | 2011-01-13 15:45:56 -0800 | [diff] [blame] | 1403 | { |
Mel Gorman | 3e7d344 | 2011-01-13 15:45:56 -0800 | [diff] [blame] | 1404 | unsigned long watermark; |
| 1405 | |
Yaowei Bai | 21c527a | 2015-11-05 18:47:20 -0800 | [diff] [blame] | 1406 | if (is_via_compact_memory(order)) |
Michal Hocko | 3957c77 | 2011-06-15 15:08:25 -0700 | [diff] [blame] | 1407 | return COMPACT_CONTINUE; |
| 1408 | |
Vlastimil Babka | f2b8228 | 2016-10-07 16:57:50 -0700 | [diff] [blame] | 1409 | watermark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK]; |
Vlastimil Babka | ebff398 | 2014-12-10 15:43:22 -0800 | [diff] [blame] | 1410 | /* |
| 1411 | * If watermarks for high-order allocation are already met, there |
| 1412 | * should be no need for compaction at all. |
| 1413 | */ |
| 1414 | if (zone_watermark_ok(zone, order, watermark, classzone_idx, |
| 1415 | alloc_flags)) |
Vlastimil Babka | cf37831 | 2016-10-07 16:57:41 -0700 | [diff] [blame] | 1416 | return COMPACT_SUCCESS; |
Vlastimil Babka | ebff398 | 2014-12-10 15:43:22 -0800 | [diff] [blame] | 1417 | |
Michal Hocko | 3957c77 | 2011-06-15 15:08:25 -0700 | [diff] [blame] | 1418 | /* |
Vlastimil Babka | 9861a62 | 2016-10-07 16:57:53 -0700 | [diff] [blame] | 1419 | * Watermarks for order-0 must be met for compaction to be able to |
Vlastimil Babka | 984fdba | 2016-10-07 16:57:57 -0700 | [diff] [blame] | 1420 | * isolate free pages for migration targets. This means that the |
| 1421 | * watermark and alloc_flags have to match, or be more pessimistic than |
| 1422 | * the check in __isolate_free_page(). We don't use the direct |
| 1423 | * compactor's alloc_flags, as they are not relevant for freepage |
| 1424 | * isolation. We however do use the direct compactor's classzone_idx to |
| 1425 | * skip over zones where lowmem reserves would prevent allocation even |
| 1426 | * if compaction succeeds. |
Vlastimil Babka | 8348faf | 2016-10-07 16:58:00 -0700 | [diff] [blame] | 1427 | * For costly orders, we require the low watermark instead of min for |
| 1428 | * compaction to proceed, to increase its chances. |
Vlastimil Babka | 984fdba | 2016-10-07 16:57:57 -0700 | [diff] [blame] | 1429 | * ALLOC_CMA is used, as pages in CMA pageblocks are considered |
| 1430 | * suitable migration targets. |
Mel Gorman | 3e7d344 | 2011-01-13 15:45:56 -0800 | [diff] [blame] | 1431 | */ |
Vlastimil Babka | 8348faf | 2016-10-07 16:58:00 -0700 | [diff] [blame] | 1432 | watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ? |
| 1433 | low_wmark_pages(zone) : min_wmark_pages(zone); |
| 1434 | watermark += compact_gap(order); |
Michal Hocko | 86a294a | 2016-05-20 16:57:12 -0700 | [diff] [blame] | 1435 | if (!__zone_watermark_ok(zone, 0, watermark, classzone_idx, |
Vlastimil Babka | 984fdba | 2016-10-07 16:57:57 -0700 | [diff] [blame] | 1436 | ALLOC_CMA, wmark_target)) |
Mel Gorman | 3e7d344 | 2011-01-13 15:45:56 -0800 | [diff] [blame] | 1437 | return COMPACT_SKIPPED; |
| 1438 | |
Vlastimil Babka | cc5c9f0 | 2016-10-07 17:00:43 -0700 | [diff] [blame] | 1439 | return COMPACT_CONTINUE; |
| 1440 | } |
| 1441 | |
| 1442 | enum compact_result compaction_suitable(struct zone *zone, int order, |
| 1443 | unsigned int alloc_flags, |
| 1444 | int classzone_idx) |
| 1445 | { |
| 1446 | enum compact_result ret; |
| 1447 | int fragindex; |
| 1448 | |
| 1449 | ret = __compaction_suitable(zone, order, alloc_flags, classzone_idx, |
| 1450 | zone_page_state(zone, NR_FREE_PAGES)); |
Mel Gorman | 3e7d344 | 2011-01-13 15:45:56 -0800 | [diff] [blame] | 1451 | /* |
| 1452 | * fragmentation index determines if allocation failures are due to |
| 1453 | * low memory or external fragmentation |
| 1454 | * |
Vlastimil Babka | ebff398 | 2014-12-10 15:43:22 -0800 | [diff] [blame] | 1455 | * index of -1000 would imply allocations might succeed depending on |
| 1456 | * watermarks, but we already failed the high-order watermark check |
Mel Gorman | 3e7d344 | 2011-01-13 15:45:56 -0800 | [diff] [blame] | 1457 | * index towards 0 implies failure is due to lack of memory |
| 1458 | * index towards 1000 implies failure is due to fragmentation |
| 1459 | * |
Vlastimil Babka | 2031142 | 2016-10-07 17:00:46 -0700 | [diff] [blame] | 1460 | * Only compact if a failure would be due to fragmentation. Also |
| 1461 | * ignore fragindex for non-costly orders where the alternative to |
| 1462 | * a successful reclaim/compaction is OOM. Fragindex and the |
| 1463 | * vm.extfrag_threshold sysctl is meant as a heuristic to prevent |
| 1464 | * excessive compaction for costly orders, but it should not be at the |
| 1465 | * expense of system stability. |
Mel Gorman | 3e7d344 | 2011-01-13 15:45:56 -0800 | [diff] [blame] | 1466 | */ |
Vlastimil Babka | 2031142 | 2016-10-07 17:00:46 -0700 | [diff] [blame] | 1467 | if (ret == COMPACT_CONTINUE && (order > PAGE_ALLOC_COSTLY_ORDER)) { |
Vlastimil Babka | cc5c9f0 | 2016-10-07 17:00:43 -0700 | [diff] [blame] | 1468 | fragindex = fragmentation_index(zone, order); |
| 1469 | if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold) |
| 1470 | ret = COMPACT_NOT_SUITABLE_ZONE; |
| 1471 | } |
Mel Gorman | 3e7d344 | 2011-01-13 15:45:56 -0800 | [diff] [blame] | 1472 | |
Joonsoo Kim | 837d026 | 2015-02-11 15:27:06 -0800 | [diff] [blame] | 1473 | trace_mm_compaction_suitable(zone, order, ret); |
| 1474 | if (ret == COMPACT_NOT_SUITABLE_ZONE) |
| 1475 | ret = COMPACT_SKIPPED; |
| 1476 | |
| 1477 | return ret; |
| 1478 | } |
| 1479 | |
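| | /* |
| | * Check whether at least one zone in the allocation context's zonelist could |
| | * pass __compaction_suitable(), counting its free pages plus a 1/order |
| | * fraction of its reclaimable pages as the watermark target. Used when |
| | * deciding whether retrying reclaim/compaction is worthwhile. |
| | */ |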
Michal Hocko | 86a294a | 2016-05-20 16:57:12 -0700 | [diff] [blame] | 1480 | bool compaction_zonelist_suitable(struct alloc_context *ac, int order, |
| 1481 | int alloc_flags) |
| 1482 | { |
| 1483 | struct zone *zone; |
| 1484 | struct zoneref *z; |
| 1485 | |
| 1486 | /* |
| 1487 | * Make sure at least one zone would pass __compaction_suitable if we continue |
| 1488 | * retrying the reclaim. |
| 1489 | */ |
| 1490 | for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx, |
| 1491 | ac->nodemask) { |
| 1492 | unsigned long available; |
| 1493 | enum compact_result compact_result; |
| 1494 | |
| 1495 | /* |
| 1496 | * Do not consider all the reclaimable memory because we do not |
| 1497 | * want to thrash just for a single high-order allocation which |
| 1498 | * is not even guaranteed to succeed even if __compaction_suitable |
| 1499 | * is happy about the watermark check. |
| 1500 | */ |
Mel Gorman | 5a1c84b | 2016-07-28 15:47:31 -0700 | [diff] [blame] | 1501 | available = zone_reclaimable_pages(zone) / order; |
Michal Hocko | 86a294a | 2016-05-20 16:57:12 -0700 | [diff] [blame] | 1502 | available += zone_page_state_snapshot(zone, NR_FREE_PAGES); |
| 1503 | compact_result = __compaction_suitable(zone, order, alloc_flags, |
| 1504 | ac_classzone_idx(ac), available); |
Vlastimil Babka | cc5c9f0 | 2016-10-07 17:00:43 -0700 | [diff] [blame] | 1505 | if (compact_result != COMPACT_SKIPPED) |
Michal Hocko | 86a294a | 2016-05-20 16:57:12 -0700 | [diff] [blame] | 1506 | return true; |
| 1507 | } |
| 1508 | |
| 1509 | return false; |
| 1510 | } |
| 1511 | |
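| | /* Run one compaction pass over a zone as described by @cc */ |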
Michal Hocko | ea7ab98 | 2016-05-20 16:56:38 -0700 | [diff] [blame] | 1512 | static enum compact_result compact_zone(struct zone *zone, struct compact_control *cc) |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 1513 | { |
Michal Hocko | ea7ab98 | 2016-05-20 16:56:38 -0700 | [diff] [blame] | 1514 | enum compact_result ret; |
Mel Gorman | c89511a | 2012-10-08 16:32:45 -0700 | [diff] [blame] | 1515 | unsigned long start_pfn = zone->zone_start_pfn; |
Cody P Schafer | 108bcc9 | 2013-02-22 16:35:23 -0800 | [diff] [blame] | 1516 | unsigned long end_pfn = zone_end_pfn(zone); |
David Rientjes | e0b9dae | 2014-06-04 16:08:28 -0700 | [diff] [blame] | 1517 | const bool sync = cc->mode != MIGRATE_ASYNC; |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 1518 | |
Vlastimil Babka | d39773a | 2017-05-08 15:54:46 -0700 | [diff] [blame] | 1519 | cc->migratetype = gfpflags_to_migratetype(cc->gfp_mask); |
Vlastimil Babka | ebff398 | 2014-12-10 15:43:22 -0800 | [diff] [blame] | 1520 | ret = compaction_suitable(zone, cc->order, cc->alloc_flags, |
| 1521 | cc->classzone_idx); |
Michal Hocko | c46649d | 2016-05-20 16:56:41 -0700 | [diff] [blame] | 1522 | /* Compaction is likely to fail */ |
Vlastimil Babka | cf37831 | 2016-10-07 16:57:41 -0700 | [diff] [blame] | 1523 | if (ret == COMPACT_SUCCESS || ret == COMPACT_SKIPPED) |
Mel Gorman | 3e7d344 | 2011-01-13 15:45:56 -0800 | [diff] [blame] | 1524 | return ret; |
Michal Hocko | c46649d | 2016-05-20 16:56:41 -0700 | [diff] [blame] | 1525 | |
| 1526 | /* huh, compaction_suitable is returning something unexpected */ |
| 1527 | VM_BUG_ON(ret != COMPACT_CONTINUE); |
Mel Gorman | 3e7d344 | 2011-01-13 15:45:56 -0800 | [diff] [blame] | 1528 | |
Mel Gorman | c89511a | 2012-10-08 16:32:45 -0700 | [diff] [blame] | 1529 | /* |
Vlastimil Babka | d3132e4 | 2014-01-21 15:51:08 -0800 | [diff] [blame] | 1530 | * Clear pageblock skip if there were failures recently and compaction |
Vlastimil Babka | accf624 | 2016-03-17 14:18:15 -0700 | [diff] [blame] | 1531 | * is about to be retried after being deferred. |
Vlastimil Babka | d3132e4 | 2014-01-21 15:51:08 -0800 | [diff] [blame] | 1532 | */ |
Vlastimil Babka | accf624 | 2016-03-17 14:18:15 -0700 | [diff] [blame] | 1533 | if (compaction_restarting(zone, cc->order)) |
Vlastimil Babka | d3132e4 | 2014-01-21 15:51:08 -0800 | [diff] [blame] | 1534 | __reset_isolation_suitable(zone); |
| 1535 | |
| 1536 | /* |
Mel Gorman | c89511a | 2012-10-08 16:32:45 -0700 | [diff] [blame] | 1537 | * Set up to move all movable pages to the end of the zone. Use cached |
Vlastimil Babka | 06ed299 | 2016-10-07 16:57:35 -0700 | [diff] [blame] | 1538 | * information on where the scanners should start (unless we explicitly |
| 1539 | * want to compact the whole zone), but check that it is initialised |
| 1540 | * by ensuring the values are within zone boundaries. |
Mel Gorman | c89511a | 2012-10-08 16:32:45 -0700 | [diff] [blame] | 1541 | */ |
Vlastimil Babka | 06ed299 | 2016-10-07 16:57:35 -0700 | [diff] [blame] | 1542 | if (cc->whole_zone) { |
Mel Gorman | c89511a | 2012-10-08 16:32:45 -0700 | [diff] [blame] | 1543 | cc->migrate_pfn = start_pfn; |
Vlastimil Babka | 06ed299 | 2016-10-07 16:57:35 -0700 | [diff] [blame] | 1544 | cc->free_pfn = pageblock_start_pfn(end_pfn - 1); |
| 1545 | } else { |
| 1546 | cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync]; |
| 1547 | cc->free_pfn = zone->compact_cached_free_pfn; |
| 1548 | if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) { |
| 1549 | cc->free_pfn = pageblock_start_pfn(end_pfn - 1); |
| 1550 | zone->compact_cached_free_pfn = cc->free_pfn; |
| 1551 | } |
| 1552 | if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) { |
| 1553 | cc->migrate_pfn = start_pfn; |
| 1554 | zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn; |
| 1555 | zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn; |
| 1556 | } |
Michal Hocko | c8f7de0 | 2016-05-20 16:56:47 -0700 | [diff] [blame] | 1557 | |
Vlastimil Babka | 06ed299 | 2016-10-07 16:57:35 -0700 | [diff] [blame] | 1558 | if (cc->migrate_pfn == start_pfn) |
| 1559 | cc->whole_zone = true; |
| 1560 | } |
Michal Hocko | c8f7de0 | 2016-05-20 16:56:47 -0700 | [diff] [blame] | 1561 | |
Joonsoo Kim | 1a16718 | 2015-09-08 15:03:59 -0700 | [diff] [blame] | 1562 | cc->last_migrated_pfn = 0; |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 1563 | |
Joonsoo Kim | 16c4a09 | 2015-02-11 15:27:01 -0800 | [diff] [blame] | 1564 | trace_mm_compaction_begin(start_pfn, cc->migrate_pfn, |
| 1565 | cc->free_pfn, end_pfn, sync); |
Mel Gorman | 0eb927c | 2014-01-21 15:51:05 -0800 | [diff] [blame] | 1566 | |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 1567 | migrate_prep_local(); |
| 1568 | |
Vlastimil Babka | d39773a | 2017-05-08 15:54:46 -0700 | [diff] [blame] | 1569 | while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) { |
Minchan Kim | 9d502c1 | 2011-03-22 16:30:39 -0700 | [diff] [blame] | 1570 | int err; |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 1571 | |
Mel Gorman | f9e35b3 | 2011-06-15 15:08:52 -0700 | [diff] [blame] | 1572 | switch (isolate_migratepages(zone, cc)) { |
| 1573 | case ISOLATE_ABORT: |
Vlastimil Babka | 2d1e104 | 2015-11-05 18:48:02 -0800 | [diff] [blame] | 1574 | ret = COMPACT_CONTENDED; |
Rafael Aquini | 5733c7d | 2012-12-11 16:02:47 -0800 | [diff] [blame] | 1575 | putback_movable_pages(&cc->migratepages); |
Shaohua Li | e64c523 | 2012-10-08 16:32:27 -0700 | [diff] [blame] | 1576 | cc->nr_migratepages = 0; |
Mel Gorman | f9e35b3 | 2011-06-15 15:08:52 -0700 | [diff] [blame] | 1577 | goto out; |
| 1578 | case ISOLATE_NONE: |
Vlastimil Babka | fdaf7f5 | 2014-12-10 15:43:34 -0800 | [diff] [blame] | 1579 | /* |
| 1580 | * We haven't isolated and migrated anything, but |
| 1581 | * there might still be unflushed migrations from |
| 1582 | * previous cc->order aligned block. |
| 1583 | */ |
| 1584 | goto check_drain; |
Mel Gorman | f9e35b3 | 2011-06-15 15:08:52 -0700 | [diff] [blame] | 1585 | case ISOLATE_SUCCESS: |
| 1586 | ; |
| 1587 | } |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 1588 | |
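| | /* |
| | * Migrate the isolated pages; target pages are taken from cc->freepages |
| | * via compaction_alloc() and unused ones returned via compaction_free(). |
| | */ |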
David Rientjes | d53aea3 | 2014-06-04 16:08:26 -0700 | [diff] [blame] | 1589 | err = migrate_pages(&cc->migratepages, compaction_alloc, |
David Rientjes | e0b9dae | 2014-06-04 16:08:28 -0700 | [diff] [blame] | 1590 | compaction_free, (unsigned long)cc, cc->mode, |
Mel Gorman | 7b2a2d4 | 2012-10-19 14:07:31 +0100 | [diff] [blame] | 1591 | MR_COMPACTION); |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 1592 | |
Vlastimil Babka | f8c9301 | 2014-06-04 16:08:32 -0700 | [diff] [blame] | 1593 | trace_mm_compaction_migratepages(cc->nr_migratepages, err, |
| 1594 | &cc->migratepages); |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 1595 | |
Vlastimil Babka | f8c9301 | 2014-06-04 16:08:32 -0700 | [diff] [blame] | 1596 | /* All pages were either migrated or will be released */ |
| 1597 | cc->nr_migratepages = 0; |
Minchan Kim | 9d502c1 | 2011-03-22 16:30:39 -0700 | [diff] [blame] | 1598 | if (err) { |
Rafael Aquini | 5733c7d | 2012-12-11 16:02:47 -0800 | [diff] [blame] | 1599 | putback_movable_pages(&cc->migratepages); |
Vlastimil Babka | 7ed695e | 2014-01-21 15:51:09 -0800 | [diff] [blame] | 1600 | /* |
| 1601 | * migrate_pages() may return -ENOMEM when scanners meet |
| 1602 | * and we want compact_finished() to detect it |
| 1603 | */ |
Vlastimil Babka | f2849aa | 2015-09-08 15:02:36 -0700 | [diff] [blame] | 1604 | if (err == -ENOMEM && !compact_scanners_met(cc)) { |
Vlastimil Babka | 2d1e104 | 2015-11-05 18:48:02 -0800 | [diff] [blame] | 1605 | ret = COMPACT_CONTENDED; |
David Rientjes | 4bf2bba | 2012-07-11 14:02:13 -0700 | [diff] [blame] | 1606 | goto out; |
| 1607 | } |
Vlastimil Babka | fdd048e | 2016-05-19 17:11:55 -0700 | [diff] [blame] | 1608 | /* |
| 1609 | * We failed to migrate at least one page in the current |
| 1610 | * order-aligned block, so skip the rest of it. |
| 1611 | */ |
| 1612 | if (cc->direct_compaction && |
| 1613 | (cc->mode == MIGRATE_ASYNC)) { |
| 1614 | cc->migrate_pfn = block_end_pfn( |
| 1615 | cc->migrate_pfn - 1, cc->order); |
| 1616 | /* Draining pcplists is useless in this case */ |
| 1617 | cc->last_migrated_pfn = 0; |
| 1618 | |
| 1619 | } |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 1620 | } |
Vlastimil Babka | fdaf7f5 | 2014-12-10 15:43:34 -0800 | [diff] [blame] | 1621 | |
Vlastimil Babka | fdaf7f5 | 2014-12-10 15:43:34 -0800 | [diff] [blame] | 1622 | check_drain: |
| 1623 | /* |
| 1624 | * Has the migration scanner moved away from the previous |
| 1625 | * cc->order aligned block where we migrated from? If yes, |
| 1626 | * flush the pages that were freed, so that they can merge and |
| 1627 | * compact_finished() can detect immediately if allocation |
| 1628 | * would succeed. |
| 1629 | */ |
Joonsoo Kim | 1a16718 | 2015-09-08 15:03:59 -0700 | [diff] [blame] | 1630 | if (cc->order > 0 && cc->last_migrated_pfn) { |
Vlastimil Babka | fdaf7f5 | 2014-12-10 15:43:34 -0800 | [diff] [blame] | 1631 | int cpu; |
| 1632 | unsigned long current_block_start = |
Vlastimil Babka | 06b6640 | 2016-05-19 17:11:48 -0700 | [diff] [blame] | 1633 | block_start_pfn(cc->migrate_pfn, cc->order); |
Vlastimil Babka | fdaf7f5 | 2014-12-10 15:43:34 -0800 | [diff] [blame] | 1634 | |
Joonsoo Kim | 1a16718 | 2015-09-08 15:03:59 -0700 | [diff] [blame] | 1635 | if (cc->last_migrated_pfn < current_block_start) { |
Vlastimil Babka | fdaf7f5 | 2014-12-10 15:43:34 -0800 | [diff] [blame] | 1636 | cpu = get_cpu(); |
| 1637 | lru_add_drain_cpu(cpu); |
| 1638 | drain_local_pages(zone); |
| 1639 | put_cpu(); |
| 1640 | /* No more flushing until we migrate again */ |
Joonsoo Kim | 1a16718 | 2015-09-08 15:03:59 -0700 | [diff] [blame] | 1641 | cc->last_migrated_pfn = 0; |
Vlastimil Babka | fdaf7f5 | 2014-12-10 15:43:34 -0800 | [diff] [blame] | 1642 | } |
| 1643 | } |
| 1644 | |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 1645 | } |
| 1646 | |
Mel Gorman | f9e35b3 | 2011-06-15 15:08:52 -0700 | [diff] [blame] | 1647 | out: |
Vlastimil Babka | 6bace09 | 2014-12-10 15:43:31 -0800 | [diff] [blame] | 1648 | /* |
| 1649 | * Release free pages and update where the free scanner should restart, |
| 1650 | * so we don't leave any returned pages behind in the next attempt. |
| 1651 | */ |
| 1652 | if (cc->nr_freepages > 0) { |
| 1653 | unsigned long free_pfn = release_freepages(&cc->freepages); |
| 1654 | |
| 1655 | cc->nr_freepages = 0; |
| 1656 | VM_BUG_ON(free_pfn == 0); |
| 1657 | /* The cached pfn is always the first in a pageblock */ |
Vlastimil Babka | 06b6640 | 2016-05-19 17:11:48 -0700 | [diff] [blame] | 1658 | free_pfn = pageblock_start_pfn(free_pfn); |
Vlastimil Babka | 6bace09 | 2014-12-10 15:43:31 -0800 | [diff] [blame] | 1659 | /* |
| 1660 | * Only go back, not forward. The cached pfn might have already |
| 1661 | * been reset to the zone end in compact_finished(). |
| 1662 | */ |
| 1663 | if (free_pfn > zone->compact_cached_free_pfn) |
| 1664 | zone->compact_cached_free_pfn = free_pfn; |
| 1665 | } |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 1666 | |
David Rientjes | 7f354a5 | 2017-02-22 15:44:50 -0800 | [diff] [blame] | 1667 | count_compact_events(COMPACTMIGRATE_SCANNED, cc->total_migrate_scanned); |
| 1668 | count_compact_events(COMPACTFREE_SCANNED, cc->total_free_scanned); |
| 1669 | |
Joonsoo Kim | 16c4a09 | 2015-02-11 15:27:01 -0800 | [diff] [blame] | 1670 | trace_mm_compaction_end(start_pfn, cc->migrate_pfn, |
| 1671 | cc->free_pfn, end_pfn, sync, ret); |
Mel Gorman | 0eb927c | 2014-01-21 15:51:05 -0800 | [diff] [blame] | 1672 | |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 1673 | return ret; |
| 1674 | } |
Mel Gorman | 76ab0f5 | 2010-05-24 14:32:28 -0700 | [diff] [blame] | 1675 | |
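| | /* Run compaction on a single zone on behalf of a direct (allocation-path) request */ |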
Michal Hocko | ea7ab98 | 2016-05-20 16:56:38 -0700 | [diff] [blame] | 1676 | static enum compact_result compact_zone_order(struct zone *zone, int order, |
Vlastimil Babka | c3486f5 | 2016-07-28 15:49:30 -0700 | [diff] [blame] | 1677 | gfp_t gfp_mask, enum compact_priority prio, |
Mel Gorman | c603844 | 2016-05-19 17:13:38 -0700 | [diff] [blame] | 1678 | unsigned int alloc_flags, int classzone_idx) |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 1679 | { |
Michal Hocko | ea7ab98 | 2016-05-20 16:56:38 -0700 | [diff] [blame] | 1680 | enum compact_result ret; |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 1681 | struct compact_control cc = { |
| 1682 | .nr_freepages = 0, |
| 1683 | .nr_migratepages = 0, |
David Rientjes | 7f354a5 | 2017-02-22 15:44:50 -0800 | [diff] [blame] | 1684 | .total_migrate_scanned = 0, |
| 1685 | .total_free_scanned = 0, |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 1686 | .order = order, |
David Rientjes | 6d7ce55 | 2014-10-09 15:27:27 -0700 | [diff] [blame] | 1687 | .gfp_mask = gfp_mask, |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 1688 | .zone = zone, |
Vlastimil Babka | a5508cd | 2016-07-28 15:49:28 -0700 | [diff] [blame] | 1689 | .mode = (prio == COMPACT_PRIO_ASYNC) ? |
| 1690 | MIGRATE_ASYNC : MIGRATE_SYNC_LIGHT, |
Vlastimil Babka | ebff398 | 2014-12-10 15:43:22 -0800 | [diff] [blame] | 1691 | .alloc_flags = alloc_flags, |
| 1692 | .classzone_idx = classzone_idx, |
Vlastimil Babka | accf624 | 2016-03-17 14:18:15 -0700 | [diff] [blame] | 1693 | .direct_compaction = true, |
Vlastimil Babka | a8e025e | 2016-10-07 16:57:47 -0700 | [diff] [blame] | 1694 | .whole_zone = (prio == MIN_COMPACT_PRIORITY), |
Vlastimil Babka | 9f7e338 | 2016-10-07 17:00:37 -0700 | [diff] [blame] | 1695 | .ignore_skip_hint = (prio == MIN_COMPACT_PRIORITY), |
| 1696 | .ignore_block_suitable = (prio == MIN_COMPACT_PRIORITY) |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 1697 | }; |
| 1698 | INIT_LIST_HEAD(&cc.freepages); |
| 1699 | INIT_LIST_HEAD(&cc.migratepages); |
| 1700 | |
Shaohua Li | e64c523 | 2012-10-08 16:32:27 -0700 | [diff] [blame] | 1701 | ret = compact_zone(zone, &cc); |
| 1702 | |
| 1703 | VM_BUG_ON(!list_empty(&cc.freepages)); |
| 1704 | VM_BUG_ON(!list_empty(&cc.migratepages)); |
| 1705 | |
Shaohua Li | e64c523 | 2012-10-08 16:32:27 -0700 | [diff] [blame] | 1706 | return ret; |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 1707 | } |
| 1708 | |
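| | /* |
| | * Fragmentation index threshold: for costly orders, compaction is skipped |
| | * when the index is non-negative and at or below this value (see |
| | * compaction_suitable()). |
| | */ |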
Mel Gorman | 5e77190 | 2010-05-24 14:32:31 -0700 | [diff] [blame] | 1709 | int sysctl_extfrag_threshold = 500; |
| 1710 | |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 1711 | /** |
| 1712 | * try_to_compact_pages - Direct compact to satisfy a high-order allocation |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 1713 | * @gfp_mask: The GFP mask of the current allocation |
Vlastimil Babka | 1a6d53a | 2015-02-11 15:25:44 -0800 | [diff] [blame] | 1714 | * @order: The order of the current allocation |
| 1715 | * @alloc_flags: The allocation flags of the current allocation |
| 1716 | * @ac: The context of current allocation |
David Rientjes | e0b9dae | 2014-06-04 16:08:28 -0700 | [diff] [blame] | 1717 | * @prio: Determines how hard direct compaction should try to succeed |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 1718 | * |
| 1719 | * This is the main entry point for direct page compaction. |
| 1720 | */ |
Michal Hocko | ea7ab98 | 2016-05-20 16:56:38 -0700 | [diff] [blame] | 1721 | enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order, |
Mel Gorman | c603844 | 2016-05-19 17:13:38 -0700 | [diff] [blame] | 1722 | unsigned int alloc_flags, const struct alloc_context *ac, |
Vlastimil Babka | c3486f5 | 2016-07-28 15:49:30 -0700 | [diff] [blame] | 1723 | enum compact_priority prio) |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 1724 | { |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 1725 | int may_perform_io = gfp_mask & __GFP_IO; |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 1726 | struct zoneref *z; |
| 1727 | struct zone *zone; |
Michal Hocko | 1d4746d | 2016-05-20 16:56:44 -0700 | [diff] [blame] | 1728 | enum compact_result rc = COMPACT_SKIPPED; |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 1729 | |
Michal Hocko | 73e64c5 | 2016-12-14 15:04:07 -0800 | [diff] [blame] | 1730 | /* |
| 1731 | * Check if the GFP flags allow compaction - GFP_NOIO is a really |
| 1732 | * tricky context because the migration might require IO. |
| 1733 | */ |
| 1734 | if (!may_perform_io) |
Vlastimil Babka | 53853e2 | 2014-10-09 15:27:02 -0700 | [diff] [blame] | 1735 | return COMPACT_SKIPPED; |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 1736 | |
Vlastimil Babka | a5508cd | 2016-07-28 15:49:28 -0700 | [diff] [blame] | 1737 | trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio); |
Joonsoo Kim | 837d026 | 2015-02-11 15:27:06 -0800 | [diff] [blame] | 1738 | |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 1739 | /* Compact each zone in the list */ |
Vlastimil Babka | 1a6d53a | 2015-02-11 15:25:44 -0800 | [diff] [blame] | 1740 | for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx, |
| 1741 | ac->nodemask) { |
Michal Hocko | ea7ab98 | 2016-05-20 16:56:38 -0700 | [diff] [blame] | 1742 | enum compact_result status; |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 1743 | |
Vlastimil Babka | a8e025e | 2016-10-07 16:57:47 -0700 | [diff] [blame] | 1744 | if (prio > MIN_COMPACT_PRIORITY |
| 1745 | && compaction_deferred(zone, order)) { |
Michal Hocko | 1d4746d | 2016-05-20 16:56:44 -0700 | [diff] [blame] | 1746 | rc = max_t(enum compact_result, COMPACT_DEFERRED, rc); |
Vlastimil Babka | 53853e2 | 2014-10-09 15:27:02 -0700 | [diff] [blame] | 1747 | continue; |
Michal Hocko | 1d4746d | 2016-05-20 16:56:44 -0700 | [diff] [blame] | 1748 | } |
Vlastimil Babka | 53853e2 | 2014-10-09 15:27:02 -0700 | [diff] [blame] | 1749 | |
Vlastimil Babka | a5508cd | 2016-07-28 15:49:28 -0700 | [diff] [blame] | 1750 | status = compact_zone_order(zone, order, gfp_mask, prio, |
Vlastimil Babka | c3486f5 | 2016-07-28 15:49:30 -0700 | [diff] [blame] | 1751 | alloc_flags, ac_classzone_idx(ac)); |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 1752 | rc = max(status, rc); |
| 1753 | |
Vlastimil Babka | 7ceb009 | 2016-10-07 16:57:44 -0700 | [diff] [blame] | 1754 | /* The allocation should succeed, stop compacting */ |
| 1755 | if (status == COMPACT_SUCCESS) { |
Vlastimil Babka | 53853e2 | 2014-10-09 15:27:02 -0700 | [diff] [blame] | 1756 | /* |
| 1757 | * We think the allocation will succeed in this zone, |
| 1758 | * but it is not certain, hence the false. The caller |
| 1759 | * will repeat this with true if allocation indeed |
| 1760 | * succeeds in this zone. |
| 1761 | */ |
| 1762 | compaction_defer_reset(zone, order, false); |
Vlastimil Babka | 1f9efde | 2014-10-09 15:27:14 -0700 | [diff] [blame] | 1763 | |
Vlastimil Babka | c3486f5 | 2016-07-28 15:49:30 -0700 | [diff] [blame] | 1764 | break; |
Vlastimil Babka | 1f9efde | 2014-10-09 15:27:14 -0700 | [diff] [blame] | 1765 | } |
| 1766 | |
Vlastimil Babka | a5508cd | 2016-07-28 15:49:28 -0700 | [diff] [blame] | 1767 | if (prio != COMPACT_PRIO_ASYNC && (status == COMPACT_COMPLETE || |
Vlastimil Babka | c3486f5 | 2016-07-28 15:49:30 -0700 | [diff] [blame] | 1768 | status == COMPACT_PARTIAL_SKIPPED)) |
Vlastimil Babka | 53853e2 | 2014-10-09 15:27:02 -0700 | [diff] [blame] | 1769 | /* |
| 1770 | * We think that allocation won't succeed in this zone |
| 1771 | * so we defer compaction there. If it ends up |
| 1772 | * succeeding after all, it will be reset. |
| 1773 | */ |
| 1774 | defer_compaction(zone, order); |
Vlastimil Babka | 1f9efde | 2014-10-09 15:27:14 -0700 | [diff] [blame] | 1775 | |
| 1776 | /* |
| 1777 | * We might have stopped compacting due to need_resched() in |
| 1778 | * async compaction, or because a fatal signal was detected. In that |
Vlastimil Babka | c3486f5 | 2016-07-28 15:49:30 -0700 | [diff] [blame] | 1779 | * case do not try further zones. |
Vlastimil Babka | 1f9efde | 2014-10-09 15:27:14 -0700 | [diff] [blame] | 1780 | */ |
Vlastimil Babka | c3486f5 | 2016-07-28 15:49:30 -0700 | [diff] [blame] | 1781 | if ((prio == COMPACT_PRIO_ASYNC && need_resched()) |
| 1782 | || fatal_signal_pending(current)) |
| 1783 | break; |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 1784 | } |
| 1785 | |
| 1786 | return rc; |
| 1787 | } |
| 1788 | |
Mel Gorman | 76ab0f5 | 2010-05-24 14:32:28 -0700 | [diff] [blame] | 1790 | /* Compact all zones within a node */ |
Andrew Morton | 7103f16 | 2013-02-22 16:32:33 -0800 | [diff] [blame] | 1791 | static void compact_node(int nid) |
Rik van Riel | 7be62de | 2012-03-21 16:33:52 -0700 | [diff] [blame] | 1792 | { |
Vlastimil Babka | 791cae9 | 2016-10-07 16:57:38 -0700 | [diff] [blame] | 1793 | pg_data_t *pgdat = NODE_DATA(nid); |
| 1794 | int zoneid; |
| 1795 | struct zone *zone; |
Rik van Riel | 7be62de | 2012-03-21 16:33:52 -0700 | [diff] [blame] | 1796 | struct compact_control cc = { |
| 1797 | .order = -1, |
David Rientjes | 7f354a5 | 2017-02-22 15:44:50 -0800 | [diff] [blame] | 1798 | .total_migrate_scanned = 0, |
| 1799 | .total_free_scanned = 0, |
David Rientjes | e0b9dae | 2014-06-04 16:08:28 -0700 | [diff] [blame] | 1800 | .mode = MIGRATE_SYNC, |
David Rientjes | 91ca918 | 2014-04-03 14:47:23 -0700 | [diff] [blame] | 1801 | .ignore_skip_hint = true, |
Vlastimil Babka | 06ed299 | 2016-10-07 16:57:35 -0700 | [diff] [blame] | 1802 | .whole_zone = true, |
Michal Hocko | 73e64c5 | 2016-12-14 15:04:07 -0800 | [diff] [blame] | 1803 | .gfp_mask = GFP_KERNEL, |
Rik van Riel | 7be62de | 2012-03-21 16:33:52 -0700 | [diff] [blame] | 1804 | }; |
| 1805 | |
Vlastimil Babka | 791cae9 | 2016-10-07 16:57:38 -0700 | [diff] [blame] | 1807 | for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { |
| 1809 | zone = &pgdat->node_zones[zoneid]; |
| 1810 | if (!populated_zone(zone)) |
| 1811 | continue; |
| 1812 | |
| 1813 | cc.nr_freepages = 0; |
| 1814 | cc.nr_migratepages = 0; |
| 1815 | cc.zone = zone; |
| 1816 | INIT_LIST_HEAD(&cc.freepages); |
| 1817 | INIT_LIST_HEAD(&cc.migratepages); |
| 1818 | |
| 1819 | compact_zone(zone, &cc); |
| 1820 | |
| 1821 | VM_BUG_ON(!list_empty(&cc.freepages)); |
| 1822 | VM_BUG_ON(!list_empty(&cc.migratepages)); |
| 1823 | } |
Rik van Riel | 7be62de | 2012-03-21 16:33:52 -0700 | [diff] [blame] | 1824 | } |
| 1825 | |
Mel Gorman | 76ab0f5 | 2010-05-24 14:32:28 -0700 | [diff] [blame] | 1826 | /* Compact all nodes in the system */ |
Jason Liu | 7964c06 | 2013-01-11 14:31:47 -0800 | [diff] [blame] | 1827 | static void compact_nodes(void) |
Mel Gorman | 76ab0f5 | 2010-05-24 14:32:28 -0700 | [diff] [blame] | 1828 | { |
| 1829 | int nid; |
| 1830 | |
Hugh Dickins | 8575ec2 | 2012-03-21 16:33:53 -0700 | [diff] [blame] | 1831 | /* Flush pending updates to the LRU lists */ |
| 1832 | lru_add_drain_all(); |
| 1833 | |
Mel Gorman | 76ab0f5 | 2010-05-24 14:32:28 -0700 | [diff] [blame] | 1834 | for_each_online_node(nid) |
| 1835 | compact_node(nid); |
Mel Gorman | 76ab0f5 | 2010-05-24 14:32:28 -0700 | [diff] [blame] | 1836 | } |
| 1837 | |
| 1838 | /* The written value is actually unused; writing anything compacts all memory */ |
| 1839 | int sysctl_compact_memory; |
| 1840 | |
Yaowei Bai | fec4eb2 | 2016-01-14 15:20:09 -0800 | [diff] [blame] | 1841 | /* |
| 1842 | * This is the entry point for compacting all nodes via |
| 1843 | * /proc/sys/vm/compact_memory |
| 1844 | */ |
Mel Gorman | 76ab0f5 | 2010-05-24 14:32:28 -0700 | [diff] [blame] | 1845 | int sysctl_compaction_handler(struct ctl_table *table, int write, |
| 1846 | void __user *buffer, size_t *length, loff_t *ppos) |
| 1847 | { |
| 1848 | if (write) |
Jason Liu | 7964c06 | 2013-01-11 14:31:47 -0800 | [diff] [blame] | 1849 | compact_nodes(); |
Mel Gorman | 76ab0f5 | 2010-05-24 14:32:28 -0700 | [diff] [blame] | 1850 | |
| 1851 | return 0; |
| 1852 | } |
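/*
 * Illustrative userspace sketch (not part of this file): compaction of all
 * nodes is requested by writing any value to /proc/sys/vm/compact_memory,
 * e.g. "echo 1 > /proc/sys/vm/compact_memory" or the write() below.
 */
#if 0	/* example only; userspace code, not kernel code */
#include <fcntl.h>
#include <unistd.h>

static int trigger_full_compaction(void)
{
	int fd = open("/proc/sys/vm/compact_memory", O_WRONLY);

	if (fd < 0)
		return -1;
	/* The written value is ignored; the write itself triggers compact_nodes(). */
	if (write(fd, "1", 1) != 1) {
		close(fd);
		return -1;
	}
	return close(fd);
}
#endif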
Mel Gorman | ed4a6d7 | 2010-05-24 14:32:29 -0700 | [diff] [blame] | 1853 | |
Mel Gorman | 5e77190 | 2010-05-24 14:32:31 -0700 | [diff] [blame] | 1854 | int sysctl_extfrag_handler(struct ctl_table *table, int write, |
| 1855 | void __user *buffer, size_t *length, loff_t *ppos) |
| 1856 | { |
| 1857 | proc_dointvec_minmax(table, write, buffer, length, ppos); |
| 1858 | |
| 1859 | return 0; |
| 1860 | } |
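/*
 * Usage note: this handler backs the extfrag_threshold sysctl registered
 * outside this file (in kernel/sysctl.c), so the value can be adjusted with
 * e.g. "sysctl vm.extfrag_threshold=500" or by writing to
 * /proc/sys/vm/extfrag_threshold; proc_dointvec_minmax() clamps the input to
 * the limits given in that sysctl table entry.
 */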
| 1861 | |
Mel Gorman | ed4a6d7 | 2010-05-24 14:32:29 -0700 | [diff] [blame] | 1862 | #if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA) |
Rashika Kheria | 74e77fb | 2014-04-03 14:48:01 -0700 | [diff] [blame] | 1863 | static ssize_t sysfs_compact_node(struct device *dev, |
Kay Sievers | 10fbcf4 | 2011-12-21 14:48:43 -0800 | [diff] [blame] | 1864 | struct device_attribute *attr, |
Mel Gorman | ed4a6d7 | 2010-05-24 14:32:29 -0700 | [diff] [blame] | 1865 | const char *buf, size_t count) |
| 1866 | { |
Hugh Dickins | 8575ec2 | 2012-03-21 16:33:53 -0700 | [diff] [blame] | 1867 | int nid = dev->id; |
| 1868 | |
| 1869 | if (nid >= 0 && nid < nr_node_ids && node_online(nid)) { |
| 1870 | /* Flush pending updates to the LRU lists */ |
| 1871 | lru_add_drain_all(); |
| 1872 | |
| 1873 | compact_node(nid); |
| 1874 | } |
Mel Gorman | ed4a6d7 | 2010-05-24 14:32:29 -0700 | [diff] [blame] | 1875 | |
| 1876 | return count; |
| 1877 | } |
Kay Sievers | 10fbcf4 | 2011-12-21 14:48:43 -0800 | [diff] [blame] | 1878 | static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node); |
Mel Gorman | ed4a6d7 | 2010-05-24 14:32:29 -0700 | [diff] [blame] | 1879 | |
| 1880 | int compaction_register_node(struct node *node) |
| 1881 | { |
Kay Sievers | 10fbcf4 | 2011-12-21 14:48:43 -0800 | [diff] [blame] | 1882 | return device_create_file(&node->dev, &dev_attr_compact); |
Mel Gorman | ed4a6d7 | 2010-05-24 14:32:29 -0700 | [diff] [blame] | 1883 | } |
| 1884 | |
| 1885 | void compaction_unregister_node(struct node *node) |
| 1886 | { |
Kay Sievers | 10fbcf4 | 2011-12-21 14:48:43 -0800 | [diff] [blame] | 1887 | return device_remove_file(&node->dev, &dev_attr_compact); |
Mel Gorman | ed4a6d7 | 2010-05-24 14:32:29 -0700 | [diff] [blame] | 1888 | } |
| 1889 | #endif /* CONFIG_SYSFS && CONFIG_NUMA */ |
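/*
 * Usage note (path assumed from the standard node device layout): with
 * CONFIG_SYSFS and CONFIG_NUMA, the "compact" attribute above typically
 * appears as /sys/devices/system/node/node<N>/compact, so a single node can
 * be compacted with e.g. "echo 1 > /sys/devices/system/node/node0/compact".
 */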
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 1890 | |
Vlastimil Babka | 698b1b3 | 2016-03-17 14:18:08 -0700 | [diff] [blame] | 1891 | static inline bool kcompactd_work_requested(pg_data_t *pgdat) |
| 1892 | { |
Vlastimil Babka | 172400c | 2016-05-05 16:22:32 -0700 | [diff] [blame] | 1893 | return pgdat->kcompactd_max_order > 0 || kthread_should_stop(); |
Vlastimil Babka | 698b1b3 | 2016-03-17 14:18:08 -0700 | [diff] [blame] | 1894 | } |
| 1895 | |
| 1896 | static bool kcompactd_node_suitable(pg_data_t *pgdat) |
| 1897 | { |
| 1898 | int zoneid; |
| 1899 | struct zone *zone; |
| 1900 | enum zone_type classzone_idx = pgdat->kcompactd_classzone_idx; |
| 1901 | |
Chen Feng | 6cd9dc3 | 2016-05-20 16:59:02 -0700 | [diff] [blame] | 1902 | for (zoneid = 0; zoneid <= classzone_idx; zoneid++) { |
Vlastimil Babka | 698b1b3 | 2016-03-17 14:18:08 -0700 | [diff] [blame] | 1903 | zone = &pgdat->node_zones[zoneid]; |
| 1904 | |
| 1905 | if (!populated_zone(zone)) |
| 1906 | continue; |
| 1907 | |
| 1908 | if (compaction_suitable(zone, pgdat->kcompactd_max_order, 0, |
| 1909 | classzone_idx) == COMPACT_CONTINUE) |
| 1910 | return true; |
| 1911 | } |
| 1912 | |
| 1913 | return false; |
| 1914 | } |
| 1915 | |
| 1916 | static void kcompactd_do_work(pg_data_t *pgdat) |
| 1917 | { |
| 1918 | /* |
| 1919 | * With no specific allocation to satisfy, compact all zones so that a |
| 1920 | * page of the requested order is allocatable. |
| 1921 | */ |
| 1922 | int zoneid; |
| 1923 | struct zone *zone; |
| 1924 | struct compact_control cc = { |
| 1925 | .order = pgdat->kcompactd_max_order, |
David Rientjes | 7f354a5 | 2017-02-22 15:44:50 -0800 | [diff] [blame] | 1926 | .total_migrate_scanned = 0, |
| 1927 | .total_free_scanned = 0, |
Vlastimil Babka | 698b1b3 | 2016-03-17 14:18:08 -0700 | [diff] [blame] | 1928 | .classzone_idx = pgdat->kcompactd_classzone_idx, |
| 1929 | .mode = MIGRATE_SYNC_LIGHT, |
| 1930 | .ignore_skip_hint = true, |
Michal Hocko | 73e64c5 | 2016-12-14 15:04:07 -0800 | [diff] [blame] | 1931 | .gfp_mask = GFP_KERNEL, |
Vlastimil Babka | 698b1b3 | 2016-03-17 14:18:08 -0700 | [diff] [blame] | 1933 | }; |
Vlastimil Babka | 698b1b3 | 2016-03-17 14:18:08 -0700 | [diff] [blame] | 1934 | trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order, |
| 1935 | cc.classzone_idx); |
David Rientjes | 7f354a5 | 2017-02-22 15:44:50 -0800 | [diff] [blame] | 1936 | count_compact_event(KCOMPACTD_WAKE); |
Vlastimil Babka | 698b1b3 | 2016-03-17 14:18:08 -0700 | [diff] [blame] | 1937 | |
Chen Feng | 6cd9dc3 | 2016-05-20 16:59:02 -0700 | [diff] [blame] | 1938 | for (zoneid = 0; zoneid <= cc.classzone_idx; zoneid++) { |
Vlastimil Babka | 698b1b3 | 2016-03-17 14:18:08 -0700 | [diff] [blame] | 1939 | int status; |
| 1940 | |
| 1941 | zone = &pgdat->node_zones[zoneid]; |
| 1942 | if (!populated_zone(zone)) |
| 1943 | continue; |
| 1944 | |
| 1945 | if (compaction_deferred(zone, cc.order)) |
| 1946 | continue; |
| 1947 | |
| 1948 | if (compaction_suitable(zone, cc.order, 0, zoneid) != |
| 1949 | COMPACT_CONTINUE) |
| 1950 | continue; |
| 1951 | |
| 1952 | cc.nr_freepages = 0; |
| 1953 | cc.nr_migratepages = 0; |
David Rientjes | 7f354a5 | 2017-02-22 15:44:50 -0800 | [diff] [blame] | 1954 | cc.total_migrate_scanned = 0; |
| 1955 | cc.total_free_scanned = 0; |
Vlastimil Babka | 698b1b3 | 2016-03-17 14:18:08 -0700 | [diff] [blame] | 1956 | cc.zone = zone; |
| 1957 | INIT_LIST_HEAD(&cc.freepages); |
| 1958 | INIT_LIST_HEAD(&cc.migratepages); |
| 1959 | |
Vlastimil Babka | 172400c | 2016-05-05 16:22:32 -0700 | [diff] [blame] | 1960 | if (kthread_should_stop()) |
| 1961 | return; |
Vlastimil Babka | 698b1b3 | 2016-03-17 14:18:08 -0700 | [diff] [blame] | 1962 | status = compact_zone(zone, &cc); |
| 1963 | |
Vlastimil Babka | 7ceb009 | 2016-10-07 16:57:44 -0700 | [diff] [blame] | 1964 | if (status == COMPACT_SUCCESS) { |
Vlastimil Babka | 698b1b3 | 2016-03-17 14:18:08 -0700 | [diff] [blame] | 1965 | compaction_defer_reset(zone, cc.order, false); |
Michal Hocko | c8f7de0 | 2016-05-20 16:56:47 -0700 | [diff] [blame] | 1966 | } else if (status == COMPACT_PARTIAL_SKIPPED || status == COMPACT_COMPLETE) { |
Vlastimil Babka | 698b1b3 | 2016-03-17 14:18:08 -0700 | [diff] [blame] | 1967 | /* |
| 1968 | * We use sync migration mode here, so we defer like |
| 1969 | * sync direct compaction does. |
| 1970 | */ |
| 1971 | defer_compaction(zone, cc.order); |
| 1972 | } |
| 1973 | |
David Rientjes | 7f354a5 | 2017-02-22 15:44:50 -0800 | [diff] [blame] | 1974 | count_compact_events(KCOMPACTD_MIGRATE_SCANNED, |
| 1975 | cc.total_migrate_scanned); |
| 1976 | count_compact_events(KCOMPACTD_FREE_SCANNED, |
| 1977 | cc.total_free_scanned); |
| 1978 | |
Vlastimil Babka | 698b1b3 | 2016-03-17 14:18:08 -0700 | [diff] [blame] | 1979 | VM_BUG_ON(!list_empty(&cc.freepages)); |
| 1980 | VM_BUG_ON(!list_empty(&cc.migratepages)); |
| 1981 | } |
| 1982 | |
| 1983 | /* |
| 1984 | * Regardless of success, we are done until woken up next time. But |
| 1985 | * remember the requested order/classzone_idx in case it was higher or |
| 1986 | * tighter than our current ones. |
| 1987 | */ |
| 1988 | if (pgdat->kcompactd_max_order <= cc.order) |
| 1989 | pgdat->kcompactd_max_order = 0; |
| 1990 | if (pgdat->kcompactd_classzone_idx >= cc.classzone_idx) |
| 1991 | pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1; |
| 1992 | } |
| 1993 | |
| 1994 | void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx) |
| 1995 | { |
| 1996 | if (!order) |
| 1997 | return; |
| 1998 | |
| 1999 | if (pgdat->kcompactd_max_order < order) |
| 2000 | pgdat->kcompactd_max_order = order; |
| 2001 | |
Davidlohr Bueso | 46acef0 | 2017-02-22 15:44:55 -0800 | [diff] [blame] | 2002 | /* |
| 2003 | * Pairs with the implicit barrier in wait_event_freezable() |
| 2004 | * so that wakeups are not missed by the lockless |
| 2005 | * waitqueue_active() check below. |
| 2006 | */ |
| 2007 | smp_acquire__after_ctrl_dep(); |
| 2008 | |
Vlastimil Babka | 698b1b3 | 2016-03-17 14:18:08 -0700 | [diff] [blame] | 2009 | if (pgdat->kcompactd_classzone_idx > classzone_idx) |
| 2010 | pgdat->kcompactd_classzone_idx = classzone_idx; |
| 2011 | |
| 2012 | if (!waitqueue_active(&pgdat->kcompactd_wait)) |
| 2013 | return; |
| 2014 | |
| 2015 | if (!kcompactd_node_suitable(pgdat)) |
| 2016 | return; |
| 2017 | |
| 2018 | trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order, |
| 2019 | classzone_idx); |
| 2020 | wake_up_interruptible(&pgdat->kcompactd_wait); |
| 2021 | } |
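/*
 * Hypothetical caller sketch: a reclaim path that wants background compaction
 * for a node only needs the node's pg_data_t, the allocation order and the
 * classzone_idx. The function name below is made up for illustration; the
 * real callers live in the reclaim code, not here.
 */
#if 0	/* example only */
static void example_kick_background_compaction(pg_data_t *pgdat,
					       unsigned int order,
					       int classzone_idx)
{
	/*
	 * wakeup_kcompactd() ignores order-0 requests and only issues a
	 * wakeup when kcompactd_node_suitable() says compaction can help.
	 */
	wakeup_kcompactd(pgdat, order, classzone_idx);
}
#endif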
| 2022 | |
| 2023 | /* |
| 2024 | * The background compaction daemon, started as a kernel thread |
| 2025 | * from the init process. |
| 2026 | */ |
| 2027 | static int kcompactd(void *p) |
| 2028 | { |
| 2029 | pg_data_t *pgdat = (pg_data_t *)p; |
| 2030 | struct task_struct *tsk = current; |
| 2031 | |
| 2032 | const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); |
| 2033 | |
| 2034 | if (!cpumask_empty(cpumask)) |
| 2035 | set_cpus_allowed_ptr(tsk, cpumask); |
| 2036 | |
| 2037 | set_freezable(); |
| 2038 | |
| 2039 | pgdat->kcompactd_max_order = 0; |
| 2040 | pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1; |
| 2041 | |
| 2042 | while (!kthread_should_stop()) { |
| 2043 | trace_mm_compaction_kcompactd_sleep(pgdat->node_id); |
| 2044 | wait_event_freezable(pgdat->kcompactd_wait, |
| 2045 | kcompactd_work_requested(pgdat)); |
| 2046 | |
| 2047 | kcompactd_do_work(pgdat); |
| 2048 | } |
| 2049 | |
| 2050 | return 0; |
| 2051 | } |
| 2052 | |
| 2053 | /* |
| 2054 | * This kcompactd start function will be called by init and node-hot-add. |
| 2055 | * On node-hot-add, kcompactd will be moved to proper cpus if cpus are hot-added. |
| 2056 | */ |
| 2057 | int kcompactd_run(int nid) |
| 2058 | { |
| 2059 | pg_data_t *pgdat = NODE_DATA(nid); |
| 2060 | int ret = 0; |
| 2061 | |
| 2062 | if (pgdat->kcompactd) |
| 2063 | return 0; |
| 2064 | |
| 2065 | pgdat->kcompactd = kthread_run(kcompactd, pgdat, "kcompactd%d", nid); |
| 2066 | if (IS_ERR(pgdat->kcompactd)) { |
| 2067 | pr_err("Failed to start kcompactd on node %d\n", nid); |
| 2068 | ret = PTR_ERR(pgdat->kcompactd); |
| 2069 | pgdat->kcompactd = NULL; |
| 2070 | } |
| 2071 | return ret; |
| 2072 | } |
| 2073 | |
| 2074 | /* |
| 2075 | * Called by memory hotplug when all memory in a node is offlined. Caller must |
| 2076 | * hold mem_hotplug_begin/end(). |
| 2077 | */ |
| 2078 | void kcompactd_stop(int nid) |
| 2079 | { |
| 2080 | struct task_struct *kcompactd = NODE_DATA(nid)->kcompactd; |
| 2081 | |
| 2082 | if (kcompactd) { |
| 2083 | kthread_stop(kcompactd); |
| 2084 | NODE_DATA(nid)->kcompactd = NULL; |
| 2085 | } |
| 2086 | } |
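/*
 * Illustrative sketch (hypothetical helper, not a real hotplug handler): the
 * intended pairing of kcompactd_run()/kcompactd_stop() described in the
 * comments above - start the per-node daemon when a node gains memory and
 * stop it once all of the node's memory has been offlined.
 */
#if 0	/* example only */
static void example_node_memory_event(int nid, bool has_memory)
{
	if (has_memory) {
		if (kcompactd_run(nid))
			pr_warn("kcompactd not running on node %d\n", nid);
	} else {
		kcompactd_stop(nid);
	}
}
#endif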
| 2087 | |
| 2088 | /* |
| 2089 | * It's optimal to keep each kcompactd on the same CPUs as its node's |
| 2090 | * memory, but that is not required for correctness. So if the last CPU |
| 2091 | * in a node goes offline, kcompactd may run anywhere; when the first |
| 2092 | * one comes back online, its cpu binding is restored. |
| 2093 | */ |
Anna-Maria Gleixner | e46b1db | 2016-11-27 00:13:42 +0100 | [diff] [blame] | 2094 | static int kcompactd_cpu_online(unsigned int cpu) |
Vlastimil Babka | 698b1b3 | 2016-03-17 14:18:08 -0700 | [diff] [blame] | 2095 | { |
| 2096 | int nid; |
| 2097 | |
Anna-Maria Gleixner | e46b1db | 2016-11-27 00:13:42 +0100 | [diff] [blame] | 2098 | for_each_node_state(nid, N_MEMORY) { |
| 2099 | pg_data_t *pgdat = NODE_DATA(nid); |
| 2100 | const struct cpumask *mask; |
Vlastimil Babka | 698b1b3 | 2016-03-17 14:18:08 -0700 | [diff] [blame] | 2101 | |
Anna-Maria Gleixner | e46b1db | 2016-11-27 00:13:42 +0100 | [diff] [blame] | 2102 | mask = cpumask_of_node(pgdat->node_id); |
Vlastimil Babka | 698b1b3 | 2016-03-17 14:18:08 -0700 | [diff] [blame] | 2103 | |
Anna-Maria Gleixner | e46b1db | 2016-11-27 00:13:42 +0100 | [diff] [blame] | 2104 | if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids) |
| 2105 | /* One of our CPUs online: restore mask */ |
| 2106 | set_cpus_allowed_ptr(pgdat->kcompactd, mask); |
Vlastimil Babka | 698b1b3 | 2016-03-17 14:18:08 -0700 | [diff] [blame] | 2107 | } |
Anna-Maria Gleixner | e46b1db | 2016-11-27 00:13:42 +0100 | [diff] [blame] | 2108 | return 0; |
Vlastimil Babka | 698b1b3 | 2016-03-17 14:18:08 -0700 | [diff] [blame] | 2109 | } |
| 2110 | |
| 2111 | static int __init kcompactd_init(void) |
| 2112 | { |
| 2113 | int nid; |
Anna-Maria Gleixner | e46b1db | 2016-11-27 00:13:42 +0100 | [diff] [blame] | 2114 | int ret; |
| 2115 | |
| 2116 | ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, |
| 2117 | "mm/compaction:online", |
| 2118 | kcompactd_cpu_online, NULL); |
| 2119 | if (ret < 0) { |
| 2120 | pr_err("kcompactd: failed to register hotplug callbacks.\n"); |
| 2121 | return ret; |
| 2122 | } |
Vlastimil Babka | 698b1b3 | 2016-03-17 14:18:08 -0700 | [diff] [blame] | 2123 | |
| 2124 | for_each_node_state(nid, N_MEMORY) |
| 2125 | kcompactd_run(nid); |
Vlastimil Babka | 698b1b3 | 2016-03-17 14:18:08 -0700 | [diff] [blame] | 2126 | return 0; |
| 2127 | } |
| 2128 | subsys_initcall(kcompactd_init) |
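/*
 * Note: kcompactd_run() names each thread "kcompactd%d", so after boot one
 * daemon per memory node should be visible as e.g. "kcompactd0" in ps output.
 */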
| 2129 | |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 2130 | #endif /* CONFIG_COMPACTION */ |