blob: 017a1a1963cbe0cfb0207ab367fe3dea793e90dc
Mel Gorman748446b2010-05-24 14:32:27 -07001/*
2 * linux/mm/compaction.c
3 *
4 * Memory compaction for the reduction of external fragmentation. Note that
5 * this heavily depends upon page migration to do all the real heavy
6 * lifting
7 *
8 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
9 */
Vlastimil Babka698b1b32016-03-17 14:18:08 -070010#include <linux/cpu.h>
Mel Gorman748446b2010-05-24 14:32:27 -070011#include <linux/swap.h>
12#include <linux/migrate.h>
13#include <linux/compaction.h>
14#include <linux/mm_inline.h>
15#include <linux/backing-dev.h>
Mel Gorman76ab0f52010-05-24 14:32:28 -070016#include <linux/sysctl.h>
Mel Gormaned4a6d72010-05-24 14:32:29 -070017#include <linux/sysfs.h>
Rafael Aquinibf6bddf2012-12-11 16:02:42 -080018#include <linux/balloon_compaction.h>
Minchan Kim194159f2013-02-22 16:33:58 -080019#include <linux/page-isolation.h>
Andrey Ryabininb8c73fc2015-02-13 14:39:28 -080020#include <linux/kasan.h>
Vlastimil Babka698b1b32016-03-17 14:18:08 -070021#include <linux/kthread.h>
22#include <linux/freezer.h>
Mel Gorman748446b2010-05-24 14:32:27 -070023#include "internal.h"
24
Minchan Kim010fc292012-12-20 15:05:06 -080025#ifdef CONFIG_COMPACTION
26static inline void count_compact_event(enum vm_event_item item)
27{
28 count_vm_event(item);
29}
30
31static inline void count_compact_events(enum vm_event_item item, long delta)
32{
33 count_vm_events(item, delta);
34}
35#else
36#define count_compact_event(item) do { } while (0)
37#define count_compact_events(item, delta) do { } while (0)
38#endif
39
Michal Nazarewiczff9543f2011-12-29 13:09:50 +010040#if defined CONFIG_COMPACTION || defined CONFIG_CMA
41
Mel Gormanb7aba692011-01-13 15:45:54 -080042#define CREATE_TRACE_POINTS
43#include <trace/events/compaction.h>
44
Vlastimil Babka06b66402016-05-19 17:11:48 -070045#define block_start_pfn(pfn, order) round_down(pfn, 1UL << (order))
46#define block_end_pfn(pfn, order) ALIGN((pfn) + 1, 1UL << (order))
47#define pageblock_start_pfn(pfn) block_start_pfn(pfn, pageblock_order)
48#define pageblock_end_pfn(pfn) block_end_pfn(pfn, pageblock_order)
49
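The four macros above are just power-of-two rounding on page frame numbers: a block start is the PFN rounded down to a pageblock boundary, and the block end is one past the last PFN of that block. A small stand-alone sketch of the same arithmetic, assuming a pageblock order of 9 (512 pages of 4KiB, i.e. 2MiB pageblocks); the names here are illustrative, not the kernel macros:

#include <stdio.h>

#define PAGEBLOCK_ORDER 9UL                     /* assumption: 2MiB pageblocks on 4KiB pages */
#define PAGEBLOCK_NR_PAGES (1UL << PAGEBLOCK_ORDER)

/* Same arithmetic as block_start_pfn(): round_down(pfn, 512). */
static unsigned long pb_start(unsigned long pfn)
{
	return pfn & ~(PAGEBLOCK_NR_PAGES - 1);
}

/* Same arithmetic as block_end_pfn(): ALIGN(pfn + 1, 512). */
static unsigned long pb_end(unsigned long pfn)
{
	return (pfn + PAGEBLOCK_NR_PAGES) & ~(PAGEBLOCK_NR_PAGES - 1);
}

int main(void)
{
	unsigned long pfn = 1000;

	/* PFN 1000 lies in the pageblock [512, 1024): start 512, one-past-end 1024. */
	printf("pfn %lu -> block [%lu, %lu)\n", pfn, pb_start(pfn), pb_end(pfn));
	return 0;
}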
Mel Gorman748446b2010-05-24 14:32:27 -070050static unsigned long release_freepages(struct list_head *freelist)
51{
52 struct page *page, *next;
Vlastimil Babka6bace092014-12-10 15:43:31 -080053 unsigned long high_pfn = 0;
Mel Gorman748446b2010-05-24 14:32:27 -070054
55 list_for_each_entry_safe(page, next, freelist, lru) {
Vlastimil Babka6bace092014-12-10 15:43:31 -080056 unsigned long pfn = page_to_pfn(page);
Mel Gorman748446b2010-05-24 14:32:27 -070057 list_del(&page->lru);
58 __free_page(page);
Vlastimil Babka6bace092014-12-10 15:43:31 -080059 if (pfn > high_pfn)
60 high_pfn = pfn;
Mel Gorman748446b2010-05-24 14:32:27 -070061 }
62
Vlastimil Babka6bace092014-12-10 15:43:31 -080063 return high_pfn;
Mel Gorman748446b2010-05-24 14:32:27 -070064}
65
Michal Nazarewiczff9543f2011-12-29 13:09:50 +010066static void map_pages(struct list_head *list)
67{
68 struct page *page;
69
70 list_for_each_entry(page, list, lru) {
71 arch_alloc_page(page, 0);
72 kernel_map_pages(page, 1, 1);
Andrey Ryabininb8c73fc2015-02-13 14:39:28 -080073 kasan_alloc_pages(page, 0);
Michal Nazarewiczff9543f2011-12-29 13:09:50 +010074 }
75}
76
Michal Nazarewicz47118af2011-12-29 13:09:50 +010077static inline bool migrate_async_suitable(int migratetype)
78{
79 return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
80}
81
Mel Gormanbb13ffe2012-10-08 16:32:41 -070082#ifdef CONFIG_COMPACTION
Joonsoo Kim24e27162015-02-11 15:27:09 -080083
84/* Do not skip compaction more than 64 times */
85#define COMPACT_MAX_DEFER_SHIFT 6
86
87/*
88 * Compaction is deferred when compaction fails to result in a page
89 * allocation success. 1 << compact_defer_shift compactions are skipped up

90 * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
91 */
92void defer_compaction(struct zone *zone, int order)
93{
94 zone->compact_considered = 0;
95 zone->compact_defer_shift++;
96
97 if (order < zone->compact_order_failed)
98 zone->compact_order_failed = order;
99
100 if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
101 zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
102
103 trace_mm_compaction_defer_compaction(zone, order);
104}
105
106/* Returns true if compaction should be skipped this time */
107bool compaction_deferred(struct zone *zone, int order)
108{
109 unsigned long defer_limit = 1UL << zone->compact_defer_shift;
110
111 if (order < zone->compact_order_failed)
112 return false;
113
114 /* Avoid possible overflow */
115 if (++zone->compact_considered > defer_limit)
116 zone->compact_considered = defer_limit;
117
118 if (zone->compact_considered >= defer_limit)
119 return false;
120
121 trace_mm_compaction_deferred(zone, order);
122
123 return true;
124}
125
126/*
127 * Update defer tracking counters after successful compaction of given order,
128 * which means an allocation either succeeded (alloc_success == true) or is
129 * expected to succeed.
130 */
131void compaction_defer_reset(struct zone *zone, int order,
132 bool alloc_success)
133{
134 if (alloc_success) {
135 zone->compact_considered = 0;
136 zone->compact_defer_shift = 0;
137 }
138 if (order >= zone->compact_order_failed)
139 zone->compact_order_failed = order + 1;
140
141 trace_mm_compaction_defer_reset(zone, order);
142}
143
144/* Returns true if restarting compaction after many failures */
145bool compaction_restarting(struct zone *zone, int order)
146{
147 if (order < zone->compact_order_failed)
148 return false;
149
150 return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
151 zone->compact_considered >= 1UL << zone->compact_defer_shift;
152}
153
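The deferral helpers above implement an exponential backoff: each failure doubles how many subsequent compaction attempts are skipped, capped at 1 << COMPACT_MAX_DEFER_SHIFT = 64. A hypothetical userspace model of the two counters (the per-order bookkeeping is omitted) shows the skip pattern:

#include <stdbool.h>
#include <stdio.h>

#define COMPACT_MAX_DEFER_SHIFT 6	/* same cap as above: skip at most 64 attempts */

struct zone_defer {
	unsigned int considered;	/* attempts seen since the last failure/reset */
	unsigned int defer_shift;	/* 1 << defer_shift attempts are skipped */
};

/* Mirrors defer_compaction(): called when compaction failed to help an allocation. */
static void defer(struct zone_defer *z)
{
	z->considered = 0;
	if (++z->defer_shift > COMPACT_MAX_DEFER_SHIFT)
		z->defer_shift = COMPACT_MAX_DEFER_SHIFT;
}

/* Mirrors compaction_deferred(): true means "skip this attempt". */
static bool deferred(struct zone_defer *z)
{
	unsigned int limit = 1U << z->defer_shift;

	if (++z->considered > limit)
		z->considered = limit;
	return z->considered < limit;
}

int main(void)
{
	struct zone_defer z = { 0, 0 };
	int attempt, skipped = 0;

	defer(&z);			/* first failure: limit becomes 2 */
	defer(&z);			/* second failure: limit becomes 4 */
	for (attempt = 1; attempt <= 8; attempt++)
		if (deferred(&z))
			skipped++;
	/*
	 * Attempts 1-3 are skipped; from attempt 4 on compaction would run
	 * again (and a further failure would call defer() with a doubled limit).
	 */
	printf("skipped %d of 8 attempts\n", skipped);
	return 0;
}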
Mel Gormanbb13ffe2012-10-08 16:32:41 -0700154/* Returns true if the pageblock should be scanned for pages to isolate. */
155static inline bool isolation_suitable(struct compact_control *cc,
156 struct page *page)
157{
158 if (cc->ignore_skip_hint)
159 return true;
160
161 return !get_pageblock_skip(page);
162}
163
Vlastimil Babka02333642015-09-08 15:02:42 -0700164static void reset_cached_positions(struct zone *zone)
165{
166 zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
167 zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
Joonsoo Kim623446e2016-03-15 14:57:45 -0700168 zone->compact_cached_free_pfn =
Vlastimil Babka06b66402016-05-19 17:11:48 -0700169 pageblock_start_pfn(zone_end_pfn(zone) - 1);
Vlastimil Babka02333642015-09-08 15:02:42 -0700170}
171
Mel Gormanbb13ffe2012-10-08 16:32:41 -0700172/*
173 * This function is called to clear all cached information on pageblocks that
174 * should be skipped for page isolation when the migrate and free page scanner
175 * meet.
176 */
Mel Gorman62997022012-10-08 16:32:47 -0700177static void __reset_isolation_suitable(struct zone *zone)
Mel Gormanbb13ffe2012-10-08 16:32:41 -0700178{
179 unsigned long start_pfn = zone->zone_start_pfn;
Cody P Schafer108bcc92013-02-22 16:35:23 -0800180 unsigned long end_pfn = zone_end_pfn(zone);
Mel Gormanbb13ffe2012-10-08 16:32:41 -0700181 unsigned long pfn;
182
Mel Gorman62997022012-10-08 16:32:47 -0700183 zone->compact_blockskip_flush = false;
Mel Gormanbb13ffe2012-10-08 16:32:41 -0700184
185 /* Walk the zone and mark every pageblock as suitable for isolation */
186 for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
187 struct page *page;
188
189 cond_resched();
190
191 if (!pfn_valid(pfn))
192 continue;
193
194 page = pfn_to_page(pfn);
195 if (zone != page_zone(page))
196 continue;
197
198 clear_pageblock_skip(page);
199 }
Vlastimil Babka02333642015-09-08 15:02:42 -0700200
201 reset_cached_positions(zone);
Mel Gormanbb13ffe2012-10-08 16:32:41 -0700202}
203
Mel Gorman62997022012-10-08 16:32:47 -0700204void reset_isolation_suitable(pg_data_t *pgdat)
205{
206 int zoneid;
207
208 for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
209 struct zone *zone = &pgdat->node_zones[zoneid];
210 if (!populated_zone(zone))
211 continue;
212
213 /* Only flush if a full compaction finished recently */
214 if (zone->compact_blockskip_flush)
215 __reset_isolation_suitable(zone);
216 }
217}
218
Mel Gormanbb13ffe2012-10-08 16:32:41 -0700219/*
220 * If no pages were isolated then mark this pageblock to be skipped in the
Mel Gorman62997022012-10-08 16:32:47 -0700221 * future. The information is later cleared by __reset_isolation_suitable().
Mel Gormanbb13ffe2012-10-08 16:32:41 -0700222 */
Mel Gormanc89511a2012-10-08 16:32:45 -0700223static void update_pageblock_skip(struct compact_control *cc,
224 struct page *page, unsigned long nr_isolated,
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -0700225 bool migrate_scanner)
Mel Gormanbb13ffe2012-10-08 16:32:41 -0700226{
Mel Gormanc89511a2012-10-08 16:32:45 -0700227 struct zone *zone = cc->zone;
David Rientjes35979ef2014-06-04 16:08:27 -0700228 unsigned long pfn;
Joonsoo Kim6815bf32013-12-18 17:08:52 -0800229
230 if (cc->ignore_skip_hint)
231 return;
232
Mel Gormanbb13ffe2012-10-08 16:32:41 -0700233 if (!page)
234 return;
235
David Rientjes35979ef2014-06-04 16:08:27 -0700236 if (nr_isolated)
237 return;
238
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -0700239 set_pageblock_skip(page);
Mel Gormanc89511a2012-10-08 16:32:45 -0700240
David Rientjes35979ef2014-06-04 16:08:27 -0700241 pfn = page_to_pfn(page);
242
243 /* Update where async and sync compaction should restart */
244 if (migrate_scanner) {
David Rientjes35979ef2014-06-04 16:08:27 -0700245 if (pfn > zone->compact_cached_migrate_pfn[0])
246 zone->compact_cached_migrate_pfn[0] = pfn;
David Rientjese0b9dae2014-06-04 16:08:28 -0700247 if (cc->mode != MIGRATE_ASYNC &&
248 pfn > zone->compact_cached_migrate_pfn[1])
David Rientjes35979ef2014-06-04 16:08:27 -0700249 zone->compact_cached_migrate_pfn[1] = pfn;
250 } else {
David Rientjes35979ef2014-06-04 16:08:27 -0700251 if (pfn < zone->compact_cached_free_pfn)
252 zone->compact_cached_free_pfn = pfn;
Mel Gormanc89511a2012-10-08 16:32:45 -0700253 }
Mel Gormanbb13ffe2012-10-08 16:32:41 -0700254}
255#else
256static inline bool isolation_suitable(struct compact_control *cc,
257 struct page *page)
258{
259 return true;
260}
261
Mel Gormanc89511a2012-10-08 16:32:45 -0700262static void update_pageblock_skip(struct compact_control *cc,
263 struct page *page, unsigned long nr_isolated,
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -0700264 bool migrate_scanner)
Mel Gormanbb13ffe2012-10-08 16:32:41 -0700265{
266}
267#endif /* CONFIG_COMPACTION */
268
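isolation_suitable() and update_pageblock_skip() above consult one "skip" bit per pageblock (kept in the zone's pageblock flags), so blocks that recently yielded nothing are not rescanned until __reset_isolation_suitable() clears the hints. A hypothetical stand-alone sketch of such a per-block bitmap; names and layout are illustrative only:

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

/* One bit per pageblock; not the kernel's representation. */
struct skip_map {
	unsigned long nr_blocks;
	unsigned char *bits;
};

static struct skip_map *skip_map_alloc(unsigned long nr_blocks)
{
	struct skip_map *m = malloc(sizeof(*m));

	if (!m)
		return NULL;
	m->nr_blocks = nr_blocks;
	m->bits = calloc((nr_blocks + 7) / 8, 1);
	return m;
}

static void set_skip(struct skip_map *m, unsigned long block)
{
	m->bits[block / 8] |= 1u << (block % 8);
}

static bool get_skip(const struct skip_map *m, unsigned long block)
{
	return m->bits[block / 8] & (1u << (block % 8));
}

/* Analogue of __reset_isolation_suitable(): forget every skip hint. */
static void reset_skip(struct skip_map *m)
{
	memset(m->bits, 0, (m->nr_blocks + 7) / 8);
}

int main(void)
{
	struct skip_map *m = skip_map_alloc(1024);

	if (!m || !m->bits)
		return 1;
	set_skip(m, 3);			/* block 3 yielded no pages: skip it next time */
	if (!get_skip(m, 3))		/* the scanner would now pass over block 3 */
		return 1;
	reset_skip(m);			/* e.g. after a full compaction finishes */
	free(m->bits);
	free(m);
	return 0;
}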
Vlastimil Babka8b44d272014-10-09 15:27:16 -0700269/*
270 * Compaction requires the taking of some coarse locks that are potentially
271 * very heavily contended. For async compaction, back out if the lock cannot
272 * be taken immediately. For sync compaction, spin on the lock if needed.
273 *
274 * Returns true if the lock is held
275 * Returns false if the lock is not held and compaction should abort
276 */
277static bool compact_trylock_irqsave(spinlock_t *lock, unsigned long *flags,
278 struct compact_control *cc)
Mel Gorman2a1402a2012-10-08 16:32:33 -0700279{
Vlastimil Babka8b44d272014-10-09 15:27:16 -0700280 if (cc->mode == MIGRATE_ASYNC) {
281 if (!spin_trylock_irqsave(lock, *flags)) {
282 cc->contended = COMPACT_CONTENDED_LOCK;
283 return false;
284 }
285 } else {
286 spin_lock_irqsave(lock, *flags);
287 }
Vlastimil Babka1f9efde2014-10-09 15:27:14 -0700288
Vlastimil Babka8b44d272014-10-09 15:27:16 -0700289 return true;
Mel Gorman2a1402a2012-10-08 16:32:33 -0700290}
291
Michal Nazarewicz85aa1252012-01-30 13:24:03 +0100292/*
Mel Gormanc67fe372012-08-21 16:16:17 -0700293 * Compaction requires the taking of some coarse locks that are potentially
Vlastimil Babka8b44d272014-10-09 15:27:16 -0700294 * very heavily contended. The lock should be periodically unlocked to avoid
295 * having disabled IRQs for a long time, even when there is nobody waiting on
296 * the lock. It might also be that allowing the IRQs will result in
297 * need_resched() becoming true. If scheduling is needed, async compaction
298 * aborts. Sync compaction schedules.
299 * Either compaction type will also abort if a fatal signal is pending.
300 * In either case if the lock was locked, it is dropped and not regained.
Mel Gormanc67fe372012-08-21 16:16:17 -0700301 *
Vlastimil Babka8b44d272014-10-09 15:27:16 -0700302 * Returns true if compaction should abort due to fatal signal pending, or
303 * async compaction due to need_resched()
304 * Returns false when compaction can continue (sync compaction might have
305 * scheduled)
Mel Gormanc67fe372012-08-21 16:16:17 -0700306 */
Vlastimil Babka8b44d272014-10-09 15:27:16 -0700307static bool compact_unlock_should_abort(spinlock_t *lock,
308 unsigned long flags, bool *locked, struct compact_control *cc)
Mel Gormanc67fe372012-08-21 16:16:17 -0700309{
Vlastimil Babka8b44d272014-10-09 15:27:16 -0700310 if (*locked) {
311 spin_unlock_irqrestore(lock, flags);
312 *locked = false;
313 }
Vlastimil Babka1f9efde2014-10-09 15:27:14 -0700314
Vlastimil Babka8b44d272014-10-09 15:27:16 -0700315 if (fatal_signal_pending(current)) {
316 cc->contended = COMPACT_CONTENDED_SCHED;
317 return true;
318 }
Mel Gormanc67fe372012-08-21 16:16:17 -0700319
Vlastimil Babka8b44d272014-10-09 15:27:16 -0700320 if (need_resched()) {
David Rientjese0b9dae2014-06-04 16:08:28 -0700321 if (cc->mode == MIGRATE_ASYNC) {
Vlastimil Babka8b44d272014-10-09 15:27:16 -0700322 cc->contended = COMPACT_CONTENDED_SCHED;
323 return true;
Mel Gormanc67fe372012-08-21 16:16:17 -0700324 }
Mel Gormanc67fe372012-08-21 16:16:17 -0700325 cond_resched();
Mel Gormanc67fe372012-08-21 16:16:17 -0700326 }
327
Vlastimil Babka8b44d272014-10-09 15:27:16 -0700328 return false;
Mel Gormanc67fe372012-08-21 16:16:17 -0700329}
330
Vlastimil Babkabe976572014-06-04 16:10:41 -0700331/*
332 * Aside from avoiding lock contention, compaction also periodically checks
333 * need_resched() and either schedules in sync compaction or aborts async
Vlastimil Babka8b44d272014-10-09 15:27:16 -0700334 * compaction. This is similar to what compact_unlock_should_abort() does, but
Vlastimil Babkabe976572014-06-04 16:10:41 -0700335 * is used where no lock is concerned.
336 *
337 * Returns false when no scheduling was needed, or sync compaction scheduled.
338 * Returns true when async compaction should abort.
339 */
340static inline bool compact_should_abort(struct compact_control *cc)
341{
342 /* async compaction aborts if contended */
343 if (need_resched()) {
344 if (cc->mode == MIGRATE_ASYNC) {
Vlastimil Babka1f9efde2014-10-09 15:27:14 -0700345 cc->contended = COMPACT_CONTENDED_SCHED;
Vlastimil Babkabe976572014-06-04 16:10:41 -0700346 return true;
347 }
348
349 cond_resched();
350 }
351
352 return false;
353}
354
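The three helpers above encode one policy: async compaction never waits (it backs off on a contended lock or on need_resched()), while sync compaction spins or reschedules and only aborts on a fatal signal. A hedged pthread-based sketch of the trylock-versus-spin half of that policy; the resched/signal half has no direct userspace analogue, and all names here are stand-ins:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

enum mode { ASYNC, SYNC };	/* stand-ins for MIGRATE_ASYNC vs. sync modes */

/*
 * Analogue of compact_trylock_irqsave(): async callers record contention and
 * back off instead of waiting; sync callers block until they own the lock.
 */
static bool compact_lock(pthread_mutex_t *lock, enum mode mode, bool *contended)
{
	if (mode == ASYNC) {
		if (pthread_mutex_trylock(lock) != 0) {
			*contended = true;	/* plays the role of cc->contended */
			return false;
		}
		return true;
	}
	pthread_mutex_lock(lock);
	return true;
}

int main(void)
{
	pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	bool contended = false;

	pthread_mutex_lock(&lock);		/* simulate another scanner holding the lock */
	if (!compact_lock(&lock, ASYNC, &contended))
		printf("async: contended, caller would abort (contended=%d)\n", contended);
	pthread_mutex_unlock(&lock);

	if (compact_lock(&lock, SYNC, &contended))
		printf("sync: waited and took the lock\n");
	pthread_mutex_unlock(&lock);
	return 0;
}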
Mel Gormanc67fe372012-08-21 16:16:17 -0700355/*
Jerome Marchand9e4be472013-11-12 15:07:12 -0800356 * Isolate free pages onto a private freelist. If @strict is true, will abort
357 * returning 0 on any invalid PFNs or non-free pages inside of the pageblock
358 * (even though it may still end up isolating some pages).
Michal Nazarewicz85aa1252012-01-30 13:24:03 +0100359 */
Mel Gormanf40d1e42012-10-08 16:32:36 -0700360static unsigned long isolate_freepages_block(struct compact_control *cc,
Vlastimil Babkae14c7202014-10-09 15:27:20 -0700361 unsigned long *start_pfn,
Michal Nazarewicz85aa1252012-01-30 13:24:03 +0100362 unsigned long end_pfn,
363 struct list_head *freelist,
364 bool strict)
Mel Gorman748446b2010-05-24 14:32:27 -0700365{
Mel Gormanb7aba692011-01-13 15:45:54 -0800366 int nr_scanned = 0, total_isolated = 0;
Mel Gormanbb13ffe2012-10-08 16:32:41 -0700367 struct page *cursor, *valid_page = NULL;
Xiubo Lib8b2d822014-10-09 15:28:21 -0700368 unsigned long flags = 0;
Mel Gormanf40d1e42012-10-08 16:32:36 -0700369 bool locked = false;
Vlastimil Babkae14c7202014-10-09 15:27:20 -0700370 unsigned long blockpfn = *start_pfn;
Mel Gorman748446b2010-05-24 14:32:27 -0700371
Mel Gorman748446b2010-05-24 14:32:27 -0700372 cursor = pfn_to_page(blockpfn);
373
Mel Gormanf40d1e42012-10-08 16:32:36 -0700374 /* Isolate free pages. */
Mel Gorman748446b2010-05-24 14:32:27 -0700375 for (; blockpfn < end_pfn; blockpfn++, cursor++) {
376 int isolated, i;
377 struct page *page = cursor;
378
Vlastimil Babka8b44d272014-10-09 15:27:16 -0700379 /*
380 * Periodically drop the lock (if held) regardless of its
381 * contention, to give chance to IRQs. Abort if fatal signal
382 * pending or async compaction detects need_resched()
383 */
384 if (!(blockpfn % SWAP_CLUSTER_MAX)
385 && compact_unlock_should_abort(&cc->zone->lock, flags,
386 &locked, cc))
387 break;
388
Mel Gormanb7aba692011-01-13 15:45:54 -0800389 nr_scanned++;
Mel Gormanf40d1e42012-10-08 16:32:36 -0700390 if (!pfn_valid_within(blockpfn))
Laura Abbott2af120b2014-03-10 15:49:44 -0700391 goto isolate_fail;
392
Mel Gormanbb13ffe2012-10-08 16:32:41 -0700393 if (!valid_page)
394 valid_page = page;
Vlastimil Babka9fcd6d22015-09-08 15:02:49 -0700395
396 /*
397 * For compound pages such as THP and hugetlbfs, we can save
398 * potentially a lot of iterations if we skip them at once.
399 * The check is racy, but we can consider only valid values
400 * and the only danger is skipping too much.
401 */
402 if (PageCompound(page)) {
403 unsigned int comp_order = compound_order(page);
404
405 if (likely(comp_order < MAX_ORDER)) {
406 blockpfn += (1UL << comp_order) - 1;
407 cursor += (1UL << comp_order) - 1;
408 }
409
410 goto isolate_fail;
411 }
412
Mel Gormanf40d1e42012-10-08 16:32:36 -0700413 if (!PageBuddy(page))
Laura Abbott2af120b2014-03-10 15:49:44 -0700414 goto isolate_fail;
Mel Gormanf40d1e42012-10-08 16:32:36 -0700415
416 /*
Vlastimil Babka69b71892014-10-09 15:27:18 -0700417 * If we already hold the lock, we can skip some rechecking.
418 * Note that if we hold the lock now, checked_pageblock was
419 * already set in some previous iteration (or strict is true),
420 * so it is correct to skip the suitable migration target
421 * recheck as well.
Mel Gormanf40d1e42012-10-08 16:32:36 -0700422 */
Vlastimil Babka69b71892014-10-09 15:27:18 -0700423 if (!locked) {
424 /*
425 * The zone lock must be held to isolate freepages.
426 * Unfortunately this is a very coarse lock and can be
427 * heavily contended if there are parallel allocations
428 * or parallel compactions. For async compaction do not
429 * spin on the lock and we acquire the lock as late as
430 * possible.
431 */
Vlastimil Babka8b44d272014-10-09 15:27:16 -0700432 locked = compact_trylock_irqsave(&cc->zone->lock,
433 &flags, cc);
Vlastimil Babka69b71892014-10-09 15:27:18 -0700434 if (!locked)
435 break;
Mel Gormanf40d1e42012-10-08 16:32:36 -0700436
Vlastimil Babka69b71892014-10-09 15:27:18 -0700437 /* Recheck this is a buddy page under lock */
438 if (!PageBuddy(page))
439 goto isolate_fail;
440 }
Mel Gorman748446b2010-05-24 14:32:27 -0700441
442 /* Found a free page, break it into order-0 pages */
443 isolated = split_free_page(page);
444 total_isolated += isolated;
445 for (i = 0; i < isolated; i++) {
446 list_add(&page->lru, freelist);
447 page++;
448 }
449
450 /* If a page was split, advance to the end of it */
451 if (isolated) {
Joonsoo Kim932ff6b2015-02-12 14:59:53 -0800452 cc->nr_freepages += isolated;
453 if (!strict &&
454 cc->nr_migratepages <= cc->nr_freepages) {
455 blockpfn += isolated;
456 break;
457 }
458
Mel Gorman748446b2010-05-24 14:32:27 -0700459 blockpfn += isolated - 1;
460 cursor += isolated - 1;
Laura Abbott2af120b2014-03-10 15:49:44 -0700461 continue;
Mel Gorman748446b2010-05-24 14:32:27 -0700462 }
Laura Abbott2af120b2014-03-10 15:49:44 -0700463
464isolate_fail:
465 if (strict)
466 break;
467 else
468 continue;
469
Mel Gorman748446b2010-05-24 14:32:27 -0700470 }
471
Vlastimil Babka9fcd6d22015-09-08 15:02:49 -0700472 /*
473 * There is a tiny chance that we have read bogus compound_order(),
474 * so be careful to not go outside of the pageblock.
475 */
476 if (unlikely(blockpfn > end_pfn))
477 blockpfn = end_pfn;
478
Joonsoo Kime34d85f2015-02-11 15:27:04 -0800479 trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn,
480 nr_scanned, total_isolated);
481
Vlastimil Babkae14c7202014-10-09 15:27:20 -0700482 /* Record how far we have got within the block */
483 *start_pfn = blockpfn;
484
Mel Gormanf40d1e42012-10-08 16:32:36 -0700485 /*
486 * If strict isolation is requested by CMA then check that all the
487 * pages requested were isolated. If there were any failures, 0 is
488 * returned and CMA will fail.
489 */
Laura Abbott2af120b2014-03-10 15:49:44 -0700490 if (strict && blockpfn < end_pfn)
Mel Gormanf40d1e42012-10-08 16:32:36 -0700491 total_isolated = 0;
492
493 if (locked)
494 spin_unlock_irqrestore(&cc->zone->lock, flags);
495
Mel Gormanbb13ffe2012-10-08 16:32:41 -0700496 /* Update the pageblock-skip if the whole pageblock was scanned */
497 if (blockpfn == end_pfn)
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -0700498 update_pageblock_skip(cc, valid_page, total_isolated, false);
Mel Gormanbb13ffe2012-10-08 16:32:41 -0700499
Minchan Kim010fc292012-12-20 15:05:06 -0800500 count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
Mel Gorman397487d2012-10-19 12:00:10 +0100501 if (total_isolated)
Minchan Kim010fc292012-12-20 15:05:06 -0800502 count_compact_events(COMPACTISOLATED, total_isolated);
Mel Gorman748446b2010-05-24 14:32:27 -0700503 return total_isolated;
504}
505
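Inside the loop above, a free buddy page of order n is handed to split_free_page(), which yields 1 << n order-0 pages; the scanner then advances its cursor past all of them and stops once enough freepages have been collected for the pending migration. A toy model of just that split-and-advance bookkeeping, with made-up inputs standing in for the buddy metadata:

#include <stdio.h>

/*
 * Sketch: free_order[pfn] < 0 means "not a free buddy page"; otherwise it is
 * the order that split_free_page() would report for the page at that pfn.
 */
static unsigned long scan_block(const int *free_order, unsigned long nr_pfns,
				unsigned long need)
{
	unsigned long pfn, isolated_total = 0;

	for (pfn = 0; pfn < nr_pfns; pfn++) {
		int order = free_order[pfn];
		unsigned long isolated;

		if (order < 0)
			continue;
		isolated = 1UL << order;	/* split into order-0 pages */
		isolated_total += isolated;
		if (isolated_total >= need)	/* enough freepages for the migration */
			break;
		pfn += isolated - 1;		/* jump past the tail of the split page */
	}
	return isolated_total;
}

int main(void)
{
	/* A 16-pfn block: an order-2 buddy at pfn 0 and an order-3 buddy at pfn 8. */
	int block[16] = { 2, -1, -1, -1, -1, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1 };

	printf("isolated %lu pages\n", scan_block(block, 16, 10));	/* 4 + 8 = 12 */
	return 0;
}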
Michal Nazarewicz85aa1252012-01-30 13:24:03 +0100506/**
507 * isolate_freepages_range() - isolate free pages.
508 * @start_pfn: The first PFN to start isolating.
509 * @end_pfn: The one-past-last PFN.
510 *
511 * Non-free pages, invalid PFNs, or zone boundaries within the
512 * [start_pfn, end_pfn) range are considered errors, and cause the function to
513 * undo its actions and return zero.
514 *
515 * Otherwise, function returns one-past-the-last PFN of isolated page
516 * (which may be greater than end_pfn if end fell in the middle of
517 * a free page).
518 */
Michal Nazarewiczff9543f2011-12-29 13:09:50 +0100519unsigned long
Mel Gormanbb13ffe2012-10-08 16:32:41 -0700520isolate_freepages_range(struct compact_control *cc,
521 unsigned long start_pfn, unsigned long end_pfn)
Michal Nazarewicz85aa1252012-01-30 13:24:03 +0100522{
Joonsoo Kime1409c32016-03-15 14:57:48 -0700523 unsigned long isolated, pfn, block_start_pfn, block_end_pfn;
Michal Nazarewicz85aa1252012-01-30 13:24:03 +0100524 LIST_HEAD(freelist);
525
Vlastimil Babka7d49d882014-10-09 15:27:11 -0700526 pfn = start_pfn;
Vlastimil Babka06b66402016-05-19 17:11:48 -0700527 block_start_pfn = pageblock_start_pfn(pfn);
Joonsoo Kime1409c32016-03-15 14:57:48 -0700528 if (block_start_pfn < cc->zone->zone_start_pfn)
529 block_start_pfn = cc->zone->zone_start_pfn;
Vlastimil Babka06b66402016-05-19 17:11:48 -0700530 block_end_pfn = pageblock_end_pfn(pfn);
Michal Nazarewicz85aa1252012-01-30 13:24:03 +0100531
Vlastimil Babka7d49d882014-10-09 15:27:11 -0700532 for (; pfn < end_pfn; pfn += isolated,
Joonsoo Kime1409c32016-03-15 14:57:48 -0700533 block_start_pfn = block_end_pfn,
Vlastimil Babka7d49d882014-10-09 15:27:11 -0700534 block_end_pfn += pageblock_nr_pages) {
Vlastimil Babkae14c7202014-10-09 15:27:20 -0700535 /* Protect pfn from changing by isolate_freepages_block */
536 unsigned long isolate_start_pfn = pfn;
Vlastimil Babka7d49d882014-10-09 15:27:11 -0700537
Michal Nazarewicz85aa1252012-01-30 13:24:03 +0100538 block_end_pfn = min(block_end_pfn, end_pfn);
539
Joonsoo Kim58420012014-11-13 15:19:07 -0800540 /*
541 * pfn could pass the block_end_pfn if isolated freepage
542 * is more than pageblock order. In this case, we adjust
543 * the scanning range to the right one.
544 */
545 if (pfn >= block_end_pfn) {
Vlastimil Babka06b66402016-05-19 17:11:48 -0700546 block_start_pfn = pageblock_start_pfn(pfn);
547 block_end_pfn = pageblock_end_pfn(pfn);
Joonsoo Kim58420012014-11-13 15:19:07 -0800548 block_end_pfn = min(block_end_pfn, end_pfn);
549 }
550
Joonsoo Kime1409c32016-03-15 14:57:48 -0700551 if (!pageblock_pfn_to_page(block_start_pfn,
552 block_end_pfn, cc->zone))
Vlastimil Babka7d49d882014-10-09 15:27:11 -0700553 break;
554
Vlastimil Babkae14c7202014-10-09 15:27:20 -0700555 isolated = isolate_freepages_block(cc, &isolate_start_pfn,
556 block_end_pfn, &freelist, true);
Michal Nazarewicz85aa1252012-01-30 13:24:03 +0100557
558 /*
559 * In strict mode, isolate_freepages_block() returns 0 if
560 * there are any holes in the block (ie. invalid PFNs or
561 * non-free pages).
562 */
563 if (!isolated)
564 break;
565
566 /*
567 * If we managed to isolate pages, it is always (1 << n) *
568 * pageblock_nr_pages for some non-negative n. (Max order
569 * page may span two pageblocks).
570 */
571 }
572
573 /* split_free_page does not map the pages */
574 map_pages(&freelist);
575
576 if (pfn < end_pfn) {
577 /* Loop terminated early, cleanup. */
578 release_freepages(&freelist);
579 return 0;
580 }
581
582 /* We don't use freelists for anything. */
583 return pfn;
584}
585
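isolate_freepages_range() above walks [start_pfn, end_pfn) one pageblock at a time, clamping the last chunk to end_pfn. The skeleton of that iteration (without the isolation itself, and assuming 512-page pageblocks) looks roughly like this sketch:

#include <stdio.h>

#define PB_PAGES 512UL		/* assumed pageblock size in pages */
#define PB_START(pfn)	((pfn) & ~(PB_PAGES - 1))
#define PB_END(pfn)	(PB_START(pfn) + PB_PAGES)

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned long start_pfn = 700, end_pfn = 1800;
	unsigned long pfn = start_pfn;
	unsigned long block_end = PB_END(pfn);

	/* Visit each pageblock overlapping the range, clamping the last one. */
	for (; pfn < end_pfn; pfn = block_end, block_end += PB_PAGES) {
		unsigned long chunk_end = min_ul(block_end, end_pfn);

		printf("scan [%lu, %lu)\n", pfn, chunk_end);
	}
	/* Prints: [700, 1024), [1024, 1536), [1536, 1800). */
	return 0;
}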
Mel Gorman748446b2010-05-24 14:32:27 -0700586/* Update the number of anon and file isolated pages in the zone */
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -0700587static void acct_isolated(struct zone *zone, struct compact_control *cc)
Mel Gorman748446b2010-05-24 14:32:27 -0700588{
589 struct page *page;
Minchan Kimb9e84ac2011-10-31 17:06:44 -0700590 unsigned int count[2] = { 0, };
Mel Gorman748446b2010-05-24 14:32:27 -0700591
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -0700592 if (list_empty(&cc->migratepages))
593 return;
594
Minchan Kimb9e84ac2011-10-31 17:06:44 -0700595 list_for_each_entry(page, &cc->migratepages, lru)
596 count[!!page_is_file_cache(page)]++;
Mel Gorman748446b2010-05-24 14:32:27 -0700597
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -0700598 mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
599 mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
Mel Gorman748446b2010-05-24 14:32:27 -0700600}
601
602/* Similar to reclaim, but different enough that they don't share logic */
603static bool too_many_isolated(struct zone *zone)
604{
Minchan Kimbc693042010-09-09 16:38:00 -0700605 unsigned long active, inactive, isolated;
Mel Gorman748446b2010-05-24 14:32:27 -0700606
607 inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
608 zone_page_state(zone, NR_INACTIVE_ANON);
Minchan Kimbc693042010-09-09 16:38:00 -0700609 active = zone_page_state(zone, NR_ACTIVE_FILE) +
610 zone_page_state(zone, NR_ACTIVE_ANON);
Mel Gorman748446b2010-05-24 14:32:27 -0700611 isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
612 zone_page_state(zone, NR_ISOLATED_ANON);
613
Minchan Kimbc693042010-09-09 16:38:00 -0700614 return isolated > (inactive + active) / 2;
Mel Gorman748446b2010-05-24 14:32:27 -0700615}
616
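too_many_isolated() above throttles the migrate scanner once more pages sit on the isolated counters than half of all LRU pages (active plus inactive). A quick worked check of that ratio on plain numbers:

#include <stdbool.h>
#include <stdio.h>

/* Same predicate as too_many_isolated(): isolated > (inactive + active) / 2. */
static bool too_many_isolated(unsigned long active, unsigned long inactive,
			      unsigned long isolated)
{
	return isolated > (inactive + active) / 2;
}

int main(void)
{
	/* 4000 LRU pages in total: throttle once more than 2000 are isolated. */
	printf("%d\n", too_many_isolated(3000, 1000, 1500));	/* 0: keep isolating */
	printf("%d\n", too_many_isolated(3000, 1000, 2500));	/* 1: wait (or abort if async) */
	return 0;
}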
Michal Nazarewicz2fe86e02012-01-30 13:16:26 +0100617/**
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -0700618 * isolate_migratepages_block() - isolate all migrate-able pages within
619 * a single pageblock
Michal Nazarewicz2fe86e02012-01-30 13:16:26 +0100620 * @cc: Compaction control structure.
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -0700621 * @low_pfn: The first PFN to isolate
622 * @end_pfn: The one-past-the-last PFN to isolate, within same pageblock
623 * @isolate_mode: Isolation mode to be used.
Michal Nazarewicz2fe86e02012-01-30 13:16:26 +0100624 *
625 * Isolate all pages that can be migrated from the range specified by
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -0700626 * [low_pfn, end_pfn). The range is expected to be within same pageblock.
627 * Returns zero if there is a fatal signal pending, otherwise PFN of the
628 * first page that was not scanned (which may be less than, equal to or greater
629 * than end_pfn).
Michal Nazarewicz2fe86e02012-01-30 13:16:26 +0100630 *
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -0700631 * The pages are isolated on cc->migratepages list (not required to be empty),
632 * and cc->nr_migratepages is updated accordingly. The cc->migrate_pfn field
633 * is neither read nor updated.
Mel Gorman748446b2010-05-24 14:32:27 -0700634 */
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -0700635static unsigned long
636isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
637 unsigned long end_pfn, isolate_mode_t isolate_mode)
Mel Gorman748446b2010-05-24 14:32:27 -0700638{
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -0700639 struct zone *zone = cc->zone;
Mel Gormanb7aba692011-01-13 15:45:54 -0800640 unsigned long nr_scanned = 0, nr_isolated = 0;
Mel Gorman748446b2010-05-24 14:32:27 -0700641 struct list_head *migratelist = &cc->migratepages;
Hugh Dickinsfa9add62012-05-29 15:07:09 -0700642 struct lruvec *lruvec;
Xiubo Lib8b2d822014-10-09 15:28:21 -0700643 unsigned long flags = 0;
Mel Gorman2a1402a2012-10-08 16:32:33 -0700644 bool locked = false;
Mel Gormanbb13ffe2012-10-08 16:32:41 -0700645 struct page *page = NULL, *valid_page = NULL;
Joonsoo Kime34d85f2015-02-11 15:27:04 -0800646 unsigned long start_pfn = low_pfn;
Mel Gorman748446b2010-05-24 14:32:27 -0700647
Mel Gorman748446b2010-05-24 14:32:27 -0700648 /*
649 * Ensure that there are not too many pages isolated from the LRU
650 * list by either parallel reclaimers or compaction. If there are,
651 * delay for some time until fewer pages are isolated
652 */
653 while (unlikely(too_many_isolated(zone))) {
Mel Gormanf9e35b32011-06-15 15:08:52 -0700654 /* async migration should just abort */
David Rientjese0b9dae2014-06-04 16:08:28 -0700655 if (cc->mode == MIGRATE_ASYNC)
Michal Nazarewicz2fe86e02012-01-30 13:16:26 +0100656 return 0;
Mel Gormanf9e35b32011-06-15 15:08:52 -0700657
Mel Gorman748446b2010-05-24 14:32:27 -0700658 congestion_wait(BLK_RW_ASYNC, HZ/10);
659
660 if (fatal_signal_pending(current))
Michal Nazarewicz2fe86e02012-01-30 13:16:26 +0100661 return 0;
Mel Gorman748446b2010-05-24 14:32:27 -0700662 }
663
Vlastimil Babkabe976572014-06-04 16:10:41 -0700664 if (compact_should_abort(cc))
665 return 0;
David Rientjesaeef4b82014-06-04 16:08:31 -0700666
Mel Gorman748446b2010-05-24 14:32:27 -0700667 /* Time to isolate some pages for migration */
Mel Gorman748446b2010-05-24 14:32:27 -0700668 for (; low_pfn < end_pfn; low_pfn++) {
Vlastimil Babka29c0dde2015-09-08 15:02:46 -0700669 bool is_lru;
670
Vlastimil Babka8b44d272014-10-09 15:27:16 -0700671 /*
672 * Periodically drop the lock (if held) regardless of its
673 * contention, to give chance to IRQs. Abort async compaction
674 * if contended.
675 */
676 if (!(low_pfn % SWAP_CLUSTER_MAX)
677 && compact_unlock_should_abort(&zone->lru_lock, flags,
678 &locked, cc))
679 break;
Mel Gormanc67fe372012-08-21 16:16:17 -0700680
Mel Gorman748446b2010-05-24 14:32:27 -0700681 if (!pfn_valid_within(low_pfn))
682 continue;
Mel Gormanb7aba692011-01-13 15:45:54 -0800683 nr_scanned++;
Mel Gorman748446b2010-05-24 14:32:27 -0700684
Mel Gorman748446b2010-05-24 14:32:27 -0700685 page = pfn_to_page(low_pfn);
Mel Gormandc908602012-02-08 17:13:38 -0800686
Mel Gormanbb13ffe2012-10-08 16:32:41 -0700687 if (!valid_page)
688 valid_page = page;
689
Mel Gorman6c144662014-01-23 15:53:38 -0800690 /*
Vlastimil Babka99c0fd52014-10-09 15:27:23 -0700691 * Skip if free. We read page order here without zone lock
692 * which is generally unsafe, but the race window is small and
693 * the worst thing that can happen is that we skip some
694 * potential isolation targets.
Mel Gorman6c144662014-01-23 15:53:38 -0800695 */
Vlastimil Babka99c0fd52014-10-09 15:27:23 -0700696 if (PageBuddy(page)) {
697 unsigned long freepage_order = page_order_unsafe(page);
698
699 /*
700 * Without lock, we cannot be sure that what we got is
701 * a valid page order. Consider only values in the
702 * valid order range to prevent low_pfn overflow.
703 */
704 if (freepage_order > 0 && freepage_order < MAX_ORDER)
705 low_pfn += (1UL << freepage_order) - 1;
Mel Gorman748446b2010-05-24 14:32:27 -0700706 continue;
Vlastimil Babka99c0fd52014-10-09 15:27:23 -0700707 }
Mel Gorman748446b2010-05-24 14:32:27 -0700708
Mel Gorman9927af742011-01-13 15:45:59 -0800709 /*
Rafael Aquinibf6bddf2012-12-11 16:02:42 -0800710 * Check may be lockless but that's ok as we recheck later.
711 * It's possible to migrate LRU pages and balloon pages
712 * Skip any other type of page
713 */
Vlastimil Babka29c0dde2015-09-08 15:02:46 -0700714 is_lru = PageLRU(page);
715 if (!is_lru) {
Rafael Aquinibf6bddf2012-12-11 16:02:42 -0800716 if (unlikely(balloon_page_movable(page))) {
Konstantin Khlebnikovd6d86c02014-10-09 15:29:27 -0700717 if (balloon_page_isolate(page)) {
Rafael Aquinibf6bddf2012-12-11 16:02:42 -0800718 /* Successfully isolated */
Joonsoo Kimb6c75012014-04-07 15:37:07 -0700719 goto isolate_success;
Rafael Aquinibf6bddf2012-12-11 16:02:42 -0800720 }
721 }
Rafael Aquinibf6bddf2012-12-11 16:02:42 -0800722 }
Andrea Arcangelibc835012011-01-13 15:47:08 -0800723
724 /*
Vlastimil Babka29c0dde2015-09-08 15:02:46 -0700725 * Regardless of being on LRU, compound pages such as THP and
726 * hugetlbfs are not to be compacted. We can potentially save
727 * a lot of iterations if we skip them at once. The check is
728 * racy, but we can consider only valid values and the only
729 * danger is skipping too much.
Andrea Arcangelibc835012011-01-13 15:47:08 -0800730 */
Vlastimil Babka29c0dde2015-09-08 15:02:46 -0700731 if (PageCompound(page)) {
732 unsigned int comp_order = compound_order(page);
733
734 if (likely(comp_order < MAX_ORDER))
735 low_pfn += (1UL << comp_order) - 1;
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -0700736
Mel Gorman2a1402a2012-10-08 16:32:33 -0700737 continue;
738 }
739
Vlastimil Babka29c0dde2015-09-08 15:02:46 -0700740 if (!is_lru)
741 continue;
742
David Rientjes119d6d52014-04-03 14:48:00 -0700743 /*
744 * Migration will fail if an anonymous page is pinned in memory,
745 * so avoid taking lru_lock and isolating it unnecessarily in an
746 * admittedly racy check.
747 */
748 if (!page_mapping(page) &&
749 page_count(page) > page_mapcount(page))
750 continue;
751
Vlastimil Babka69b71892014-10-09 15:27:18 -0700752 /* If we already hold the lock, we can skip some rechecking */
753 if (!locked) {
Vlastimil Babka8b44d272014-10-09 15:27:16 -0700754 locked = compact_trylock_irqsave(&zone->lru_lock,
755 &flags, cc);
Vlastimil Babka69b71892014-10-09 15:27:18 -0700756 if (!locked)
757 break;
Mel Gorman2a1402a2012-10-08 16:32:33 -0700758
Vlastimil Babka29c0dde2015-09-08 15:02:46 -0700759 /* Recheck PageLRU and PageCompound under lock */
Vlastimil Babka69b71892014-10-09 15:27:18 -0700760 if (!PageLRU(page))
761 continue;
Vlastimil Babka29c0dde2015-09-08 15:02:46 -0700762
763 /*
764 * Page became compound since the non-locked check,
765 * and it's on LRU. It can only be a THP so the order
766 * is safe to read and it's 0 for tail pages.
767 */
768 if (unlikely(PageCompound(page))) {
769 low_pfn += (1UL << compound_order(page)) - 1;
Vlastimil Babka69b71892014-10-09 15:27:18 -0700770 continue;
771 }
Andrea Arcangelibc835012011-01-13 15:47:08 -0800772 }
773
Hugh Dickinsfa9add62012-05-29 15:07:09 -0700774 lruvec = mem_cgroup_page_lruvec(page, zone);
775
Mel Gorman748446b2010-05-24 14:32:27 -0700776 /* Try isolate the page */
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -0700777 if (__isolate_lru_page(page, isolate_mode) != 0)
Mel Gorman748446b2010-05-24 14:32:27 -0700778 continue;
779
Vlastimil Babka29c0dde2015-09-08 15:02:46 -0700780 VM_BUG_ON_PAGE(PageCompound(page), page);
Andrea Arcangelibc835012011-01-13 15:47:08 -0800781
Mel Gorman748446b2010-05-24 14:32:27 -0700782 /* Successfully isolated */
Hugh Dickinsfa9add62012-05-29 15:07:09 -0700783 del_page_from_lru_list(page, lruvec, page_lru(page));
Joonsoo Kimb6c75012014-04-07 15:37:07 -0700784
785isolate_success:
Mel Gorman748446b2010-05-24 14:32:27 -0700786 list_add(&page->lru, migratelist);
Mel Gorman748446b2010-05-24 14:32:27 -0700787 cc->nr_migratepages++;
Mel Gormanb7aba692011-01-13 15:45:54 -0800788 nr_isolated++;
Mel Gorman748446b2010-05-24 14:32:27 -0700789
790 /* Avoid isolating too much */
Hillf Danton31b83842012-01-10 15:07:59 -0800791 if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
792 ++low_pfn;
Mel Gorman748446b2010-05-24 14:32:27 -0700793 break;
Hillf Danton31b83842012-01-10 15:07:59 -0800794 }
Mel Gorman748446b2010-05-24 14:32:27 -0700795 }
796
Vlastimil Babka99c0fd52014-10-09 15:27:23 -0700797 /*
798 * The PageBuddy() check could have potentially brought us outside
799 * the range to be scanned.
800 */
801 if (unlikely(low_pfn > end_pfn))
802 low_pfn = end_pfn;
803
Mel Gormanc67fe372012-08-21 16:16:17 -0700804 if (locked)
805 spin_unlock_irqrestore(&zone->lru_lock, flags);
Mel Gorman748446b2010-05-24 14:32:27 -0700806
Vlastimil Babka50b5b092014-01-21 15:51:10 -0800807 /*
808 * Update the pageblock-skip information and cached scanner pfn,
809 * if the whole pageblock was scanned without isolating any page.
Vlastimil Babka50b5b092014-01-21 15:51:10 -0800810 */
David Rientjes35979ef2014-06-04 16:08:27 -0700811 if (low_pfn == end_pfn)
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -0700812 update_pageblock_skip(cc, valid_page, nr_isolated, true);
Mel Gormanbb13ffe2012-10-08 16:32:41 -0700813
Joonsoo Kime34d85f2015-02-11 15:27:04 -0800814 trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
815 nr_scanned, nr_isolated);
Mel Gormanb7aba692011-01-13 15:45:54 -0800816
Minchan Kim010fc292012-12-20 15:05:06 -0800817 count_compact_events(COMPACTMIGRATE_SCANNED, nr_scanned);
Mel Gorman397487d2012-10-19 12:00:10 +0100818 if (nr_isolated)
Minchan Kim010fc292012-12-20 15:05:06 -0800819 count_compact_events(COMPACTISOLATED, nr_isolated);
Mel Gorman397487d2012-10-19 12:00:10 +0100820
Michal Nazarewicz2fe86e02012-01-30 13:16:26 +0100821 return low_pfn;
822}
823
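One detail of isolate_migratepages_block() worth pulling out is the cheap pre-filter it applies before taking the LRU lock: an anonymous page (no page_mapping()) whose reference count exceeds its map count is probably pinned (for example by get_user_pages()) and would fail migration anyway, so it is skipped. A hypothetical sketch of that check on plain integers; the struct and field names are made up for illustration:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the bits of struct page the check looks at. */
struct page_state {
	bool has_mapping;	/* page_mapping(page) != NULL */
	int refcount;		/* page_count(page) */
	int mapcount;		/* page_mapcount(page) */
};

/*
 * Mirrors the racy pre-check above: anonymous pages with references beyond
 * their mappings are likely pinned, so isolating them would be wasted work.
 */
static bool likely_pinned_anon(const struct page_state *p)
{
	return !p->has_mapping && p->refcount > p->mapcount;
}

int main(void)
{
	struct page_state mapped_once = { false, 1, 1 };   /* ordinary anon page   */
	struct page_state gup_pinned  = { false, 2, 1 };   /* e.g. under direct I/O */

	printf("%d %d\n", likely_pinned_anon(&mapped_once),	/* 0: try to migrate */
			  likely_pinned_anon(&gup_pinned));	/* 1: skip it        */
	return 0;
}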
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -0700824/**
825 * isolate_migratepages_range() - isolate migrate-able pages in a PFN range
826 * @cc: Compaction control structure.
827 * @start_pfn: The first PFN to start isolating.
828 * @end_pfn: The one-past-last PFN.
829 *
830 * Returns zero if isolation fails fatally due to e.g. pending signal.
831 * Otherwise, function returns one-past-the-last PFN of isolated page
832 * (which may be greater than end_pfn if end fell in the middle of a THP page).
833 */
834unsigned long
835isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
836 unsigned long end_pfn)
837{
Joonsoo Kime1409c32016-03-15 14:57:48 -0700838 unsigned long pfn, block_start_pfn, block_end_pfn;
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -0700839
840 /* Scan block by block. First and last block may be incomplete */
841 pfn = start_pfn;
Vlastimil Babka06b66402016-05-19 17:11:48 -0700842 block_start_pfn = pageblock_start_pfn(pfn);
Joonsoo Kime1409c32016-03-15 14:57:48 -0700843 if (block_start_pfn < cc->zone->zone_start_pfn)
844 block_start_pfn = cc->zone->zone_start_pfn;
Vlastimil Babka06b66402016-05-19 17:11:48 -0700845 block_end_pfn = pageblock_end_pfn(pfn);
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -0700846
847 for (; pfn < end_pfn; pfn = block_end_pfn,
Joonsoo Kime1409c32016-03-15 14:57:48 -0700848 block_start_pfn = block_end_pfn,
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -0700849 block_end_pfn += pageblock_nr_pages) {
850
851 block_end_pfn = min(block_end_pfn, end_pfn);
852
Joonsoo Kime1409c32016-03-15 14:57:48 -0700853 if (!pageblock_pfn_to_page(block_start_pfn,
854 block_end_pfn, cc->zone))
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -0700855 continue;
856
857 pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
858 ISOLATE_UNEVICTABLE);
859
Hugh Dickins14af4a52016-05-05 16:22:15 -0700860 if (!pfn)
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -0700861 break;
Joonsoo Kim6ea41c02014-10-29 14:50:20 -0700862
863 if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
864 break;
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -0700865 }
866 acct_isolated(cc->zone, cc);
867
868 return pfn;
869}
870
Michal Nazarewiczff9543f2011-12-29 13:09:50 +0100871#endif /* CONFIG_COMPACTION || CONFIG_CMA */
872#ifdef CONFIG_COMPACTION
Andrew Morton018e9a42015-04-15 16:15:20 -0700873
874/* Returns true if the page is within a block suitable for migration to */
875static bool suitable_migration_target(struct page *page)
876{
877 /* If the page is a large free page, then disallow migration */
878 if (PageBuddy(page)) {
879 /*
880 * We are checking page_order without zone->lock taken. But
881 * the only small danger is that we skip a potentially suitable
882 * pageblock, so it's not worth to check order for valid range.
883 */
884 if (page_order_unsafe(page) >= pageblock_order)
885 return false;
886 }
887
888 /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
889 if (migrate_async_suitable(get_pageblock_migratetype(page)))
890 return true;
891
892 /* Otherwise skip the block */
893 return false;
894}
895
Michal Nazarewiczff9543f2011-12-29 13:09:50 +0100896/*
Vlastimil Babkaf2849aa2015-09-08 15:02:36 -0700897 * Test whether the free scanner has reached the same or lower pageblock than
898 * the migration scanner, and compaction should thus terminate.
899 */
900static inline bool compact_scanners_met(struct compact_control *cc)
901{
902 return (cc->free_pfn >> pageblock_order)
903 <= (cc->migrate_pfn >> pageblock_order);
904}
905
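compact_scanners_met() above ends a compaction run once the free scanner (walking down from the zone end) has reached the same pageblock as, or a lower pageblock than, the migrate scanner walking up. A quick worked check of that comparison, assuming a pageblock order of 9:

#include <stdbool.h>
#include <stdio.h>

#define PAGEBLOCK_ORDER 9	/* assumed: 512-page pageblocks */

/* Same comparison as compact_scanners_met(), on bare pfns. */
static bool scanners_met(unsigned long free_pfn, unsigned long migrate_pfn)
{
	return (free_pfn >> PAGEBLOCK_ORDER) <= (migrate_pfn >> PAGEBLOCK_ORDER);
}

int main(void)
{
	/* Different pageblocks (block 4 vs block 1): keep compacting. */
	printf("%d\n", scanners_met(2048, 700));	/* 0 */
	/* Same pageblock (both in block 1): the run is complete. */
	printf("%d\n", scanners_met(700, 600));		/* 1 */
	return 0;
}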
906/*
Michal Nazarewiczff9543f2011-12-29 13:09:50 +0100907 * Based on information in the current compact_control, find blocks
908 * suitable for isolating free pages from and then isolate them.
909 */
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -0700910static void isolate_freepages(struct compact_control *cc)
Michal Nazarewiczff9543f2011-12-29 13:09:50 +0100911{
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -0700912 struct zone *zone = cc->zone;
Michal Nazarewiczff9543f2011-12-29 13:09:50 +0100913 struct page *page;
Vlastimil Babkac96b9e52014-06-04 16:07:26 -0700914 unsigned long block_start_pfn; /* start of current pageblock */
Vlastimil Babkae14c7202014-10-09 15:27:20 -0700915 unsigned long isolate_start_pfn; /* exact pfn we start at */
Vlastimil Babkac96b9e52014-06-04 16:07:26 -0700916 unsigned long block_end_pfn; /* end of current pageblock */
917 unsigned long low_pfn; /* lowest pfn scanner is able to scan */
Michal Nazarewiczff9543f2011-12-29 13:09:50 +0100918 struct list_head *freelist = &cc->freepages;
919
920 /*
921 * Initialise the free scanner. The starting point is where we last
Vlastimil Babka49e068f2014-05-06 12:50:03 -0700922 * successfully isolated from, zone-cached value, or the end of the
Vlastimil Babkae14c7202014-10-09 15:27:20 -0700923 * zone when isolating for the first time. For looping we also need
924 * this pfn aligned down to the pageblock boundary, because we do
Vlastimil Babkac96b9e52014-06-04 16:07:26 -0700925 * block_start_pfn -= pageblock_nr_pages in the for loop.
926 * For ending point, take care when isolating in last pageblock of a
927 * a zone which ends in the middle of a pageblock.
Vlastimil Babka49e068f2014-05-06 12:50:03 -0700928 * The low boundary is the end of the pageblock the migration scanner
929 * is using.
Michal Nazarewiczff9543f2011-12-29 13:09:50 +0100930 */
Vlastimil Babkae14c7202014-10-09 15:27:20 -0700931 isolate_start_pfn = cc->free_pfn;
Vlastimil Babka06b66402016-05-19 17:11:48 -0700932 block_start_pfn = pageblock_start_pfn(cc->free_pfn);
Vlastimil Babkac96b9e52014-06-04 16:07:26 -0700933 block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
934 zone_end_pfn(zone));
Vlastimil Babka06b66402016-05-19 17:11:48 -0700935 low_pfn = pageblock_end_pfn(cc->migrate_pfn);
Michal Nazarewiczff9543f2011-12-29 13:09:50 +0100936
937 /*
Michal Nazarewiczff9543f2011-12-29 13:09:50 +0100938 * Isolate free pages until enough are available to migrate the
939 * pages on cc->migratepages. We stop searching if the migrate
940 * and free page scanners meet or enough free pages are isolated.
941 */
Vlastimil Babkaf5f61a32015-09-08 15:02:39 -0700942 for (; block_start_pfn >= low_pfn;
Vlastimil Babkac96b9e52014-06-04 16:07:26 -0700943 block_end_pfn = block_start_pfn,
Vlastimil Babkae14c7202014-10-09 15:27:20 -0700944 block_start_pfn -= pageblock_nr_pages,
945 isolate_start_pfn = block_start_pfn) {
Michal Nazarewiczff9543f2011-12-29 13:09:50 +0100946
David Rientjesf6ea3ad2013-09-30 13:45:03 -0700947 /*
948 * This can iterate a massively long zone without finding any
949 * suitable migration targets, so periodically check if we need
Vlastimil Babkabe976572014-06-04 16:10:41 -0700950 * to schedule, or even abort async compaction.
David Rientjesf6ea3ad2013-09-30 13:45:03 -0700951 */
Vlastimil Babkabe976572014-06-04 16:10:41 -0700952 if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
953 && compact_should_abort(cc))
954 break;
David Rientjesf6ea3ad2013-09-30 13:45:03 -0700955
Vlastimil Babka7d49d882014-10-09 15:27:11 -0700956 page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
957 zone);
958 if (!page)
Michal Nazarewiczff9543f2011-12-29 13:09:50 +0100959 continue;
960
961 /* Check the block is suitable for migration */
Linus Torvalds68e3e922012-06-03 20:05:57 -0700962 if (!suitable_migration_target(page))
Michal Nazarewiczff9543f2011-12-29 13:09:50 +0100963 continue;
Linus Torvalds68e3e922012-06-03 20:05:57 -0700964
Mel Gormanbb13ffe2012-10-08 16:32:41 -0700965 /* If isolation recently failed, do not retry */
966 if (!isolation_suitable(cc, page))
967 continue;
968
Vlastimil Babkae14c7202014-10-09 15:27:20 -0700969 /* Found a block suitable for isolating free pages from. */
Joonsoo Kim932ff6b2015-02-12 14:59:53 -0800970 isolate_freepages_block(cc, &isolate_start_pfn,
Vlastimil Babkac96b9e52014-06-04 16:07:26 -0700971 block_end_pfn, freelist, false);
Michal Nazarewiczff9543f2011-12-29 13:09:50 +0100972
973 /*
Vlastimil Babkaf5f61a32015-09-08 15:02:39 -0700974 * If we isolated enough freepages, or aborted due to async
975 * compaction being contended, terminate the loop.
Vlastimil Babkae14c7202014-10-09 15:27:20 -0700976 * Remember where the free scanner should restart next time,
977 * which is where isolate_freepages_block() left off.
978 * But if it scanned the whole pageblock, isolate_start_pfn
979 * now points at block_end_pfn, which is the start of the next
980 * pageblock.
981 * In that case we will however want to restart at the start
982 * of the previous pageblock.
983 */
Vlastimil Babkaf5f61a32015-09-08 15:02:39 -0700984 if ((cc->nr_freepages >= cc->nr_migratepages)
985 || cc->contended) {
986 if (isolate_start_pfn >= block_end_pfn)
987 isolate_start_pfn =
988 block_start_pfn - pageblock_nr_pages;
Vlastimil Babkabe976572014-06-04 16:10:41 -0700989 break;
Vlastimil Babkaf5f61a32015-09-08 15:02:39 -0700990 } else {
991 /*
992 * isolate_freepages_block() should not terminate
993 * prematurely unless contended, or isolated enough
994 */
995 VM_BUG_ON(isolate_start_pfn < block_end_pfn);
996 }
Michal Nazarewicz2fe86e02012-01-30 13:16:26 +0100997 }
998
Michal Nazarewiczff9543f2011-12-29 13:09:50 +0100999 /* split_free_page does not map the pages */
1000 map_pages(freelist);
Michal Nazarewicz2fe86e02012-01-30 13:16:26 +01001001
Vlastimil Babka7ed695e2014-01-21 15:51:09 -08001002 /*
Vlastimil Babkaf5f61a32015-09-08 15:02:39 -07001003 * Record where the free scanner will restart next time. Either we
1004 * broke from the loop and set isolate_start_pfn based on the last
1005 * call to isolate_freepages_block(), or we met the migration scanner
1006 * and the loop terminated due to isolate_start_pfn < low_pfn
Vlastimil Babka7ed695e2014-01-21 15:51:09 -08001007 */
Vlastimil Babkaf5f61a32015-09-08 15:02:39 -07001008 cc->free_pfn = isolate_start_pfn;
Mel Gorman748446b2010-05-24 14:32:27 -07001009}
1010
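The long comment inside the loop above explains where the free scanner resumes on the next call: if it stopped partway through a block because enough freepages were found, it restarts inside that block; if it scanned the block to its end, it restarts one pageblock lower, since this scanner moves downward. A small sketch of just that restart decision, with an assumed 512-page pageblock:

#include <stdio.h>

#define PB_PAGES 512UL		/* assumed pageblock size */

/*
 * Models the bookkeeping above: isolate_start_pfn is where
 * isolate_freepages_block() left off within [block_start_pfn, block_end_pfn).
 */
static unsigned long next_free_pfn(unsigned long isolate_start_pfn,
				   unsigned long block_start_pfn,
				   unsigned long block_end_pfn)
{
	if (isolate_start_pfn >= block_end_pfn)		/* whole block scanned */
		return block_start_pfn - PB_PAGES;	/* resume one block lower */
	return isolate_start_pfn;			/* resume mid-block */
}

int main(void)
{
	/* Stopped mid-block at pfn 1300: restart there next time. */
	printf("%lu\n", next_free_pfn(1300, 1024, 1536));	/* 1300 */
	/* Scanned the whole block [1024, 1536): restart at the block below. */
	printf("%lu\n", next_free_pfn(1536, 1024, 1536));	/* 512 */
	return 0;
}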
1011/*
1012 * This is a migrate-callback that "allocates" freepages by taking pages
1013 * from the isolated freelists in the block we are migrating to.
1014 */
1015static struct page *compaction_alloc(struct page *migratepage,
1016 unsigned long data,
1017 int **result)
1018{
1019 struct compact_control *cc = (struct compact_control *)data;
1020 struct page *freepage;
1021
Vlastimil Babkabe976572014-06-04 16:10:41 -07001022 /*
1023 * Isolate free pages if necessary, and if we are not aborting due to
1024 * contention.
1025 */
Mel Gorman748446b2010-05-24 14:32:27 -07001026 if (list_empty(&cc->freepages)) {
Vlastimil Babkabe976572014-06-04 16:10:41 -07001027 if (!cc->contended)
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07001028 isolate_freepages(cc);
Mel Gorman748446b2010-05-24 14:32:27 -07001029
1030 if (list_empty(&cc->freepages))
1031 return NULL;
1032 }
1033
1034 freepage = list_entry(cc->freepages.next, struct page, lru);
1035 list_del(&freepage->lru);
1036 cc->nr_freepages--;
1037
1038 return freepage;
1039}
1040
1041/*
David Rientjesd53aea32014-06-04 16:08:26 -07001042 * This is a migrate-callback that "frees" freepages back to the isolated
1043 * freelist. All pages on the freelist are from the same zone, so there is no
1044 * special handling needed for NUMA.
1045 */
1046static void compaction_free(struct page *page, unsigned long data)
1047{
1048 struct compact_control *cc = (struct compact_control *)data;
1049
1050 list_add(&page->lru, &cc->freepages);
1051 cc->nr_freepages++;
1052}
1053
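compaction_alloc() and compaction_free() above are the get-new-page and put-new-page callbacks handed to migrate_pages(): the "allocator" just pops a previously isolated free page off cc->freepages, and the "free" side pushes an unused target page back. A minimal userspace analogue of that pairing with a singly linked list; the types and names are stand-ins, not kernel structures:

#include <stdio.h>

/* Hypothetical stand-ins for struct page entries on cc->freepages. */
struct fake_page {
	struct fake_page *next;
	unsigned long pfn;
};

struct pool {
	struct fake_page *head;
	unsigned long nr_free;
};

/* Analogue of compaction_alloc(): hand out an already-isolated free page. */
static struct fake_page *pool_alloc(struct pool *p)
{
	struct fake_page *page = p->head;

	if (!page)
		return NULL;		/* the caller would then isolate more freepages */
	p->head = page->next;
	p->nr_free--;
	return page;
}

/* Analogue of compaction_free(): an unused target page goes back on the list. */
static void pool_free(struct pool *p, struct fake_page *page)
{
	page->next = p->head;
	p->head = page;
	p->nr_free++;
}

int main(void)
{
	struct pool p = { NULL, 0 };
	struct fake_page a = { NULL, 512 }, b = { NULL, 513 };
	struct fake_page *target;

	pool_free(&p, &a);
	pool_free(&p, &b);

	target = pool_alloc(&p);		/* migration target for one page */
	printf("migrating into pfn %lu, %lu left\n", target->pfn, p.nr_free);
	pool_free(&p, target);			/* e.g. the migration of that page failed */
	return 0;
}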
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001054/* possible outcome of isolate_migratepages */
1055typedef enum {
1056 ISOLATE_ABORT, /* Abort compaction now */
1057 ISOLATE_NONE, /* No pages isolated, continue scanning */
1058 ISOLATE_SUCCESS, /* Pages isolated, migrate */
1059} isolate_migrate_t;
1060
1061/*
Eric B Munson5bbe3542015-04-15 16:13:20 -07001062 * Allow userspace to control policy on scanning the unevictable LRU for
1063 * compactable pages.
1064 */
1065int sysctl_compact_unevictable_allowed __read_mostly = 1;
1066
1067/*
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07001068 * Isolate all pages that can be migrated from the first suitable block,
1069 * starting at the block pointed to by the migrate scanner pfn within
1070 * compact_control.
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001071 */
1072static isolate_migrate_t isolate_migratepages(struct zone *zone,
1073 struct compact_control *cc)
1074{
Joonsoo Kime1409c32016-03-15 14:57:48 -07001075 unsigned long block_start_pfn;
1076 unsigned long block_end_pfn;
1077 unsigned long low_pfn;
Joonsoo Kim1a167182015-09-08 15:03:59 -07001078 unsigned long isolate_start_pfn;
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07001079 struct page *page;
1080 const isolate_mode_t isolate_mode =
Eric B Munson5bbe3542015-04-15 16:13:20 -07001081 (sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) |
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07001082 (cc->mode == MIGRATE_ASYNC ? ISOLATE_ASYNC_MIGRATE : 0);
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001083
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07001084 /*
1085 * Start at where we last stopped, or beginning of the zone as
1086 * initialized by compact_zone()
1087 */
1088 low_pfn = cc->migrate_pfn;
Vlastimil Babka06b66402016-05-19 17:11:48 -07001089 block_start_pfn = pageblock_start_pfn(low_pfn);
Joonsoo Kime1409c32016-03-15 14:57:48 -07001090 if (block_start_pfn < zone->zone_start_pfn)
1091 block_start_pfn = zone->zone_start_pfn;
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001092
1093 /* Only scan within a pageblock boundary */
Vlastimil Babka06b66402016-05-19 17:11:48 -07001094 block_end_pfn = pageblock_end_pfn(low_pfn);
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001095
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07001096 /*
1097 * Iterate over whole pageblocks until we find the first suitable.
1098 * Do not cross the free scanner.
1099 */
Joonsoo Kime1409c32016-03-15 14:57:48 -07001100 for (; block_end_pfn <= cc->free_pfn;
1101 low_pfn = block_end_pfn,
1102 block_start_pfn = block_end_pfn,
1103 block_end_pfn += pageblock_nr_pages) {
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07001104
1105 /*
1106 * This can potentially iterate a massively long zone with
1107 * many pageblocks unsuitable, so periodically check if we
1108 * need to schedule, or even abort async compaction.
1109 */
1110 if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
1111 && compact_should_abort(cc))
1112 break;
1113
Joonsoo Kime1409c32016-03-15 14:57:48 -07001114 page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
1115 zone);
Vlastimil Babka7d49d882014-10-09 15:27:11 -07001116 if (!page)
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07001117 continue;
1118
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07001119 /* If isolation recently failed, do not retry */
1120 if (!isolation_suitable(cc, page))
1121 continue;
1122
1123 /*
1124 * For async compaction, also only scan in MOVABLE blocks.
1125 * Async compaction is optimistic to see if the minimum amount
1126 * of work satisfies the allocation.
1127 */
1128 if (cc->mode == MIGRATE_ASYNC &&
1129 !migrate_async_suitable(get_pageblock_migratetype(page)))
1130 continue;
1131
1132 /* Perform the isolation */
Joonsoo Kim1a167182015-09-08 15:03:59 -07001133 isolate_start_pfn = low_pfn;
Joonsoo Kime1409c32016-03-15 14:57:48 -07001134 low_pfn = isolate_migratepages_block(cc, low_pfn,
1135 block_end_pfn, isolate_mode);
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07001136
Hugh Dickinsff599092015-02-12 15:00:28 -08001137 if (!low_pfn || cc->contended) {
1138 acct_isolated(zone, cc);
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07001139 return ISOLATE_ABORT;
Hugh Dickinsff599092015-02-12 15:00:28 -08001140 }
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07001141
1142 /*
Joonsoo Kim1a167182015-09-08 15:03:59 -07001143 * Record where we could have freed pages by migration and not
1144 * yet flushed them to buddy allocator.
1145 * - this is the lowest page that could have been isolated and
1146 * then freed by migration.
1147 */
1148 if (cc->nr_migratepages && !cc->last_migrated_pfn)
1149 cc->last_migrated_pfn = isolate_start_pfn;
1150
1151 /*
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07001152 * Either we isolated something and proceed with migration. Or
1153 * we failed and compact_zone should decide if we should
1154 * continue or not.
1155 */
1156 break;
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001157 }
1158
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07001159 acct_isolated(zone, cc);
Vlastimil Babkaf2849aa2015-09-08 15:02:36 -07001160 /* Record where migration scanner will be restarted. */
1161 cc->migrate_pfn = low_pfn;
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001162
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07001163 return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE;
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001164}
1165
Yaowei Bai21c527a2015-11-05 18:47:20 -08001166/*
1167 * order == -1 is expected when compacting via
1168 * /proc/sys/vm/compact_memory
1169 */
1170static inline bool is_via_compact_memory(int order)
1171{
1172 return order == -1;
1173}
1174
Joonsoo Kim837d0262015-02-11 15:27:06 -08001175static int __compact_finished(struct zone *zone, struct compact_control *cc,
David Rientjes6d7ce552014-10-09 15:27:27 -07001176 const int migratetype)
Mel Gorman748446b2010-05-24 14:32:27 -07001177{
Mel Gorman8fb74b92013-01-11 14:32:16 -08001178 unsigned int order;
Andrea Arcangeli5a03b052011-01-13 15:47:11 -08001179 unsigned long watermark;
Mel Gorman56de7262010-05-24 14:32:30 -07001180
Vlastimil Babkabe976572014-06-04 16:10:41 -07001181 if (cc->contended || fatal_signal_pending(current))
Vlastimil Babka2d1e1042015-11-05 18:48:02 -08001182 return COMPACT_CONTENDED;
Mel Gorman748446b2010-05-24 14:32:27 -07001183
Mel Gorman753341a2012-10-08 16:32:40 -07001184 /* Compaction run completes if the migrate and free scanner meet */
Vlastimil Babkaf2849aa2015-09-08 15:02:36 -07001185 if (compact_scanners_met(cc)) {
Vlastimil Babka55b7c4c2014-01-21 15:51:11 -08001186 /* Let the next compaction start anew. */
Vlastimil Babka02333642015-09-08 15:02:42 -07001187 reset_cached_positions(zone);
Vlastimil Babka55b7c4c2014-01-21 15:51:11 -08001188
Mel Gorman62997022012-10-08 16:32:47 -07001189 /*
1190 * Mark that the PG_migrate_skip information should be cleared
Vlastimil Babkaaccf6242016-03-17 14:18:15 -07001191 * by kswapd when it goes to sleep. kcompactd does not set the
Mel Gorman62997022012-10-08 16:32:47 -07001192 * flag itself as the decision to be clear should be directly
1193 * based on an allocation request.
1194 */
Vlastimil Babkaaccf6242016-03-17 14:18:15 -07001195 if (cc->direct_compaction)
Mel Gorman62997022012-10-08 16:32:47 -07001196 zone->compact_blockskip_flush = true;
1197
Mel Gorman748446b2010-05-24 14:32:27 -07001198 return COMPACT_COMPLETE;
Mel Gormanbb13ffe2012-10-08 16:32:41 -07001199 }
Mel Gorman748446b2010-05-24 14:32:27 -07001200
Yaowei Bai21c527a2015-11-05 18:47:20 -08001201 if (is_via_compact_memory(cc->order))
Mel Gorman56de7262010-05-24 14:32:30 -07001202 return COMPACT_CONTINUE;
1203
Michal Hocko3957c772011-06-15 15:08:25 -07001204 /* Compaction run is not finished if the watermark is not met */
1205 watermark = low_wmark_pages(zone);
Michal Hocko3957c772011-06-15 15:08:25 -07001206
Vlastimil Babkaebff3982014-12-10 15:43:22 -08001207 if (!zone_watermark_ok(zone, cc->order, watermark, cc->classzone_idx,
1208 cc->alloc_flags))
Michal Hocko3957c772011-06-15 15:08:25 -07001209 return COMPACT_CONTINUE;
1210
Mel Gorman56de7262010-05-24 14:32:30 -07001211 /* Direct compactor: Is a suitable page free? */
Mel Gorman8fb74b92013-01-11 14:32:16 -08001212 for (order = cc->order; order < MAX_ORDER; order++) {
1213 struct free_area *area = &zone->free_area[order];
Joonsoo Kim2149cda2015-04-14 15:45:21 -07001214 bool can_steal;
Mel Gorman56de7262010-05-24 14:32:30 -07001215
Mel Gorman8fb74b92013-01-11 14:32:16 -08001216 /* Job done if page is free of the right migratetype */
David Rientjes6d7ce552014-10-09 15:27:27 -07001217 if (!list_empty(&area->free_list[migratetype]))
Mel Gorman8fb74b92013-01-11 14:32:16 -08001218 return COMPACT_PARTIAL;
1219
Joonsoo Kim2149cda2015-04-14 15:45:21 -07001220#ifdef CONFIG_CMA
1221 /* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */
1222 if (migratetype == MIGRATE_MOVABLE &&
1223 !list_empty(&area->free_list[MIGRATE_CMA]))
1224 return COMPACT_PARTIAL;
1225#endif
1226 /*
1227 * Job done if allocation would steal freepages from
1228 * other migratetype buddy lists.
1229 */
1230 if (find_suitable_fallback(area, order, migratetype,
1231 true, &can_steal) != -1)
Mel Gorman8fb74b92013-01-11 14:32:16 -08001232 return COMPACT_PARTIAL;
Mel Gorman56de7262010-05-24 14:32:30 -07001233 }
1234
Joonsoo Kim837d0262015-02-11 15:27:06 -08001235 return COMPACT_NO_SUITABLE_PAGE;
1236}
1237
1238static int compact_finished(struct zone *zone, struct compact_control *cc,
1239 const int migratetype)
1240{
1241 int ret;
1242
1243 ret = __compact_finished(zone, cc, migratetype);
1244 trace_mm_compaction_finished(zone, cc->order, ret);
1245 if (ret == COMPACT_NO_SUITABLE_PAGE)
1246 ret = COMPACT_CONTINUE;
1247
1248 return ret;
Mel Gorman748446b2010-05-24 14:32:27 -07001249}
1250
Mel Gorman3e7d3442011-01-13 15:45:56 -08001251/*
1252 * compaction_suitable: Is this suitable to run compaction on this zone now?
1253 * Returns
1254 * COMPACT_SKIPPED - If there are too few free pages for compaction
1255 * COMPACT_PARTIAL - If the allocation would succeed without compaction
1256 * COMPACT_CONTINUE - If compaction should run now
1257 */
Joonsoo Kim837d0262015-02-11 15:27:06 -08001258static unsigned long __compaction_suitable(struct zone *zone, int order,
Vlastimil Babkaebff3982014-12-10 15:43:22 -08001259 int alloc_flags, int classzone_idx)
Mel Gorman3e7d3442011-01-13 15:45:56 -08001260{
1261 int fragindex;
1262 unsigned long watermark;
1263
Yaowei Bai21c527a2015-11-05 18:47:20 -08001264 if (is_via_compact_memory(order))
Michal Hocko3957c772011-06-15 15:08:25 -07001265 return COMPACT_CONTINUE;
1266
Vlastimil Babkaebff3982014-12-10 15:43:22 -08001267 watermark = low_wmark_pages(zone);
1268 /*
1269 * If watermarks for high-order allocation are already met, there
1270 * should be no need for compaction at all.
1271 */
1272 if (zone_watermark_ok(zone, order, watermark, classzone_idx,
1273 alloc_flags))
1274 return COMPACT_PARTIAL;
1275
Michal Hocko3957c772011-06-15 15:08:25 -07001276 /*
Mel Gorman3e7d3442011-01-13 15:45:56 -08001277 * Watermarks for order-0 must be met for compaction. Note the 2UL.
1278 * This is because during migration, copies of pages need to be
 1279	 * allocated and, for a short time, the footprint is higher.
1280 */
Vlastimil Babkaebff3982014-12-10 15:43:22 -08001281 watermark += (2UL << order);
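	/*
	 * For example, an order-9 request adds 2UL << 9 = 1024 extra pages of
	 * headroom (roughly 4MB, assuming 4KB base pages) on top of the low
	 * watermark.
	 */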
1282 if (!zone_watermark_ok(zone, 0, watermark, classzone_idx, alloc_flags))
Mel Gorman3e7d3442011-01-13 15:45:56 -08001283 return COMPACT_SKIPPED;
1284
1285 /*
1286 * fragmentation index determines if allocation failures are due to
1287 * low memory or external fragmentation
1288 *
Vlastimil Babkaebff3982014-12-10 15:43:22 -08001289	 * an index of -1000 would imply allocations might succeed depending
 1290	 * on watermarks, but we already failed the high-order watermark check;
Mel Gorman3e7d3442011-01-13 15:45:56 -08001291	 * an index towards 0 implies failure is due to lack of memory;
 1292	 * an index towards 1000 implies failure is due to fragmentation.
1293 *
1294 * Only compact if a failure would be due to fragmentation.
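	 * The cut-off is sysctl_extfrag_threshold, which defaults to 500.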
1295 */
1296 fragindex = fragmentation_index(zone, order);
1297 if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
Joonsoo Kim837d0262015-02-11 15:27:06 -08001298 return COMPACT_NOT_SUITABLE_ZONE;
Mel Gorman3e7d3442011-01-13 15:45:56 -08001299
Mel Gorman3e7d3442011-01-13 15:45:56 -08001300 return COMPACT_CONTINUE;
1301}
1302
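/*
 * Wrapper around __compaction_suitable() that traces the raw result.
 * COMPACT_NOT_SUITABLE_ZONE exists only for more detailed tracepoint output
 * and is reported to callers as COMPACT_SKIPPED.
 */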
Joonsoo Kim837d0262015-02-11 15:27:06 -08001303unsigned long compaction_suitable(struct zone *zone, int order,
1304 int alloc_flags, int classzone_idx)
1305{
1306 unsigned long ret;
1307
1308 ret = __compaction_suitable(zone, order, alloc_flags, classzone_idx);
1309 trace_mm_compaction_suitable(zone, order, ret);
1310 if (ret == COMPACT_NOT_SUITABLE_ZONE)
1311 ret = COMPACT_SKIPPED;
1312
1313 return ret;
1314}
1315
Mel Gorman748446b2010-05-24 14:32:27 -07001316static int compact_zone(struct zone *zone, struct compact_control *cc)
1317{
1318 int ret;
Mel Gormanc89511a2012-10-08 16:32:45 -07001319 unsigned long start_pfn = zone->zone_start_pfn;
Cody P Schafer108bcc92013-02-22 16:35:23 -08001320 unsigned long end_pfn = zone_end_pfn(zone);
David Rientjes6d7ce552014-10-09 15:27:27 -07001321 const int migratetype = gfpflags_to_migratetype(cc->gfp_mask);
David Rientjese0b9dae2014-06-04 16:08:28 -07001322 const bool sync = cc->mode != MIGRATE_ASYNC;
Mel Gorman748446b2010-05-24 14:32:27 -07001323
Vlastimil Babkaebff3982014-12-10 15:43:22 -08001324 ret = compaction_suitable(zone, cc->order, cc->alloc_flags,
1325 cc->classzone_idx);
Mel Gorman3e7d3442011-01-13 15:45:56 -08001326 switch (ret) {
1327 case COMPACT_PARTIAL:
1328 case COMPACT_SKIPPED:
1329 /* Compaction is likely to fail */
1330 return ret;
1331 case COMPACT_CONTINUE:
1332 /* Fall through to compaction */
1333 ;
1334 }
1335
Mel Gormanc89511a2012-10-08 16:32:45 -07001336 /*
Vlastimil Babkad3132e42014-01-21 15:51:08 -08001337 * Clear pageblock skip if there were failures recently and compaction
Vlastimil Babkaaccf6242016-03-17 14:18:15 -07001338 * is about to be retried after being deferred.
Vlastimil Babkad3132e42014-01-21 15:51:08 -08001339 */
Vlastimil Babkaaccf6242016-03-17 14:18:15 -07001340 if (compaction_restarting(zone, cc->order))
Vlastimil Babkad3132e42014-01-21 15:51:08 -08001341 __reset_isolation_suitable(zone);
1342
1343 /*
Mel Gormanc89511a2012-10-08 16:32:45 -07001344	 * Setup to move all movable pages to the end of the zone. Use cached
 1345	 * information on where the scanners should start, but check that it
 1346	 * is initialised by ensuring the values are within zone boundaries.
1347 */
David Rientjese0b9dae2014-06-04 16:08:28 -07001348 cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync];
Mel Gormanc89511a2012-10-08 16:32:45 -07001349 cc->free_pfn = zone->compact_cached_free_pfn;
Joonsoo Kim623446e2016-03-15 14:57:45 -07001350 if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) {
Vlastimil Babka06b66402016-05-19 17:11:48 -07001351 cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
Mel Gormanc89511a2012-10-08 16:32:45 -07001352 zone->compact_cached_free_pfn = cc->free_pfn;
1353 }
Joonsoo Kim623446e2016-03-15 14:57:45 -07001354 if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) {
Mel Gormanc89511a2012-10-08 16:32:45 -07001355 cc->migrate_pfn = start_pfn;
David Rientjes35979ef2014-06-04 16:08:27 -07001356 zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
1357 zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
Mel Gormanc89511a2012-10-08 16:32:45 -07001358 }
Joonsoo Kim1a167182015-09-08 15:03:59 -07001359 cc->last_migrated_pfn = 0;
Mel Gorman748446b2010-05-24 14:32:27 -07001360
Joonsoo Kim16c4a092015-02-11 15:27:01 -08001361 trace_mm_compaction_begin(start_pfn, cc->migrate_pfn,
1362 cc->free_pfn, end_pfn, sync);
Mel Gorman0eb927c2014-01-21 15:51:05 -08001363
Mel Gorman748446b2010-05-24 14:32:27 -07001364 migrate_prep_local();
1365
David Rientjes6d7ce552014-10-09 15:27:27 -07001366 while ((ret = compact_finished(zone, cc, migratetype)) ==
1367 COMPACT_CONTINUE) {
Minchan Kim9d502c12011-03-22 16:30:39 -07001368 int err;
Mel Gorman748446b2010-05-24 14:32:27 -07001369
Mel Gormanf9e35b32011-06-15 15:08:52 -07001370 switch (isolate_migratepages(zone, cc)) {
1371 case ISOLATE_ABORT:
Vlastimil Babka2d1e1042015-11-05 18:48:02 -08001372 ret = COMPACT_CONTENDED;
Rafael Aquini5733c7d2012-12-11 16:02:47 -08001373 putback_movable_pages(&cc->migratepages);
Shaohua Lie64c5232012-10-08 16:32:27 -07001374 cc->nr_migratepages = 0;
Mel Gormanf9e35b32011-06-15 15:08:52 -07001375 goto out;
1376 case ISOLATE_NONE:
Vlastimil Babkafdaf7f52014-12-10 15:43:34 -08001377 /*
1378 * We haven't isolated and migrated anything, but
1379 * there might still be unflushed migrations from
1380 * previous cc->order aligned block.
1381 */
1382 goto check_drain;
Mel Gormanf9e35b32011-06-15 15:08:52 -07001383 case ISOLATE_SUCCESS:
1384 ;
1385 }
Mel Gorman748446b2010-05-24 14:32:27 -07001386
David Rientjesd53aea32014-06-04 16:08:26 -07001387 err = migrate_pages(&cc->migratepages, compaction_alloc,
David Rientjese0b9dae2014-06-04 16:08:28 -07001388 compaction_free, (unsigned long)cc, cc->mode,
Mel Gorman7b2a2d42012-10-19 14:07:31 +01001389 MR_COMPACTION);
Mel Gorman748446b2010-05-24 14:32:27 -07001390
Vlastimil Babkaf8c93012014-06-04 16:08:32 -07001391 trace_mm_compaction_migratepages(cc->nr_migratepages, err,
1392 &cc->migratepages);
Mel Gorman748446b2010-05-24 14:32:27 -07001393
Vlastimil Babkaf8c93012014-06-04 16:08:32 -07001394 /* All pages were either migrated or will be released */
1395 cc->nr_migratepages = 0;
Minchan Kim9d502c12011-03-22 16:30:39 -07001396 if (err) {
Rafael Aquini5733c7d2012-12-11 16:02:47 -08001397 putback_movable_pages(&cc->migratepages);
Vlastimil Babka7ed695e2014-01-21 15:51:09 -08001398 /*
1399 * migrate_pages() may return -ENOMEM when scanners meet
1400 * and we want compact_finished() to detect it
1401 */
Vlastimil Babkaf2849aa2015-09-08 15:02:36 -07001402 if (err == -ENOMEM && !compact_scanners_met(cc)) {
Vlastimil Babka2d1e1042015-11-05 18:48:02 -08001403 ret = COMPACT_CONTENDED;
David Rientjes4bf2bba2012-07-11 14:02:13 -07001404 goto out;
1405 }
Mel Gorman748446b2010-05-24 14:32:27 -07001406 }
Vlastimil Babkafdaf7f52014-12-10 15:43:34 -08001407
Vlastimil Babkafdaf7f52014-12-10 15:43:34 -08001408check_drain:
1409 /*
1410 * Has the migration scanner moved away from the previous
1411 * cc->order aligned block where we migrated from? If yes,
1412 * flush the pages that were freed, so that they can merge and
1413 * compact_finished() can detect immediately if allocation
1414 * would succeed.
1415 */
Joonsoo Kim1a167182015-09-08 15:03:59 -07001416 if (cc->order > 0 && cc->last_migrated_pfn) {
Vlastimil Babkafdaf7f52014-12-10 15:43:34 -08001417 int cpu;
1418 unsigned long current_block_start =
Vlastimil Babka06b66402016-05-19 17:11:48 -07001419 block_start_pfn(cc->migrate_pfn, cc->order);
Vlastimil Babkafdaf7f52014-12-10 15:43:34 -08001420
Joonsoo Kim1a167182015-09-08 15:03:59 -07001421 if (cc->last_migrated_pfn < current_block_start) {
Vlastimil Babkafdaf7f52014-12-10 15:43:34 -08001422 cpu = get_cpu();
1423 lru_add_drain_cpu(cpu);
1424 drain_local_pages(zone);
1425 put_cpu();
1426 /* No more flushing until we migrate again */
Joonsoo Kim1a167182015-09-08 15:03:59 -07001427 cc->last_migrated_pfn = 0;
Vlastimil Babkafdaf7f52014-12-10 15:43:34 -08001428 }
1429 }
1430
Mel Gorman748446b2010-05-24 14:32:27 -07001431 }
1432
Mel Gormanf9e35b32011-06-15 15:08:52 -07001433out:
Vlastimil Babka6bace092014-12-10 15:43:31 -08001434 /*
1435 * Release free pages and update where the free scanner should restart,
1436 * so we don't leave any returned pages behind in the next attempt.
1437 */
1438 if (cc->nr_freepages > 0) {
1439 unsigned long free_pfn = release_freepages(&cc->freepages);
1440
1441 cc->nr_freepages = 0;
1442 VM_BUG_ON(free_pfn == 0);
1443 /* The cached pfn is always the first in a pageblock */
Vlastimil Babka06b66402016-05-19 17:11:48 -07001444 free_pfn = pageblock_start_pfn(free_pfn);
Vlastimil Babka6bace092014-12-10 15:43:31 -08001445 /*
1446 * Only go back, not forward. The cached pfn might have been
1447 * already reset to zone end in compact_finished()
1448 */
1449 if (free_pfn > zone->compact_cached_free_pfn)
1450 zone->compact_cached_free_pfn = free_pfn;
1451 }
Mel Gorman748446b2010-05-24 14:32:27 -07001452
Joonsoo Kim16c4a092015-02-11 15:27:01 -08001453 trace_mm_compaction_end(start_pfn, cc->migrate_pfn,
1454 cc->free_pfn, end_pfn, sync, ret);
Mel Gorman0eb927c2014-01-21 15:51:05 -08001455
Vlastimil Babka2d1e1042015-11-05 18:48:02 -08001456 if (ret == COMPACT_CONTENDED)
1457 ret = COMPACT_PARTIAL;
1458
Mel Gorman748446b2010-05-24 14:32:27 -07001459 return ret;
1460}
Mel Gorman76ab0f52010-05-24 14:32:28 -07001461
David Rientjese0b9dae2014-06-04 16:08:28 -07001462static unsigned long compact_zone_order(struct zone *zone, int order,
Vlastimil Babkaebff3982014-12-10 15:43:22 -08001463 gfp_t gfp_mask, enum migrate_mode mode, int *contended,
1464 int alloc_flags, int classzone_idx)
Mel Gorman56de7262010-05-24 14:32:30 -07001465{
Shaohua Lie64c5232012-10-08 16:32:27 -07001466 unsigned long ret;
Mel Gorman56de7262010-05-24 14:32:30 -07001467 struct compact_control cc = {
1468 .nr_freepages = 0,
1469 .nr_migratepages = 0,
1470 .order = order,
David Rientjes6d7ce552014-10-09 15:27:27 -07001471 .gfp_mask = gfp_mask,
Mel Gorman56de7262010-05-24 14:32:30 -07001472 .zone = zone,
David Rientjese0b9dae2014-06-04 16:08:28 -07001473 .mode = mode,
Vlastimil Babkaebff3982014-12-10 15:43:22 -08001474 .alloc_flags = alloc_flags,
1475 .classzone_idx = classzone_idx,
Vlastimil Babkaaccf6242016-03-17 14:18:15 -07001476 .direct_compaction = true,
Mel Gorman56de7262010-05-24 14:32:30 -07001477 };
1478 INIT_LIST_HEAD(&cc.freepages);
1479 INIT_LIST_HEAD(&cc.migratepages);
1480
Shaohua Lie64c5232012-10-08 16:32:27 -07001481 ret = compact_zone(zone, &cc);
1482
1483 VM_BUG_ON(!list_empty(&cc.freepages));
1484 VM_BUG_ON(!list_empty(&cc.migratepages));
1485
1486 *contended = cc.contended;
1487 return ret;
Mel Gorman56de7262010-05-24 14:32:30 -07001488}
1489
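/*
 * Fragmentation index threshold used by __compaction_suitable(); the index
 * runs from 0 (failure due to lack of memory) towards 1000 (failure due to
 * fragmentation). Tunable via /proc/sys/vm/extfrag_threshold.
 */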
Mel Gorman5e771902010-05-24 14:32:31 -07001490int sysctl_extfrag_threshold = 500;
1491
Mel Gorman56de7262010-05-24 14:32:30 -07001492/**
1493 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
Mel Gorman56de7262010-05-24 14:32:30 -07001494 * @gfp_mask: The GFP mask of the current allocation
Vlastimil Babka1a6d53a2015-02-11 15:25:44 -08001495 * @order: The order of the current allocation
1496 * @alloc_flags: The allocation flags of the current allocation
1497 * @ac: The context of current allocation
David Rientjese0b9dae2014-06-04 16:08:28 -07001498 * @mode: The migration mode for async, sync light, or sync migration
Vlastimil Babka1f9efde2014-10-09 15:27:14 -07001499 * @contended: Return value that determines if compaction was aborted due to
1500 * need_resched() or lock contention
Mel Gorman56de7262010-05-24 14:32:30 -07001501 *
1502 * This is the main entry point for direct page compaction.
1503 */
Vlastimil Babka1a6d53a2015-02-11 15:25:44 -08001504unsigned long try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
1505 int alloc_flags, const struct alloc_context *ac,
1506 enum migrate_mode mode, int *contended)
Mel Gorman56de7262010-05-24 14:32:30 -07001507{
Mel Gorman56de7262010-05-24 14:32:30 -07001508 int may_enter_fs = gfp_mask & __GFP_FS;
1509 int may_perform_io = gfp_mask & __GFP_IO;
Mel Gorman56de7262010-05-24 14:32:30 -07001510 struct zoneref *z;
1511 struct zone *zone;
Vlastimil Babka53853e22014-10-09 15:27:02 -07001512 int rc = COMPACT_DEFERRED;
Vlastimil Babka1f9efde2014-10-09 15:27:14 -07001513 int all_zones_contended = COMPACT_CONTENDED_LOCK; /* init for &= op */
1514
1515 *contended = COMPACT_CONTENDED_NONE;
Mel Gorman56de7262010-05-24 14:32:30 -07001516
Mel Gorman4ffb6332012-10-08 16:29:09 -07001517 /* Check if the GFP flags allow compaction */
Andrea Arcangelic5a73c32011-01-13 15:47:11 -08001518 if (!order || !may_enter_fs || !may_perform_io)
Vlastimil Babka53853e22014-10-09 15:27:02 -07001519 return COMPACT_SKIPPED;
Mel Gorman56de7262010-05-24 14:32:30 -07001520
Joonsoo Kim837d0262015-02-11 15:27:06 -08001521 trace_mm_compaction_try_to_compact_pages(order, gfp_mask, mode);
1522
Mel Gorman56de7262010-05-24 14:32:30 -07001523 /* Compact each zone in the list */
Vlastimil Babka1a6d53a2015-02-11 15:25:44 -08001524 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
1525 ac->nodemask) {
Mel Gorman56de7262010-05-24 14:32:30 -07001526 int status;
Vlastimil Babka1f9efde2014-10-09 15:27:14 -07001527 int zone_contended;
Mel Gorman56de7262010-05-24 14:32:30 -07001528
Vlastimil Babka53853e22014-10-09 15:27:02 -07001529 if (compaction_deferred(zone, order))
1530 continue;
1531
David Rientjese0b9dae2014-06-04 16:08:28 -07001532 status = compact_zone_order(zone, order, gfp_mask, mode,
Vlastimil Babka1a6d53a2015-02-11 15:25:44 -08001533 &zone_contended, alloc_flags,
1534 ac->classzone_idx);
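		/* COMPACT_* results are ordered; keep the most advanced one seen */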
Mel Gorman56de7262010-05-24 14:32:30 -07001535 rc = max(status, rc);
Vlastimil Babka1f9efde2014-10-09 15:27:14 -07001536 /*
1537 * It takes at least one zone that wasn't lock contended
1538 * to clear all_zones_contended.
1539 */
1540 all_zones_contended &= zone_contended;
Mel Gorman56de7262010-05-24 14:32:30 -07001541
Mel Gorman3e7d3442011-01-13 15:45:56 -08001542 /* If a normal allocation would succeed, stop compacting */
Vlastimil Babkaebff3982014-12-10 15:43:22 -08001543 if (zone_watermark_ok(zone, order, low_wmark_pages(zone),
Vlastimil Babka1a6d53a2015-02-11 15:25:44 -08001544 ac->classzone_idx, alloc_flags)) {
Vlastimil Babka53853e22014-10-09 15:27:02 -07001545 /*
1546 * We think the allocation will succeed in this zone,
1547 * but it is not certain, hence the false. The caller
1548 * will repeat this with true if allocation indeed
1549 * succeeds in this zone.
1550 */
1551 compaction_defer_reset(zone, order, false);
Vlastimil Babka1f9efde2014-10-09 15:27:14 -07001552 /*
1553 * It is possible that async compaction aborted due to
1554 * need_resched() and the watermarks were ok thanks to
1555 * somebody else freeing memory. The allocation can
1556 * however still fail so we better signal the
1557 * need_resched() contention anyway (this will not
1558 * prevent the allocation attempt).
1559 */
1560 if (zone_contended == COMPACT_CONTENDED_SCHED)
1561 *contended = COMPACT_CONTENDED_SCHED;
1562
1563 goto break_loop;
1564 }
1565
Vlastimil Babkaf8669792014-12-10 15:43:28 -08001566 if (mode != MIGRATE_ASYNC && status == COMPACT_COMPLETE) {
Vlastimil Babka53853e22014-10-09 15:27:02 -07001567 /*
1568 * We think that allocation won't succeed in this zone
1569 * so we defer compaction there. If it ends up
1570 * succeeding after all, it will be reset.
1571 */
1572 defer_compaction(zone, order);
1573 }
Vlastimil Babka1f9efde2014-10-09 15:27:14 -07001574
1575 /*
1576 * We might have stopped compacting due to need_resched() in
1577 * async compaction, or due to a fatal signal detected. In that
1578 * case do not try further zones and signal need_resched()
1579 * contention.
1580 */
1581 if ((zone_contended == COMPACT_CONTENDED_SCHED)
1582 || fatal_signal_pending(current)) {
1583 *contended = COMPACT_CONTENDED_SCHED;
1584 goto break_loop;
1585 }
1586
1587 continue;
1588break_loop:
1589 /*
1590 * We might not have tried all the zones, so be conservative
1591 * and assume they are not all lock contended.
1592 */
1593 all_zones_contended = 0;
1594 break;
Mel Gorman56de7262010-05-24 14:32:30 -07001595 }
1596
Vlastimil Babka1f9efde2014-10-09 15:27:14 -07001597 /*
1598 * If at least one zone wasn't deferred or skipped, we report if all
1599 * zones that were tried were lock contended.
1600 */
1601 if (rc > COMPACT_SKIPPED && all_zones_contended)
1602 *contended = COMPACT_CONTENDED_LOCK;
1603
Mel Gorman56de7262010-05-24 14:32:30 -07001604 return rc;
1605}
1606
1607
Mel Gorman76ab0f52010-05-24 14:32:28 -07001608/* Compact all zones within a node */
Andrew Morton7103f162013-02-22 16:32:33 -08001609static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
Mel Gorman76ab0f52010-05-24 14:32:28 -07001610{
1611 int zoneid;
Mel Gorman76ab0f52010-05-24 14:32:28 -07001612 struct zone *zone;
1613
Mel Gorman76ab0f52010-05-24 14:32:28 -07001614 for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
Mel Gorman76ab0f52010-05-24 14:32:28 -07001615
1616 zone = &pgdat->node_zones[zoneid];
1617 if (!populated_zone(zone))
1618 continue;
1619
Rik van Riel7be62de2012-03-21 16:33:52 -07001620 cc->nr_freepages = 0;
1621 cc->nr_migratepages = 0;
1622 cc->zone = zone;
1623 INIT_LIST_HEAD(&cc->freepages);
1624 INIT_LIST_HEAD(&cc->migratepages);
Mel Gorman76ab0f52010-05-24 14:32:28 -07001625
Gioh Kim195b0c62015-04-15 16:13:33 -07001626 /*
1627 * When called via /proc/sys/vm/compact_memory
1628 * this makes sure we compact the whole zone regardless of
1629 * cached scanner positions.
1630 */
Yaowei Bai21c527a2015-11-05 18:47:20 -08001631 if (is_via_compact_memory(cc->order))
Gioh Kim195b0c62015-04-15 16:13:33 -07001632 __reset_isolation_suitable(zone);
1633
Yaowei Bai21c527a2015-11-05 18:47:20 -08001634 if (is_via_compact_memory(cc->order) ||
1635 !compaction_deferred(zone, cc->order))
Rik van Riel7be62de2012-03-21 16:33:52 -07001636 compact_zone(zone, cc);
Mel Gorman76ab0f52010-05-24 14:32:28 -07001637
Rik van Riel7be62de2012-03-21 16:33:52 -07001638 VM_BUG_ON(!list_empty(&cc->freepages));
1639 VM_BUG_ON(!list_empty(&cc->migratepages));
Joonsoo Kim75469342016-01-14 15:20:48 -08001640
1641 if (is_via_compact_memory(cc->order))
1642 continue;
1643
1644 if (zone_watermark_ok(zone, cc->order,
1645 low_wmark_pages(zone), 0, 0))
1646 compaction_defer_reset(zone, cc->order, false);
Mel Gorman76ab0f52010-05-24 14:32:28 -07001647 }
Mel Gorman76ab0f52010-05-24 14:32:28 -07001648}
1649
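/* Try to asynchronously compact all zones of a node for the given order */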
Andrew Morton7103f162013-02-22 16:32:33 -08001650void compact_pgdat(pg_data_t *pgdat, int order)
Rik van Riel7be62de2012-03-21 16:33:52 -07001651{
1652 struct compact_control cc = {
1653 .order = order,
David Rientjese0b9dae2014-06-04 16:08:28 -07001654 .mode = MIGRATE_ASYNC,
Rik van Riel7be62de2012-03-21 16:33:52 -07001655 };
1656
Mel Gorman3a7200a2013-09-11 14:22:19 -07001657 if (!order)
1658 return;
1659
Andrew Morton7103f162013-02-22 16:32:33 -08001660 __compact_pgdat(pgdat, &cc);
Rik van Riel7be62de2012-03-21 16:33:52 -07001661}
1662
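/*
 * Fully compact all zones of a node with sync migration, ignoring pageblock
 * skip hints. Used by the compact_memory sysctl handler and the per-node
 * compact sysfs trigger.
 */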
Andrew Morton7103f162013-02-22 16:32:33 -08001663static void compact_node(int nid)
Rik van Riel7be62de2012-03-21 16:33:52 -07001664{
Rik van Riel7be62de2012-03-21 16:33:52 -07001665 struct compact_control cc = {
1666 .order = -1,
David Rientjese0b9dae2014-06-04 16:08:28 -07001667 .mode = MIGRATE_SYNC,
David Rientjes91ca9182014-04-03 14:47:23 -07001668 .ignore_skip_hint = true,
Rik van Riel7be62de2012-03-21 16:33:52 -07001669 };
1670
Andrew Morton7103f162013-02-22 16:32:33 -08001671 __compact_pgdat(NODE_DATA(nid), &cc);
Rik van Riel7be62de2012-03-21 16:33:52 -07001672}
1673
Mel Gorman76ab0f52010-05-24 14:32:28 -07001674/* Compact all nodes in the system */
Jason Liu7964c062013-01-11 14:31:47 -08001675static void compact_nodes(void)
Mel Gorman76ab0f52010-05-24 14:32:28 -07001676{
1677 int nid;
1678
Hugh Dickins8575ec22012-03-21 16:33:53 -07001679 /* Flush pending updates to the LRU lists */
1680 lru_add_drain_all();
1681
Mel Gorman76ab0f52010-05-24 14:32:28 -07001682 for_each_online_node(nid)
1683 compact_node(nid);
Mel Gorman76ab0f52010-05-24 14:32:28 -07001684}
1685
1686/* The written value is actually unused, all memory is compacted */
1687int sysctl_compact_memory;
1688
Yaowei Baifec4eb22016-01-14 15:20:09 -08001689/*
1690 * This is the entry point for compacting all nodes via
1691 * /proc/sys/vm/compact_memory
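 * (e.g. "echo 1 > /proc/sys/vm/compact_memory"; the written value is ignored)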
1692 */
Mel Gorman76ab0f52010-05-24 14:32:28 -07001693int sysctl_compaction_handler(struct ctl_table *table, int write,
1694 void __user *buffer, size_t *length, loff_t *ppos)
1695{
1696 if (write)
Jason Liu7964c062013-01-11 14:31:47 -08001697 compact_nodes();
Mel Gorman76ab0f52010-05-24 14:32:28 -07001698
1699 return 0;
1700}
Mel Gormaned4a6d72010-05-24 14:32:29 -07001701
Mel Gorman5e771902010-05-24 14:32:31 -07001702int sysctl_extfrag_handler(struct ctl_table *table, int write,
1703 void __user *buffer, size_t *length, loff_t *ppos)
1704{
1705 proc_dointvec_minmax(table, write, buffer, length, ppos);
1706
1707 return 0;
1708}
1709
Mel Gormaned4a6d72010-05-24 14:32:29 -07001710#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
Rashika Kheria74e77fb2014-04-03 14:48:01 -07001711static ssize_t sysfs_compact_node(struct device *dev,
Kay Sievers10fbcf42011-12-21 14:48:43 -08001712 struct device_attribute *attr,
Mel Gormaned4a6d72010-05-24 14:32:29 -07001713 const char *buf, size_t count)
1714{
Hugh Dickins8575ec22012-03-21 16:33:53 -07001715 int nid = dev->id;
1716
1717 if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
1718 /* Flush pending updates to the LRU lists */
1719 lru_add_drain_all();
1720
1721 compact_node(nid);
1722 }
Mel Gormaned4a6d72010-05-24 14:32:29 -07001723
1724 return count;
1725}
Kay Sievers10fbcf42011-12-21 14:48:43 -08001726static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);
Mel Gormaned4a6d72010-05-24 14:32:29 -07001727
1728int compaction_register_node(struct node *node)
1729{
Kay Sievers10fbcf42011-12-21 14:48:43 -08001730 return device_create_file(&node->dev, &dev_attr_compact);
Mel Gormaned4a6d72010-05-24 14:32:29 -07001731}
1732
1733void compaction_unregister_node(struct node *node)
1734{
Kay Sievers10fbcf42011-12-21 14:48:43 -08001735 return device_remove_file(&node->dev, &dev_attr_compact);
Mel Gormaned4a6d72010-05-24 14:32:29 -07001736}
1737#endif /* CONFIG_SYSFS && CONFIG_NUMA */
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001738
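/*
 * Wait condition for kcompactd: there is work when a non-zero order has been
 * requested. kthread_should_stop() is included so that the thread also wakes
 * from wait_event_freezable() when it is being stopped, e.g. on node offline.
 */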
Vlastimil Babka698b1b32016-03-17 14:18:08 -07001739static inline bool kcompactd_work_requested(pg_data_t *pgdat)
1740{
Vlastimil Babka172400c2016-05-05 16:22:32 -07001741 return pgdat->kcompactd_max_order > 0 || kthread_should_stop();
Vlastimil Babka698b1b32016-03-17 14:18:08 -07001742}
1743
1744static bool kcompactd_node_suitable(pg_data_t *pgdat)
1745{
1746 int zoneid;
1747 struct zone *zone;
1748 enum zone_type classzone_idx = pgdat->kcompactd_classzone_idx;
1749
1750 for (zoneid = 0; zoneid < classzone_idx; zoneid++) {
1751 zone = &pgdat->node_zones[zoneid];
1752
1753 if (!populated_zone(zone))
1754 continue;
1755
1756 if (compaction_suitable(zone, pgdat->kcompactd_max_order, 0,
1757 classzone_idx) == COMPACT_CONTINUE)
1758 return true;
1759 }
1760
1761 return false;
1762}
1763
1764static void kcompactd_do_work(pg_data_t *pgdat)
1765{
1766 /*
 1767	 * With no specific allocation context, compact all zones so that a
 1768	 * page of the requested order becomes allocatable.
1769 */
1770 int zoneid;
1771 struct zone *zone;
1772 struct compact_control cc = {
1773 .order = pgdat->kcompactd_max_order,
1774 .classzone_idx = pgdat->kcompactd_classzone_idx,
1775 .mode = MIGRATE_SYNC_LIGHT,
1776 .ignore_skip_hint = true,
1777
1778 };
1779 bool success = false;
1780
1781 trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order,
1782 cc.classzone_idx);
1783 count_vm_event(KCOMPACTD_WAKE);
1784
1785 for (zoneid = 0; zoneid < cc.classzone_idx; zoneid++) {
1786 int status;
1787
1788 zone = &pgdat->node_zones[zoneid];
1789 if (!populated_zone(zone))
1790 continue;
1791
1792 if (compaction_deferred(zone, cc.order))
1793 continue;
1794
1795 if (compaction_suitable(zone, cc.order, 0, zoneid) !=
1796 COMPACT_CONTINUE)
1797 continue;
1798
1799 cc.nr_freepages = 0;
1800 cc.nr_migratepages = 0;
1801 cc.zone = zone;
1802 INIT_LIST_HEAD(&cc.freepages);
1803 INIT_LIST_HEAD(&cc.migratepages);
1804
Vlastimil Babka172400c2016-05-05 16:22:32 -07001805 if (kthread_should_stop())
1806 return;
Vlastimil Babka698b1b32016-03-17 14:18:08 -07001807 status = compact_zone(zone, &cc);
1808
1809 if (zone_watermark_ok(zone, cc.order, low_wmark_pages(zone),
1810 cc.classzone_idx, 0)) {
1811 success = true;
1812 compaction_defer_reset(zone, cc.order, false);
1813 } else if (status == COMPACT_COMPLETE) {
1814 /*
1815 * We use sync migration mode here, so we defer like
1816 * sync direct compaction does.
1817 */
1818 defer_compaction(zone, cc.order);
1819 }
1820
1821 VM_BUG_ON(!list_empty(&cc.freepages));
1822 VM_BUG_ON(!list_empty(&cc.migratepages));
1823 }
1824
1825 /*
1826 * Regardless of success, we are done until woken up next. But remember
 1827	 * the requested order/classzone_idx in case they were higher/tighter
 1828	 * than our current ones.
1829 */
1830 if (pgdat->kcompactd_max_order <= cc.order)
1831 pgdat->kcompactd_max_order = 0;
1832 if (pgdat->kcompactd_classzone_idx >= cc.classzone_idx)
1833 pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1;
1834}
1835
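/*
 * Record the highest order and lowest classzone_idx requested so far for this
 * node, then wake kcompactd if at least one zone looks suitable for
 * compaction at that order.
 */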
1836void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx)
1837{
1838 if (!order)
1839 return;
1840
1841 if (pgdat->kcompactd_max_order < order)
1842 pgdat->kcompactd_max_order = order;
1843
1844 if (pgdat->kcompactd_classzone_idx > classzone_idx)
1845 pgdat->kcompactd_classzone_idx = classzone_idx;
1846
1847 if (!waitqueue_active(&pgdat->kcompactd_wait))
1848 return;
1849
1850 if (!kcompactd_node_suitable(pgdat))
1851 return;
1852
1853 trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order,
1854 classzone_idx);
1855 wake_up_interruptible(&pgdat->kcompactd_wait);
1856}
1857
1858/*
1859 * The background compaction daemon, started as a kernel thread
1860 * from the init process.
1861 */
1862static int kcompactd(void *p)
1863{
1864 pg_data_t *pgdat = (pg_data_t*)p;
1865 struct task_struct *tsk = current;
1866
1867 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
1868
1869 if (!cpumask_empty(cpumask))
1870 set_cpus_allowed_ptr(tsk, cpumask);
1871
1872 set_freezable();
1873
1874 pgdat->kcompactd_max_order = 0;
1875 pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1;
1876
1877 while (!kthread_should_stop()) {
1878 trace_mm_compaction_kcompactd_sleep(pgdat->node_id);
1879 wait_event_freezable(pgdat->kcompactd_wait,
1880 kcompactd_work_requested(pgdat));
1881
1882 kcompactd_do_work(pgdat);
1883 }
1884
1885 return 0;
1886}
1887
1888/*
1889 * This kcompactd start function will be called by init and node-hot-add.
 1890 * On node-hot-add, kcompactd will be moved to the proper cpus if cpus are hot-added.
1891 */
1892int kcompactd_run(int nid)
1893{
1894 pg_data_t *pgdat = NODE_DATA(nid);
1895 int ret = 0;
1896
1897 if (pgdat->kcompactd)
1898 return 0;
1899
1900 pgdat->kcompactd = kthread_run(kcompactd, pgdat, "kcompactd%d", nid);
1901 if (IS_ERR(pgdat->kcompactd)) {
1902 pr_err("Failed to start kcompactd on node %d\n", nid);
1903 ret = PTR_ERR(pgdat->kcompactd);
1904 pgdat->kcompactd = NULL;
1905 }
1906 return ret;
1907}
1908
1909/*
1910 * Called by memory hotplug when all memory in a node is offlined. Caller must
1911 * hold mem_hotplug_begin/end().
1912 */
1913void kcompactd_stop(int nid)
1914{
1915 struct task_struct *kcompactd = NODE_DATA(nid)->kcompactd;
1916
1917 if (kcompactd) {
1918 kthread_stop(kcompactd);
1919 NODE_DATA(nid)->kcompactd = NULL;
1920 }
1921}
1922
1923/*
 1924 * It's optimal to keep kcompactd threads on the same CPUs as their
 1925 * node's memory, but not required for correctness. So if the last cpu
 1926 * in a node goes away, they may be allowed to run anywhere; as the
 1927 * first one comes back, restore their cpu bindings.
1928 */
1929static int cpu_callback(struct notifier_block *nfb, unsigned long action,
1930 void *hcpu)
1931{
1932 int nid;
1933
1934 if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
1935 for_each_node_state(nid, N_MEMORY) {
1936 pg_data_t *pgdat = NODE_DATA(nid);
1937 const struct cpumask *mask;
1938
1939 mask = cpumask_of_node(pgdat->node_id);
1940
1941 if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
1942 /* One of our CPUs online: restore mask */
1943 set_cpus_allowed_ptr(pgdat->kcompactd, mask);
1944 }
1945 }
1946 return NOTIFY_OK;
1947}
1948
1949static int __init kcompactd_init(void)
1950{
1951 int nid;
1952
1953 for_each_node_state(nid, N_MEMORY)
1954 kcompactd_run(nid);
1955 hotcpu_notifier(cpu_callback, 0);
1956 return 0;
1957}
1958subsys_initcall(kcompactd_init)
1959
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001960#endif /* CONFIG_COMPACTION */