/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include <linux/balloon_compaction.h>
#include <linux/page-isolation.h>
#include "internal.h"

#ifdef CONFIG_COMPACTION
static inline void count_compact_event(enum vm_event_item item)
{
	count_vm_event(item);
}

static inline void count_compact_events(enum vm_event_item item, long delta)
{
	count_vm_events(item, delta);
}
#else
#define count_compact_event(item) do { } while (0)
#define count_compact_events(item, delta) do { } while (0)
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>

static unsigned long release_freepages(struct list_head *freelist)
{
	struct page *page, *next;
	unsigned long count = 0;

	list_for_each_entry_safe(page, next, freelist, lru) {
		list_del(&page->lru);
		__free_page(page);
		count++;
	}

	return count;
}

static void map_pages(struct list_head *list)
{
	struct page *page;

	list_for_each_entry(page, list, lru) {
		arch_alloc_page(page, 0);
		kernel_map_pages(page, 1, 1);
	}
}

static inline bool migrate_async_suitable(int migratetype)
{
	return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
}

#ifdef CONFIG_COMPACTION
/* Returns true if the pageblock should be scanned for pages to isolate. */
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	if (cc->ignore_skip_hint)
		return true;

	return !get_pageblock_skip(page);
}

/*
 * This function is called to clear all cached information on pageblocks that
 * should be skipped for page isolation when the migrate and free page scanner
 * meet.
 */
static void __reset_isolation_suitable(struct zone *zone)
{
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long pfn;

	zone->compact_cached_migrate_pfn[0] = start_pfn;
	zone->compact_cached_migrate_pfn[1] = start_pfn;
	zone->compact_cached_free_pfn = end_pfn;
	zone->compact_blockskip_flush = false;

	/* Walk the zone and mark every pageblock as suitable for isolation */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		struct page *page;

		cond_resched();

		if (!pfn_valid(pfn))
			continue;

		page = pfn_to_page(pfn);
		if (zone != page_zone(page))
			continue;

		clear_pageblock_skip(page);
	}
}

void reset_isolation_suitable(pg_data_t *pgdat)
{
	int zoneid;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
		struct zone *zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		/* Only flush if a full compaction finished recently */
		if (zone->compact_blockskip_flush)
			__reset_isolation_suitable(zone);
	}
}

/*
 * If no pages were isolated then mark this pageblock to be skipped in the
 * future. The information is later cleared by __reset_isolation_suitable().
 */
static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long nr_isolated,
			bool set_unsuitable, bool migrate_scanner)
{
	struct zone *zone = cc->zone;
	unsigned long pfn;

	if (cc->ignore_skip_hint)
		return;

	if (!page)
		return;

	if (nr_isolated)
		return;

	/*
	 * Only skip pageblocks when all forms of compaction will be known to
	 * fail in the near future.
	 */
	if (set_unsuitable)
		set_pageblock_skip(page);

	pfn = page_to_pfn(page);

	/* Update where async and sync compaction should restart */
	if (migrate_scanner) {
		if (cc->finished_update_migrate)
			return;
		if (pfn > zone->compact_cached_migrate_pfn[0])
			zone->compact_cached_migrate_pfn[0] = pfn;
		if (cc->mode != MIGRATE_ASYNC &&
		    pfn > zone->compact_cached_migrate_pfn[1])
			zone->compact_cached_migrate_pfn[1] = pfn;
	} else {
		if (cc->finished_update_free)
			return;
		if (pfn < zone->compact_cached_free_pfn)
			zone->compact_cached_free_pfn = pfn;
	}
}
#else
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	return true;
}

static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long nr_isolated,
			bool set_unsuitable, bool migrate_scanner)
{
}
#endif /* CONFIG_COMPACTION */
Mel Gorman2a1402a2012-10-08 16:32:33 -0700188static inline bool should_release_lock(spinlock_t *lock)
189{
190 return need_resched() || spin_is_contended(lock);
191}
192
Michal Nazarewicz85aa1252012-01-30 13:24:03 +0100193/*
Mel Gormanc67fe372012-08-21 16:16:17 -0700194 * Compaction requires the taking of some coarse locks that are potentially
195 * very heavily contended. Check if the process needs to be scheduled or
196 * if the lock is contended. For async compaction, back out in the event
197 * if contention is severe. For sync compaction, schedule.
198 *
199 * Returns true if the lock is held.
200 * Returns false if the lock is released and compaction should abort
201 */
202static bool compact_checklock_irqsave(spinlock_t *lock, unsigned long *flags,
203 bool locked, struct compact_control *cc)
204{
Mel Gorman2a1402a2012-10-08 16:32:33 -0700205 if (should_release_lock(lock)) {
Mel Gormanc67fe372012-08-21 16:16:17 -0700206 if (locked) {
207 spin_unlock_irqrestore(lock, *flags);
208 locked = false;
209 }
210
211 /* async aborts if taking too long or contended */
David Rientjese0b9dae2014-06-04 16:08:28 -0700212 if (cc->mode == MIGRATE_ASYNC) {
Shaohua Lie64c5232012-10-08 16:32:27 -0700213 cc->contended = true;
Mel Gormanc67fe372012-08-21 16:16:17 -0700214 return false;
215 }
216
217 cond_resched();
Mel Gormanc67fe372012-08-21 16:16:17 -0700218 }
219
220 if (!locked)
221 spin_lock_irqsave(lock, *flags);
222 return true;
223}
224

/*
 * Aside from avoiding lock contention, compaction also periodically checks
 * need_resched() and either schedules in sync compaction or aborts async
 * compaction. This is similar to what compact_checklock_irqsave() does, but
 * is used where no lock is concerned.
 *
 * Returns false when no scheduling was needed, or sync compaction scheduled.
 * Returns true when async compaction should abort.
 */
static inline bool compact_should_abort(struct compact_control *cc)
{
	/* async compaction aborts if contended */
	if (need_resched()) {
		if (cc->mode == MIGRATE_ASYNC) {
			cc->contended = true;
			return true;
		}

		cond_resched();
	}

	return false;
}

/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct page *page)
{
	/* If the page is a large free page, then disallow migration */
	if (PageBuddy(page) && page_order(page) >= pageblock_order)
		return false;

	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
	if (migrate_async_suitable(get_pageblock_migratetype(page)))
		return true;

	/* Otherwise skip the block */
	return false;
}

/*
 * Isolate free pages onto a private freelist. If @strict is true, will abort
 * returning 0 on any invalid PFNs or non-free pages inside of the pageblock
 * (even though it may still end up isolating some pages).
 */
static unsigned long isolate_freepages_block(struct compact_control *cc,
				unsigned long blockpfn,
				unsigned long end_pfn,
				struct list_head *freelist,
				bool strict)
{
	int nr_scanned = 0, total_isolated = 0;
	struct page *cursor, *valid_page = NULL;
	unsigned long flags;
	bool locked = false;
	bool checked_pageblock = false;

	cursor = pfn_to_page(blockpfn);

	/* Isolate free pages. */
	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
		int isolated, i;
		struct page *page = cursor;

		nr_scanned++;
		if (!pfn_valid_within(blockpfn))
			goto isolate_fail;

		if (!valid_page)
			valid_page = page;
		if (!PageBuddy(page))
			goto isolate_fail;

		/*
		 * The zone lock must be held to isolate freepages.
		 * Unfortunately this is a very coarse lock and can be
		 * heavily contended if there are parallel allocations
		 * or parallel compactions. For async compaction do not
		 * spin on the lock and we acquire the lock as late as
		 * possible.
		 */
		locked = compact_checklock_irqsave(&cc->zone->lock, &flags,
								locked, cc);
		if (!locked)
			break;

		/* Recheck this is a suitable migration target under lock */
		if (!strict && !checked_pageblock) {
			/*
			 * We need to check suitability of pageblock only once
			 * and this isolate_freepages_block() is called with
			 * pageblock range, so just check once is sufficient.
			 */
			checked_pageblock = true;
			if (!suitable_migration_target(page))
				break;
		}

		/* Recheck this is a buddy page under lock */
		if (!PageBuddy(page))
			goto isolate_fail;

		/* Found a free page, break it into order-0 pages */
		isolated = split_free_page(page);
		total_isolated += isolated;
		for (i = 0; i < isolated; i++) {
			list_add(&page->lru, freelist);
			page++;
		}

		/* If a page was split, advance to the end of it */
		if (isolated) {
			blockpfn += isolated - 1;
			cursor += isolated - 1;
			continue;
		}

isolate_fail:
		if (strict)
			break;
		else
			continue;

	}

	trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);

	/*
	 * If strict isolation is requested by CMA then check that all the
	 * pages requested were isolated. If there were any failures, 0 is
	 * returned and CMA will fail.
	 */
	if (strict && blockpfn < end_pfn)
		total_isolated = 0;

	if (locked)
		spin_unlock_irqrestore(&cc->zone->lock, flags);

	/* Update the pageblock-skip if the whole pageblock was scanned */
	if (blockpfn == end_pfn)
		update_pageblock_skip(cc, valid_page, total_isolated, true,
				      false);

	count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
	if (total_isolated)
		count_compact_events(COMPACTISOLATED, total_isolated);
	return total_isolated;
}

/**
 * isolate_freepages_range() - isolate free pages.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Non-free pages, invalid PFNs, or zone boundaries within the
 * [start_pfn, end_pfn) range are considered errors, cause function to
 * undo its actions and return zero.
 *
 * Otherwise, function returns one-past-the-last PFN of isolated page
 * (which may be greater than end_pfn if end fell in the middle of
 * a free page).
 */
unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long isolated, pfn, block_end_pfn;
	LIST_HEAD(freelist);

	for (pfn = start_pfn; pfn < end_pfn; pfn += isolated) {
		if (!pfn_valid(pfn) || cc->zone != page_zone(pfn_to_page(pfn)))
			break;

		/*
		 * On subsequent iterations ALIGN() is actually not needed,
		 * but we keep it so as not to complicate the code.
		 */
		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
		block_end_pfn = min(block_end_pfn, end_pfn);

		isolated = isolate_freepages_block(cc, pfn, block_end_pfn,
						   &freelist, true);

		/*
		 * In strict mode, isolate_freepages_block() returns 0 if
		 * there are any holes in the block (ie. invalid PFNs or
		 * non-free pages).
		 */
		if (!isolated)
			break;

		/*
		 * If we managed to isolate pages, it is always (1 << n) *
		 * pageblock_nr_pages for some non-negative n. (Max order
		 * page may span two pageblocks).
		 */
	}

	/* split_free_page does not map the pages */
	map_pages(&freelist);

	if (pfn < end_pfn) {
		/* Loop terminated early, cleanup. */
		release_freepages(&freelist);
		return 0;
	}

	/* We don't use freelists for anything. */
	return pfn;
}

/* Update the number of anon and file isolated pages in the zone */
static void acct_isolated(struct zone *zone, bool locked, struct compact_control *cc)
{
	struct page *page;
	unsigned int count[2] = { 0, };

	list_for_each_entry(page, &cc->migratepages, lru)
		count[!!page_is_file_cache(page)]++;

	/* If locked we can use the interrupt unsafe versions */
	if (locked) {
		__mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
		__mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
	} else {
		mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
		mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
	}
}

/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(struct zone *zone)
{
	unsigned long active, inactive, isolated;

	inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
					zone_page_state(zone, NR_INACTIVE_ANON);
	active = zone_page_state(zone, NR_ACTIVE_FILE) +
					zone_page_state(zone, NR_ACTIVE_ANON);
	isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
					zone_page_state(zone, NR_ISOLATED_ANON);

	return isolated > (inactive + active) / 2;
}

/**
 * isolate_migratepages_range() - isolate all migrate-able pages in range.
 * @zone:	Zone pages are in.
 * @cc:		Compaction control structure.
 * @low_pfn:	The first PFN of the range.
 * @end_pfn:	The one-past-the-last PFN of the range.
 * @unevictable: true if it is allowed to isolate unevictable pages
 *
 * Isolate all pages that can be migrated from the range specified by
 * [low_pfn, end_pfn). Returns zero if there is a fatal signal
 * pending, otherwise the PFN of the first page that was not scanned
 * (which may be less, equal to or greater than end_pfn).
 *
 * Assumes that cc->migratepages is empty and cc->nr_migratepages is
 * zero.
 *
 * Apart from cc->migratepages and cc->nr_migratepages this function
 * does not modify any cc's fields, in particular it does not modify
 * (or read for that matter) cc->migrate_pfn.
 */
unsigned long
isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
		unsigned long low_pfn, unsigned long end_pfn, bool unevictable)
{
	unsigned long last_pageblock_nr = 0, pageblock_nr;
	unsigned long nr_scanned = 0, nr_isolated = 0;
	struct list_head *migratelist = &cc->migratepages;
	struct lruvec *lruvec;
	unsigned long flags;
	bool locked = false;
	struct page *page = NULL, *valid_page = NULL;
	bool set_unsuitable = true;
	const isolate_mode_t mode = (cc->mode == MIGRATE_ASYNC ?
					ISOLATE_ASYNC_MIGRATE : 0) |
				    (unevictable ? ISOLATE_UNEVICTABLE : 0);

	/*
	 * Ensure that there are not too many pages isolated from the LRU
	 * list by either parallel reclaimers or compaction. If there are,
	 * delay for some time until fewer pages are isolated
	 */
	while (unlikely(too_many_isolated(zone))) {
		/* async migration should just abort */
		if (cc->mode == MIGRATE_ASYNC)
			return 0;

		congestion_wait(BLK_RW_ASYNC, HZ/10);

		if (fatal_signal_pending(current))
			return 0;
	}

	if (compact_should_abort(cc))
		return 0;

	/* Time to isolate some pages for migration */
	for (; low_pfn < end_pfn; low_pfn++) {
		/* give a chance to irqs before checking need_resched() */
		if (locked && !(low_pfn % SWAP_CLUSTER_MAX)) {
			if (should_release_lock(&zone->lru_lock)) {
				spin_unlock_irqrestore(&zone->lru_lock, flags);
				locked = false;
			}
		}

		/*
		 * migrate_pfn does not necessarily start aligned to a
		 * pageblock. Ensure that pfn_valid is called when moving
		 * into a new MAX_ORDER_NR_PAGES range in case of large
		 * memory holes within the zone
		 */
		if ((low_pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
			if (!pfn_valid(low_pfn)) {
				low_pfn += MAX_ORDER_NR_PAGES - 1;
				continue;
			}
		}

		if (!pfn_valid_within(low_pfn))
			continue;
		nr_scanned++;

		/*
		 * Get the page and ensure the page is within the same zone.
		 * See the comment in isolate_freepages about overlapping
		 * nodes. It is deliberate that the new zone lock is not taken
		 * as memory compaction should not move pages between nodes.
		 */
		page = pfn_to_page(low_pfn);
		if (page_zone(page) != zone)
			continue;

		if (!valid_page)
			valid_page = page;

		/* If isolation recently failed, do not retry */
		pageblock_nr = low_pfn >> pageblock_order;
		if (last_pageblock_nr != pageblock_nr) {
			int mt;

			last_pageblock_nr = pageblock_nr;
			if (!isolation_suitable(cc, page))
				goto next_pageblock;

			/*
			 * For async migration, also only scan in MOVABLE
			 * blocks. Async migration is optimistic to see if
			 * the minimum amount of work satisfies the allocation
			 */
			mt = get_pageblock_migratetype(page);
			if (cc->mode == MIGRATE_ASYNC &&
			    !migrate_async_suitable(mt)) {
				set_unsuitable = false;
				goto next_pageblock;
			}
		}

		/*
		 * Skip if free. page_order cannot be used without zone->lock
		 * as nothing prevents parallel allocations or buddy merging.
		 */
		if (PageBuddy(page))
			continue;

		/*
		 * Check may be lockless but that's ok as we recheck later.
		 * It's possible to migrate LRU pages and balloon pages
		 * Skip any other type of page
		 */
		if (!PageLRU(page)) {
			if (unlikely(balloon_page_movable(page))) {
				if (locked && balloon_page_isolate(page)) {
					/* Successfully isolated */
					goto isolate_success;
				}
			}
			continue;
		}

		/*
		 * PageLRU is set. lru_lock normally excludes isolation
		 * splitting and collapsing (collapsing has already happened
		 * if PageLRU is set) but the lock is not necessarily taken
		 * here and it is wasteful to take it just to check transhuge.
		 * Check TransHuge without lock and skip the whole pageblock if
		 * it's either a transhuge or hugetlbfs page, as calling
		 * compound_order() without preventing THP from splitting the
		 * page underneath us may return surprising results.
		 */
		if (PageTransHuge(page)) {
			if (!locked)
				goto next_pageblock;
			low_pfn += (1 << compound_order(page)) - 1;
			continue;
		}

		/*
		 * Migration will fail if an anonymous page is pinned in memory,
		 * so avoid taking lru_lock and isolating it unnecessarily in an
		 * admittedly racy check.
		 */
		if (!page_mapping(page) &&
		    page_count(page) > page_mapcount(page))
			continue;

		/* Check if it is ok to still hold the lock */
		locked = compact_checklock_irqsave(&zone->lru_lock, &flags,
								locked, cc);
		if (!locked || fatal_signal_pending(current))
			break;

		/* Recheck PageLRU and PageTransHuge under lock */
		if (!PageLRU(page))
			continue;
		if (PageTransHuge(page)) {
			low_pfn += (1 << compound_order(page)) - 1;
			continue;
		}

		lruvec = mem_cgroup_page_lruvec(page, zone);

		/* Try isolate the page */
		if (__isolate_lru_page(page, mode) != 0)
			continue;

		VM_BUG_ON_PAGE(PageTransCompound(page), page);

		/* Successfully isolated */
		del_page_from_lru_list(page, lruvec, page_lru(page));

isolate_success:
		cc->finished_update_migrate = true;
		list_add(&page->lru, migratelist);
		cc->nr_migratepages++;
		nr_isolated++;

		/* Avoid isolating too much */
		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
			++low_pfn;
			break;
		}

		continue;

next_pageblock:
		low_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages) - 1;
	}

	acct_isolated(zone, locked, cc);

	if (locked)
		spin_unlock_irqrestore(&zone->lru_lock, flags);

	/*
	 * Update the pageblock-skip information and cached scanner pfn,
	 * if the whole pageblock was scanned without isolating any page.
	 */
	if (low_pfn == end_pfn)
		update_pageblock_skip(cc, valid_page, nr_isolated,
				      set_unsuitable, true);

	trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);

	count_compact_events(COMPACTMIGRATE_SCANNED, nr_scanned);
	if (nr_isolated)
		count_compact_events(COMPACTISOLATED, nr_isolated);

	return low_pfn;
}

#endif /* CONFIG_COMPACTION || CONFIG_CMA */
#ifdef CONFIG_COMPACTION
/*
 * Based on information in the current compact_control, find blocks
 * suitable for isolating free pages from and then isolate them.
 */
static void isolate_freepages(struct zone *zone,
				struct compact_control *cc)
{
	struct page *page;
	unsigned long block_start_pfn;	/* start of current pageblock */
	unsigned long block_end_pfn;	/* end of current pageblock */
	unsigned long low_pfn;	     /* lowest pfn scanner is able to scan */
	int nr_freepages = cc->nr_freepages;
	struct list_head *freelist = &cc->freepages;

	/*
	 * Initialise the free scanner. The starting point is where we last
	 * successfully isolated from, zone-cached value, or the end of the
	 * zone when isolating for the first time. We need this aligned to
	 * the pageblock boundary, because we do
	 * block_start_pfn -= pageblock_nr_pages in the for loop.
	 * For ending point, take care when isolating in the last pageblock
	 * of a zone which ends in the middle of a pageblock.
	 * The low boundary is the end of the pageblock the migration scanner
	 * is using.
	 */
	block_start_pfn = cc->free_pfn & ~(pageblock_nr_pages-1);
	block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
						zone_end_pfn(zone));
	low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages);

	/*
	 * Isolate free pages until enough are available to migrate the
	 * pages on cc->migratepages. We stop searching if the migrate
	 * and free page scanners meet or enough free pages are isolated.
	 */
	for (; block_start_pfn >= low_pfn && cc->nr_migratepages > nr_freepages;
				block_end_pfn = block_start_pfn,
				block_start_pfn -= pageblock_nr_pages) {
		unsigned long isolated;

		/*
		 * This can iterate a massively long zone without finding any
		 * suitable migration targets, so periodically check if we need
		 * to schedule, or even abort async compaction.
		 */
		if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
						&& compact_should_abort(cc))
			break;

		if (!pfn_valid(block_start_pfn))
			continue;

		/*
		 * Check for overlapping nodes/zones. It's possible on some
		 * configurations to have a setup like
		 * node0 node1 node0
		 * i.e. it's possible that all pages within a zone's range of
		 * pages do not belong to a single zone.
		 */
		page = pfn_to_page(block_start_pfn);
		if (page_zone(page) != zone)
			continue;

		/* Check the block is suitable for migration */
		if (!suitable_migration_target(page))
			continue;

		/* If isolation recently failed, do not retry */
		if (!isolation_suitable(cc, page))
			continue;

		/* Found a block suitable for isolating free pages from */
		cc->free_pfn = block_start_pfn;
		isolated = isolate_freepages_block(cc, block_start_pfn,
					block_end_pfn, freelist, false);
		nr_freepages += isolated;

		/*
		 * Set a flag that we successfully isolated in this pageblock.
		 * In the next loop iteration, zone->compact_cached_free_pfn
		 * will not be updated and thus it will effectively contain the
		 * highest pageblock we isolated pages from.
		 */
		if (isolated)
			cc->finished_update_free = true;

		/*
		 * isolate_freepages_block() might have aborted due to async
		 * compaction being contended
		 */
		if (cc->contended)
			break;
	}

	/* split_free_page does not map the pages */
	map_pages(freelist);

	/*
	 * If we crossed the migrate scanner, we want to keep it that way
	 * so that compact_finished() may detect this
	 */
	if (block_start_pfn < low_pfn)
		cc->free_pfn = cc->migrate_pfn;

	cc->nr_freepages = nr_freepages;
}

/*
 * This is a migrate-callback that "allocates" freepages by taking pages
 * from the isolated freelists in the block we are migrating to.
 */
static struct page *compaction_alloc(struct page *migratepage,
					unsigned long data,
					int **result)
{
	struct compact_control *cc = (struct compact_control *)data;
	struct page *freepage;

	/*
	 * Isolate free pages if necessary, and if we are not aborting due to
	 * contention.
	 */
	if (list_empty(&cc->freepages)) {
		if (!cc->contended)
			isolate_freepages(cc->zone, cc);

		if (list_empty(&cc->freepages))
			return NULL;
	}

	freepage = list_entry(cc->freepages.next, struct page, lru);
	list_del(&freepage->lru);
	cc->nr_freepages--;

	return freepage;
}

/*
 * This is a migrate-callback that "frees" freepages back to the isolated
 * freelist. All pages on the freelist are from the same zone, so there is no
 * special handling needed for NUMA.
 */
static void compaction_free(struct page *page, unsigned long data)
{
	struct compact_control *cc = (struct compact_control *)data;

	list_add(&page->lru, &cc->freepages);
	cc->nr_freepages++;
}

/* possible outcome of isolate_migratepages */
typedef enum {
	ISOLATE_ABORT,		/* Abort compaction now */
	ISOLATE_NONE,		/* No pages isolated, continue scanning */
	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
} isolate_migrate_t;

/*
 * Isolate all pages that can be migrated from the block pointed to by
 * the migrate scanner within compact_control.
 */
static isolate_migrate_t isolate_migratepages(struct zone *zone,
					struct compact_control *cc)
{
	unsigned long low_pfn, end_pfn;

	/* Do not scan outside zone boundaries */
	low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);

	/* Only scan within a pageblock boundary */
	end_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages);

	/* Do not cross the free scanner or scan within a memory hole */
	if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
		cc->migrate_pfn = end_pfn;
		return ISOLATE_NONE;
	}

	/* Perform the isolation */
	low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn, false);
	if (!low_pfn || cc->contended)
		return ISOLATE_ABORT;

	cc->migrate_pfn = low_pfn;

	return ISOLATE_SUCCESS;
}

static int compact_finished(struct zone *zone,
			    struct compact_control *cc)
{
	unsigned int order;
	unsigned long watermark;

	if (cc->contended || fatal_signal_pending(current))
		return COMPACT_PARTIAL;

	/* Compaction run completes if the migrate and free scanner meet */
	if (cc->free_pfn <= cc->migrate_pfn) {
		/* Let the next compaction start anew. */
		zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
		zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
		zone->compact_cached_free_pfn = zone_end_pfn(zone);

		/*
		 * Mark that the PG_migrate_skip information should be cleared
		 * by kswapd when it goes to sleep. kswapd does not set the
		 * flag itself as the decision to clear it should be based
		 * directly on an allocation request.
		 */
		if (!current_is_kswapd())
			zone->compact_blockskip_flush = true;

		return COMPACT_COMPLETE;
	}

	/*
	 * order == -1 is expected when compacting via
	 * /proc/sys/vm/compact_memory
	 */
	if (cc->order == -1)
		return COMPACT_CONTINUE;

	/* Compaction run is not finished if the watermark is not met */
	watermark = low_wmark_pages(zone);
	watermark += (1 << cc->order);

	if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
		return COMPACT_CONTINUE;

	/* Direct compactor: Is a suitable page free? */
	for (order = cc->order; order < MAX_ORDER; order++) {
		struct free_area *area = &zone->free_area[order];

		/* Job done if page is free of the right migratetype */
		if (!list_empty(&area->free_list[cc->migratetype]))
			return COMPACT_PARTIAL;

		/* Job done if allocation would set block type */
		if (cc->order >= pageblock_order && area->nr_free)
			return COMPACT_PARTIAL;
	}

	return COMPACT_CONTINUE;
}

/*
 * compaction_suitable: Is this suitable to run compaction on this zone now?
 * Returns
 *   COMPACT_SKIPPED  - If there are too few free pages for compaction
 *   COMPACT_PARTIAL  - If the allocation would succeed without compaction
 *   COMPACT_CONTINUE - If compaction should run now
 */
unsigned long compaction_suitable(struct zone *zone, int order)
{
	int fragindex;
	unsigned long watermark;

	/*
	 * order == -1 is expected when compacting via
	 * /proc/sys/vm/compact_memory
	 */
	if (order == -1)
		return COMPACT_CONTINUE;

	/*
	 * Watermarks for order-0 must be met for compaction. Note the 2UL.
	 * This is because during migration, copies of pages need to be
	 * allocated and for a short time, the footprint is higher
	 */
	watermark = low_wmark_pages(zone) + (2UL << order);
	if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
		return COMPACT_SKIPPED;

	/*
	 * fragmentation index determines if allocation failures are due to
	 * low memory or external fragmentation
	 *
	 * index of -1000 implies allocations might succeed depending on
	 * watermarks
	 * index towards 0 implies failure is due to lack of memory
	 * index towards 1000 implies failure is due to fragmentation
	 *
	 * Only compact if a failure would be due to fragmentation.
	 */
	fragindex = fragmentation_index(zone, order);
	if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
		return COMPACT_SKIPPED;

	if (fragindex == -1000 && zone_watermark_ok(zone, order, watermark,
	    0, 0))
		return COMPACT_PARTIAL;

	return COMPACT_CONTINUE;
}

static int compact_zone(struct zone *zone, struct compact_control *cc)
{
	int ret;
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	const bool sync = cc->mode != MIGRATE_ASYNC;

	ret = compaction_suitable(zone, cc->order);
	switch (ret) {
	case COMPACT_PARTIAL:
	case COMPACT_SKIPPED:
		/* Compaction is likely to fail */
		return ret;
	case COMPACT_CONTINUE:
		/* Fall through to compaction */
		;
	}

	/*
	 * Clear pageblock skip if there were failures recently and compaction
	 * is about to be retried after being deferred. kswapd does not do
	 * this reset as it'll reset the cached information when going to sleep.
	 */
	if (compaction_restarting(zone, cc->order) && !current_is_kswapd())
		__reset_isolation_suitable(zone);

	/*
	 * Setup to move all movable pages to the end of the zone. Use cached
	 * information on where the scanners should start but check that it
	 * is initialised by ensuring the values are within zone boundaries.
	 */
	cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync];
	cc->free_pfn = zone->compact_cached_free_pfn;
	if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) {
		cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1);
		zone->compact_cached_free_pfn = cc->free_pfn;
	}
	if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) {
		cc->migrate_pfn = start_pfn;
		zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
		zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
	}

	trace_mm_compaction_begin(start_pfn, cc->migrate_pfn, cc->free_pfn, end_pfn);

	migrate_prep_local();

	while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
		int err;

		switch (isolate_migratepages(zone, cc)) {
		case ISOLATE_ABORT:
			ret = COMPACT_PARTIAL;
			putback_movable_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			goto out;
		case ISOLATE_NONE:
			continue;
		case ISOLATE_SUCCESS:
			;
		}

		if (!cc->nr_migratepages)
			continue;

		err = migrate_pages(&cc->migratepages, compaction_alloc,
				compaction_free, (unsigned long)cc, cc->mode,
				MR_COMPACTION);

		trace_mm_compaction_migratepages(cc->nr_migratepages, err,
							&cc->migratepages);

		/* All pages were either migrated or will be released */
		cc->nr_migratepages = 0;
		if (err) {
			putback_movable_pages(&cc->migratepages);
			/*
			 * migrate_pages() may return -ENOMEM when scanners meet
			 * and we want compact_finished() to detect it
			 */
			if (err == -ENOMEM && cc->free_pfn > cc->migrate_pfn) {
				ret = COMPACT_PARTIAL;
				goto out;
			}
		}
	}

out:
	/* Release free pages and check accounting */
	cc->nr_freepages -= release_freepages(&cc->freepages);
	VM_BUG_ON(cc->nr_freepages != 0);

	trace_mm_compaction_end(ret);

	return ret;
}

static unsigned long compact_zone_order(struct zone *zone, int order,
		gfp_t gfp_mask, enum migrate_mode mode, bool *contended)
{
	unsigned long ret;
	struct compact_control cc = {
		.nr_freepages = 0,
		.nr_migratepages = 0,
		.order = order,
		.migratetype = allocflags_to_migratetype(gfp_mask),
		.zone = zone,
		.mode = mode,
	};
	INIT_LIST_HEAD(&cc.freepages);
	INIT_LIST_HEAD(&cc.migratepages);

	ret = compact_zone(zone, &cc);

	VM_BUG_ON(!list_empty(&cc.freepages));
	VM_BUG_ON(!list_empty(&cc.migratepages));

	*contended = cc.contended;
	return ret;
}

int sysctl_extfrag_threshold = 500;

/**
 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
 * @zonelist: The zonelist used for the current allocation
 * @order: The order of the current allocation
 * @gfp_mask: The GFP mask of the current allocation
 * @nodemask: The allowed nodes to allocate from
 * @mode: The migration mode for async, sync light, or sync migration
 * @contended: Return value that is true if compaction was aborted due to lock contention
 * @candidate_zone: Return the zone where we think allocation should succeed
 *
 * This is the main entry point for direct page compaction.
 */
unsigned long try_to_compact_pages(struct zonelist *zonelist,
			int order, gfp_t gfp_mask, nodemask_t *nodemask,
			enum migrate_mode mode, bool *contended,
			struct zone **candidate_zone)
{
	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
	int may_enter_fs = gfp_mask & __GFP_FS;
	int may_perform_io = gfp_mask & __GFP_IO;
	struct zoneref *z;
	struct zone *zone;
	int rc = COMPACT_DEFERRED;
	int alloc_flags = 0;

	/* Check if the GFP flags allow compaction */
	if (!order || !may_enter_fs || !may_perform_io)
		return COMPACT_SKIPPED;

#ifdef CONFIG_CMA
	if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
		alloc_flags |= ALLOC_CMA;
#endif
	/* Compact each zone in the list */
	for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
								nodemask) {
		int status;

		if (compaction_deferred(zone, order))
			continue;

		status = compact_zone_order(zone, order, gfp_mask, mode,
						contended);
		rc = max(status, rc);

		/* If a normal allocation would succeed, stop compacting */
		if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0,
				      alloc_flags)) {
			*candidate_zone = zone;
			/*
			 * We think the allocation will succeed in this zone,
			 * but it is not certain, hence the false. The caller
			 * will repeat this with true if allocation indeed
			 * succeeds in this zone.
			 */
			compaction_defer_reset(zone, order, false);
			break;
		} else if (mode != MIGRATE_ASYNC) {
			/*
			 * We think that allocation won't succeed in this zone
			 * so we defer compaction there. If it ends up
			 * succeeding after all, it will be reset.
			 */
			defer_compaction(zone, order);
		}
	}

	return rc;
}


/* Compact all zones within a node */
static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
{
	int zoneid;
	struct zone *zone;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {

		zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		cc->nr_freepages = 0;
		cc->nr_migratepages = 0;
		cc->zone = zone;
		INIT_LIST_HEAD(&cc->freepages);
		INIT_LIST_HEAD(&cc->migratepages);

		if (cc->order == -1 || !compaction_deferred(zone, cc->order))
			compact_zone(zone, cc);

		if (cc->order > 0) {
			if (zone_watermark_ok(zone, cc->order,
						low_wmark_pages(zone), 0, 0))
				compaction_defer_reset(zone, cc->order, false);
		}

		VM_BUG_ON(!list_empty(&cc->freepages));
		VM_BUG_ON(!list_empty(&cc->migratepages));
	}
}

void compact_pgdat(pg_data_t *pgdat, int order)
{
	struct compact_control cc = {
		.order = order,
		.mode = MIGRATE_ASYNC,
	};

	if (!order)
		return;

	__compact_pgdat(pgdat, &cc);
}

static void compact_node(int nid)
{
	struct compact_control cc = {
		.order = -1,
		.mode = MIGRATE_SYNC,
		.ignore_skip_hint = true,
	};

	__compact_pgdat(NODE_DATA(nid), &cc);
}

/* Compact all nodes in the system */
static void compact_nodes(void)
{
	int nid;

	/* Flush pending updates to the LRU lists */
	lru_add_drain_all();

	for_each_online_node(nid)
		compact_node(nid);
}

/* The written value is actually unused, all memory is compacted */
int sysctl_compact_memory;

/* This is the entry point for compacting all nodes via /proc/sys/vm */
int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	if (write)
		compact_nodes();

	return 0;
}

int sysctl_extfrag_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_minmax(table, write, buffer, length, ppos);

	return 0;
}

#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
static ssize_t sysfs_compact_node(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	int nid = dev->id;

	if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
		/* Flush pending updates to the LRU lists */
		lru_add_drain_all();

		compact_node(nid);
	}

	return count;
}
static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);

int compaction_register_node(struct node *node)
{
	return device_create_file(&node->dev, &dev_attr_compact);
}

void compaction_unregister_node(struct node *node)
{
	return device_remove_file(&node->dev, &dev_attr_compact);
}
#endif /* CONFIG_SYSFS && CONFIG_NUMA */

#endif /* CONFIG_COMPACTION */