/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting.
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>

/*
 * compact_control is used to track pages being migrated and the free pages
 * they are being migrated to during memory compaction. The free_pfn starts
 * at the end of a zone and migrate_pfn begins at the start. Movable pages
 * are moved to the end of a zone during a compaction run and the run
 * completes when free_pfn <= migrate_pfn.
 */
struct compact_control {
	struct list_head freepages;	/* List of free pages to migrate to */
	struct list_head migratepages;	/* List of pages being migrated */
	unsigned long nr_freepages;	/* Number of isolated free pages */
	unsigned long nr_migratepages;	/* Number of pages to migrate */
	unsigned long free_pfn;		/* isolate_freepages search base */
	unsigned long migrate_pfn;	/* isolate_migratepages search base */

	/* Account for isolated anon and file pages */
	unsigned long nr_anon;
	unsigned long nr_file;

	unsigned int order;		/* order a direct compactor needs */
	int migratetype;		/* MOVABLE, RECLAIMABLE etc */
	struct zone *zone;
};

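/* Return isolated free pages on @freelist to the buddy allocator, counting them */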
static unsigned long release_freepages(struct list_head *freelist)
{
	struct page *page, *next;
	unsigned long count = 0;

	list_for_each_entry_safe(page, next, freelist, lru) {
		list_del(&page->lru);
		__free_page(page);
		count++;
	}

	return count;
}

/* Isolate free pages onto a private freelist. Must hold zone->lock */
static unsigned long isolate_freepages_block(struct zone *zone,
				unsigned long blockpfn,
				struct list_head *freelist)
{
	unsigned long zone_end_pfn, end_pfn;
	int nr_scanned = 0, total_isolated = 0;
	struct page *cursor;

	/* Get the last PFN we should scan for free pages at */
	zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
	end_pfn = min(blockpfn + pageblock_nr_pages, zone_end_pfn);

	/* Find the first usable PFN in the block to initialise page cursor */
	for (; blockpfn < end_pfn; blockpfn++) {
		if (pfn_valid_within(blockpfn))
			break;
	}
	cursor = pfn_to_page(blockpfn);

	/* Isolate free pages. This assumes the block is valid */
	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
		int isolated, i;
		struct page *page = cursor;

		if (!pfn_valid_within(blockpfn))
			continue;
		nr_scanned++;

		if (!PageBuddy(page))
			continue;

		/* Found a free page, break it into order-0 pages */
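		/*
		 * split_free_page() removes the page from the buddy lists and
		 * returns the number of order-0 pages created, or 0 on failure.
		 */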
		isolated = split_free_page(page);
		total_isolated += isolated;
		for (i = 0; i < isolated; i++) {
			list_add(&page->lru, freelist);
			page++;
		}

		/* If a page was split, advance to the end of it */
		if (isolated) {
			blockpfn += isolated - 1;
			cursor += isolated - 1;
		}
	}

	trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);
	return total_isolated;
}

/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct page *page)
{
	int migratetype = get_pageblock_migratetype(page);

	/* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
	if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
		return false;

	/* If the page is a large free page, then allow migration */
	if (PageBuddy(page) && page_order(page) >= pageblock_order)
		return true;

	/* If the block is MIGRATE_MOVABLE, allow migration */
	if (migratetype == MIGRATE_MOVABLE)
		return true;

	/* Otherwise skip the block */
	return false;
}

/*
 * Based on information in the current compact_control, find blocks
 * suitable for isolating free pages from and then isolate them.
 */
static void isolate_freepages(struct zone *zone,
				struct compact_control *cc)
{
	struct page *page;
	unsigned long high_pfn, low_pfn, pfn;
	unsigned long flags;
	int nr_freepages = cc->nr_freepages;
	struct list_head *freelist = &cc->freepages;

	pfn = cc->free_pfn;
	low_pfn = cc->migrate_pfn + pageblock_nr_pages;
	high_pfn = low_pfn;

	/*
	 * Isolate free pages until enough are available to migrate the
	 * pages on cc->migratepages. We stop searching if the migrate
	 * and free page scanners meet or enough free pages are isolated.
	 */
	spin_lock_irqsave(&zone->lock, flags);
	for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
					pfn -= pageblock_nr_pages) {
		unsigned long isolated;

		if (!pfn_valid(pfn))
			continue;

		/*
		 * Check for overlapping nodes/zones. It's possible on some
		 * configurations to have a setup like
		 * node0 node1 node0
		 * i.e. it's possible that all pages within a zone's range of
		 * pages do not belong to a single zone.
		 */
		page = pfn_to_page(pfn);
		if (page_zone(page) != zone)
			continue;

		/* Check the block is suitable for migration */
		if (!suitable_migration_target(page))
			continue;

		/* Found a block suitable for isolating free pages from */
		isolated = isolate_freepages_block(zone, pfn, freelist);
		nr_freepages += isolated;

		/*
		 * Record the highest PFN we isolated pages from. When next
		 * looking for free pages, the search will restart here as
		 * page migration may have returned some pages to the allocator
		 */
		if (isolated)
			high_pfn = max(high_pfn, pfn);
	}
	spin_unlock_irqrestore(&zone->lock, flags);

	/* split_free_page does not map the pages */
	list_for_each_entry(page, freelist, lru) {
		arch_alloc_page(page, 0);
		kernel_map_pages(page, 1, 1);
	}

	cc->free_pfn = high_pfn;
	cc->nr_freepages = nr_freepages;
}

/* Update the number of anon and file isolated pages in the zone */
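/*
 * Called with zone->lru_lock held and IRQs disabled, which makes the
 * non-atomic __mod_zone_page_state() updates below safe.
 */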
static void acct_isolated(struct zone *zone, struct compact_control *cc)
{
	struct page *page;
	unsigned int count[NR_LRU_LISTS] = { 0, };

	list_for_each_entry(page, &cc->migratepages, lru) {
		int lru = page_lru_base_type(page);
		count[lru]++;
	}

	cc->nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON];
	cc->nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE];
	__mod_zone_page_state(zone, NR_ISOLATED_ANON, cc->nr_anon);
	__mod_zone_page_state(zone, NR_ISOLATED_FILE, cc->nr_file);
}

/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(struct zone *zone)
{
	unsigned long active, inactive, isolated;

	inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
			zone_page_state(zone, NR_INACTIVE_ANON);
	active = zone_page_state(zone, NR_ACTIVE_FILE) +
			zone_page_state(zone, NR_ACTIVE_ANON);
	isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
			zone_page_state(zone, NR_ISOLATED_ANON);

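	/* Back off when more than half of the LRU pages are already isolated */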
	return isolated > (inactive + active) / 2;
}

/*
 * Isolate all pages that can be migrated from the block pointed to by
 * the migrate scanner within compact_control.
 */
static unsigned long isolate_migratepages(struct zone *zone,
					struct compact_control *cc)
{
	unsigned long low_pfn, end_pfn;
	unsigned long nr_scanned = 0, nr_isolated = 0;
	struct list_head *migratelist = &cc->migratepages;

	/* Do not scan outside zone boundaries */
	low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);

	/* Only scan within a pageblock boundary */
	end_pfn = ALIGN(low_pfn + pageblock_nr_pages, pageblock_nr_pages);

	/* Do not cross the free scanner or scan within a memory hole */
	if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
		cc->migrate_pfn = end_pfn;
		return 0;
	}

	/*
	 * Ensure that there are not too many pages isolated from the LRU
	 * list by either parallel reclaimers or compaction. If there are,
	 * delay for some time until fewer pages are isolated
	 */
	while (unlikely(too_many_isolated(zone))) {
		congestion_wait(BLK_RW_ASYNC, HZ/10);

		if (fatal_signal_pending(current))
			return 0;
	}

	/* Time to isolate some pages for migration */
	spin_lock_irq(&zone->lru_lock);
	for (; low_pfn < end_pfn; low_pfn++) {
		struct page *page;
		if (!pfn_valid_within(low_pfn))
			continue;
		nr_scanned++;

		/* Get the page and skip it if free */
		page = pfn_to_page(low_pfn);
		if (PageBuddy(page))
			continue;

		/* Try to isolate the page */
		if (__isolate_lru_page(page, ISOLATE_BOTH, 0) != 0)
			continue;

		/* Successfully isolated */
		del_page_from_lru_list(zone, page, page_lru(page));
		list_add(&page->lru, migratelist);
		cc->nr_migratepages++;
		nr_isolated++;

		/* Avoid isolating too much */
		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
			break;
	}

	acct_isolated(zone, cc);

	spin_unlock_irq(&zone->lru_lock);
	cc->migrate_pfn = low_pfn;

	trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);

	return cc->nr_migratepages;
}

/*
 * This is a migrate-callback that "allocates" freepages by taking pages
 * from the isolated freelists in the block we are migrating to.
 */
static struct page *compaction_alloc(struct page *migratepage,
					unsigned long data,
					int **result)
{
	struct compact_control *cc = (struct compact_control *)data;
	struct page *freepage;

	/* Isolate free pages if necessary */
	if (list_empty(&cc->freepages)) {
		isolate_freepages(cc->zone, cc);

		if (list_empty(&cc->freepages))
			return NULL;
	}

	freepage = list_entry(cc->freepages.next, struct page, lru);
	list_del(&freepage->lru);
	cc->nr_freepages--;

	return freepage;
}

/*
 * We cannot control nr_migratepages and nr_freepages fully when migration is
 * running as migrate_pages() has no knowledge of compact_control. When
 * migration is complete, we count the number of pages on the lists by hand.
 */
static void update_nr_listpages(struct compact_control *cc)
{
	int nr_migratepages = 0;
	int nr_freepages = 0;
	struct page *page;

	list_for_each_entry(page, &cc->migratepages, lru)
		nr_migratepages++;
	list_for_each_entry(page, &cc->freepages, lru)
		nr_freepages++;

	cc->nr_migratepages = nr_migratepages;
	cc->nr_freepages = nr_freepages;
}

static int compact_finished(struct zone *zone,
			    struct compact_control *cc)
{
	unsigned int order;
	unsigned long watermark = low_wmark_pages(zone) + (1 << cc->order);

	if (fatal_signal_pending(current))
		return COMPACT_PARTIAL;

	/* Compaction run completes if the migrate and free scanners meet */
	if (cc->free_pfn <= cc->migrate_pfn)
		return COMPACT_COMPLETE;

	/* Compaction run is not finished if the watermark is not met */
	if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
		return COMPACT_CONTINUE;

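	/*
	 * A run started via the compact_memory proc handler uses order == -1
	 * and only completes when the migrate and free scanners meet.
	 */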
	if (cc->order == -1)
		return COMPACT_CONTINUE;

	/* Direct compactor: Is a suitable page free? */
	for (order = cc->order; order < MAX_ORDER; order++) {
		/* Job done if page is free of the right migratetype */
		if (!list_empty(&zone->free_area[order].free_list[cc->migratetype]))
			return COMPACT_PARTIAL;

		/* Job done if allocation would set block type */
		if (order >= pageblock_order && zone->free_area[order].nr_free)
			return COMPACT_PARTIAL;
	}

	return COMPACT_CONTINUE;
}

static int compact_zone(struct zone *zone, struct compact_control *cc)
{
	int ret;

	/* Set up to move all movable pages to the end of the zone */
	cc->migrate_pfn = zone->zone_start_pfn;
	cc->free_pfn = cc->migrate_pfn + zone->spanned_pages;
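	/* Round the initial free scanner target down to a pageblock boundary */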
	cc->free_pfn &= ~(pageblock_nr_pages-1);

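	/* Drain this CPU's LRU pagevecs so that their pages can be isolated */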
	migrate_prep_local();

	while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
		unsigned long nr_migrate, nr_remaining;

		if (!isolate_migratepages(zone, cc))
			continue;

		nr_migrate = cc->nr_migratepages;
		migrate_pages(&cc->migratepages, compaction_alloc,
				(unsigned long)cc, 0);
		update_nr_listpages(cc);
		nr_remaining = cc->nr_migratepages;

		count_vm_event(COMPACTBLOCKS);
		count_vm_events(COMPACTPAGES, nr_migrate - nr_remaining);
		if (nr_remaining)
			count_vm_events(COMPACTPAGEFAILED, nr_remaining);
		trace_mm_compaction_migratepages(nr_migrate - nr_remaining,
						nr_remaining);

		/* Release LRU pages not migrated */
		if (!list_empty(&cc->migratepages)) {
			putback_lru_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
		}
	}

	/* Release free pages and check accounting */
	cc->nr_freepages -= release_freepages(&cc->freepages);
	VM_BUG_ON(cc->nr_freepages != 0);

	return ret;
}

static unsigned long compact_zone_order(struct zone *zone,
						int order, gfp_t gfp_mask)
{
	struct compact_control cc = {
		.nr_freepages = 0,
		.nr_migratepages = 0,
		.order = order,
		.migratetype = allocflags_to_migratetype(gfp_mask),
		.zone = zone,
	};
	INIT_LIST_HEAD(&cc.freepages);
	INIT_LIST_HEAD(&cc.migratepages);

	return compact_zone(zone, &cc);
}

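/*
 * fragmentation_index() reports on a 0-1000 scale. Zones scoring at or
 * below this threshold are failing allocations due to a lack of memory
 * rather than external fragmentation, so direct compaction skips them.
 */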
int sysctl_extfrag_threshold = 500;

/**
 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
 * @zonelist: The zonelist used for the current allocation
 * @order: The order of the current allocation
 * @gfp_mask: The GFP mask of the current allocation
 * @nodemask: The allowed nodes to allocate from
 *
 * This is the main entry point for direct page compaction.
 */
unsigned long try_to_compact_pages(struct zonelist *zonelist,
			int order, gfp_t gfp_mask, nodemask_t *nodemask)
{
	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
	int may_enter_fs = gfp_mask & __GFP_FS;
	int may_perform_io = gfp_mask & __GFP_IO;
	unsigned long watermark;
	struct zoneref *z;
	struct zone *zone;
	int rc = COMPACT_SKIPPED;

	/*
	 * Check whether it is worth even starting compaction. The order check
	 * is made because we assume the page allocator can satisfy the
	 * "cheaper" orders without taking special steps.
	 */
	if (order <= PAGE_ALLOC_COSTLY_ORDER || !may_enter_fs || !may_perform_io)
		return rc;

	count_vm_event(COMPACTSTALL);

	/* Compact each zone in the list */
	for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
								nodemask) {
		int fragindex;
		int status;

		/*
		 * Watermarks for order-0 must be met for compaction. Note
		 * the 2UL. This is because during migration, copies of
		 * pages need to be allocated and for a short time, the
		 * footprint is higher
		 */
		watermark = low_wmark_pages(zone) + (2UL << order);
		if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
			continue;

		/*
		 * fragmentation index determines if allocation failures are
		 * due to low memory or external fragmentation
		 *
		 * index of -1 implies allocations might succeed depending
		 *	on watermarks
		 * index towards 0 implies failure is due to lack of memory
		 * index towards 1000 implies failure is due to fragmentation
		 *
		 * Only compact if a failure would be due to fragmentation.
		 */
		fragindex = fragmentation_index(zone, order);
		if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
			continue;

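		/*
		 * An index of -1 with the watermark met means the allocation
		 * should already succeed without compacting this zone.
		 */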
		if (fragindex == -1 && zone_watermark_ok(zone, order, watermark, 0, 0)) {
			rc = COMPACT_PARTIAL;
			break;
		}

		status = compact_zone_order(zone, order, gfp_mask);
		rc = max(status, rc);

		if (zone_watermark_ok(zone, order, watermark, 0, 0))
			break;
	}

	return rc;
}

/* Compact all zones within a node */
static int compact_node(int nid)
{
	int zoneid;
	pg_data_t *pgdat;
	struct zone *zone;

	if (nid < 0 || nid >= nr_node_ids || !node_online(nid))
		return -EINVAL;
	pgdat = NODE_DATA(nid);

	/* Flush pending updates to the LRU lists */
	lru_add_drain_all();

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
		struct compact_control cc = {
			.nr_freepages = 0,
			.nr_migratepages = 0,
			.order = -1,
		};

		zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		cc.zone = zone;
		INIT_LIST_HEAD(&cc.freepages);
		INIT_LIST_HEAD(&cc.migratepages);

		compact_zone(zone, &cc);

		VM_BUG_ON(!list_empty(&cc.freepages));
		VM_BUG_ON(!list_empty(&cc.migratepages));
	}

	return 0;
}

/* Compact all nodes in the system */
static int compact_nodes(void)
{
	int nid;

	for_each_online_node(nid)
		compact_node(nid);

	return COMPACT_COMPLETE;
}

/* The written value is actually unused, all memory is compacted */
int sysctl_compact_memory;

/* This is the entry point for compacting all nodes via /proc/sys/vm */
int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	if (write)
		return compact_nodes();

	return 0;
}

int sysctl_extfrag_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_minmax(table, write, buffer, length, ppos);

	return 0;
}

#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
ssize_t sysfs_compact_node(struct sys_device *dev,
			struct sysdev_attribute *attr,
			const char *buf, size_t count)
{
	compact_node(dev->id);

	return count;
}
static SYSDEV_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);

int compaction_register_node(struct node *node)
{
	return sysdev_create_file(&node->sysdev, &attr_compact);
}

void compaction_unregister_node(struct node *node)
{
	return sysdev_remove_file(&node->sysdev, &attr_compact);
}
#endif /* CONFIG_SYSFS && CONFIG_NUMA */