#ifndef _LINUX_COMPACTION_H
#define _LINUX_COMPACTION_H

/* Return values for compact_zone() and try_to_compact_pages() */
/* compaction didn't start as it was deferred due to past failures */
#define COMPACT_DEFERRED	0
/* compaction didn't start as it was not possible or direct reclaim was more suitable */
#define COMPACT_SKIPPED		1
/* compaction should continue to another pageblock */
#define COMPACT_CONTINUE	2
/* direct compaction partially compacted a zone and there are suitable pages */
#define COMPACT_PARTIAL		3
/* The full zone was compacted */
#define COMPACT_COMPLETE	4
/* For more detailed tracepoint output */
#define COMPACT_NO_SUITABLE_PAGE	5
#define COMPACT_NOT_SUITABLE_ZONE	6
#define COMPACT_CONTENDED		7
/* When adding new states, please adjust include/trace/events/compaction.h */
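/*
 * Example (an illustrative sketch, not lifted from the kernel source; the
 * variables and the fallback labels are hypothetical): a direct-compaction
 * caller falls back to reclaim when compaction was deferred or skipped, and
 * retries the allocation once a zone was at least partially compacted.
 *
 *	int contended;
 *	unsigned long rc;
 *
 *	rc = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
 *				  MIGRATE_ASYNC, &contended);
 *	if (rc == COMPACT_DEFERRED || rc == COMPACT_SKIPPED)
 *		goto try_direct_reclaim;
 *	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
 */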

/* Used to signal whether compaction detected need_resched() or lock contention */
/* No contention detected */
#define COMPACT_CONTENDED_NONE	0
/* Either need_resched() was true or a fatal signal was pending */
#define COMPACT_CONTENDED_SCHED	1
/* Zone lock or lru_lock was contended in async compaction */
#define COMPACT_CONTENDED_LOCK	2
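/*
 * Example (an illustrative sketch, loosely modeled on how the page
 * allocator's slow path treats THP faults; the variable name is
 * hypothetical): the *contended output of try_to_compact_pages() tells the
 * caller why async compaction backed off, so it can decide whether pressing
 * on is worthwhile.
 *
 *	if (contended_compaction == COMPACT_CONTENDED_LOCK)
 *		goto nopage;
 *	if (contended_compaction == COMPACT_CONTENDED_SCHED &&
 *	    !(current->flags & PF_KTHREAD))
 *		goto nopage;
 *
 * Lock contention means the system is busy enough that disrupting it
 * further for a high-order allocation is not justified; need_resched()
 * means continuing would only add allocation latency.
 */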

struct alloc_context; /* in mm/internal.h */

#ifdef CONFIG_COMPACTION
extern int sysctl_compact_memory;
extern int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos);
extern int sysctl_extfrag_threshold;
extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos);
extern int sysctl_compact_unevictable_allowed;
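/*
 * The sysctls above are exposed under /proc/sys/vm/ (see
 * Documentation/sysctl/vm.txt). For example, writing any value to
 * compact_memory compacts all zones system-wide:
 *
 *	echo 1 > /proc/sys/vm/compact_memory
 *
 * extfrag_threshold tunes where the allocator prefers compaction over
 * direct reclaim, and compact_unevictable_allowed controls whether
 * compaction may examine and migrate unevictable (e.g. mlocked) pages.
 */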

extern int fragmentation_index(struct zone *zone, unsigned int order);
extern unsigned long try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
			int alloc_flags, const struct alloc_context *ac,
			enum migrate_mode mode, int *contended);
extern void compact_pgdat(pg_data_t *pgdat, int order);
extern void reset_isolation_suitable(pg_data_t *pgdat);
extern unsigned long compaction_suitable(struct zone *zone, int order,
					int alloc_flags, int classzone_idx);
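/*
 * Example (an illustrative check; the surrounding caller is hypothetical):
 * reclaim-side code asks compaction_suitable() whether compaction, rather
 * than more reclaim, is the right tool for a given zone and order:
 *
 *	if (compaction_suitable(zone, order, 0, classzone_idx)
 *						== COMPACT_CONTINUE)
 *		return true;
 *
 * COMPACT_SKIPPED means the zone lacks enough free base pages for
 * compaction to work with (so keep reclaiming), and COMPACT_PARTIAL means
 * watermarks are already met and the allocation should succeed as-is.
 */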

extern void defer_compaction(struct zone *zone, int order);
extern bool compaction_deferred(struct zone *zone, int order);
extern void compaction_defer_reset(struct zone *zone, int order,
				bool alloc_success);
extern bool compaction_restarting(struct zone *zone, int order);
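/*
 * Example (an illustrative sketch of the defer/reset pattern; "status" and
 * "allocation_succeeded" are hypothetical): repeated compaction failures
 * put a zone into an exponentially growing backoff window for a given
 * order, and a later success resets the counters.
 *
 *	if (compaction_deferred(zone, order))
 *		return COMPACT_DEFERRED;
 *
 *	status = compact_zone(zone, &cc);
 *	if (allocation_succeeded)
 *		compaction_defer_reset(zone, order, true);
 *	else if (status == COMPACT_COMPLETE)
 *		defer_compaction(zone, order);
 *
 * compaction_restarting() reports that a zone is being retried right after
 * the backoff window maxed out, which compaction uses as a cue to reset its
 * cached pageblock-skip information.
 */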

#else
static inline unsigned long try_to_compact_pages(gfp_t gfp_mask,
			unsigned int order, int alloc_flags,
			const struct alloc_context *ac,
			enum migrate_mode mode, int *contended)
{
	return COMPACT_CONTINUE;
}

static inline void compact_pgdat(pg_data_t *pgdat, int order)
{
}

static inline void reset_isolation_suitable(pg_data_t *pgdat)
{
}

static inline unsigned long compaction_suitable(struct zone *zone, int order,
					int alloc_flags, int classzone_idx)
{
	return COMPACT_SKIPPED;
}

static inline void defer_compaction(struct zone *zone, int order)
{
}

static inline bool compaction_deferred(struct zone *zone, int order)
{
	return true;
}

#endif /* CONFIG_COMPACTION */

#if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
extern int compaction_register_node(struct node *node);
extern void compaction_unregister_node(struct node *node);

#else

static inline int compaction_register_node(struct node *node)
{
	return 0;
}

static inline void compaction_unregister_node(struct node *node)
{
}
#endif /* CONFIG_COMPACTION && CONFIG_SYSFS && CONFIG_NUMA */

#endif /* _LINUX_COMPACTION_H */