#ifndef _LINUX_COMPACTION_H
#define _LINUX_COMPACTION_H

/* Return values for compact_zone() and try_to_compact_pages() */
/* compaction didn't start as it was deferred due to past failures */
#define COMPACT_DEFERRED	0
/* compaction didn't start as it was not possible or direct reclaim was more suitable */
#define COMPACT_SKIPPED		1
/* compaction should continue to another pageblock */
#define COMPACT_CONTINUE	2
/* direct compaction partially compacted a zone and there are suitable pages */
#define COMPACT_PARTIAL		3
/* The full zone was compacted */
#define COMPACT_COMPLETE	4
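
/*
 * Illustrative sketch, not taken from this header: a direct-compaction
 * caller can treat the values above as an ordered scale, e.g.
 * (hypothetical caller code):
 *
 *	rc = try_to_compact_pages(zonelist, order, gfp_mask, nodemask,
 *				  MIGRATE_ASYNC, &contended, &zone);
 *
 * COMPACT_DEFERRED and COMPACT_SKIPPED mean no compaction work was done,
 * so the caller falls back to reclaim; any higher value means the zone
 * was scanned and retrying the allocation may now succeed.
 */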

/* Used to signal whether compaction detected need_resched() or lock contention */
/* No contention detected */
#define COMPACT_CONTENDED_NONE	0
/* Either need_resched() was true or fatal signal pending */
#define COMPACT_CONTENDED_SCHED	1
/* Zone lock or lru_lock was contended in async compaction */
#define COMPACT_CONTENDED_LOCK	2
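
/*
 * Illustrative note, assuming caller behaviour outside this header:
 * try_to_compact_pages() reports the contention cause through its
 * "int *contended" argument.  COMPACT_CONTENDED_SCHED only means the task
 * had to reschedule (or a fatal signal is pending), so a hypothetical
 * caller would back off from further async attempts only on real lock
 * contention:
 *
 *	int contended = COMPACT_CONTENDED_NONE;
 *	try_to_compact_pages(zonelist, order, gfp_mask, nodemask,
 *			     MIGRATE_ASYNC, &contended, &zone);
 *	if (contended == COMPACT_CONTENDED_LOCK)
 *		(skip further async compaction for this allocation)
 */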

#ifdef CONFIG_COMPACTION
extern int sysctl_compact_memory;
extern int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos);
extern int sysctl_extfrag_threshold;
extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos);

extern int fragmentation_index(struct zone *zone, unsigned int order);
extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
			int order, gfp_t gfp_mask, nodemask_t *mask,
			enum migrate_mode mode, int *contended,
			struct zone **candidate_zone);
extern void compact_pgdat(pg_data_t *pgdat, int order);
extern void reset_isolation_suitable(pg_data_t *pgdat);
extern unsigned long compaction_suitable(struct zone *zone, int order);

/* Do not skip compaction more than 64 times */
#define COMPACT_MAX_DEFER_SHIFT 6

/*
 * Compaction is deferred when compaction fails to result in a successful
 * page allocation. 1 << compact_defer_shift compactions are skipped up
 * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
 */
static inline void defer_compaction(struct zone *zone, int order)
{
	zone->compact_considered = 0;
	zone->compact_defer_shift++;

	if (order < zone->compact_order_failed)
		zone->compact_order_failed = order;

	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
}
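
/*
 * Illustrative worked example: each call grows compact_defer_shift by one,
 * so consecutive failures at the same order back off exponentially - 2, 4,
 * 8, ... up to 1 << COMPACT_MAX_DEFER_SHIFT == 64 skipped attempts between
 * retries.  A failure at a lower order also lowers compact_order_failed,
 * since failing at a low order implies higher orders would fail too.
 */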

/* Returns true if compaction should be skipped this time */
static inline bool compaction_deferred(struct zone *zone, int order)
{
	unsigned long defer_limit = 1UL << zone->compact_defer_shift;

	if (order < zone->compact_order_failed)
		return false;

	/* Avoid possible overflow */
	if (++zone->compact_considered > defer_limit)
		zone->compact_considered = defer_limit;

	return zone->compact_considered < defer_limit;
}
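
/*
 * Illustrative worked example: after three consecutive defer_compaction()
 * calls at some order, compact_defer_shift == 3 and defer_limit == 8.  The
 * next seven compaction_deferred() calls at that order see
 * compact_considered climb from 1 to 7 and return true; the eighth caps
 * compact_considered at 8 and returns false, letting one attempt through.
 */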

/*
 * Update defer tracking counters after successful compaction of given order,
 * which means an allocation either succeeded (alloc_success == true) or is
 * expected to succeed.
 */
static inline void compaction_defer_reset(struct zone *zone, int order,
		bool alloc_success)
{
	if (alloc_success) {
		zone->compact_considered = 0;
		zone->compact_defer_shift = 0;
	}
	if (order >= zone->compact_order_failed)
		zone->compact_order_failed = order + 1;
}
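
/*
 * Illustrative example: after a successful order-9 allocation,
 * compaction_defer_reset(zone, 9, true) clears both backoff counters and
 * raises compact_order_failed to 10, so compaction_deferred() no longer
 * defers requests at order 9 or below.
 */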

/* Returns true if restarting compaction after many failures */
static inline bool compaction_restarting(struct zone *zone, int order)
{
	if (order < zone->compact_order_failed)
		return false;

	return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
		zone->compact_considered >= 1UL << zone->compact_defer_shift;
}
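
/*
 * Illustrative note: this is true only once the backoff is fully saturated,
 * i.e. compact_defer_shift has hit COMPACT_MAX_DEFER_SHIFT and all
 * 1 << 6 == 64 deferred attempts have elapsed.  The compaction core uses
 * it as a hint that cached pageblock-skip information is likely stale and
 * worth resetting before rescanning the zone.
 */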

#else
static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
			int order, gfp_t gfp_mask, nodemask_t *nodemask,
			enum migrate_mode mode, int *contended,
			struct zone **candidate_zone)
{
	return COMPACT_CONTINUE;
}

static inline void compact_pgdat(pg_data_t *pgdat, int order)
{
}

static inline void reset_isolation_suitable(pg_data_t *pgdat)
{
}

static inline unsigned long compaction_suitable(struct zone *zone, int order)
{
	return COMPACT_SKIPPED;
}

static inline void defer_compaction(struct zone *zone, int order)
{
}

static inline bool compaction_deferred(struct zone *zone, int order)
{
	return true;
}

#endif /* CONFIG_COMPACTION */

#if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
extern int compaction_register_node(struct node *node);
extern void compaction_unregister_node(struct node *node);

#else

static inline int compaction_register_node(struct node *node)
{
	return 0;
}

static inline void compaction_unregister_node(struct node *node)
{
}
#endif /* CONFIG_COMPACTION && CONFIG_SYSFS && CONFIG_NUMA */

#endif /* _LINUX_COMPACTION_H */