#ifndef _LINUX_COMPACTION_H
#define _LINUX_COMPACTION_H

/* Return values for compact_zone() and try_to_compact_pages() */
/* compaction didn't start as it was deferred due to past failures */
#define COMPACT_DEFERRED	0
/* compaction didn't start as it was not possible or direct reclaim was more suitable */
#define COMPACT_SKIPPED		1
/* compaction should continue to another pageblock */
#define COMPACT_CONTINUE	2
/* direct compaction partially compacted a zone and there are suitable pages */
#define COMPACT_PARTIAL		3
/* The full zone was compacted */
#define COMPACT_COMPLETE	4
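
/*
 * Illustrative sketch, not part of the original header: a direct-compaction
 * caller (e.g. the page allocator) is expected to branch on these values
 * roughly as follows; the local names below are hypothetical.
 *
 *	status = try_to_compact_pages(zonelist, order, gfp_mask, nodemask,
 *				      migration_mode, &contended,
 *				      &candidate_zone);
 *	if (status == COMPACT_PARTIAL)
 *		... retry the allocation, preferring candidate_zone ...
 *	else if (status == COMPACT_DEFERRED || status == COMPACT_SKIPPED)
 *		... compaction never ran; fall back to direct reclaim ...
 */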

/* Used to signal whether compaction detected need_sched() or lock contention */
/* No contention detected */
#define COMPACT_CONTENDED_NONE	0
/* Either need_sched() was true or fatal signal pending */
#define COMPACT_CONTENDED_SCHED	1
/* Zone lock or lru_lock was contended in async compaction */
#define COMPACT_CONTENDED_LOCK	2

#ifdef CONFIG_COMPACTION
extern int sysctl_compact_memory;
extern int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos);
extern int sysctl_extfrag_threshold;
extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos);
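
/*
 * Illustrative note, not part of the original header: the handlers above
 * back the /proc/sys/vm/compact_memory and /proc/sys/vm/extfrag_threshold
 * sysctls, e.g.:
 *
 *	echo 1 > /proc/sys/vm/compact_memory
 *
 * triggers compaction of all zones on all nodes.
 */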
extern int fragmentation_index(struct zone *zone, unsigned int order);
extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
			int order, gfp_t gfp_mask, nodemask_t *mask,
			enum migrate_mode mode, int *contended,
			struct zone **candidate_zone);
extern void compact_pgdat(pg_data_t *pgdat, int order);
extern void reset_isolation_suitable(pg_data_t *pgdat);
extern unsigned long compaction_suitable(struct zone *zone, int order);

/* Do not skip compaction more than 64 times */
#define COMPACT_MAX_DEFER_SHIFT 6

/*
 * Compaction is deferred when compaction fails to result in a page
 * allocation success. 1 << compact_defer_shift compactions are skipped up
 * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT.
 */
static inline void defer_compaction(struct zone *zone, int order)
{
	zone->compact_considered = 0;
	zone->compact_defer_shift++;

	if (order < zone->compact_order_failed)
		zone->compact_order_failed = order;

	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
}

/* Returns true if compaction should be skipped this time */
static inline bool compaction_deferred(struct zone *zone, int order)
{
	unsigned long defer_limit = 1UL << zone->compact_defer_shift;

	if (order < zone->compact_order_failed)
		return false;

	/* Avoid possible overflow */
	if (++zone->compact_considered > defer_limit)
		zone->compact_considered = defer_limit;

	return zone->compact_considered < defer_limit;
}
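
/*
 * Worked example, not part of the original header: after a third
 * consecutive failure at a given order, defer_compaction() has raised
 * compact_defer_shift to 3 and reset compact_considered to 0, so
 * compaction_deferred() short-circuits attempts at or above that order
 * until compact_considered reaches 1 << 3 == 8; once compact_defer_shift
 * is saturated at COMPACT_MAX_DEFER_SHIFT, at most 1 << 6 == 64 attempts
 * are skipped between tries.
 */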

/*
 * Update defer tracking counters after successful compaction of given order,
 * which means an allocation either succeeded (alloc_success == true) or is
 * expected to succeed.
 */
static inline void compaction_defer_reset(struct zone *zone, int order,
		bool alloc_success)
{
	if (alloc_success) {
		zone->compact_considered = 0;
		zone->compact_defer_shift = 0;
	}
	if (order >= zone->compact_order_failed)
		zone->compact_order_failed = order + 1;
}
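
/*
 * Illustrative example, not part of the original header: if an order-5
 * allocation succeeds after compaction, compact_order_failed becomes 6,
 * so compaction_deferred() no longer short-circuits order-5 requests;
 * when alloc_success is true the defer counters are cleared as well.
 */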

/* Returns true if restarting compaction after many failures */
static inline bool compaction_restarting(struct zone *zone, int order)
{
	if (order < zone->compact_order_failed)
		return false;

	return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
		zone->compact_considered >= 1UL << zone->compact_defer_shift;
}
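
/*
 * Illustrative note, not part of the original header: this only becomes
 * true once deferral is saturated, i.e. compact_defer_shift == 6 and at
 * least 1 << 6 == 64 attempts have been counted, the point at which
 * compaction may discard cached pageblock-skip state (see
 * reset_isolation_suitable() above).
 */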

#else
static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
			int order, gfp_t gfp_mask, nodemask_t *nodemask,
			enum migrate_mode mode, int *contended,
			struct zone **candidate_zone)
{
	return COMPACT_CONTINUE;
}

static inline void compact_pgdat(pg_data_t *pgdat, int order)
{
}

static inline void reset_isolation_suitable(pg_data_t *pgdat)
{
}

static inline unsigned long compaction_suitable(struct zone *zone, int order)
{
	return COMPACT_SKIPPED;
}

static inline void defer_compaction(struct zone *zone, int order)
{
}

static inline bool compaction_deferred(struct zone *zone, int order)
{
	return true;
}

#endif /* CONFIG_COMPACTION */

#if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
extern int compaction_register_node(struct node *node);
extern void compaction_unregister_node(struct node *node);

#else

static inline int compaction_register_node(struct node *node)
{
	return 0;
}

static inline void compaction_unregister_node(struct node *node)
{
}
#endif /* CONFIG_COMPACTION && CONFIG_SYSFS && CONFIG_NUMA */

#endif /* _LINUX_COMPACTION_H */