blob: 091d72e70d8a708f55d447b8559814caa2952b1a [file] [log] [blame]
Mel Gorman748446b2010-05-24 14:32:27 -07001#ifndef _LINUX_COMPACTION_H
2#define _LINUX_COMPACTION_H
3
Mel Gorman56de7262010-05-24 14:32:30 -07004/* Return values for compact_zone() and try_to_compact_pages() */
5/* compaction didn't start as it was not possible or direct reclaim was more suitable */
6#define COMPACT_SKIPPED 0
7/* compaction should continue to another pageblock */
8#define COMPACT_CONTINUE 1
9/* direct compaction partially compacted a zone and there are suitable pages */
10#define COMPACT_PARTIAL 2
11/* The full zone was compacted */
12#define COMPACT_COMPLETE 3
Mel Gorman748446b2010-05-24 14:32:27 -070013
Mel Gorman76ab0f52010-05-24 14:32:28 -070014#ifdef CONFIG_COMPACTION
15extern int sysctl_compact_memory;
16extern int sysctl_compaction_handler(struct ctl_table *table, int write,
17 void __user *buffer, size_t *length, loff_t *ppos);
Mel Gorman5e771902010-05-24 14:32:31 -070018extern int sysctl_extfrag_threshold;
19extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
20 void __user *buffer, size_t *length, loff_t *ppos);
Mel Gorman56de7262010-05-24 14:32:30 -070021
22extern int fragmentation_index(struct zone *zone, unsigned int order);
23extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
Mel Gorman77f1fe62011-01-13 15:45:57 -080024 int order, gfp_t gfp_mask, nodemask_t *mask,
Mel Gorman47ecfcb2013-01-11 09:27:01 +000025 bool sync, bool *contended);
Andrew Morton7103f162013-02-22 16:32:33 -080026extern void compact_pgdat(pg_data_t *pgdat, int order);
Mel Gorman62997022012-10-08 16:32:47 -070027extern void reset_isolation_suitable(pg_data_t *pgdat);
Mel Gorman3e7d3442011-01-13 15:45:56 -080028extern unsigned long compaction_suitable(struct zone *zone, int order);
Mel Gorman4f92e252010-05-24 14:32:32 -070029
30/* Do not skip compaction more than 64 times */
31#define COMPACT_MAX_DEFER_SHIFT 6
32
33/*
34 * Compaction is deferred when compaction fails to result in a page
35 * allocation success. 1 << compact_defer_limit compactions are skipped up
36 * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
37 */
Rik van Rielaff62242012-03-21 16:33:52 -070038static inline void defer_compaction(struct zone *zone, int order)
Mel Gorman4f92e252010-05-24 14:32:32 -070039{
40 zone->compact_considered = 0;
41 zone->compact_defer_shift++;
42
Rik van Rielaff62242012-03-21 16:33:52 -070043 if (order < zone->compact_order_failed)
44 zone->compact_order_failed = order;
45
Mel Gorman4f92e252010-05-24 14:32:32 -070046 if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
47 zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
48}
49
50/* Returns true if compaction should be skipped this time */
Rik van Rielaff62242012-03-21 16:33:52 -070051static inline bool compaction_deferred(struct zone *zone, int order)
Mel Gorman4f92e252010-05-24 14:32:32 -070052{
53 unsigned long defer_limit = 1UL << zone->compact_defer_shift;
54
Rik van Rielaff62242012-03-21 16:33:52 -070055 if (order < zone->compact_order_failed)
56 return false;
57
Mel Gorman4f92e252010-05-24 14:32:32 -070058 /* Avoid possible overflow */
59 if (++zone->compact_considered > defer_limit)
60 zone->compact_considered = defer_limit;
61
Gavin Shanc59e2612012-07-31 16:42:49 -070062 return zone->compact_considered < defer_limit;
Mel Gorman4f92e252010-05-24 14:32:32 -070063}
64
Mel Gorman62997022012-10-08 16:32:47 -070065/* Returns true if restarting compaction after many failures */
66static inline bool compaction_restarting(struct zone *zone, int order)
67{
68 if (order < zone->compact_order_failed)
69 return false;
70
71 return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
72 zone->compact_considered >= 1UL << zone->compact_defer_shift;
73}
74
Mel Gorman56de7262010-05-24 14:32:30 -070075#else
/*
 * !CONFIG_COMPACTION stub: compaction is compiled out, so direct
 * compaction can never make progress.
 *
 * NOTE(review): this returns COMPACT_CONTINUE rather than
 * COMPACT_SKIPPED; callers appear not to take the compaction path when
 * CONFIG_COMPACTION is off, but confirm against mm/page_alloc.c.
 */
static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
			int order, gfp_t gfp_mask, nodemask_t *nodemask,
			bool sync, bool *contended)
{
	return COMPACT_CONTINUE;
}
82
/* !CONFIG_COMPACTION stub: node-wide compaction is a no-op */
static inline void compact_pgdat(pg_data_t *pgdat, int order)
{
}
86
/* !CONFIG_COMPACTION stub: no pageblock skip state to reset */
static inline void reset_isolation_suitable(pg_data_t *pgdat)
{
}
90
/*
 * !CONFIG_COMPACTION stub: report that compaction is never suitable,
 * steering callers toward reclaim instead.
 */
static inline unsigned long compaction_suitable(struct zone *zone, int order)
{
	return COMPACT_SKIPPED;
}
95
/* !CONFIG_COMPACTION stub: nothing to defer when compaction is absent */
static inline void defer_compaction(struct zone *zone, int order)
{
}
99
/*
 * !CONFIG_COMPACTION stub: always report "deferred" so callers skip
 * the (nonexistent) compaction path.
 */
static inline bool compaction_deferred(struct zone *zone, int order)
{
	return true;
}
104
Mel Gorman76ab0f52010-05-24 14:32:28 -0700105#endif /* CONFIG_COMPACTION */
106
Mel Gormaned4a6d72010-05-24 14:32:29 -0700107#if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
108extern int compaction_register_node(struct node *node);
109extern void compaction_unregister_node(struct node *node);
110
111#else
112
/* Stub when sysfs/NUMA compaction knobs are compiled out: report success */
static inline int compaction_register_node(struct node *node)
{
	return 0;
}
117
/* Stub when sysfs/NUMA compaction knobs are compiled out: no-op */
static inline void compaction_unregister_node(struct node *node)
{
}
121#endif /* CONFIG_COMPACTION && CONFIG_SYSFS && CONFIG_NUMA */
122
Mel Gorman748446b2010-05-24 14:32:27 -0700123#endif /* _LINUX_COMPACTION_H */