#ifndef _LINUX_COMPACTION_H
#define _LINUX_COMPACTION_H

/* Return values for compact_zone() and try_to_compact_pages() */
/* compaction didn't start as it was deferred due to past failures */
#define COMPACT_DEFERRED	0
/* compaction didn't start as it was not possible or direct reclaim was more suitable */
#define COMPACT_SKIPPED		1
/* compaction should continue to another pageblock */
#define COMPACT_CONTINUE	2
/* direct compaction partially compacted a zone and there are suitable pages */
#define COMPACT_PARTIAL		3
/* The full zone was compacted */
#define COMPACT_COMPLETE	4
/* For more detailed tracepoint output */
#define COMPACT_NO_SUITABLE_PAGE	5
#define COMPACT_NOT_SUITABLE_ZONE	6
/* When adding new state, please change compaction_status_string, too */

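/*
 * Illustrative sketch (not part of this header): how a direct-compaction
 * caller might branch on these codes. The variable names are hypothetical
 * and the annotations in parentheses are plain prose, not code.
 *
 *	status = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
 *				      mode, &contended);
 *	if (status == COMPACT_PARTIAL)
 *		retry the allocation	(a suitable page is likely free now)
 *	else if (status == COMPACT_SKIPPED)
 *		run direct reclaim	(compaction could not help yet)
 */
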
/* Used to signal whether compaction detected need_sched() or lock contention */
/* No contention detected */
#define COMPACT_CONTENDED_NONE	0
/* Either need_sched() was true or fatal signal pending */
#define COMPACT_CONTENDED_SCHED	1
/* Zone lock or lru_lock was contended in async compaction */
#define COMPACT_CONTENDED_LOCK	2

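/*
 * Hedged sketch: the caller passes &contended into try_to_compact_pages()
 * and inspects it when an async attempt aborts early. The locals and the
 * prose on the right-hand lines are illustrative only.
 *
 *	int contended = COMPACT_CONTENDED_NONE;
 *
 *	try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
 *			     MIGRATE_ASYNC, &contended);
 *	if (contended == COMPACT_CONTENDED_SCHED)
 *		rescheduling was needed or a fatal signal is pending
 *	else if (contended == COMPACT_CONTENDED_LOCK)
 *		zone->lock or lru_lock was busy; back off from async work
 */
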
struct alloc_context; /* in mm/internal.h */

#ifdef CONFIG_COMPACTION
extern int sysctl_compact_memory;
extern int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos);
extern int sysctl_extfrag_threshold;
extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos);

extern int fragmentation_index(struct zone *zone, unsigned int order);
extern unsigned long try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
			int alloc_flags, const struct alloc_context *ac,
			enum migrate_mode mode, int *contended);
extern void compact_pgdat(pg_data_t *pgdat, int order);
extern void reset_isolation_suitable(pg_data_t *pgdat);
extern unsigned long compaction_suitable(struct zone *zone, int order,
					int alloc_flags, int classzone_idx);

/* Do not skip compaction more than 64 times */
#define COMPACT_MAX_DEFER_SHIFT 6

/*
 * Compaction is deferred when compaction fails to result in a page
 * allocation success. 1 << compact_defer_shift compactions are skipped up
 * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT.
 */
static inline void defer_compaction(struct zone *zone, int order)
{
	zone->compact_considered = 0;
	zone->compact_defer_shift++;

	if (order < zone->compact_order_failed)
		zone->compact_order_failed = order;

	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
}
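
/*
 * Worked example (derived from defer_compaction() above, not upstream
 * documentation): compact_defer_shift grows by one on every failure, so
 * with shift s the next (1 << s) - 1 calls to compaction_deferred() skip
 * compaction before another attempt is allowed. The window therefore
 * doubles per failure and saturates at 1 << COMPACT_MAX_DEFER_SHIFT == 64.
 */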

/* Returns true if compaction should be skipped this time */
static inline bool compaction_deferred(struct zone *zone, int order)
{
	unsigned long defer_limit = 1UL << zone->compact_defer_shift;

	if (order < zone->compact_order_failed)
		return false;

	/* Avoid possible overflow */
	if (++zone->compact_considered > defer_limit)
		zone->compact_considered = defer_limit;

	return zone->compact_considered < defer_limit;
}
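
/*
 * Illustrative caller pattern (a sketch only; compact_zone() lives in
 * mm/compaction.c, and the control flow here is simplified prose-plus-C):
 *
 *	if (compaction_deferred(zone, order))
 *		continue;	skip this zone for now
 *	status = compact_zone(zone, &cc);
 *	if (the allocation now succeeds)
 *		compaction_defer_reset(zone, order, true);
 *	else if (status == COMPACT_COMPLETE)
 *		defer_compaction(zone, order);	whole zone scanned, no luck
 */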

/*
 * Update defer tracking counters after successful compaction of given order,
 * which means an allocation either succeeded (alloc_success == true) or is
 * expected to succeed.
 */
static inline void compaction_defer_reset(struct zone *zone, int order,
		bool alloc_success)
{
	if (alloc_success) {
		zone->compact_considered = 0;
		zone->compact_defer_shift = 0;
	}
	if (order >= zone->compact_order_failed)
		zone->compact_order_failed = order + 1;
}

/* Returns true if restarting compaction after many failures */
static inline bool compaction_restarting(struct zone *zone, int order)
{
	if (order < zone->compact_order_failed)
		return false;

	return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
		zone->compact_considered >= 1UL << zone->compact_defer_shift;
}
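
/*
 * Numeric illustration (derived from the helpers above): restarting is
 * reported only in the fully saturated state, i.e. compact_defer_shift ==
 * COMPACT_MAX_DEFER_SHIFT (6) and compact_considered >= 64, meaning the
 * longest backoff window has fully elapsed.
 */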

#else
static inline unsigned long try_to_compact_pages(gfp_t gfp_mask,
			unsigned int order, int alloc_flags,
			const struct alloc_context *ac,
			enum migrate_mode mode, int *contended)
{
	return COMPACT_CONTINUE;
}

static inline void compact_pgdat(pg_data_t *pgdat, int order)
{
}

static inline void reset_isolation_suitable(pg_data_t *pgdat)
{
}

static inline unsigned long compaction_suitable(struct zone *zone, int order,
					int alloc_flags, int classzone_idx)
{
	return COMPACT_SKIPPED;
}

static inline void defer_compaction(struct zone *zone, int order)
{
}

static inline bool compaction_deferred(struct zone *zone, int order)
{
	return true;
}

#endif /* CONFIG_COMPACTION */

#if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
extern int compaction_register_node(struct node *node);
extern void compaction_unregister_node(struct node *node);

#else

static inline int compaction_register_node(struct node *node)
{
	return 0;
}

static inline void compaction_unregister_node(struct node *node)
{
}
#endif /* CONFIG_COMPACTION && CONFIG_SYSFS && CONFIG_NUMA */

#endif /* _LINUX_COMPACTION_H */