#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <asm/atomic.h>

#ifdef CONFIG_ZONE_DMA
#define DMA_ZONE(xx) xx##_DMA,
#else
#define DMA_ZONE(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define DMA32_ZONE(xx) xx##_DMA32,
#else
#define DMA32_ZONE(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define HIGHMEM_ZONE(xx) , xx##_HIGH
#else
#define HIGHMEM_ZONE(xx)
#endif

#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE
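/*
 * For illustration: with CONFIG_ZONE_DMA, CONFIG_ZONE_DMA32 and
 * CONFIG_HIGHMEM all enabled, FOR_ALL_ZONES(PGALLOC) expands to
 *
 *	PGALLOC_DMA, PGALLOC_DMA32, PGALLOC_NORMAL , PGALLOC_HIGH , PGALLOC_MOVABLE
 *
 * i.e. one item per zone type, in zone-index order. The commas carried
 * inside the helper macros keep the expansion a valid enumerator list
 * whichever zones are configured out.
 */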

enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
		FOR_ALL_ZONES(PGALLOC),
		PGFREE, PGACTIVATE, PGDEACTIVATE,
		PGFAULT, PGMAJFAULT,
		FOR_ALL_ZONES(PGREFILL),
		FOR_ALL_ZONES(PGSTEAL),
		FOR_ALL_ZONES(PGSCAN_KSWAPD),
		FOR_ALL_ZONES(PGSCAN_DIRECT),
#ifdef CONFIG_NUMA
		PGSCAN_ZONE_RECLAIM_FAILED,
#endif
		PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
		KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
		KSWAPD_SKIP_CONGESTION_WAIT,
		PAGEOUTRUN, ALLOCSTALL, PGROTATED,
#ifdef CONFIG_COMPACTION
		COMPACTBLOCKS, COMPACTPAGES, COMPACTPAGEFAILED,
		COMPACTSTALL, COMPACTFAIL, COMPACTSUCCESS,
#endif
#ifdef CONFIG_HUGETLB_PAGE
		HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
#endif
		UNEVICTABLE_PGCULLED,	/* culled to noreclaim list */
		UNEVICTABLE_PGSCANNED,	/* scanned for reclaimability */
		UNEVICTABLE_PGRESCUED,	/* rescued from noreclaim list */
		UNEVICTABLE_PGMLOCKED,
		UNEVICTABLE_PGMUNLOCKED,
		UNEVICTABLE_PGCLEARED,	/* on COW, page truncate */
		UNEVICTABLE_PGSTRANDED,	/* unable to isolate on unlock */
		UNEVICTABLE_MLOCKFREED,
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		THP_FAULT_ALLOC,
		THP_FAULT_FALLBACK,
		THP_COLLAPSE_ALLOC,
		THP_COLLAPSE_ALLOC_FAILED,
		THP_SPLIT,
#endif
		NR_VM_EVENT_ITEMS
};
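/*
 * A note for readers: when CONFIG_VM_EVENT_COUNTERS is enabled these
 * items are reported through /proc/vmstat under lowercased names
 * ("pgpgin", "pgmajfault", ...); the string table lives in
 * mm/vmstat.c.
 */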

extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-cpu counter implementation.
 *
 * Counters should only ever be incremented, and no critical kernel
 * component should rely on their values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated is simply the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

static inline void __count_vm_event(enum vm_event_item item)
{
	__this_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	__this_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}
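/*
 * Usage sketch: count_vm_event() is safe from any context because
 * this_cpu_inc() guards against migration between CPUs itself. The
 * __count_vm_event() variant is cheaper, but the caller must already
 * be pinned to a CPU. my_scan_step() below is a hypothetical caller,
 * shown only to illustrate the calling convention:
 *
 *	static void my_scan_step(void)
 *	{
 *		count_vm_event(PGACTIVATE);		// safe anywhere
 *
 *		preempt_disable();
 *		__count_vm_event(PGDEACTIVATE);		// ok: cannot migrate
 *		preempt_enable();
 *	}
 */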

extern void all_vm_events(unsigned long *);
#ifdef CONFIG_HOTPLUG
extern void vm_events_fold_cpu(int cpu);
#else
static inline void vm_events_fold_cpu(int cpu)
{
}
#endif

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#define __count_zone_vm_events(item, zone, delta) \
		__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
		zone_idx(zone), delta)
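/*
 * How the index arithmetic works: the FOR_ALL_ZONES() event items are
 * declared in the same order as the zone indices, so
 * item##_NORMAL - ZONE_NORMAL is the start of the item's per-zone
 * block and zone_idx(zone) selects the entry for this zone. For
 * example (highmem_zone being a hypothetical struct zone * for the
 * highmem zone):
 *
 *	__count_zone_vm_events(PGALLOC, highmem_zone, 1)
 * resolves to
 *	__count_vm_events(PGALLOC_HIGH, 1)
 */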

/*
 * Zone-based page accounting with per-cpu differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_stat[item]);
}

static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}
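/*
 * A note on the "x < 0" clamps above: on SMP the per-cpu differentials
 * are folded into the atomic counters only periodically, so a reader
 * can observe a transiently negative sum. Clamping to zero hides that
 * underflow from callers.
 */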

/*
 * A more accurate version that also considers the currently pending
 * per-cpu deltas, which requires looping over all online cpus. There
 * is no synchronization, so the result still cannot be exactly
 * accurate.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}
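/*
 * Illustration with made-up numbers: if a zone's atomic counter reads
 * 3 while two CPUs still hold pending vm_stat_diff entries of +2 and
 * -1, zone_page_state() returns 3 but zone_page_state_snapshot()
 * returns 4. The snapshot form is meant for callers, such as the
 * watermark checks, where per-cpu drift could flip a decision.
 */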

extern unsigned long global_reclaimable_pages(void);
extern unsigned long zone_reclaimable_pages(struct zone *zone);

#ifdef CONFIG_NUMA
/*
 * Determine the per-node value of a stat item. This function is
 * called frequently on NUMA machines, so keep it as frugal as
 * possible.
 */
static inline unsigned long node_page_state(int node,
				 enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;

	return
#ifdef CONFIG_ZONE_DMA
		zone_page_state(&zones[ZONE_DMA], item) +
#endif
#ifdef CONFIG_ZONE_DMA32
		zone_page_state(&zones[ZONE_DMA32], item) +
#endif
#ifdef CONFIG_HIGHMEM
		zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], item) +
		zone_page_state(&zones[ZONE_MOVABLE], item);
}
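/*
 * Usage sketch: a per-node total is just the sum over that node's
 * configured zones, e.g.
 *
 *	unsigned long free = node_page_state(numa_node_id(), NR_FREE_PAGES);
 *
 * Zones compiled out contribute nothing, matching the #ifdef chain
 * above.
 */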

extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl, _z, gfp) do { } while (0)

#endif /* CONFIG_NUMA */

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

static inline void zap_zone_vm_stats(struct zone *zone)
{
	memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
}

extern void inc_zone_state(struct zone *, enum zone_stat_item);

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);

void refresh_cpu_vm_stats(int);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
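/*
 * Usage sketch (the real call sites are in mm/vmscan.c; shown here
 * only to illustrate the callback parameter): kswapd lowers the
 * per-cpu stat thresholds while a node is under reclaim pressure and
 * restores them afterwards:
 *
 *	set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
 *	... reclaim ...
 *	set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
 */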
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single-processor
 * configuration; the functions directly modify the zone and global
 * counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, int delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

/*
 * Only atomic operations are used to update counters, so there is no
 * need to disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state
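/*
 * Usage sketch (hypothetical caller): freeing a high-order block might
 * bump the zone's free page count with
 *
 *	__mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
 *
 * On UP the locked and unlocked forms are aliases, as above; on SMP
 * the __ forms (declared earlier) require the caller to keep
 * interrupts disabled.
 */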

#define set_pgdat_percpu_threshold(pgdat, callback) { }

static inline void refresh_cpu_vm_stats(int cpu) { }
#endif

#endif /* _LINUX_VMSTAT_H */