/*
 *  linux/mm/vmstat.c
 *
 *  Manages VM statistics
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  zoned VM statistics
 *  Copyright (C) 2006 Silicon Graphics, Inc.,
 *		Christoph Lameter <christoph@lameter.com>
 *  Copyright (C) 2008-2014 Christoph Lameter
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/vmstat.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/math64.h>
#include <linux/writeback.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/page_ext.h>
#include <linux/page_owner.h>

#include "internal.h"

#ifdef CONFIG_VM_EVENT_COUNTERS
DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
EXPORT_PER_CPU_SYMBOL(vm_event_states);

static void sum_vm_events(unsigned long *ret)
{
	int cpu;
	int i;

	memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));

	for_each_online_cpu(cpu) {
		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);

		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
			ret[i] += this->event[i];
	}
}

/*
 * Accumulate the vm event counters across all CPUs.
 * The result is unavoidably approximate - it can change
 * during and after execution of this function.
 */
void all_vm_events(unsigned long *ret)
{
	get_online_cpus();
	sum_vm_events(ret);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(all_vm_events);

/*
 * Fold the foreign cpu events into our own.
 *
 * This is adding to the events on one processor
 * but keeps the global counts constant.
 */
void vm_events_fold_cpu(int cpu)
{
	struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
	int i;

	for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
		count_vm_events(i, fold_state->event[i]);
		fold_state->event[i] = 0;
	}
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

/*
 * Manage combined zone based / global counters
 *
 * vm_stat contains the global counters
 */
atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
EXPORT_SYMBOL(vm_stat);

#ifdef CONFIG_SMP

int calculate_pressure_threshold(struct zone *zone)
{
	int threshold;
	int watermark_distance;

	/*
	 * As vmstats are not up to date, there is drift between the estimated
	 * and real values. For high thresholds and a high number of CPUs, it
	 * is possible for the min watermark to be breached while the estimated
	 * value looks fine. The pressure threshold is a reduced value such
	 * that even the maximum amount of drift will not accidentally breach
	 * the min watermark.
	 */
	watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone);
	threshold = max(1, (int)(watermark_distance / num_online_cpus()));

	/*
	 * Maximum threshold is 125
	 */
	threshold = min(125, threshold);

	return threshold;
}
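
/*
 * Worked example (illustrative, not part of the original source): if a
 * zone's low and min watermarks are 1024 and 768 pages and 8 CPUs are
 * online, watermark_distance = 256 and the threshold becomes
 * max(1, 256 / 8) = 32, so the total drift across all CPUs (8 * 32 = 256)
 * is bounded by the low-min gap.
 */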

int calculate_normal_threshold(struct zone *zone)
{
	int threshold;
	int mem;	/* memory in 128 MB units */

	/*
	 * The threshold scales with the number of processors and the amount
	 * of memory per zone. More memory means that we can defer updates for
	 * longer, more processors could lead to more contention.
	 * fls() is used to have a cheap way of logarithmic scaling.
	 *
	 * Some sample thresholds:
	 *
	 * Threshold	Processors	(fls)	Zonesize	fls(mem+1)
	 * ------------------------------------------------------------------
	 * 8		1		1	0.9-1 GB	4
	 * 16		2		2	0.9-1 GB	4
	 * 20		2		2	1-2 GB		5
	 * 24		2		2	2-4 GB		6
	 * 28		2		2	4-8 GB		7
	 * 32		2		2	8-16 GB		8
	 * 4		2		2	<128M		1
	 * 30		4		3	2-4 GB		5
	 * 48		4		3	8-16 GB		8
	 * 32		8		4	1-2 GB		4
	 * 32		8		4	0.9-1GB		4
	 * 10		16		5	<128M		1
	 * 40		16		5	900M		4
	 * 70		64		7	2-4 GB		5
	 * 84		64		7	4-8 GB		6
	 * 108		512		9	4-8 GB		6
	 * 125		1024		10	8-16 GB		8
	 * 125		1024		10	16-32 GB	9
	 */

	mem = zone->managed_pages >> (27 - PAGE_SHIFT);

	threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));

	/*
	 * Maximum threshold is 125
	 */
	threshold = min(125, threshold);

	return threshold;
}
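
/*
 * Worked example (illustrative, not part of the original source): a zone
 * with 1GB of managed memory on a 4K-page system has
 * mem = 262144 >> (27 - 12) = 8 (128MB units). With 4 online CPUs:
 * threshold = 2 * fls(4) * (1 + fls(8)) = 2 * 3 * 5 = 30, below the cap.
 */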

/*
 * Refresh the thresholds for each zone.
 */
void refresh_zone_stat_thresholds(void)
{
	struct zone *zone;
	int cpu;
	int threshold;

	for_each_populated_zone(zone) {
		unsigned long max_drift, tolerate_drift;

		threshold = calculate_normal_threshold(zone);

		for_each_online_cpu(cpu)
			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
							= threshold;

		/*
		 * Only set percpu_drift_mark if there is a danger that
		 * NR_FREE_PAGES reports the low watermark is ok when in fact
		 * the min watermark could be breached by an allocation
		 */
		tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
		max_drift = num_online_cpus() * threshold;
		if (max_drift > tolerate_drift)
			zone->percpu_drift_mark = high_wmark_pages(zone) +
					max_drift;
	}
}
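
/*
 * Illustrative numbers for the drift check above: with 64 CPUs and a
 * threshold of 125, up to 64 * 125 = 8000 free pages may be hidden in
 * per-cpu diffs. If that exceeds the low-min watermark gap,
 * percpu_drift_mark is set and the watermark checks fall back to the
 * exact (but slower) zone_page_state_snapshot() near that mark.
 */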

void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *))
{
	struct zone *zone;
	int cpu;
	int threshold;
	int i;

	for (i = 0; i < pgdat->nr_zones; i++) {
		zone = &pgdat->node_zones[i];
		if (!zone->percpu_drift_mark)
			continue;

		threshold = (*calculate_pressure)(zone);
		for_each_online_cpu(cpu)
			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
							= threshold;
	}
}

/*
 * For use when we know that interrupts are disabled,
 * or when we know that preemption is disabled and that
 * particular counter cannot be updated from interrupt context.
 */
void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
			   int delta)
{
	struct per_cpu_pageset __percpu *pcp = zone->pageset;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	long x;
	long t;

	x = delta + __this_cpu_read(*p);

	t = __this_cpu_read(pcp->stat_threshold);

	if (unlikely(x > t || x < -t)) {
		zone_page_state_add(x, zone, item);
		x = 0;
	}
	__this_cpu_write(*p, x);
}
EXPORT_SYMBOL(__mod_zone_page_state);
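
/*
 * Fold example (illustrative): with stat_threshold == 32 and a per-cpu
 * diff of 30, __mod_zone_page_state(zone, item, 5) computes x = 35 > t,
 * so 35 is folded into the zone and global counters at once and the
 * per-cpu diff is reset to 0.
 */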

/*
 * Optimized increment and decrement functions.
 *
 * These are only for a single page and therefore can take a struct page *
 * argument instead of struct zone *. This allows the inclusion of the code
 * generated for page_zone(page) into the optimized functions.
 *
 * No overflow check is necessary and therefore the differential can be
 * incremented or decremented in place which may allow the compilers to
 * generate better code.
 * The increment or decrement is known and therefore one boundary check can
 * be omitted.
 *
 * NOTE: These functions are very performance sensitive. Change only
 * with care.
 *
 * Some processors have inc/dec instructions that are atomic vs an interrupt.
 * However, the code must first determine the differential location in a zone
 * based on the processor number and then inc/dec the counter. There is no
 * guarantee without disabling preemption that the processor will not change
 * in between and therefore the atomicity vs. interrupt cannot be exploited
 * in a useful way here.
 */
void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	struct per_cpu_pageset __percpu *pcp = zone->pageset;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	s8 v, t;

	v = __this_cpu_inc_return(*p);
	t = __this_cpu_read(pcp->stat_threshold);
	if (unlikely(v > t)) {
		s8 overstep = t >> 1;

		zone_page_state_add(v + overstep, zone, item);
		__this_cpu_write(*p, -overstep);
	}
}
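
/*
 * Overstep example (illustrative): with t == 32, the increment that takes
 * the per-cpu diff to 33 folds 33 + 16 = 49 into the zone counter and
 * leaves the diff at -overstep = -16, so about 1.5 * t further increments
 * can be absorbed locally before the next fold.
 */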

void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__inc_zone_page_state);
285
Christoph Lameterc8785382007-02-10 01:43:01 -0800286void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
Christoph Lameter2244b952006-06-30 01:55:33 -0700287{
Christoph Lameter12938a92010-12-06 11:16:20 -0600288 struct per_cpu_pageset __percpu *pcp = zone->pageset;
289 s8 __percpu *p = pcp->vm_stat_diff + item;
290 s8 v, t;
Christoph Lameter2244b952006-06-30 01:55:33 -0700291
Christoph Lameter908ee0f2010-12-06 11:40:02 -0600292 v = __this_cpu_dec_return(*p);
Christoph Lameter12938a92010-12-06 11:16:20 -0600293 t = __this_cpu_read(pcp->stat_threshold);
294 if (unlikely(v < - t)) {
295 s8 overstep = t >> 1;
Christoph Lameter2244b952006-06-30 01:55:33 -0700296
Christoph Lameter12938a92010-12-06 11:16:20 -0600297 zone_page_state_add(v - overstep, zone, item);
298 __this_cpu_write(*p, overstep);
Christoph Lameter2244b952006-06-30 01:55:33 -0700299 }
300}
Christoph Lameterc8785382007-02-10 01:43:01 -0800301
302void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
303{
304 __dec_zone_state(page_zone(page), item);
305}
Christoph Lameter2244b952006-06-30 01:55:33 -0700306EXPORT_SYMBOL(__dec_zone_page_state);

#ifdef CONFIG_HAVE_CMPXCHG_LOCAL
/*
 * If we have cmpxchg_local support then we do not need to incur the overhead
 * that comes with local_irq_save/restore if we use this_cpu_cmpxchg.
 *
 * mod_state() modifies the zone counter state through atomic per cpu
 * operations.
 *
 * Overstep mode specifies how overstep should be handled:
 *        0       No overstepping
 *        1       Overstepping half of threshold
 *        -1      Overstepping minus half of threshold
 */
static inline void mod_state(struct zone *zone,
       enum zone_stat_item item, int delta, int overstep_mode)
{
	struct per_cpu_pageset __percpu *pcp = zone->pageset;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	long o, n, t, z;

	do {
		z = 0;  /* overflow to zone counters */

		/*
		 * The fetching of the stat_threshold is racy. We may apply
		 * a counter threshold to the wrong cpu if we get
		 * rescheduled while executing here. However, the next
		 * counter update will apply the threshold again and
		 * therefore bring the counter under the threshold again.
		 *
		 * Most of the time the thresholds are the same anyways
		 * for all cpus in a zone.
		 */
		t = this_cpu_read(pcp->stat_threshold);

		o = this_cpu_read(*p);
		n = delta + o;

		if (n > t || n < -t) {
			int os = overstep_mode * (t >> 1);

			/* Overflow must be added to zone counters */
			z = n + os;
			n = -os;
		}
	} while (this_cpu_cmpxchg(*p, o, n) != o);

	if (z)
		zone_page_state_add(z, zone, item);
}
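
/*
 * Why the loop above is safe without disabling interrupts (sketch, not
 * from the original source): this_cpu_cmpxchg(*p, o, n) installs n only
 * if the diff still holds the previously read value o. If an interrupt
 * updated *p in between, the cmpxchg fails and the loop re-reads and
 * recomputes, so no concurrent update is lost.
 */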

void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
					int delta)
{
	mod_state(zone, item, delta, 0);
}
EXPORT_SYMBOL(mod_zone_page_state);

void inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	mod_state(zone, item, 1, 1);
}

void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	mod_state(page_zone(page), item, 1, 1);
}
EXPORT_SYMBOL(inc_zone_page_state);

void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	mod_state(page_zone(page), item, -1, -1);
}
EXPORT_SYMBOL(dec_zone_page_state);
#else
/*
 * Use interrupt disable to serialize counter updates
 */
void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
					int delta)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_zone_page_state(zone, item, delta);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(mod_zone_page_state);

void inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_zone_state(zone, item);
	local_irq_restore(flags);
}

void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;
	struct zone *zone;

	zone = page_zone(page);
	local_irq_save(flags);
	__inc_zone_state(zone, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_zone_page_state);

void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_zone_page_state(page, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(dec_zone_page_state);
#endif


/*
 * Fold a differential into the global counters.
 * Returns the number of counters updated.
 */
static int fold_diff(int *diff)
{
	int i;
	int changes = 0;

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		if (diff[i]) {
			atomic_long_add(diff[i], &vm_stat[i]);
			changes++;
		}
	return changes;
}

/*
 * Update the zone counters for the current cpu.
 *
 * Note that refresh_cpu_vm_stats strives to only access
 * node local memory. The per cpu pagesets on remote zones are placed
 * in the memory local to the processor using that pageset. So the
 * loop over all zones will access a series of cachelines local to
 * the processor.
 *
 * The call to zone_page_state_add updates the cachelines with the
 * statistics in the remote zone struct as well as the global cachelines
 * with the global counters. These could cause remote node cache line
 * bouncing and will have to be only done when necessary.
 *
 * The function returns the number of global counters updated.
 */
static int refresh_cpu_vm_stats(void)
{
	struct zone *zone;
	int i;
	int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
	int changes = 0;

	for_each_populated_zone(zone) {
		struct per_cpu_pageset __percpu *p = zone->pageset;

		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
			int v;

			v = this_cpu_xchg(p->vm_stat_diff[i], 0);
			if (v) {
				atomic_long_add(v, &zone->vm_stat[i]);
				global_diff[i] += v;
#ifdef CONFIG_NUMA
				/* 3 seconds idle till flush */
				__this_cpu_write(p->expire, 3);
#endif
			}
		}
		cond_resched();
#ifdef CONFIG_NUMA
		/*
		 * Deal with draining the remote pageset of this
		 * processor
		 *
		 * Check if there are pages remaining in this pageset
		 * if not then there is nothing to expire.
		 */
		if (!__this_cpu_read(p->expire) ||
			       !__this_cpu_read(p->pcp.count))
			continue;

		/*
		 * We never drain zones local to this processor.
		 */
		if (zone_to_nid(zone) == numa_node_id()) {
			__this_cpu_write(p->expire, 0);
			continue;
		}

		if (__this_cpu_dec_return(p->expire))
			continue;

		if (__this_cpu_read(p->pcp.count)) {
			drain_zone_pages(zone, this_cpu_ptr(&p->pcp));
			changes++;
		}
#endif
	}
	changes += fold_diff(global_diff);
	return changes;
}
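
/*
 * Timing note (derived from the code above, intervals are approximate):
 * p->expire is reset to 3 whenever a counter update is folded and is
 * decremented on each pass, so with the default sysctl_stat_interval of
 * HZ a remote pageset is drained roughly 3 seconds after its last
 * counter activity.
 */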

/*
 * Fold the data for an offline cpu into the global array.
 * There cannot be any access by the offline cpu and therefore
 * synchronization is simplified.
 */
void cpu_vm_stats_fold(int cpu)
{
	struct zone *zone;
	int i;
	int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };

	for_each_populated_zone(zone) {
		struct per_cpu_pageset *p;

		p = per_cpu_ptr(zone->pageset, cpu);

		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
			if (p->vm_stat_diff[i]) {
				int v;

				v = p->vm_stat_diff[i];
				p->vm_stat_diff[i] = 0;
				atomic_long_add(v, &zone->vm_stat[i]);
				global_diff[i] += v;
			}
	}

	fold_diff(global_diff);
}

/*
 * this is only called if !populated_zone(zone), which implies no other users of
 * pset->vm_stat_diff[] exist.
 */
void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
{
	int i;

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		if (pset->vm_stat_diff[i]) {
			int v = pset->vm_stat_diff[i];
			pset->vm_stat_diff[i] = 0;
			atomic_long_add(v, &zone->vm_stat[i]);
			atomic_long_add(v, &vm_stat[i]);
		}
}
#endif

#ifdef CONFIG_NUMA
/*
 * zonelist = the list of zones passed to the allocator
 * z	    = the zone from which the allocation occurred.
 *
 * Must be called with interrupts disabled.
 *
 * When __GFP_OTHER_NODE is set assume the node of the preferred
 * zone is the local node. This is useful for daemons who allocate
 * memory on behalf of other processes.
 */
void zone_statistics(struct zone *preferred_zone, struct zone *z, gfp_t flags)
{
	if (z->zone_pgdat == preferred_zone->zone_pgdat) {
		__inc_zone_state(z, NUMA_HIT);
	} else {
		__inc_zone_state(z, NUMA_MISS);
		__inc_zone_state(preferred_zone, NUMA_FOREIGN);
	}
	if (z->node == ((flags & __GFP_OTHER_NODE) ?
			preferred_zone->node : numa_node_id()))
		__inc_zone_state(z, NUMA_LOCAL);
	else
		__inc_zone_state(z, NUMA_OTHER);
}

/*
 * Determine the per node value of a stat item.
 */
unsigned long node_page_state(int node, enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;

	return
#ifdef CONFIG_ZONE_DMA
		zone_page_state(&zones[ZONE_DMA], item) +
#endif
#ifdef CONFIG_ZONE_DMA32
		zone_page_state(&zones[ZONE_DMA32], item) +
#endif
#ifdef CONFIG_HIGHMEM
		zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], item) +
		zone_page_state(&zones[ZONE_MOVABLE], item);
}

#endif

#ifdef CONFIG_COMPACTION

struct contig_page_info {
	unsigned long free_pages;
	unsigned long free_blocks_total;
	unsigned long free_blocks_suitable;
};

/*
 * Calculate the number of free pages in a zone, how many contiguous
 * pages are free and how many are large enough to satisfy an allocation of
 * the target size. Note that this function makes no attempt to estimate
 * how many suitable free blocks there *might* be if MOVABLE pages were
 * migrated. Calculating that is possible, but expensive and can be
 * figured out from userspace.
 */
static void fill_contig_page_info(struct zone *zone,
				unsigned int suitable_order,
				struct contig_page_info *info)
{
	unsigned int order;

	info->free_pages = 0;
	info->free_blocks_total = 0;
	info->free_blocks_suitable = 0;

	for (order = 0; order < MAX_ORDER; order++) {
		unsigned long blocks;

		/* Count number of free blocks */
		blocks = zone->free_area[order].nr_free;
		info->free_blocks_total += blocks;

		/* Count free base pages */
		info->free_pages += blocks << order;

		/* Count the suitable free blocks */
		if (order >= suitable_order)
			info->free_blocks_suitable += blocks <<
						(order - suitable_order);
	}
}
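
/*
 * Worked example (illustrative): a zone whose free lists hold 4 order-0
 * pages and 2 order-2 blocks, queried with suitable_order == 1, yields
 * free_pages = 4 + (2 << 2) = 12, free_blocks_total = 6 and
 * free_blocks_suitable = 2 << (2 - 1) = 4, since each order-2 block can
 * be split into two order-1 blocks.
 */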

/*
 * A fragmentation index only makes sense if an allocation of a requested
 * size would fail. If that is true, the fragmentation index indicates
 * whether external fragmentation or a lack of memory was the problem.
 * The value can be used to determine if page reclaim or compaction
 * should be used.
 */
static int __fragmentation_index(unsigned int order, struct contig_page_info *info)
{
	unsigned long requested = 1UL << order;

	if (!info->free_blocks_total)
		return 0;

	/* Fragmentation index only makes sense when a request would fail */
	if (info->free_blocks_suitable)
		return -1000;

	/*
	 * Index is between 0 and 1 so return within 3 decimal places
	 *
	 * 0 => allocation would fail due to lack of memory
	 * 1 => allocation would fail due to fragmentation
	 */
	return 1000 - div_u64((1000 + div_u64(info->free_pages * 1000ULL, requested)), info->free_blocks_total);
}
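
/*
 * Worked example (illustrative): for order 3 (requested = 8 pages), 32
 * free pages scattered as 32 order-0 blocks give
 * index = 1000 - (1000 + 32000/8) / 32 = 1000 - 156 = 844. A value close
 * to 1000 means memory is plentiful but fragmented, so compaction rather
 * than reclaim is the appropriate response.
 */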

/* Same as __fragmentation_index but allocs contig_page_info on stack */
int fragmentation_index(struct zone *zone, unsigned int order)
{
	struct contig_page_info info;

	fill_contig_page_info(zone, order, &info);
	return __fragmentation_index(order, &info);
}
#endif

#if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || defined(CONFIG_NUMA)
#ifdef CONFIG_ZONE_DMA
#define TEXT_FOR_DMA(xx) xx "_dma",
#else
#define TEXT_FOR_DMA(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define TEXT_FOR_DMA32(xx) xx "_dma32",
#else
#define TEXT_FOR_DMA32(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define TEXT_FOR_HIGHMEM(xx) xx "_high",
#else
#define TEXT_FOR_HIGHMEM(xx)
#endif

#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
					TEXT_FOR_HIGHMEM(xx) xx "_movable",

const char * const vmstat_text[] = {
	/* enum zone_stat_item counters */
	"nr_free_pages",
	"nr_alloc_batch",
	"nr_inactive_anon",
	"nr_active_anon",
	"nr_inactive_file",
	"nr_active_file",
	"nr_unevictable",
	"nr_mlock",
	"nr_anon_pages",
	"nr_mapped",
	"nr_file_pages",
	"nr_dirty",
	"nr_writeback",
	"nr_slab_reclaimable",
	"nr_slab_unreclaimable",
	"nr_page_table_pages",
	"nr_kernel_stack",
	"nr_unstable",
	"nr_bounce",
	"nr_vmscan_write",
	"nr_vmscan_immediate_reclaim",
	"nr_writeback_temp",
	"nr_isolated_anon",
	"nr_isolated_file",
	"nr_shmem",
	"nr_dirtied",
	"nr_written",
	"nr_pages_scanned",

#ifdef CONFIG_NUMA
	"numa_hit",
	"numa_miss",
	"numa_foreign",
	"numa_interleave",
	"numa_local",
	"numa_other",
#endif
	"workingset_refault",
	"workingset_activate",
	"workingset_nodereclaim",
	"nr_anon_transparent_hugepages",
	"nr_free_cma",

	/* enum writeback_stat_item counters */
	"nr_dirty_threshold",
	"nr_dirty_background_threshold",

#ifdef CONFIG_VM_EVENT_COUNTERS
	/* enum vm_event_item counters */
	"pgpgin",
	"pgpgout",
	"pswpin",
	"pswpout",

	TEXTS_FOR_ZONES("pgalloc")

	"pgfree",
	"pgactivate",
	"pgdeactivate",

	"pgfault",
	"pgmajfault",

	TEXTS_FOR_ZONES("pgrefill")
	TEXTS_FOR_ZONES("pgsteal_kswapd")
	TEXTS_FOR_ZONES("pgsteal_direct")
	TEXTS_FOR_ZONES("pgscan_kswapd")
	TEXTS_FOR_ZONES("pgscan_direct")
	"pgscan_direct_throttle",

#ifdef CONFIG_NUMA
	"zone_reclaim_failed",
#endif
	"pginodesteal",
	"slabs_scanned",
	"kswapd_inodesteal",
	"kswapd_low_wmark_hit_quickly",
	"kswapd_high_wmark_hit_quickly",
	"pageoutrun",
	"allocstall",

	"pgrotated",

	"drop_pagecache",
	"drop_slab",

#ifdef CONFIG_NUMA_BALANCING
	"numa_pte_updates",
	"numa_huge_pte_updates",
	"numa_hint_faults",
	"numa_hint_faults_local",
	"numa_pages_migrated",
#endif
#ifdef CONFIG_MIGRATION
	"pgmigrate_success",
	"pgmigrate_fail",
#endif
#ifdef CONFIG_COMPACTION
	"compact_migrate_scanned",
	"compact_free_scanned",
	"compact_isolated",
	"compact_stall",
	"compact_fail",
	"compact_success",
#endif

#ifdef CONFIG_HUGETLB_PAGE
	"htlb_buddy_alloc_success",
	"htlb_buddy_alloc_fail",
#endif
	"unevictable_pgs_culled",
	"unevictable_pgs_scanned",
	"unevictable_pgs_rescued",
	"unevictable_pgs_mlocked",
	"unevictable_pgs_munlocked",
	"unevictable_pgs_cleared",
	"unevictable_pgs_stranded",

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	"thp_fault_alloc",
	"thp_fault_fallback",
	"thp_collapse_alloc",
	"thp_collapse_alloc_failed",
	"thp_split",
	"thp_zero_page_alloc",
	"thp_zero_page_alloc_failed",
#endif
#ifdef CONFIG_MEMORY_BALLOON
	"balloon_inflate",
	"balloon_deflate",
#ifdef CONFIG_BALLOON_COMPACTION
	"balloon_migrate",
#endif
#endif /* CONFIG_MEMORY_BALLOON */
#ifdef CONFIG_DEBUG_TLBFLUSH
#ifdef CONFIG_SMP
	"nr_tlb_remote_flush",
	"nr_tlb_remote_flush_received",
#endif /* CONFIG_SMP */
	"nr_tlb_local_flush_all",
	"nr_tlb_local_flush_one",
#endif /* CONFIG_DEBUG_TLBFLUSH */

#ifdef CONFIG_DEBUG_VM_VMACACHE
	"vmacache_find_calls",
	"vmacache_find_hits",
	"vmacache_full_flushes",
#endif
#endif /* CONFIG_VM_EVENT_COUNTERS */
};
#endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA */


#if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)) || \
     defined(CONFIG_PROC_FS)
static void *frag_start(struct seq_file *m, loff_t *pos)
{
	pg_data_t *pgdat;
	loff_t node = *pos;

	for (pgdat = first_online_pgdat();
	     pgdat && node;
	     pgdat = next_online_pgdat(pgdat))
		--node;

	return pgdat;
}

static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	(*pos)++;
	return next_online_pgdat(pgdat);
}

static void frag_stop(struct seq_file *m, void *arg)
{
}

/* Walk all the zones in a node and print using a callback */
static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
		void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
{
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;
	unsigned long flags;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (!populated_zone(zone))
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		print(m, pgdat, zone);
		spin_unlock_irqrestore(&zone->lock, flags);
	}
}
#endif

#ifdef CONFIG_PROC_FS
static char * const migratetype_names[MIGRATE_TYPES] = {
	"Unmovable",
	"Movable",
	"Reclaimable",
	"HighAtomic",
#ifdef CONFIG_CMA
	"CMA",
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	"Isolate",
#endif
};

static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
						struct zone *zone)
{
	int order;

	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (order = 0; order < MAX_ORDER; ++order)
		seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
	seq_putc(m, '\n');
}

/*
 * This walks the free areas for each zone.
 */
static int frag_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;
	walk_zones_in_node(m, pgdat, frag_show_print);
	return 0;
}

static void pagetypeinfo_showfree_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	int order, mtype;

	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
		seq_printf(m, "Node %4d, zone %8s, type %12s ",
					pgdat->node_id,
					zone->name,
					migratetype_names[mtype]);
		for (order = 0; order < MAX_ORDER; ++order) {
			unsigned long freecount = 0;
			struct free_area *area;
			struct list_head *curr;

			area = &(zone->free_area[order]);

			list_for_each(curr, &area->free_list[mtype])
				freecount++;
			seq_printf(m, "%6lu ", freecount);
		}
		seq_putc(m, '\n');
	}
}

/* Print out the free pages at each order for each migratetype */
static int pagetypeinfo_showfree(struct seq_file *m, void *arg)
{
	int order;
	pg_data_t *pgdat = (pg_data_t *)arg;

	/* Print header */
	seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
	for (order = 0; order < MAX_ORDER; ++order)
		seq_printf(m, "%6d ", order);
	seq_putc(m, '\n');

	walk_zones_in_node(m, pgdat, pagetypeinfo_showfree_print);

	return 0;
}

static void pagetypeinfo_showblockcount_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	int mtype;
	unsigned long pfn;
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long count[MIGRATE_TYPES] = { 0, };

	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		struct page *page;

		if (!pfn_valid(pfn))
			continue;

		page = pfn_to_page(pfn);

		/* Watch for unexpected holes punched in the memmap */
		if (!memmap_valid_within(pfn, page, zone))
			continue;

		mtype = get_pageblock_migratetype(page);

		if (mtype < MIGRATE_TYPES)
			count[mtype]++;
	}

	/* Print counts */
	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12lu ", count[mtype]);
	seq_putc(m, '\n');
}

/* Print out the number of pageblocks for each migratetype */
static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
{
	int mtype;
	pg_data_t *pgdat = (pg_data_t *)arg;

	seq_printf(m, "\n%-23s", "Number of blocks type ");
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12s ", migratetype_names[mtype]);
	seq_putc(m, '\n');
	walk_zones_in_node(m, pgdat, pagetypeinfo_showblockcount_print);

	return 0;
}

#ifdef CONFIG_PAGE_OWNER
static void pagetypeinfo_showmixedcount_print(struct seq_file *m,
							pg_data_t *pgdat,
							struct zone *zone)
{
	struct page *page;
	struct page_ext *page_ext;
	unsigned long pfn = zone->zone_start_pfn, block_end_pfn;
	unsigned long end_pfn = pfn + zone->spanned_pages;
	unsigned long count[MIGRATE_TYPES] = { 0, };
	int pageblock_mt, page_mt;
	int i;

	/* Scan block by block. First and last block may be incomplete */
	pfn = zone->zone_start_pfn;

	/*
	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
	 * a zone boundary, it will be double counted between zones. This does
	 * not matter as the mixed block count will still be correct
	 */
	for (; pfn < end_pfn; ) {
		if (!pfn_valid(pfn)) {
			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
			continue;
		}

		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
		block_end_pfn = min(block_end_pfn, end_pfn);

		page = pfn_to_page(pfn);
		pageblock_mt = get_pfnblock_migratetype(page, pfn);

		for (; pfn < block_end_pfn; pfn++) {
			if (!pfn_valid_within(pfn))
				continue;

			page = pfn_to_page(pfn);
			if (PageBuddy(page)) {
				pfn += (1UL << page_order(page)) - 1;
				continue;
			}

			if (PageReserved(page))
				continue;

			page_ext = lookup_page_ext(page);

			if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
				continue;

			page_mt = gfpflags_to_migratetype(page_ext->gfp_mask);
			if (pageblock_mt != page_mt) {
				if (is_migrate_cma(pageblock_mt))
					count[MIGRATE_MOVABLE]++;
				else
					count[pageblock_mt]++;

				pfn = block_end_pfn;
				break;
			}
			pfn += (1UL << page_ext->order) - 1;
		}
	}

	/* Print counts */
	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (i = 0; i < MIGRATE_TYPES; i++)
		seq_printf(m, "%12lu ", count[i]);
	seq_putc(m, '\n');
}
#endif /* CONFIG_PAGE_OWNER */
1119
1120/*
1121 * Print out the number of pageblocks for each migratetype that contain pages
1122 * of other types. This gives an indication of how well fallbacks are being
1123 * contained by rmqueue_fallback(). It requires information from PAGE_OWNER
1124 * to determine what is going on
1125 */
1126static void pagetypeinfo_showmixedcount(struct seq_file *m, pg_data_t *pgdat)
1127{
1128#ifdef CONFIG_PAGE_OWNER
1129 int mtype;
1130
1131 if (!page_owner_inited)
1132 return;
1133
1134 drain_all_pages(NULL);
1135
1136 seq_printf(m, "\n%-23s", "Number of mixed blocks ");
1137 for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1138 seq_printf(m, "%12s ", migratetype_names[mtype]);
1139 seq_putc(m, '\n');
1140
1141 walk_zones_in_node(m, pgdat, pagetypeinfo_showmixedcount_print);
1142#endif /* CONFIG_PAGE_OWNER */
1143}

/*
 * This prints out statistics in relation to grouping pages by mobility.
 * It is expensive to collect so do not constantly read the file.
 */
static int pagetypeinfo_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	/* check memoryless node */
	if (!node_state(pgdat->node_id, N_MEMORY))
		return 0;

	seq_printf(m, "Page block order: %d\n", pageblock_order);
	seq_printf(m, "Pages per block:  %lu\n", pageblock_nr_pages);
	seq_putc(m, '\n');
	pagetypeinfo_showfree(m, pgdat);
	pagetypeinfo_showblockcount(m, pgdat);
	pagetypeinfo_showmixedcount(m, pgdat);

	return 0;
}

static const struct seq_operations fragmentation_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= frag_show,
};

static int fragmentation_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &fragmentation_op);
}

static const struct file_operations fragmentation_file_operations = {
	.open		= fragmentation_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static const struct seq_operations pagetypeinfo_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= pagetypeinfo_show,
};

static int pagetypeinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &pagetypeinfo_op);
}

static const struct file_operations pagetypeinfo_file_ops = {
	.open		= pagetypeinfo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
							struct zone *zone)
{
	int i;
	seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
	seq_printf(m,
		   "\n  pages free     %lu"
		   "\n        min      %lu"
		   "\n        low      %lu"
		   "\n        high     %lu"
		   "\n        scanned  %lu"
		   "\n        spanned  %lu"
		   "\n        present  %lu"
		   "\n        managed  %lu",
		   zone_page_state(zone, NR_FREE_PAGES),
		   min_wmark_pages(zone),
		   low_wmark_pages(zone),
		   high_wmark_pages(zone),
		   zone_page_state(zone, NR_PAGES_SCANNED),
		   zone->spanned_pages,
		   zone->present_pages,
		   zone->managed_pages);

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		seq_printf(m, "\n    %-12s %lu", vmstat_text[i],
				zone_page_state(zone, i));

	seq_printf(m,
		   "\n        protection: (%ld",
		   zone->lowmem_reserve[0]);
	for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
		seq_printf(m, ", %ld", zone->lowmem_reserve[i]);
	seq_printf(m,
		   ")"
		   "\n  pagesets");
	for_each_online_cpu(i) {
		struct per_cpu_pageset *pageset;

		pageset = per_cpu_ptr(zone->pageset, i);
		seq_printf(m,
			   "\n    cpu: %i"
			   "\n              count: %i"
			   "\n              high:  %i"
			   "\n              batch: %i",
			   i,
			   pageset->pcp.count,
			   pageset->pcp.high,
			   pageset->pcp.batch);
#ifdef CONFIG_SMP
		seq_printf(m, "\n  vm stats threshold: %d",
				pageset->stat_threshold);
#endif
	}
	seq_printf(m,
		   "\n  all_unreclaimable: %u"
		   "\n  start_pfn:         %lu"
		   "\n  inactive_ratio:    %u",
		   !zone_reclaimable(zone),
		   zone->zone_start_pfn,
		   zone->inactive_ratio);
	seq_putc(m, '\n');
}

/*
 * Output information about zones in @pgdat.
 */
static int zoneinfo_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;
	walk_zones_in_node(m, pgdat, zoneinfo_show_print);
	return 0;
}

static const struct seq_operations zoneinfo_op = {
	.start	= frag_start, /* iterate over all zones. The same as in
			       * fragmentation. */
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= zoneinfo_show,
};

static int zoneinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &zoneinfo_op);
}

static const struct file_operations proc_zoneinfo_file_operations = {
	.open		= zoneinfo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

enum writeback_stat_item {
	NR_DIRTY_THRESHOLD,
	NR_DIRTY_BG_THRESHOLD,
	NR_VM_WRITEBACK_STAT_ITEMS,
};

static void *vmstat_start(struct seq_file *m, loff_t *pos)
{
	unsigned long *v;
	int i, stat_items_size;

	if (*pos >= ARRAY_SIZE(vmstat_text))
		return NULL;
	stat_items_size = NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long) +
			  NR_VM_WRITEBACK_STAT_ITEMS * sizeof(unsigned long);

#ifdef CONFIG_VM_EVENT_COUNTERS
	stat_items_size += sizeof(struct vm_event_state);
#endif

	v = kmalloc(stat_items_size, GFP_KERNEL);
	m->private = v;
	if (!v)
		return ERR_PTR(-ENOMEM);
	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		v[i] = global_page_state(i);
	v += NR_VM_ZONE_STAT_ITEMS;

	global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
			    v + NR_DIRTY_THRESHOLD);
	v += NR_VM_WRITEBACK_STAT_ITEMS;

#ifdef CONFIG_VM_EVENT_COUNTERS
	all_vm_events(v);
	v[PGPGIN] /= 2;		/* sectors -> kbytes */
	v[PGPGOUT] /= 2;
#endif
	return (unsigned long *)m->private + *pos;
}
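
/*
 * Layout note (describing the buffer built above): m->private is one flat
 * array of unsigned longs in exactly the order of vmstat_text[]: the zone
 * counters first, then the two dirty thresholds, then the event counters,
 * so vmstat_show() can pair vmstat_text[off] with the value at offset off.
 */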

static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
{
	(*pos)++;
	if (*pos >= ARRAY_SIZE(vmstat_text))
		return NULL;
	return (unsigned long *)m->private + *pos;
}

static int vmstat_show(struct seq_file *m, void *arg)
{
	unsigned long *l = arg;
	unsigned long off = l - (unsigned long *)m->private;

	seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
	return 0;
}

static void vmstat_stop(struct seq_file *m, void *arg)
{
	kfree(m->private);
	m->private = NULL;
}

static const struct seq_operations vmstat_op = {
	.start	= vmstat_start,
	.next	= vmstat_next,
	.stop	= vmstat_stop,
	.show	= vmstat_show,
};

static int vmstat_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &vmstat_op);
}

static const struct file_operations proc_vmstat_file_operations = {
	.open		= vmstat_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
#endif /* CONFIG_PROC_FS */

#ifdef CONFIG_SMP
static struct workqueue_struct *vmstat_wq;
static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
int sysctl_stat_interval __read_mostly = HZ;
static cpumask_var_t cpu_stat_off;

static void vmstat_update(struct work_struct *w)
{
	if (refresh_cpu_vm_stats()) {
		/*
		 * Counters were updated so we expect more updates
		 * to occur in the future. Keep on running the
		 * update worker thread.
		 */
		queue_delayed_work_on(smp_processor_id(), vmstat_wq,
			this_cpu_ptr(&vmstat_work),
			round_jiffies_relative(sysctl_stat_interval));
	} else {
		/*
		 * We did not update any counters so the app may be in
		 * a mode where it does not cause counter updates.
		 * We may be uselessly running vmstat_update.
		 * Defer the checking for differentials to the
		 * shepherd thread on a different processor.
		 */
		int r;
		/*
		 * Shepherd work thread does not race since it never
		 * changes the bit if it is zero but the cpu
		 * online / off line code may race if
		 * worker threads are still allowed during
		 * shutdown / startup.
		 */
		r = cpumask_test_and_set_cpu(smp_processor_id(),
			cpu_stat_off);
		VM_BUG_ON(r);
	}
}

/*
 * Check if the diffs for a certain cpu indicate that
 * an update is needed.
 */
static bool need_update(int cpu)
{
	struct zone *zone;

	for_each_populated_zone(zone) {
		struct per_cpu_pageset *p = per_cpu_ptr(zone->pageset, cpu);

		BUILD_BUG_ON(sizeof(p->vm_stat_diff[0]) != 1);
		/*
		 * The fast way of checking if there are any vmstat diffs.
		 * This works because the diffs are byte sized items.
		 */
		if (memchr_inv(p->vm_stat_diff, 0, NR_VM_ZONE_STAT_ITEMS))
			return true;
	}
	return false;
}
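
/*
 * memchr_inv(start, c, bytes) returns the address of the first byte that
 * differs from c, or NULL if all bytes match. Scanning all of the
 * byte-sized diffs in one call is cheaper than testing each s8 entry
 * individually.
 */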


/*
 * Shepherd worker thread that checks the
 * differentials of processors that have their worker
 * threads for vm statistics updates disabled because of
 * inactivity.
 */
static void vmstat_shepherd(struct work_struct *w);

static DECLARE_DELAYED_WORK(shepherd, vmstat_shepherd);

static void vmstat_shepherd(struct work_struct *w)
{
	int cpu;

	get_online_cpus();
	/* Check processors whose vmstat worker threads have been disabled */
	for_each_cpu(cpu, cpu_stat_off)
		if (need_update(cpu) &&
			cpumask_test_and_clear_cpu(cpu, cpu_stat_off))
			queue_delayed_work_on(cpu, vmstat_wq,
				&per_cpu(vmstat_work, cpu), 0);

	put_online_cpus();

	schedule_delayed_work(&shepherd,
		round_jiffies_relative(sysctl_stat_interval));
}
1473
static void __init start_shepherd_timer(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		INIT_DELAYED_WORK(per_cpu_ptr(&vmstat_work, cpu),
			vmstat_update);

	if (!alloc_cpumask_var(&cpu_stat_off, GFP_KERNEL))
		BUG();
	cpumask_copy(cpu_stat_off, cpu_online_mask);

	schedule_delayed_work(&shepherd,
		round_jiffies_relative(sysctl_stat_interval));
}

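/*
 * A CPU on the given node went offline: clear the node's N_CPU state
 * if no CPU on that node remains online.
 */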
static void vmstat_cpu_dead(int node)
{
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu)
		if (cpu_to_node(cpu) == node)
			goto end;

	node_clear_state(node, N_CPU);
end:
	put_online_cpus();
}

/*
 * Use the cpu notifier to ensure that the thresholds are recalculated
 * when necessary.
 */
static int vmstat_cpuup_callback(struct notifier_block *nfb,
		unsigned long action,
		void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		refresh_zone_stat_thresholds();
		node_set_state(cpu_to_node(cpu), N_CPU);
		cpumask_set_cpu(cpu, cpu_stat_off);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
		cpumask_clear_cpu(cpu, cpu_stat_off);
		break;
	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		cpumask_set_cpu(cpu, cpu_stat_off);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		refresh_zone_stat_thresholds();
		vmstat_cpu_dead(cpu_to_node(cpu));
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block vmstat_notifier =
	{ &vmstat_cpuup_callback, NULL, 0 };
#endif

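/*
 * Boot-time setup: register the CPU hotplug notifier, start the
 * shepherd, create the vmstat workqueue and the /proc interfaces.
 */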
static int __init setup_vmstat(void)
{
#ifdef CONFIG_SMP
	cpu_notifier_register_begin();
	__register_cpu_notifier(&vmstat_notifier);

	start_shepherd_timer();
	cpu_notifier_register_done();
	vmstat_wq = alloc_workqueue("vmstat", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
#endif
#ifdef CONFIG_PROC_FS
	proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
	proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
	proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
	proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
#endif
	return 0;
}
module_init(setup_vmstat)

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)

/*
 * Return an index indicating how much of the available free memory is
 * unusable for an allocation of the requested size.
 */
static int unusable_free_index(unsigned int order,
				struct contig_page_info *info)
{
	/* No free memory is interpreted as all free memory is unusable */
	if (info->free_pages == 0)
		return 1000;

	/*
	 * The index is conceptually a value between 0 and 1 and is
	 * returned scaled by 1000, i.e. to three decimal places.
	 *
	 * 0 => no fragmentation
	 * 1 => high fragmentation
	 */
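	/*
	 * Worked example with hypothetical numbers: 1000 free pages of
	 * which 50 order-4 blocks (50 << 4 = 800 pages) are large enough
	 * gives (1000 - 800) * 1000 / 1000 = 200, i.e. an index of 0.200.
	 */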
	return div_u64((info->free_pages - (info->free_blocks_suitable << order)) * 1000ULL, info->free_pages);
}

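/*
 * Print one row per zone: the unusable free index at each allocation
 * order, formatted to three decimal places.
 */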
static void unusable_show_print(struct seq_file *m,
				pg_data_t *pgdat, struct zone *zone)
{
	unsigned int order;
	int index;
	struct contig_page_info info;

	seq_printf(m, "Node %d, zone %8s ",
				pgdat->node_id,
				zone->name);
	for (order = 0; order < MAX_ORDER; ++order) {
		fill_contig_page_info(zone, order, &info);
		index = unusable_free_index(order, &info);
		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
	}

	seq_putc(m, '\n');
}

/*
 * Display unusable free space index
 *
 * The unusable free space index measures how much of the available free
 * memory cannot be used to satisfy an allocation of a given size and is
 * a value between 0 and 1. The higher the value, the more of the free
 * memory is unusable and, by implication, the worse the external
 * fragmentation is. This can be expressed as a percentage by
 * multiplying by 100.
 */
static int unusable_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	/* check memoryless node */
	if (!node_state(pgdat->node_id, N_MEMORY))
		return 0;

	walk_zones_in_node(m, pgdat, unusable_show_print);

	return 0;
}

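/*
 * seq_file plumbing: frag_start/frag_next/frag_stop iterate over the
 * online nodes, unusable_show() emits one row per zone.
 */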
static const struct seq_operations unusable_op = {
	.start = frag_start,
	.next = frag_next,
	.stop = frag_stop,
	.show = unusable_show,
};

static int unusable_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &unusable_op);
}

static const struct file_operations unusable_file_ops = {
	.open = unusable_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

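/*
 * Print one row per zone: the fragmentation index at each allocation
 * order, formatted to three decimal places.
 */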
static void extfrag_show_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	unsigned int order;
	int index;

	/* Alloc on stack as interrupts are disabled for zone walk */
	struct contig_page_info info;

	seq_printf(m, "Node %d, zone %8s ",
				pgdat->node_id,
				zone->name);
	for (order = 0; order < MAX_ORDER; ++order) {
		fill_contig_page_info(zone, order, &info);
		index = __fragmentation_index(order, &info);
		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
	}

	seq_putc(m, '\n');
}

/*
 * Display the fragmentation index for orders at which allocations
 * would fail.
 */
static int extfrag_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	walk_zones_in_node(m, pgdat, extfrag_show_print);

	return 0;
}

static const struct seq_operations extfrag_op = {
	.start = frag_start,
	.next = frag_next,
	.stop = frag_stop,
	.show = extfrag_show,
};

static int extfrag_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &extfrag_op);
}

static const struct file_operations extfrag_file_ops = {
	.open = extfrag_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

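/*
 * Create the "extfrag" debugfs directory (typically visible under
 * /sys/kernel/debug) holding the unusable_index and extfrag_index
 * files.
 */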
static int __init extfrag_debug_init(void)
{
	struct dentry *extfrag_debug_root;

	extfrag_debug_root = debugfs_create_dir("extfrag", NULL);
	if (!extfrag_debug_root)
		return -ENOMEM;

	if (!debugfs_create_file("unusable_index", 0444,
			extfrag_debug_root, NULL, &unusable_file_ops))
		goto fail;

	if (!debugfs_create_file("extfrag_index", 0444,
			extfrag_debug_root, NULL, &extfrag_file_ops))
		goto fail;

	return 0;
fail:
	debugfs_remove_recursive(extfrag_debug_root);
	return -ENOMEM;
}

module_init(extfrag_debug_init);
#endif