#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include <asm/elf.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include "internal.h"

void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	unsigned long data, text, lib, swap;
	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

	/*
	 * Note: to minimize their overhead, mm maintains hiwater_vm and
	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
	 * collector of these hiwater stats must therefore get total_vm
	 * and rss too, which will usually be the higher.  Barriers? not
	 * worth the effort, such snapshots can always be inconsistent.
	 */
	hiwater_vm = total_vm = mm->total_vm;
	if (hiwater_vm < mm->hiwater_vm)
		hiwater_vm = mm->hiwater_vm;
	hiwater_rss = total_rss = get_mm_rss(mm);
	if (hiwater_rss < mm->hiwater_rss)
		hiwater_rss = mm->hiwater_rss;

	data = mm->total_vm - mm->shared_vm - mm->stack_vm;
	text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
	lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
	swap = get_mm_counter(mm, MM_SWAPENTS);
	seq_printf(m,
		"VmPeak:\t%8lu kB\n"
		"VmSize:\t%8lu kB\n"
		"VmLck:\t%8lu kB\n"
		"VmPin:\t%8lu kB\n"
		"VmHWM:\t%8lu kB\n"
		"VmRSS:\t%8lu kB\n"
		"VmData:\t%8lu kB\n"
		"VmStk:\t%8lu kB\n"
		"VmExe:\t%8lu kB\n"
		"VmLib:\t%8lu kB\n"
		"VmPTE:\t%8lu kB\n"
		"VmSwap:\t%8lu kB\n",
		hiwater_vm << (PAGE_SHIFT-10),
		total_vm << (PAGE_SHIFT-10),
		mm->locked_vm << (PAGE_SHIFT-10),
		mm->pinned_vm << (PAGE_SHIFT-10),
		hiwater_rss << (PAGE_SHIFT-10),
		total_rss << (PAGE_SHIFT-10),
		data << (PAGE_SHIFT-10),
		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
		(PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
		swap << (PAGE_SHIFT-10));
}

unsigned long task_vsize(struct mm_struct *mm)
{
	return PAGE_SIZE * mm->total_vm;
}

unsigned long task_statm(struct mm_struct *mm,
			 unsigned long *shared, unsigned long *text,
			 unsigned long *data, unsigned long *resident)
{
	*shared = get_mm_counter(mm, MM_FILEPAGES);
	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
								>> PAGE_SHIFT;
	*data = mm->total_vm - mm->shared_vm;
	*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
	return mm->total_vm;
}

static void pad_len_spaces(struct seq_file *m, int len)
{
	len = 25 + sizeof(void*) * 6 - len;
	if (len < 1)
		len = 1;
	seq_printf(m, "%*c", len, ' ');
}

#ifdef CONFIG_NUMA
/*
 * These functions are for numa_maps, but they are called from the generic
 * *maps seq_file ->start() and ->stop() ops.
 *
 * numa_maps scans all vmas under mmap_sem and checks their mempolicy.
 * Each mempolicy object is controlled by reference counting.  The problem
 * here is how to avoid accessing a dead mempolicy object.
 *
 * Because we hold mmap_sem while reading the seq_file, it is safe to access
 * each vma's mempolicy: no vma will drop its reference to a mempolicy while
 * we hold the semaphore.
 *
 * A task's mempolicy (task->mempolicy) behaves differently: it is set and
 * replaced under mmap_sem, but unreferenced and cleared under task_lock().
 * So, without task_lock(), we cannot trust get_vma_policy() because we
 * cannot guarantee that the task will not exit under us.  But taking
 * task_lock() around get_vma_policy() causes a lock-order problem.
 *
 * To access task->mempolicy without a lock, we take a reference on the
 * object pointed to by task->mempolicy and remember it.  This guarantees
 * that task->mempolicy points to a live object (or is NULL) whenever
 * numa_maps accesses it.
 */
static void hold_task_mempolicy(struct proc_maps_private *priv)
{
	struct task_struct *task = priv->task;

	task_lock(task);
	priv->task_mempolicy = task->mempolicy;
	mpol_get(priv->task_mempolicy);
	task_unlock(task);
}
static void release_task_mempolicy(struct proc_maps_private *priv)
{
	mpol_put(priv->task_mempolicy);
}
#else
static void hold_task_mempolicy(struct proc_maps_private *priv)
{
}
static void release_task_mempolicy(struct proc_maps_private *priv)
{
}
#endif

static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
{
	if (vma && vma != priv->tail_vma) {
		struct mm_struct *mm = vma->vm_mm;
		release_task_mempolicy(priv);
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
}

static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	unsigned long last_addr = m->version;
	struct mm_struct *mm;
	struct vm_area_struct *vma, *tail_vma = NULL;
	loff_t l = *pos;

	/* Clear the per syscall fields in priv */
	priv->task = NULL;
	priv->tail_vma = NULL;

	/*
	 * We remember last_addr rather than next_addr so that find_vma()
	 * usually hits the mmap_cache.  last_addr is zero at the beginning
	 * and also after an lseek, and becomes -1 once the end of the vmas
	 * has been reached.
	 */

	if (last_addr == -1UL)
		return NULL;

	priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
	if (!priv->task)
		return ERR_PTR(-ESRCH);

	mm = mm_access(priv->task, PTRACE_MODE_READ);
	if (!mm || IS_ERR(mm))
		return mm;
	down_read(&mm->mmap_sem);

	tail_vma = get_gate_vma(priv->task->mm);
	priv->tail_vma = tail_vma;
	hold_task_mempolicy(priv);
	/* Start with last addr hint */
	vma = find_vma(mm, last_addr);
	if (last_addr && vma) {
		vma = vma->vm_next;
		goto out;
	}

	/*
	 * Check that the vma index is within range and do a
	 * sequential scan until m_index.
	 */
	vma = NULL;
	if ((unsigned long)l < mm->map_count) {
		vma = mm->mmap;
		while (l-- && vma)
			vma = vma->vm_next;
		goto out;
	}

	if (l != mm->map_count)
		tail_vma = NULL; /* After gate vma */

out:
	if (vma)
		return vma;

	release_task_mempolicy(priv);
	/* End of vmas has been reached */
	m->version = (tail_vma != NULL)? 0: -1UL;
	up_read(&mm->mmap_sem);
	mmput(mm);
	return tail_vma;
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;
	struct vm_area_struct *tail_vma = priv->tail_vma;

	(*pos)++;
	if (vma && (vma != tail_vma) && vma->vm_next)
		return vma->vm_next;
	vma_stop(priv, vma);
	return (vma != tail_vma)? tail_vma: NULL;
}

static void m_stop(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;

	if (!IS_ERR(vma))
		vma_stop(priv, vma);
	if (priv->task)
		put_task_struct(priv->task);
}

static int do_maps_open(struct inode *inode, struct file *file,
			const struct seq_operations *ops)
{
	struct proc_maps_private *priv;
	int ret = -ENOMEM;
	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (priv) {
		priv->pid = proc_pid(inode);
		ret = seq_open(file, ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = priv;
		} else {
			kfree(priv);
		}
	}
	return ret;
}

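/*
 * Each /proc/PID/maps line emitted below has the form
 *
 *	address           perms offset   dev   inode   pathname
 *
 * for example (illustrative values only):
 *
 *	08048000-08056000 r-xp 00000000 03:0c 64593   /usr/sbin/gpm
 */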
static void
show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
{
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;
	vm_flags_t flags = vma->vm_flags;
	unsigned long ino = 0;
	unsigned long long pgoff = 0;
	unsigned long start, end;
	dev_t dev = 0;
	int len;
	const char *name = NULL;

	if (file) {
		struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
	}

	/* We don't show the stack guard page in /proc/maps */
	start = vma->vm_start;
	if (stack_guard_page_start(vma, start))
		start += PAGE_SIZE;
	end = vma->vm_end;
	if (stack_guard_page_end(vma, end))
		end -= PAGE_SIZE;

	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
			start,
			end,
			flags & VM_READ ? 'r' : '-',
			flags & VM_WRITE ? 'w' : '-',
			flags & VM_EXEC ? 'x' : '-',
			flags & VM_MAYSHARE ? 's' : 'p',
			pgoff,
			MAJOR(dev), MINOR(dev), ino, &len);

	/*
	 * Print the dentry name for named mappings, and a
	 * special [heap] marker for the heap:
	 */
	if (file) {
		pad_len_spaces(m, len);
		seq_path(m, &file->f_path, "\n");
		goto done;
	}

	name = arch_vma_name(vma);
	if (!name) {
		pid_t tid;

		if (!mm) {
			name = "[vdso]";
			goto done;
		}

		if (vma->vm_start <= mm->brk &&
		    vma->vm_end >= mm->start_brk) {
			name = "[heap]";
			goto done;
		}

		tid = vm_is_stack(task, vma, is_pid);

		if (tid != 0) {
			/*
			 * Thread stack in /proc/PID/task/TID/maps or
			 * the main process stack.
			 */
			if (!is_pid || (vma->vm_start <= mm->start_stack &&
			    vma->vm_end >= mm->start_stack)) {
				name = "[stack]";
			} else {
				/* Thread stack in /proc/PID/maps */
				pad_len_spaces(m, len);
				seq_printf(m, "[stack:%d]", tid);
			}
		}
	}

done:
	if (name) {
		pad_len_spaces(m, len);
		seq_puts(m, name);
	}
	seq_putc(m, '\n');
}

static int show_map(struct seq_file *m, void *v, int is_pid)
{
	struct vm_area_struct *vma = v;
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;

	show_map_vma(m, vma, is_pid);

	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task->mm))
			? vma->vm_start : 0;
	return 0;
}

static int show_pid_map(struct seq_file *m, void *v)
{
	return show_map(m, v, 1);
}

static int show_tid_map(struct seq_file *m, void *v)
{
	return show_map(m, v, 0);
}

static const struct seq_operations proc_pid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_map
};

static const struct seq_operations proc_tid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_map
};

static int pid_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_maps_op);
}

static int tid_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_tid_maps_op);
}

const struct file_operations proc_pid_maps_operations = {
	.open		= pid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

const struct file_operations proc_tid_maps_operations = {
	.open		= tid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

/*
 * Proportional Set Size (PSS): my share of RSS.
 *
 * PSS of a process is the count of pages it has in memory, where each
 * page is divided by the number of processes sharing it.  So if a
 * process has 1000 pages all to itself, and 1000 shared with one other
 * process, its PSS will be 1500.
 *
 * To keep (accumulated) division errors low, we adopt a 64-bit
 * fixed-point pss counter, so that (pss >> PSS_SHIFT) is the real
 * byte count.
 *
 * A shift of 12 before division means (assuming 4K page size):
 * 	- 1M 3-user-pages add up to 8KB errors;
 * 	- supports mapcount up to 2^24, or 16M;
 * 	- supports PSS up to 2^52 bytes, or 4PB.
 */
#define PSS_SHIFT 12
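
/*
 * Worked example of the fixed-point arithmetic above (illustrative,
 * assuming a 4096-byte page mapped by three processes): each sharer
 * accumulates (4096 << PSS_SHIFT) / 3 = 5592405 into its pss counter,
 * which reads back as 5592405 >> PSS_SHIFT = 1365 bytes, i.e. roughly
 * a third of the page, with under one byte of truncation error.
 */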

#ifdef CONFIG_PROC_PAGE_MONITOR
struct mem_size_stats {
	struct vm_area_struct *vma;
	unsigned long resident;
	unsigned long shared_clean;
	unsigned long shared_dirty;
	unsigned long private_clean;
	unsigned long private_dirty;
	unsigned long referenced;
	unsigned long anonymous;
	unsigned long anonymous_thp;
	unsigned long swap;
	unsigned long nonlinear;
	u64 pss;
};

static void smaps_pte_entry(pte_t ptent, unsigned long addr,
		unsigned long ptent_size, struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = mss->vma;
	pgoff_t pgoff = linear_page_index(vma, addr);
	struct page *page = NULL;
	int mapcount;

	if (pte_present(ptent)) {
		page = vm_normal_page(vma, addr, ptent);
	} else if (is_swap_pte(ptent)) {
		swp_entry_t swpent = pte_to_swp_entry(ptent);

		if (!non_swap_entry(swpent))
			mss->swap += ptent_size;
		else if (is_migration_entry(swpent))
			page = migration_entry_to_page(swpent);
	} else if (pte_file(ptent)) {
		if (pte_to_pgoff(ptent) != pgoff)
			mss->nonlinear += ptent_size;
	}

	if (!page)
		return;

	if (PageAnon(page))
		mss->anonymous += ptent_size;

	if (page->index != pgoff)
		mss->nonlinear += ptent_size;

	mss->resident += ptent_size;
	/* Accumulate the size in pages that have been accessed. */
	if (pte_young(ptent) || PageReferenced(page))
		mss->referenced += ptent_size;
	mapcount = page_mapcount(page);
	if (mapcount >= 2) {
		if (pte_dirty(ptent) || PageDirty(page))
			mss->shared_dirty += ptent_size;
		else
			mss->shared_clean += ptent_size;
		mss->pss += (ptent_size << PSS_SHIFT) / mapcount;
	} else {
		if (pte_dirty(ptent) || PageDirty(page))
			mss->private_dirty += ptent_size;
		else
			mss->private_clean += ptent_size;
		mss->pss += (ptent_size << PSS_SHIFT);
	}
}

static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			   struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = mss->vma;
	pte_t *pte;
	spinlock_t *ptl;

	if (pmd_trans_huge_lock(pmd, vma) == 1) {
		smaps_pte_entry(*(pte_t *)pmd, addr, HPAGE_PMD_SIZE, walk);
		spin_unlock(&walk->mm->page_table_lock);
		mss->anonymous_thp += HPAGE_PMD_SIZE;
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
	/*
	 * The mmap_sem held all the way back in m_start() is what
	 * keeps khugepaged out of here and from collapsing things
	 * in here.
	 */
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE)
		smaps_pte_entry(*pte, addr, PAGE_SIZE, walk);
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

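/*
 * A mapping's flags are reported on a single "VmFlags:" line, using the
 * two-letter mnemonics defined below, e.g. (illustrative output):
 *
 *	VmFlags: rd ex mr mw me dw
 */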
static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
{
	/*
	 * Don't forget to update Documentation/ on changes.
	 */
	static const char mnemonics[BITS_PER_LONG][2] = {
		/*
		 * In case we meet a flag we don't know about.
		 */
		[0 ... (BITS_PER_LONG-1)] = "??",

		[ilog2(VM_READ)]	= "rd",
		[ilog2(VM_WRITE)]	= "wr",
		[ilog2(VM_EXEC)]	= "ex",
		[ilog2(VM_SHARED)]	= "sh",
		[ilog2(VM_MAYREAD)]	= "mr",
		[ilog2(VM_MAYWRITE)]	= "mw",
		[ilog2(VM_MAYEXEC)]	= "me",
		[ilog2(VM_MAYSHARE)]	= "ms",
		[ilog2(VM_GROWSDOWN)]	= "gd",
		[ilog2(VM_PFNMAP)]	= "pf",
		[ilog2(VM_DENYWRITE)]	= "dw",
		[ilog2(VM_LOCKED)]	= "lo",
		[ilog2(VM_IO)]		= "io",
		[ilog2(VM_SEQ_READ)]	= "sr",
		[ilog2(VM_RAND_READ)]	= "rr",
		[ilog2(VM_DONTCOPY)]	= "dc",
		[ilog2(VM_DONTEXPAND)]	= "de",
		[ilog2(VM_ACCOUNT)]	= "ac",
		[ilog2(VM_NORESERVE)]	= "nr",
		[ilog2(VM_HUGETLB)]	= "ht",
		[ilog2(VM_NONLINEAR)]	= "nl",
		[ilog2(VM_ARCH_1)]	= "ar",
		[ilog2(VM_DONTDUMP)]	= "dd",
		[ilog2(VM_MIXEDMAP)]	= "mm",
		[ilog2(VM_HUGEPAGE)]	= "hg",
		[ilog2(VM_NOHUGEPAGE)]	= "nh",
		[ilog2(VM_MERGEABLE)]	= "mg",
	};
	size_t i;

	seq_puts(m, "VmFlags: ");
	for (i = 0; i < BITS_PER_LONG; i++) {
		if (vma->vm_flags & (1UL << i)) {
			seq_printf(m, "%c%c ",
				   mnemonics[i][0], mnemonics[i][1]);
		}
	}
	seq_putc(m, '\n');
}

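/*
 * show_smap() emits one /proc/PID/smaps stanza per vma: the maps header
 * line followed by the size/RSS/PSS breakdown printed below.  An
 * illustrative (not authoritative) excerpt:
 *
 *	Size:               1084 kB
 *	Rss:                 892 kB
 *	Pss:                 374 kB
 *	...
 *	Swap:                  0 kB
 */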
static int show_smap(struct seq_file *m, void *v, int is_pid)
{
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;
	struct vm_area_struct *vma = v;
	struct mem_size_stats mss;
	struct mm_walk smaps_walk = {
		.pmd_entry = smaps_pte_range,
		.mm = vma->vm_mm,
		.private = &mss,
	};

	memset(&mss, 0, sizeof mss);
	mss.vma = vma;
	/* mmap_sem is held in m_start */
	if (vma->vm_mm && !is_vm_hugetlb_page(vma))
		walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);

	show_map_vma(m, vma, is_pid);

	seq_printf(m,
		   "Size:           %8lu kB\n"
		   "Rss:            %8lu kB\n"
		   "Pss:            %8lu kB\n"
		   "Shared_Clean:   %8lu kB\n"
		   "Shared_Dirty:   %8lu kB\n"
		   "Private_Clean:  %8lu kB\n"
		   "Private_Dirty:  %8lu kB\n"
		   "Referenced:     %8lu kB\n"
		   "Anonymous:      %8lu kB\n"
		   "AnonHugePages:  %8lu kB\n"
		   "Swap:           %8lu kB\n"
		   "KernelPageSize: %8lu kB\n"
		   "MMUPageSize:    %8lu kB\n"
		   "Locked:         %8lu kB\n",
		   (vma->vm_end - vma->vm_start) >> 10,
		   mss.resident >> 10,
		   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
		   mss.shared_clean  >> 10,
		   mss.shared_dirty  >> 10,
		   mss.private_clean >> 10,
		   mss.private_dirty >> 10,
		   mss.referenced >> 10,
		   mss.anonymous >> 10,
		   mss.anonymous_thp >> 10,
		   mss.swap >> 10,
		   vma_kernel_pagesize(vma) >> 10,
		   vma_mmu_pagesize(vma) >> 10,
		   (vma->vm_flags & VM_LOCKED) ?
			(unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);

	if (vma->vm_flags & VM_NONLINEAR)
		seq_printf(m, "Nonlinear:      %8lu kB\n",
				mss.nonlinear >> 10);

	show_smap_vma_flags(m, vma);

	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task->mm))
			? vma->vm_start : 0;
	return 0;
}

static int show_pid_smap(struct seq_file *m, void *v)
{
	return show_smap(m, v, 1);
}

static int show_tid_smap(struct seq_file *m, void *v)
{
	return show_smap(m, v, 0);
}

static const struct seq_operations proc_pid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_smap
};

static const struct seq_operations proc_tid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_smap
};

static int pid_smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_smaps_op);
}

static int tid_smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_tid_smaps_op);
}

const struct file_operations proc_pid_smaps_operations = {
	.open		= pid_smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

const struct file_operations proc_tid_smaps_operations = {
	.open		= tid_smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->private;
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;

	split_huge_page_pmd(vma, addr, pmd);
	if (pmd_trans_unstable(pmd))
		return 0;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;
		if (!pte_present(ptent))
			continue;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/* Clear accessed and referenced bits. */
		ptep_test_and_clear_young(vma, addr, pte);
		ClearPageReferenced(page);
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

#define CLEAR_REFS_ALL 1
#define CLEAR_REFS_ANON 2
#define CLEAR_REFS_MAPPED 3

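/*
 * Usage sketch (from userspace, illustrative): to reset the referenced
 * bits for all mappings of a task and then observe fresh "Referenced:"
 * values in smaps:
 *
 *	echo 1 > /proc/PID/clear_refs
 *	cat /proc/PID/smaps
 */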
static ssize_t clear_refs_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct task_struct *task;
	char buffer[PROC_NUMBUF];
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int type;
	int rv;

	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;
	if (copy_from_user(buffer, buf, count))
		return -EFAULT;
	rv = kstrtoint(strstrip(buffer), 10, &type);
	if (rv < 0)
		return rv;
	if (type < CLEAR_REFS_ALL || type > CLEAR_REFS_MAPPED)
		return -EINVAL;
	task = get_proc_task(file->f_path.dentry->d_inode);
	if (!task)
		return -ESRCH;
	mm = get_task_mm(task);
	if (mm) {
		struct mm_walk clear_refs_walk = {
			.pmd_entry = clear_refs_pte_range,
			.mm = mm,
		};
		down_read(&mm->mmap_sem);
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			clear_refs_walk.private = vma;
			if (is_vm_hugetlb_page(vma))
				continue;
			/*
			 * Writing 1 to /proc/pid/clear_refs affects all pages.
			 *
			 * Writing 2 to /proc/pid/clear_refs only affects
			 * anonymous pages.
			 *
			 * Writing 3 to /proc/pid/clear_refs only affects file
			 * mapped pages.
			 */
			if (type == CLEAR_REFS_ANON && vma->vm_file)
				continue;
			if (type == CLEAR_REFS_MAPPED && !vma->vm_file)
				continue;
			walk_page_range(vma->vm_start, vma->vm_end,
					&clear_refs_walk);
		}
		flush_tlb_mm(mm);
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
	put_task_struct(task);

	return count;
}

const struct file_operations proc_clear_refs_operations = {
	.write		= clear_refs_write,
	.llseek		= noop_llseek,
};

typedef struct {
	u64 pme;
} pagemap_entry_t;

struct pagemapread {
	int pos, len;
	pagemap_entry_t *buffer;
};

#define PAGEMAP_WALK_SIZE	(PMD_SIZE)
#define PAGEMAP_WALK_MASK	(PMD_MASK)

#define PM_ENTRY_BYTES      sizeof(u64)
#define PM_STATUS_BITS      3
#define PM_STATUS_OFFSET    (64 - PM_STATUS_BITS)
#define PM_STATUS_MASK      (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET)
#define PM_STATUS(nr)       (((nr) << PM_STATUS_OFFSET) & PM_STATUS_MASK)
#define PM_PSHIFT_BITS      6
#define PM_PSHIFT_OFFSET    (PM_STATUS_OFFSET - PM_PSHIFT_BITS)
#define PM_PSHIFT_MASK      (((1LL << PM_PSHIFT_BITS) - 1) << PM_PSHIFT_OFFSET)
#define PM_PSHIFT(x)        (((u64) (x) << PM_PSHIFT_OFFSET) & PM_PSHIFT_MASK)
#define PM_PFRAME_MASK      ((1LL << PM_PSHIFT_OFFSET) - 1)
#define PM_PFRAME(x)        ((x) & PM_PFRAME_MASK)

#define PM_PRESENT          PM_STATUS(4LL)
#define PM_SWAP             PM_STATUS(2LL)
#define PM_FILE             PM_STATUS(1LL)
#define PM_NOT_PRESENT      PM_PSHIFT(PAGE_SHIFT)
#define PM_END_OF_BUFFER    1

static inline pagemap_entry_t make_pme(u64 val)
{
	return (pagemap_entry_t) { .pme = val };
}

static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme,
			  struct pagemapread *pm)
{
	pm->buffer[pm->pos++] = *pme;
	if (pm->pos >= pm->len)
		return PM_END_OF_BUFFER;
	return 0;
}

static int pagemap_pte_hole(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	unsigned long addr;
	int err = 0;
	pagemap_entry_t pme = make_pme(PM_NOT_PRESENT);

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		err = add_to_pagemap(addr, &pme, pm);
		if (err)
			break;
	}
	return err;
}

static void pte_to_pagemap_entry(pagemap_entry_t *pme,
		struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
	u64 frame, flags;
	struct page *page = NULL;

	if (pte_present(pte)) {
		frame = pte_pfn(pte);
		flags = PM_PRESENT;
		page = vm_normal_page(vma, addr, pte);
	} else if (is_swap_pte(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		frame = swp_type(entry) |
			(swp_offset(entry) << MAX_SWAPFILES_SHIFT);
		flags = PM_SWAP;
		if (is_migration_entry(entry))
			page = migration_entry_to_page(entry);
	} else {
		*pme = make_pme(PM_NOT_PRESENT);
		return;
	}

	if (page && !PageAnon(page))
		flags |= PM_FILE;

	*pme = make_pme(PM_PFRAME(frame) | PM_PSHIFT(PAGE_SHIFT) | flags);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme,
					pmd_t pmd, int offset)
{
	/*
	 * Currently a pmd for a thp is always present, because a thp
	 * cannot be swapped out, migrated, or HWPOISONed (it is split
	 * in such cases instead).  This if-check just prepares for a
	 * future implementation.
	 */
	if (pmd_present(pmd))
		*pme = make_pme(PM_PFRAME(pmd_pfn(pmd) + offset)
				| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT);
	else
		*pme = make_pme(PM_NOT_PRESENT);
}
#else
static inline void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme,
						pmd_t pmd, int offset)
{
}
#endif

static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct vm_area_struct *vma;
	struct pagemapread *pm = walk->private;
	pte_t *pte;
	int err = 0;
	pagemap_entry_t pme = make_pme(PM_NOT_PRESENT);

	/* find the first VMA at or above 'addr' */
	vma = find_vma(walk->mm, addr);
	if (vma && pmd_trans_huge_lock(pmd, vma) == 1) {
		for (; addr != end; addr += PAGE_SIZE) {
			unsigned long offset;

			offset = (addr & ~PAGEMAP_WALK_MASK) >>
					PAGE_SHIFT;
			thp_pmd_to_pagemap_entry(&pme, *pmd, offset);
			err = add_to_pagemap(addr, &pme, pm);
			if (err)
				break;
		}
		spin_unlock(&walk->mm->page_table_lock);
		return err;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
	for (; addr != end; addr += PAGE_SIZE) {

		/* check to see if we've left 'vma' behind
		 * and need a new, higher one */
		if (vma && (addr >= vma->vm_end)) {
			vma = find_vma(walk->mm, addr);
			pme = make_pme(PM_NOT_PRESENT);
		}

		/* check that 'vma' actually covers this address,
		 * and that it isn't a huge page vma */
		if (vma && (vma->vm_start <= addr) &&
		    !is_vm_hugetlb_page(vma)) {
			pte = pte_offset_map(pmd, addr);
			pte_to_pagemap_entry(&pme, vma, addr, *pte);
			/* unmap before userspace copy */
			pte_unmap(pte);
		}
		err = add_to_pagemap(addr, &pme, pm);
		if (err)
			return err;
	}

	cond_resched();

	return err;
}

#ifdef CONFIG_HUGETLB_PAGE
static void huge_pte_to_pagemap_entry(pagemap_entry_t *pme,
					pte_t pte, int offset)
{
	if (pte_present(pte))
		*pme = make_pme(PM_PFRAME(pte_pfn(pte) + offset)
				| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT);
	else
		*pme = make_pme(PM_NOT_PRESENT);
}

/* This function walks within one hugetlb entry in a single call */
static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
				 unsigned long addr, unsigned long end,
				 struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	int err = 0;
	pagemap_entry_t pme;

	for (; addr != end; addr += PAGE_SIZE) {
		int offset = (addr & ~hmask) >> PAGE_SHIFT;
		huge_pte_to_pagemap_entry(&pme, *pte, offset);
		err = add_to_pagemap(addr, &pme, pm);
		if (err)
			return err;
	}

	cond_resched();

	return err;
}
#endif /* HUGETLB_PAGE */

/*
 * /proc/pid/pagemap - an array mapping virtual pages to pfns
 *
 * For each page in the address space, this file contains one 64-bit entry
 * consisting of the following:
 *
 * Bits 0-54  page frame number (PFN) if present
 * Bits 0-4   swap type if swapped
 * Bits 5-54  swap offset if swapped
 * Bits 55-60 page shift (page size = 1<<page shift)
 * Bit  61    page is file-page or shared-anon
 * Bit  62    page swapped
 * Bit  63    page present
 *
 * If the page is not present but in swap, then the PFN contains an
 * encoding of the swap file number and the page's offset into the
 * swap. Unmapped pages return a null PFN. This allows determining
 * precisely which pages are mapped (or in swap) and comparing mapped
 * pages between processes.
 *
 * Efficient users of this interface will use /proc/pid/maps to
 * determine which areas of memory are actually mapped and llseek to
 * skip over unmapped regions.
 */
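/*
 * Minimal userspace sketch of reading one entry (illustrative only, not
 * part of the kernel; assumes the bit layout above and a hypothetical
 * variable 'vaddr' holding the virtual address of interest):
 *
 *	int fd = open("/proc/self/pagemap", O_RDONLY);
 *	uint64_t ent;
 *	long psize = sysconf(_SC_PAGESIZE);
 *
 *	pread(fd, &ent, sizeof(ent), (vaddr / psize) * sizeof(ent));
 *	if (ent & (1ULL << 63))			// bit 63: page present
 *		printf("pfn: %llu\n", ent & ((1ULL << 55) - 1));
 *	close(fd);
 */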
static ssize_t pagemap_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
	struct mm_struct *mm;
	struct pagemapread pm;
	int ret = -ESRCH;
	struct mm_walk pagemap_walk = {};
	unsigned long src;
	unsigned long svpfn;
	unsigned long start_vaddr;
	unsigned long end_vaddr;
	int copied = 0;

	if (!task)
		goto out;

	ret = -EINVAL;
	/* file position must be aligned */
	if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
		goto out_task;

	ret = 0;
	if (!count)
		goto out_task;

	pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
	pm.buffer = kmalloc(pm.len, GFP_TEMPORARY);
	ret = -ENOMEM;
	if (!pm.buffer)
		goto out_task;

	mm = mm_access(task, PTRACE_MODE_READ);
	ret = PTR_ERR(mm);
	if (!mm || IS_ERR(mm))
		goto out_free;

	pagemap_walk.pmd_entry = pagemap_pte_range;
	pagemap_walk.pte_hole = pagemap_pte_hole;
#ifdef CONFIG_HUGETLB_PAGE
	pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
#endif
	pagemap_walk.mm = mm;
	pagemap_walk.private = &pm;

	src = *ppos;
	svpfn = src / PM_ENTRY_BYTES;
	start_vaddr = svpfn << PAGE_SHIFT;
	end_vaddr = TASK_SIZE_OF(task);

	/* watch out for wraparound */
	if (svpfn > TASK_SIZE_OF(task) >> PAGE_SHIFT)
		start_vaddr = end_vaddr;

	/*
	 * The odds are that this will stop walking way
	 * before end_vaddr, because the length of the
	 * user buffer is tracked in "pm", and the walk
	 * will stop when we hit the end of the buffer.
	 */
	ret = 0;
	while (count && (start_vaddr < end_vaddr)) {
		int len;
		unsigned long end;

		pm.pos = 0;
		end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
		/* overflow ? */
		if (end < start_vaddr || end > end_vaddr)
			end = end_vaddr;
		down_read(&mm->mmap_sem);
		ret = walk_page_range(start_vaddr, end, &pagemap_walk);
		up_read(&mm->mmap_sem);
		start_vaddr = end;

		len = min(count, PM_ENTRY_BYTES * pm.pos);
		if (copy_to_user(buf, pm.buffer, len)) {
			ret = -EFAULT;
			goto out_mm;
		}
		copied += len;
		buf += len;
		count -= len;
	}
	*ppos += copied;
	if (!ret || ret == PM_END_OF_BUFFER)
		ret = copied;

out_mm:
	mmput(mm);
out_free:
	kfree(pm.buffer);
out_task:
	put_task_struct(task);
out:
	return ret;
}

const struct file_operations proc_pagemap_operations = {
	.llseek		= mem_lseek, /* borrow this */
	.read		= pagemap_read,
};
#endif /* CONFIG_PROC_PAGE_MONITOR */

#ifdef CONFIG_NUMA

struct numa_maps {
	struct vm_area_struct *vma;
	unsigned long pages;
	unsigned long anon;
	unsigned long active;
	unsigned long writeback;
	unsigned long mapcount_max;
	unsigned long dirty;
	unsigned long swapcache;
	unsigned long node[MAX_NUMNODES];
};

struct numa_maps_private {
	struct proc_maps_private proc_maps;
	struct numa_maps md;
};

static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
			unsigned long nr_pages)
{
	int count = page_mapcount(page);

	md->pages += nr_pages;
	if (pte_dirty || PageDirty(page))
		md->dirty += nr_pages;

	if (PageSwapCache(page))
		md->swapcache += nr_pages;

	if (PageActive(page) || PageUnevictable(page))
		md->active += nr_pages;

	if (PageWriteback(page))
		md->writeback += nr_pages;

	if (PageAnon(page))
		md->anon += nr_pages;

	if (count > md->mapcount_max)
		md->mapcount_max = count;

	md->node[page_to_nid(page)] += nr_pages;
}

static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
		unsigned long addr)
{
	struct page *page;
	int nid;

	if (!pte_present(pte))
		return NULL;

	page = vm_normal_page(vma, addr, pte);
	if (!page)
		return NULL;

	if (PageReserved(page))
		return NULL;

	nid = page_to_nid(page);
	if (!node_isset(nid, node_states[N_MEMORY]))
		return NULL;

	return page;
}

static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
		unsigned long end, struct mm_walk *walk)
{
	struct numa_maps *md;
	spinlock_t *ptl;
	pte_t *orig_pte;
	pte_t *pte;

	md = walk->private;

	if (pmd_trans_huge_lock(pmd, md->vma) == 1) {
		pte_t huge_pte = *(pte_t *)pmd;
		struct page *page;

		page = can_gather_numa_stats(huge_pte, md->vma, addr);
		if (page)
			gather_stats(page, md, pte_dirty(huge_pte),
				     HPAGE_PMD_SIZE/PAGE_SIZE);
		spin_unlock(&walk->mm->page_table_lock);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	do {
		struct page *page = can_gather_numa_stats(*pte, md->vma, addr);
		if (!page)
			continue;
		gather_stats(page, md, pte_dirty(*pte), 1);

	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	return 0;
}
#ifdef CONFIG_HUGETLB_PAGE
static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
		unsigned long addr, unsigned long end, struct mm_walk *walk)
{
	struct numa_maps *md;
	struct page *page;

	if (pte_none(*pte))
		return 0;

	page = pte_page(*pte);
	if (!page)
		return 0;

	md = walk->private;
	gather_stats(page, md, pte_dirty(*pte), 1);
	return 0;
}

#else
static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
		unsigned long addr, unsigned long end, struct mm_walk *walk)
{
	return 0;
}
#endif

/*
 * Display pages allocated per node and memory policy via /proc.
 */
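/*
 * An illustrative /proc/PID/numa_maps line, matching the format built
 * piecewise below (values are hypothetical):
 *
 *	00400000 default file=/usr/bin/cat mapped=2 active=1 N0=2
 */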
static int show_numa_map(struct seq_file *m, void *v, int is_pid)
{
	struct numa_maps_private *numa_priv = m->private;
	struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
	struct vm_area_struct *vma = v;
	struct numa_maps *md = &numa_priv->md;
	struct file *file = vma->vm_file;
	struct task_struct *task = proc_priv->task;
	struct mm_struct *mm = vma->vm_mm;
	struct mm_walk walk = {};
	struct mempolicy *pol;
	int n;
	char buffer[50];

	if (!mm)
		return 0;

	/* Ensure we start with an empty set of numa_maps statistics. */
	memset(md, 0, sizeof(*md));

	md->vma = vma;

	walk.hugetlb_entry = gather_hugetbl_stats;
	walk.pmd_entry = gather_pte_stats;
	walk.private = md;
	walk.mm = mm;

	pol = get_vma_policy(task, vma, vma->vm_start);
	mpol_to_str(buffer, sizeof(buffer), pol, 0);
	mpol_cond_put(pol);

	seq_printf(m, "%08lx %s", vma->vm_start, buffer);

	if (file) {
		seq_printf(m, " file=");
		seq_path(m, &file->f_path, "\n\t= ");
	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
		seq_printf(m, " heap");
	} else {
		pid_t tid = vm_is_stack(task, vma, is_pid);
		if (tid != 0) {
			/*
			 * Thread stack in /proc/PID/task/TID/maps or
			 * the main process stack.
			 */
			if (!is_pid || (vma->vm_start <= mm->start_stack &&
			    vma->vm_end >= mm->start_stack))
				seq_printf(m, " stack");
			else
				seq_printf(m, " stack:%d", tid);
		}
	}

	if (is_vm_hugetlb_page(vma))
		seq_printf(m, " huge");

	walk_page_range(vma->vm_start, vma->vm_end, &walk);

	if (!md->pages)
		goto out;

	if (md->anon)
		seq_printf(m, " anon=%lu", md->anon);

	if (md->dirty)
		seq_printf(m, " dirty=%lu", md->dirty);

	if (md->pages != md->anon && md->pages != md->dirty)
		seq_printf(m, " mapped=%lu", md->pages);

	if (md->mapcount_max > 1)
		seq_printf(m, " mapmax=%lu", md->mapcount_max);

	if (md->swapcache)
		seq_printf(m, " swapcache=%lu", md->swapcache);

	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
		seq_printf(m, " active=%lu", md->active);

	if (md->writeback)
		seq_printf(m, " writeback=%lu", md->writeback);

	for_each_node_state(n, N_MEMORY)
		if (md->node[n])
			seq_printf(m, " N%d=%lu", n, md->node[n]);
out:
	seq_putc(m, '\n');

	if (m->count < m->size)
		m->version = (vma != proc_priv->tail_vma) ? vma->vm_start : 0;
	return 0;
}

static int show_pid_numa_map(struct seq_file *m, void *v)
{
	return show_numa_map(m, v, 1);
}

static int show_tid_numa_map(struct seq_file *m, void *v)
{
	return show_numa_map(m, v, 0);
}

static const struct seq_operations proc_pid_numa_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_numa_map,
};

static const struct seq_operations proc_tid_numa_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_numa_map,
};

static int numa_maps_open(struct inode *inode, struct file *file,
			  const struct seq_operations *ops)
{
	struct numa_maps_private *priv;
	int ret = -ENOMEM;
	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (priv) {
		priv->proc_maps.pid = proc_pid(inode);
		ret = seq_open(file, ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = priv;
		} else {
			kfree(priv);
		}
	}
	return ret;
}

static int pid_numa_maps_open(struct inode *inode, struct file *file)
{
	return numa_maps_open(inode, file, &proc_pid_numa_maps_op);
}

static int tid_numa_maps_open(struct inode *inode, struct file *file)
{
	return numa_maps_open(inode, file, &proc_tid_numa_maps_op);
}

const struct file_operations proc_pid_numa_maps_operations = {
	.open		= pid_numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

const struct file_operations proc_tid_numa_maps_operations = {
	.open		= tid_numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
#endif /* CONFIG_NUMA */