#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include <asm/elf.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include "internal.h"

void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	unsigned long data, text, lib, swap;
	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

	/*
	 * Note: to minimize their overhead, mm maintains hiwater_vm and
	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
	 * collector of these hiwater stats must therefore get total_vm
	 * and rss too, which will usually be the higher.  Barriers? not
	 * worth the effort, such snapshots can always be inconsistent.
	 */
	hiwater_vm = total_vm = mm->total_vm;
	if (hiwater_vm < mm->hiwater_vm)
		hiwater_vm = mm->hiwater_vm;
	hiwater_rss = total_rss = get_mm_rss(mm);
	if (hiwater_rss < mm->hiwater_rss)
		hiwater_rss = mm->hiwater_rss;

	data = mm->total_vm - mm->shared_vm - mm->stack_vm;
	text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
	lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
	swap = get_mm_counter(mm, MM_SWAPENTS);
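	/*
	 * Conversion note (illustrative): the counters below are kept in
	 * pages, and shifting left by (PAGE_SHIFT-10) turns a page count
	 * into kB (with 4K pages PAGE_SHIFT is 12, so the shift is by 2,
	 * i.e. a multiply by 4).
	 */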
	seq_printf(m,
		"VmPeak:\t%8lu kB\n"
		"VmSize:\t%8lu kB\n"
		"VmLck:\t%8lu kB\n"
		"VmHWM:\t%8lu kB\n"
		"VmRSS:\t%8lu kB\n"
		"VmData:\t%8lu kB\n"
		"VmStk:\t%8lu kB\n"
		"VmExe:\t%8lu kB\n"
		"VmLib:\t%8lu kB\n"
		"VmPTE:\t%8lu kB\n"
		"VmSwap:\t%8lu kB\n",
		hiwater_vm << (PAGE_SHIFT-10),
		(total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
		mm->locked_vm << (PAGE_SHIFT-10),
		hiwater_rss << (PAGE_SHIFT-10),
		total_rss << (PAGE_SHIFT-10),
		data << (PAGE_SHIFT-10),
		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
		(PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
		swap << (PAGE_SHIFT-10));
}

unsigned long task_vsize(struct mm_struct *mm)
{
	return PAGE_SIZE * mm->total_vm;
}

int task_statm(struct mm_struct *mm, int *shared, int *text,
	       int *data, int *resident)
{
	*shared = get_mm_counter(mm, MM_FILEPAGES);
	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
								>> PAGE_SHIFT;
	*data = mm->total_vm - mm->shared_vm;
	*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
	return mm->total_vm;
}

static void pad_len_spaces(struct seq_file *m, int len)
{
	len = 25 + sizeof(void*) * 6 - len;
	if (len < 1)
		len = 1;
	seq_printf(m, "%*c", len, ' ');
}

static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
{
	if (vma && vma != priv->tail_vma) {
		struct mm_struct *mm = vma->vm_mm;
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
}

static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	unsigned long last_addr = m->version;
	struct mm_struct *mm;
	struct vm_area_struct *vma, *tail_vma = NULL;
	loff_t l = *pos;

	/* Clear the per syscall fields in priv */
	priv->task = NULL;
	priv->tail_vma = NULL;

	/*
	 * We remember last_addr rather than next_addr to hit with
	 * mmap_cache most of the time. We have zero last_addr at
	 * the beginning and also after lseek. We will have -1 last_addr
	 * after the end of the vmas.
	 */

	if (last_addr == -1UL)
		return NULL;

	priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
	if (!priv->task)
		return NULL;

	mm = mm_for_maps(priv->task);
	if (!mm)
		return NULL;
	down_read(&mm->mmap_sem);

	tail_vma = get_gate_vma(priv->task);
	priv->tail_vma = tail_vma;

	/* Start with last addr hint */
	vma = find_vma(mm, last_addr);
	if (last_addr && vma) {
		vma = vma->vm_next;
		goto out;
	}

	/*
	 * Check the vma index is within the range and do
	 * sequential scan until m_index.
	 */
	vma = NULL;
	if ((unsigned long)l < mm->map_count) {
		vma = mm->mmap;
		while (l-- && vma)
			vma = vma->vm_next;
		goto out;
	}

	if (l != mm->map_count)
		tail_vma = NULL; /* After gate vma */

out:
	if (vma)
		return vma;

	/* End of vmas has been reached */
	m->version = (tail_vma != NULL)? 0: -1UL;
	up_read(&mm->mmap_sem);
	mmput(mm);
	return tail_vma;
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;
	struct vm_area_struct *tail_vma = priv->tail_vma;

	(*pos)++;
	if (vma && (vma != tail_vma) && vma->vm_next)
		return vma->vm_next;
	vma_stop(priv, vma);
	return (vma != tail_vma)? tail_vma: NULL;
}

static void m_stop(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;

	vma_stop(priv, vma);
	if (priv->task)
		put_task_struct(priv->task);
}

static int do_maps_open(struct inode *inode, struct file *file,
			const struct seq_operations *ops)
{
	struct proc_maps_private *priv;
	int ret = -ENOMEM;
	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (priv) {
		priv->pid = proc_pid(inode);
		ret = seq_open(file, ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = priv;
		} else {
			kfree(priv);
		}
	}
	return ret;
}

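/*
 * Illustrative shape of one /proc/<pid>/maps line emitted by
 * show_map_vma() below (all numbers made up):
 *
 *   08048000-08056000 r-xp 00000000 03:0c 64593      /usr/sbin/gpm
 *
 * i.e. start-end, permissions, file offset, device major:minor, inode,
 * then the mapped file name or a [heap]/[stack]/[vdso] marker.
 */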
static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	int flags = vma->vm_flags;
	unsigned long ino = 0;
	unsigned long long pgoff = 0;
	dev_t dev = 0;
	int len;

	if (file) {
		struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
	}

	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
			vma->vm_start,
			vma->vm_end,
			flags & VM_READ ? 'r' : '-',
			flags & VM_WRITE ? 'w' : '-',
			flags & VM_EXEC ? 'x' : '-',
			flags & VM_MAYSHARE ? 's' : 'p',
			pgoff,
			MAJOR(dev), MINOR(dev), ino, &len);

	/*
	 * Print the dentry name for named mappings, and a
	 * special [heap] marker for the heap:
	 */
	if (file) {
		pad_len_spaces(m, len);
		seq_path(m, &file->f_path, "\n");
	} else {
		const char *name = arch_vma_name(vma);
		if (!name) {
			if (mm) {
				if (vma->vm_start <= mm->start_brk &&
						vma->vm_end >= mm->brk) {
					name = "[heap]";
				} else if (vma->vm_start <= mm->start_stack &&
					   vma->vm_end >= mm->start_stack) {
					name = "[stack]";
				}
			} else {
				name = "[vdso]";
			}
		}
		if (name) {
			pad_len_spaces(m, len);
			seq_puts(m, name);
		}
	}
	seq_putc(m, '\n');
}

static int show_map(struct seq_file *m, void *v)
{
	struct vm_area_struct *vma = v;
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;

	show_map_vma(m, vma);

	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task))? vma->vm_start: 0;
	return 0;
}

static const struct seq_operations proc_pid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_map
};

static int maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_maps_op);
}

const struct file_operations proc_maps_operations = {
	.open		= maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

/*
 * Proportional Set Size(PSS): my share of RSS.
 *
 * PSS of a process is the count of pages it has in memory, where each
 * page is divided by the number of processes sharing it.  So if a
 * process has 1000 pages all to itself, and 1000 shared with one other
 * process, its PSS will be 1500.
 *
 * To keep (accumulated) division errors low, we adopt a 64bit
 * fixed-point pss counter to minimize division errors. So (pss >>
 * PSS_SHIFT) would be the real byte count.
 *
 * A shift of 12 before division means (assuming 4K page size):
 * 	- 1M 3-user-pages add up to 8KB errors;
 * 	- supports mapcount up to 2^24, or 16M;
 * 	- supports PSS up to 2^52 bytes, or 4PB.
 */
#define PSS_SHIFT 12
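
/*
 * Illustrative arithmetic (assuming a 4K page size and PSS_SHIFT == 12):
 * a page mapped by three processes adds (4096 << 12) / 3 = 5592405
 * fixed-point units to each mapper's pss; shifting the accumulated
 * total right by PSS_SHIFT recovers the byte count reported in smaps.
 */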

#ifdef CONFIG_PROC_PAGE_MONITOR
struct mem_size_stats {
	struct vm_area_struct *vma;
	unsigned long resident;
	unsigned long shared_clean;
	unsigned long shared_dirty;
	unsigned long private_clean;
	unsigned long private_dirty;
	unsigned long referenced;
	unsigned long swap;
	u64 pss;
};

static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			   struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = mss->vma;
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;
	int mapcount;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;

		if (is_swap_pte(ptent)) {
			mss->swap += PAGE_SIZE;
			continue;
		}

		if (!pte_present(ptent))
			continue;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		mss->resident += PAGE_SIZE;
		/* Accumulate the size in pages that have been accessed. */
		if (pte_young(ptent) || PageReferenced(page))
			mss->referenced += PAGE_SIZE;
		mapcount = page_mapcount(page);
		if (mapcount >= 2) {
			if (pte_dirty(ptent))
				mss->shared_dirty += PAGE_SIZE;
			else
				mss->shared_clean += PAGE_SIZE;
			mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
		} else {
			if (pte_dirty(ptent))
				mss->private_dirty += PAGE_SIZE;
			else
				mss->private_clean += PAGE_SIZE;
			mss->pss += (PAGE_SIZE << PSS_SHIFT);
		}
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

static int show_smap(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;
	struct vm_area_struct *vma = v;
	struct mem_size_stats mss;
	struct mm_walk smaps_walk = {
		.pmd_entry = smaps_pte_range,
		.mm = vma->vm_mm,
		.private = &mss,
	};

	memset(&mss, 0, sizeof mss);
	mss.vma = vma;
	/* mmap_sem is held in m_start */
	if (vma->vm_mm && !is_vm_hugetlb_page(vma))
		walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);

	show_map_vma(m, vma);

	seq_printf(m,
		   "Size:           %8lu kB\n"
		   "Rss:            %8lu kB\n"
		   "Pss:            %8lu kB\n"
		   "Shared_Clean:   %8lu kB\n"
		   "Shared_Dirty:   %8lu kB\n"
		   "Private_Clean:  %8lu kB\n"
		   "Private_Dirty:  %8lu kB\n"
		   "Referenced:     %8lu kB\n"
		   "Swap:           %8lu kB\n"
		   "KernelPageSize: %8lu kB\n"
		   "MMUPageSize:    %8lu kB\n",
		   (vma->vm_end - vma->vm_start) >> 10,
		   mss.resident >> 10,
		   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
		   mss.shared_clean  >> 10,
		   mss.shared_dirty  >> 10,
		   mss.private_clean >> 10,
		   mss.private_dirty >> 10,
		   mss.referenced >> 10,
		   mss.swap >> 10,
		   vma_kernel_pagesize(vma) >> 10,
		   vma_mmu_pagesize(vma) >> 10);

	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task)) ? vma->vm_start : 0;
	return 0;
}

static const struct seq_operations proc_pid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_smap
};

static int smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_smaps_op);
}

const struct file_operations proc_smaps_operations = {
	.open		= smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->private;
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;
		if (!pte_present(ptent))
			continue;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/* Clear accessed and referenced bits. */
		ptep_test_and_clear_young(vma, addr, pte);
		ClearPageReferenced(page);
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

#define CLEAR_REFS_ALL 1
#define CLEAR_REFS_ANON 2
#define CLEAR_REFS_MAPPED 3
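
/*
 * Typical usage from userspace (illustrative):
 *
 *   echo 1 > /proc/<pid>/clear_refs	clear referenced bits on all pages
 *   echo 2 > /proc/<pid>/clear_refs	anonymous pages only
 *   echo 3 > /proc/<pid>/clear_refs	file-mapped pages only
 */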

static ssize_t clear_refs_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct task_struct *task;
	char buffer[PROC_NUMBUF];
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	long type;

	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;
	if (copy_from_user(buffer, buf, count))
		return -EFAULT;
	if (strict_strtol(strstrip(buffer), 10, &type))
		return -EINVAL;
	if (type < CLEAR_REFS_ALL || type > CLEAR_REFS_MAPPED)
		return -EINVAL;
	task = get_proc_task(file->f_path.dentry->d_inode);
	if (!task)
		return -ESRCH;
	mm = get_task_mm(task);
	if (mm) {
		struct mm_walk clear_refs_walk = {
			.pmd_entry = clear_refs_pte_range,
			.mm = mm,
		};
		down_read(&mm->mmap_sem);
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			clear_refs_walk.private = vma;
			if (is_vm_hugetlb_page(vma))
				continue;
			/*
			 * Writing 1 to /proc/pid/clear_refs affects all pages.
			 *
			 * Writing 2 to /proc/pid/clear_refs only affects
			 * Anonymous pages.
			 *
			 * Writing 3 to /proc/pid/clear_refs only affects file
			 * mapped pages.
			 */
			if (type == CLEAR_REFS_ANON && vma->vm_file)
				continue;
			if (type == CLEAR_REFS_MAPPED && !vma->vm_file)
				continue;
			walk_page_range(vma->vm_start, vma->vm_end,
					&clear_refs_walk);
		}
		flush_tlb_mm(mm);
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
	put_task_struct(task);

	return count;
}

const struct file_operations proc_clear_refs_operations = {
	.write		= clear_refs_write,
};

struct pagemapread {
	int pos, len;
	u64 *buffer;
};

#define PM_ENTRY_BYTES      sizeof(u64)
#define PM_STATUS_BITS      3
#define PM_STATUS_OFFSET    (64 - PM_STATUS_BITS)
#define PM_STATUS_MASK      (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET)
#define PM_STATUS(nr)       (((nr) << PM_STATUS_OFFSET) & PM_STATUS_MASK)
#define PM_PSHIFT_BITS      6
#define PM_PSHIFT_OFFSET    (PM_STATUS_OFFSET - PM_PSHIFT_BITS)
#define PM_PSHIFT_MASK      (((1LL << PM_PSHIFT_BITS) - 1) << PM_PSHIFT_OFFSET)
#define PM_PSHIFT(x)        (((u64) (x) << PM_PSHIFT_OFFSET) & PM_PSHIFT_MASK)
#define PM_PFRAME_MASK      ((1LL << PM_PSHIFT_OFFSET) - 1)
#define PM_PFRAME(x)        ((x) & PM_PFRAME_MASK)

#define PM_PRESENT          PM_STATUS(4LL)
#define PM_SWAP             PM_STATUS(2LL)
#define PM_NOT_PRESENT      PM_PSHIFT(PAGE_SHIFT)
#define PM_END_OF_BUFFER    1

static int add_to_pagemap(unsigned long addr, u64 pfn,
			  struct pagemapread *pm)
{
	pm->buffer[pm->pos++] = pfn;
	if (pm->pos >= pm->len)
		return PM_END_OF_BUFFER;
	return 0;
}

static int pagemap_pte_hole(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	unsigned long addr;
	int err = 0;
	for (addr = start; addr < end; addr += PAGE_SIZE) {
		err = add_to_pagemap(addr, PM_NOT_PRESENT, pm);
		if (err)
			break;
	}
	return err;
}

static u64 swap_pte_to_pagemap_entry(pte_t pte)
{
	swp_entry_t e = pte_to_swp_entry(pte);
	return swp_type(e) | (swp_offset(e) << MAX_SWAPFILES_SHIFT);
}

static u64 pte_to_pagemap_entry(pte_t pte)
{
	u64 pme = 0;
	if (is_swap_pte(pte))
		pme = PM_PFRAME(swap_pte_to_pagemap_entry(pte))
			| PM_PSHIFT(PAGE_SHIFT) | PM_SWAP;
	else if (pte_present(pte))
		pme = PM_PFRAME(pte_pfn(pte))
			| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
	return pme;
}

static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct vm_area_struct *vma;
	struct pagemapread *pm = walk->private;
	pte_t *pte;
	int err = 0;

	/* find the first VMA at or above 'addr' */
	vma = find_vma(walk->mm, addr);
	for (; addr != end; addr += PAGE_SIZE) {
		u64 pfn = PM_NOT_PRESENT;

		/* check to see if we've left 'vma' behind
		 * and need a new, higher one */
		if (vma && (addr >= vma->vm_end))
			vma = find_vma(walk->mm, addr);

		/* check that 'vma' actually covers this address,
		 * and that it isn't a huge page vma */
		if (vma && (vma->vm_start <= addr) &&
		    !is_vm_hugetlb_page(vma)) {
			pte = pte_offset_map(pmd, addr);
			pfn = pte_to_pagemap_entry(*pte);
			/* unmap before userspace copy */
			pte_unmap(pte);
		}
		err = add_to_pagemap(addr, pfn, pm);
		if (err)
			return err;
	}

	cond_resched();

	return err;
}

static u64 huge_pte_to_pagemap_entry(pte_t pte, int offset)
{
	u64 pme = 0;
	if (pte_present(pte))
		pme = PM_PFRAME(pte_pfn(pte) + offset)
			| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
	return pme;
}

/* This function walks within one hugetlb entry in the single call */
static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
				 unsigned long addr, unsigned long end,
				 struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	int err = 0;
	u64 pfn;

	for (; addr != end; addr += PAGE_SIZE) {
		int offset = (addr & ~hmask) >> PAGE_SHIFT;
		pfn = huge_pte_to_pagemap_entry(*pte, offset);
		err = add_to_pagemap(addr, pfn, pm);
		if (err)
			return err;
	}

	cond_resched();

	return err;
}

/*
 * /proc/pid/pagemap - an array mapping virtual pages to pfns
 *
 * For each page in the address space, this file contains one 64-bit entry
 * consisting of the following:
 *
 * Bits 0-54  page frame number (PFN) if present
 * Bits 0-4   swap type if swapped
 * Bits 5-54  swap offset if swapped
 * Bits 55-60 page shift (page size = 1<<page shift)
 * Bit  61    reserved for future use
 * Bit  62    page swapped
 * Bit  63    page present
 *
 * If the page is not present but in swap, then the PFN contains an
 * encoding of the swap file number and the page's offset into the
 * swap. Unmapped pages return a null PFN. This allows determining
 * precisely which pages are mapped (or in swap) and comparing mapped
 * pages between processes.
 *
 * Efficient users of this interface will use /proc/pid/maps to
 * determine which areas of memory are actually mapped and llseek to
 * skip over unmapped regions.
 */
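/*
 * Worked example (illustrative, not part of the kernel build): a
 * userspace reader locates the entry for virtual address vaddr with
 *
 *	off_t offset = (vaddr / page_size) * PM_ENTRY_BYTES;
 *	pread(fd, &entry, sizeof(entry), offset);
 *
 * where fd is an open descriptor on /proc/<pid>/pagemap and entry is a
 * u64; bit 63 of entry then says whether the page is present, and the
 * bits covered by PM_PFRAME_MASK hold its page frame number.
 */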
#define PAGEMAP_WALK_SIZE	(PMD_SIZE)
static ssize_t pagemap_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
	struct mm_struct *mm;
	struct pagemapread pm;
	int ret = -ESRCH;
	struct mm_walk pagemap_walk = {};
	unsigned long src;
	unsigned long svpfn;
	unsigned long start_vaddr;
	unsigned long end_vaddr;
	int copied = 0;

	if (!task)
		goto out;

	ret = -EACCES;
	if (!ptrace_may_access(task, PTRACE_MODE_READ))
		goto out_task;

	ret = -EINVAL;
	/* file position must be aligned */
	if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
		goto out_task;

	ret = 0;

	if (!count)
		goto out_task;

	mm = get_task_mm(task);
	if (!mm)
		goto out_task;

	pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
	pm.buffer = kmalloc(pm.len, GFP_TEMPORARY);
	ret = -ENOMEM;
	if (!pm.buffer)
		goto out_mm;

	pagemap_walk.pmd_entry = pagemap_pte_range;
	pagemap_walk.pte_hole = pagemap_pte_hole;
	pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
	pagemap_walk.mm = mm;
	pagemap_walk.private = &pm;

	src = *ppos;
	svpfn = src / PM_ENTRY_BYTES;
	start_vaddr = svpfn << PAGE_SHIFT;
	end_vaddr = TASK_SIZE_OF(task);

	/* watch out for wraparound */
	if (svpfn > TASK_SIZE_OF(task) >> PAGE_SHIFT)
		start_vaddr = end_vaddr;

	/*
	 * The odds are that this will stop walking way
	 * before end_vaddr, because the length of the
	 * user buffer is tracked in "pm", and the walk
	 * will stop when we hit the end of the buffer.
	 */
	ret = 0;
	while (count && (start_vaddr < end_vaddr)) {
		int len;
		unsigned long end;

		pm.pos = 0;
		end = start_vaddr + PAGEMAP_WALK_SIZE;
		/* overflow ? */
		if (end < start_vaddr || end > end_vaddr)
			end = end_vaddr;
		down_read(&mm->mmap_sem);
		ret = walk_page_range(start_vaddr, end, &pagemap_walk);
		up_read(&mm->mmap_sem);
		start_vaddr = end;

		len = min(count, PM_ENTRY_BYTES * pm.pos);
		if (copy_to_user(buf, pm.buffer, len)) {
			ret = -EFAULT;
			goto out_free;
		}
		copied += len;
		buf += len;
		count -= len;
	}
	*ppos += copied;
	if (!ret || ret == PM_END_OF_BUFFER)
		ret = copied;

out_free:
	kfree(pm.buffer);
out_mm:
	mmput(mm);
out_task:
	put_task_struct(task);
out:
	return ret;
}

const struct file_operations proc_pagemap_operations = {
	.llseek		= mem_lseek, /* borrow this */
	.read		= pagemap_read,
};
#endif /* CONFIG_PROC_PAGE_MONITOR */

#ifdef CONFIG_NUMA
extern int show_numa_map(struct seq_file *m, void *v);

static const struct seq_operations proc_pid_numa_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_numa_map,
};

static int numa_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_numa_maps_op);
}

const struct file_operations proc_numa_maps_operations = {
	.open		= numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
#endif