#include <linux/mm.h>
#include <linux/vmacache.h>
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>

#include <asm/elf.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include "internal.h"

void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	unsigned long data, text, lib, swap, ptes, pmds;
	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

	/*
	 * Note: to minimize their overhead, mm maintains hiwater_vm and
	 * hiwater_rss only when about to *lower* total_vm or rss. Any
	 * collector of these hiwater stats must therefore get total_vm
	 * and rss too, which will usually be the higher. Barriers? not
	 * worth the effort, such snapshots can always be inconsistent.
	 */
	hiwater_vm = total_vm = mm->total_vm;
	if (hiwater_vm < mm->hiwater_vm)
		hiwater_vm = mm->hiwater_vm;
	hiwater_rss = total_rss = get_mm_rss(mm);
	if (hiwater_rss < mm->hiwater_rss)
		hiwater_rss = mm->hiwater_rss;

	data = mm->total_vm - mm->shared_vm - mm->stack_vm;
	text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
	lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
	swap = get_mm_counter(mm, MM_SWAPENTS);
	ptes = PTRS_PER_PTE * sizeof(pte_t) * atomic_long_read(&mm->nr_ptes);
	pmds = PTRS_PER_PMD * sizeof(pmd_t) * mm_nr_pmds(mm);
	seq_printf(m,
		"VmPeak:\t%8lu kB\n"
		"VmSize:\t%8lu kB\n"
		"VmLck:\t%8lu kB\n"
		"VmPin:\t%8lu kB\n"
		"VmHWM:\t%8lu kB\n"
		"VmRSS:\t%8lu kB\n"
		"VmData:\t%8lu kB\n"
		"VmStk:\t%8lu kB\n"
		"VmExe:\t%8lu kB\n"
		"VmLib:\t%8lu kB\n"
		"VmPTE:\t%8lu kB\n"
		"VmPMD:\t%8lu kB\n"
		"VmSwap:\t%8lu kB\n",
		hiwater_vm << (PAGE_SHIFT-10),
		total_vm << (PAGE_SHIFT-10),
		mm->locked_vm << (PAGE_SHIFT-10),
		mm->pinned_vm << (PAGE_SHIFT-10),
		hiwater_rss << (PAGE_SHIFT-10),
		total_rss << (PAGE_SHIFT-10),
		data << (PAGE_SHIFT-10),
		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
		ptes >> 10,
		pmds >> 10,
		swap << (PAGE_SHIFT-10));
}

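/*
 * Illustration only, not part of the kernel build: a minimal userspace
 * sketch of how the fields emitted by task_mem() above are typically
 * consumed. It scans /proc/self/status for VmRSS and VmHWM; the parsing
 * below is an assumption of this sketch, not a kernel API.
 */
#if 0
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[128];
	FILE *f = fopen("/proc/self/status", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f)) {
		/* VmHWM is the peak RSS; VmRSS the current one (both in kB). */
		if (!strncmp(line, "VmRSS:", 6) || !strncmp(line, "VmHWM:", 6))
			fputs(line, stdout);
	}
	fclose(f);
	return 0;
}
#endif
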
unsigned long task_vsize(struct mm_struct *mm)
{
	return PAGE_SIZE * mm->total_vm;
}

unsigned long task_statm(struct mm_struct *mm,
			 unsigned long *shared, unsigned long *text,
			 unsigned long *data, unsigned long *resident)
{
	*shared = get_mm_counter(mm, MM_FILEPAGES);
	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
								>> PAGE_SHIFT;
	*data = mm->total_vm - mm->shared_vm;
	*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
	return mm->total_vm;
}

#ifdef CONFIG_NUMA
/*
 * Save get_task_policy() for show_numa_map().
 */
static void hold_task_mempolicy(struct proc_maps_private *priv)
{
	struct task_struct *task = priv->task;

	task_lock(task);
	priv->task_mempolicy = get_task_policy(task);
	mpol_get(priv->task_mempolicy);
	task_unlock(task);
}
static void release_task_mempolicy(struct proc_maps_private *priv)
{
	mpol_put(priv->task_mempolicy);
}
#else
static void hold_task_mempolicy(struct proc_maps_private *priv)
{
}
static void release_task_mempolicy(struct proc_maps_private *priv)
{
}
#endif

static void vma_stop(struct proc_maps_private *priv)
{
	struct mm_struct *mm = priv->mm;

	release_task_mempolicy(priv);
	up_read(&mm->mmap_sem);
	mmput(mm);
}

static struct vm_area_struct *
m_next_vma(struct proc_maps_private *priv, struct vm_area_struct *vma)
{
	if (vma == priv->tail_vma)
		return NULL;
	return vma->vm_next ?: priv->tail_vma;
}

static void m_cache_vma(struct seq_file *m, struct vm_area_struct *vma)
{
	if (m->count < m->size)	/* vma is copied successfully */
		m->version = m_next_vma(m->private, vma) ? vma->vm_start : -1UL;
}

static void *m_start(struct seq_file *m, loff_t *ppos)
{
	struct proc_maps_private *priv = m->private;
	unsigned long last_addr = m->version;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned int pos = *ppos;

	/* See m_cache_vma(). Zero at the start or after lseek. */
	if (last_addr == -1UL)
		return NULL;

	priv->task = get_proc_task(priv->inode);
	if (!priv->task)
		return ERR_PTR(-ESRCH);

	mm = priv->mm;
	if (!mm || !atomic_inc_not_zero(&mm->mm_users))
		return NULL;

	down_read(&mm->mmap_sem);
	hold_task_mempolicy(priv);
	priv->tail_vma = get_gate_vma(mm);

	if (last_addr) {
		vma = find_vma(mm, last_addr);
		if (vma && (vma = m_next_vma(priv, vma)))
			return vma;
	}

	m->version = 0;
	if (pos < mm->map_count) {
		for (vma = mm->mmap; pos; pos--) {
			m->version = vma->vm_start;
			vma = vma->vm_next;
		}
		return vma;
	}

	/* we do not bother to update m->version in this case */
	if (pos == mm->map_count && priv->tail_vma)
		return priv->tail_vma;

	vma_stop(priv);
	return NULL;
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *next;

	(*pos)++;
	next = m_next_vma(priv, v);
	if (!next)
		vma_stop(priv);
	return next;
}

static void m_stop(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;

	if (!IS_ERR_OR_NULL(v))
		vma_stop(priv);
	if (priv->task) {
		put_task_struct(priv->task);
		priv->task = NULL;
	}
}

static int proc_maps_open(struct inode *inode, struct file *file,
			const struct seq_operations *ops, int psize)
{
	struct proc_maps_private *priv = __seq_open_private(file, ops, psize);

	if (!priv)
		return -ENOMEM;

	priv->inode = inode;
	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
	if (IS_ERR(priv->mm)) {
		int err = PTR_ERR(priv->mm);

		seq_release_private(inode, file);
		return err;
	}

	return 0;
}

static int proc_map_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct proc_maps_private *priv = seq->private;

	if (priv->mm)
		mmdrop(priv->mm);

	return seq_release_private(inode, file);
}

static int do_maps_open(struct inode *inode, struct file *file,
			const struct seq_operations *ops)
{
	return proc_maps_open(inode, file, ops,
				sizeof(struct proc_maps_private));
}

static pid_t pid_of_stack(struct proc_maps_private *priv,
				struct vm_area_struct *vma, bool is_pid)
{
	struct inode *inode = priv->inode;
	struct task_struct *task;
	pid_t ret = 0;

	rcu_read_lock();
	task = pid_task(proc_pid(inode), PIDTYPE_PID);
	if (task) {
		task = task_of_stack(task, vma, is_pid);
		if (task)
			ret = task_pid_nr_ns(task, inode->i_sb->s_fs_info);
	}
	rcu_read_unlock();

	return ret;
}

static void
show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
{
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	struct proc_maps_private *priv = m->private;
	vm_flags_t flags = vma->vm_flags;
	unsigned long ino = 0;
	unsigned long long pgoff = 0;
	unsigned long start, end;
	dev_t dev = 0;
	const char *name = NULL;

	if (file) {
		struct inode *inode = file_inode(vma->vm_file);
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
	}

	/* We don't show the stack guard page in /proc/maps */
	start = vma->vm_start;
	if (stack_guard_page_start(vma, start))
		start += PAGE_SIZE;
	end = vma->vm_end;
	if (stack_guard_page_end(vma, end))
		end -= PAGE_SIZE;

	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
			start,
			end,
			flags & VM_READ ? 'r' : '-',
			flags & VM_WRITE ? 'w' : '-',
			flags & VM_EXEC ? 'x' : '-',
			flags & VM_MAYSHARE ? 's' : 'p',
			pgoff,
			MAJOR(dev), MINOR(dev), ino);

	/*
	 * Print the dentry name for named mappings, and a
	 * special [heap] marker for the heap:
	 */
	if (file) {
		seq_pad(m, ' ');
		seq_file_path(m, file, "\n");
		goto done;
	}

	if (vma->vm_ops && vma->vm_ops->name) {
		name = vma->vm_ops->name(vma);
		if (name)
			goto done;
	}

	name = arch_vma_name(vma);
	if (!name) {
		pid_t tid;

		if (!mm) {
			name = "[vdso]";
			goto done;
		}

		if (vma->vm_start <= mm->brk &&
		    vma->vm_end >= mm->start_brk) {
			name = "[heap]";
			goto done;
		}

		tid = pid_of_stack(priv, vma, is_pid);
		if (tid != 0) {
			/*
			 * Thread stack in /proc/PID/task/TID/maps or
			 * the main process stack.
			 */
			if (!is_pid || (vma->vm_start <= mm->start_stack &&
			    vma->vm_end >= mm->start_stack)) {
				name = "[stack]";
			} else {
				/* Thread stack in /proc/PID/maps */
				seq_pad(m, ' ');
				seq_printf(m, "[stack:%d]", tid);
			}
		}
	}

done:
	if (name) {
		seq_pad(m, ' ');
		seq_puts(m, name);
	}
	seq_putc(m, '\n');
}

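/*
 * For reference, a line produced by show_map_vma() looks like this
 * (illustrative values only):
 *
 *   00400000-0040b000 r-xp 00000000 08:02 173521    /usr/bin/dbus-daemon
 *
 * i.e. start-end, permissions, file offset, major:minor device, inode,
 * then the pathname or a pseudo-name such as [heap] or [stack].
 */
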
static int show_map(struct seq_file *m, void *v, int is_pid)
{
	show_map_vma(m, v, is_pid);
	m_cache_vma(m, v);
	return 0;
}

static int show_pid_map(struct seq_file *m, void *v)
{
	return show_map(m, v, 1);
}

static int show_tid_map(struct seq_file *m, void *v)
{
	return show_map(m, v, 0);
}

static const struct seq_operations proc_pid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_map
};

static const struct seq_operations proc_tid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_map
};

static int pid_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_maps_op);
}

static int tid_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_tid_maps_op);
}

const struct file_operations proc_pid_maps_operations = {
	.open		= pid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};

const struct file_operations proc_tid_maps_operations = {
	.open		= tid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};

/*
 * Proportional Set Size(PSS): my share of RSS.
 *
 * PSS of a process is the count of pages it has in memory, where each
 * page is divided by the number of processes sharing it. So if a
 * process has 1000 pages all to itself, and 1000 shared with one other
 * process, its PSS will be 1500.
 *
 * To keep (accumulated) division errors low, we adopt a 64bit
 * fixed-point pss counter to minimize division errors. So (pss >>
 * PSS_SHIFT) would be the real byte count.
 *
 * A shift of 12 before division means (assuming 4K page size):
 * 	- 1M 3-user-pages add up to 8KB errors;
 * 	- supports mapcount up to 2^24, or 16M;
 * 	- supports PSS up to 2^52 bytes, or 4PB.
 */
#define PSS_SHIFT 12

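/*
 * Worked example of the fixed-point scheme above (a sketch assuming a
 * 4K page with mapcount 3): each mapper accumulates
 * (4096 << PSS_SHIFT) / 3 = 5592405 into its pss counter, and only
 * (pss >> PSS_SHIFT) is ever reported, so each division truncates at
 * most a few 2^-12-byte units instead of up to several whole bytes per
 * page that a plain byte-granularity division would lose.
 */
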
#ifdef CONFIG_PROC_PAGE_MONITOR
struct mem_size_stats {
	unsigned long resident;
	unsigned long shared_clean;
	unsigned long shared_dirty;
	unsigned long private_clean;
	unsigned long private_dirty;
	unsigned long referenced;
	unsigned long anonymous;
	unsigned long anonymous_thp;
	unsigned long swap;
	unsigned long shared_hugetlb;
	unsigned long private_hugetlb;
	u64 pss;
	u64 swap_pss;
};

static void smaps_account(struct mem_size_stats *mss, struct page *page,
		unsigned long size, bool young, bool dirty)
{
	int mapcount;

	if (PageAnon(page))
		mss->anonymous += size;

	mss->resident += size;
	/* Accumulate the size in pages that have been accessed. */
	if (young || page_is_young(page) || PageReferenced(page))
		mss->referenced += size;
	mapcount = page_mapcount(page);
	if (mapcount >= 2) {
		u64 pss_delta;

		if (dirty || PageDirty(page))
			mss->shared_dirty += size;
		else
			mss->shared_clean += size;
		pss_delta = (u64)size << PSS_SHIFT;
		do_div(pss_delta, mapcount);
		mss->pss += pss_delta;
	} else {
		if (dirty || PageDirty(page))
			mss->private_dirty += size;
		else
			mss->private_clean += size;
		mss->pss += (u64)size << PSS_SHIFT;
	}
}

static void smaps_pte_entry(pte_t *pte, unsigned long addr,
		struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = walk->vma;
	struct page *page = NULL;

	if (pte_present(*pte)) {
		page = vm_normal_page(vma, addr, *pte);
	} else if (is_swap_pte(*pte)) {
		swp_entry_t swpent = pte_to_swp_entry(*pte);

		if (!non_swap_entry(swpent)) {
			int mapcount;

			mss->swap += PAGE_SIZE;
			mapcount = swp_swapcount(swpent);
			if (mapcount >= 2) {
				u64 pss_delta = (u64)PAGE_SIZE << PSS_SHIFT;

				do_div(pss_delta, mapcount);
				mss->swap_pss += pss_delta;
			} else {
				mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT;
			}
		} else if (is_migration_entry(swpent))
			page = migration_entry_to_page(swpent);
	}

	if (!page)
		return;
	smaps_account(mss, page, PAGE_SIZE, pte_young(*pte), pte_dirty(*pte));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
		struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = walk->vma;
	struct page *page;

	/* FOLL_DUMP will return -EFAULT on huge zero page */
	page = follow_trans_huge_pmd(vma, addr, pmd, FOLL_DUMP);
	if (IS_ERR_OR_NULL(page))
		return;
	mss->anonymous_thp += HPAGE_PMD_SIZE;
	smaps_account(mss, page, HPAGE_PMD_SIZE,
			pmd_young(*pmd), pmd_dirty(*pmd));
}
#else
static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
		struct mm_walk *walk)
{
}
#endif

static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			   struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	pte_t *pte;
	spinlock_t *ptl;

	if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
		smaps_pmd_entry(pmd, addr, walk);
		spin_unlock(ptl);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
	/*
	 * The mmap_sem held all the way back in m_start() is what
	 * keeps khugepaged out of here and from collapsing things
	 * in here.
	 */
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE)
		smaps_pte_entry(pte, addr, walk);
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
{
	/*
	 * Don't forget to update Documentation/ on changes.
	 */
	static const char mnemonics[BITS_PER_LONG][2] = {
		/*
		 * In case if we meet a flag we don't know about.
		 */
		[0 ... (BITS_PER_LONG-1)] = "??",

		[ilog2(VM_READ)]	= "rd",
		[ilog2(VM_WRITE)]	= "wr",
		[ilog2(VM_EXEC)]	= "ex",
		[ilog2(VM_SHARED)]	= "sh",
		[ilog2(VM_MAYREAD)]	= "mr",
		[ilog2(VM_MAYWRITE)]	= "mw",
		[ilog2(VM_MAYEXEC)]	= "me",
		[ilog2(VM_MAYSHARE)]	= "ms",
		[ilog2(VM_GROWSDOWN)]	= "gd",
		[ilog2(VM_PFNMAP)]	= "pf",
		[ilog2(VM_DENYWRITE)]	= "dw",
#ifdef CONFIG_X86_INTEL_MPX
		[ilog2(VM_MPX)]		= "mp",
#endif
		[ilog2(VM_LOCKED)]	= "lo",
		[ilog2(VM_IO)]		= "io",
		[ilog2(VM_SEQ_READ)]	= "sr",
		[ilog2(VM_RAND_READ)]	= "rr",
		[ilog2(VM_DONTCOPY)]	= "dc",
		[ilog2(VM_DONTEXPAND)]	= "de",
		[ilog2(VM_ACCOUNT)]	= "ac",
		[ilog2(VM_NORESERVE)]	= "nr",
		[ilog2(VM_HUGETLB)]	= "ht",
		[ilog2(VM_ARCH_1)]	= "ar",
		[ilog2(VM_DONTDUMP)]	= "dd",
#ifdef CONFIG_MEM_SOFT_DIRTY
		[ilog2(VM_SOFTDIRTY)]	= "sd",
#endif
		[ilog2(VM_MIXEDMAP)]	= "mm",
		[ilog2(VM_HUGEPAGE)]	= "hg",
		[ilog2(VM_NOHUGEPAGE)]	= "nh",
		[ilog2(VM_MERGEABLE)]	= "mg",
		[ilog2(VM_UFFD_MISSING)]= "um",
		[ilog2(VM_UFFD_WP)]	= "uw",
	};
	size_t i;

	seq_puts(m, "VmFlags: ");
	for (i = 0; i < BITS_PER_LONG; i++) {
		if (vma->vm_flags & (1UL << i)) {
			seq_printf(m, "%c%c ",
				   mnemonics[i][0], mnemonics[i][1]);
		}
	}
	seq_putc(m, '\n');
}

#ifdef CONFIG_HUGETLB_PAGE
static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
				 unsigned long addr, unsigned long end,
				 struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = walk->vma;
	struct page *page = NULL;

	if (pte_present(*pte)) {
		page = vm_normal_page(vma, addr, *pte);
	} else if (is_swap_pte(*pte)) {
		swp_entry_t swpent = pte_to_swp_entry(*pte);

		if (is_migration_entry(swpent))
			page = migration_entry_to_page(swpent);
	}
	if (page) {
		int mapcount = page_mapcount(page);

		if (mapcount >= 2)
			mss->shared_hugetlb += huge_page_size(hstate_vma(vma));
		else
			mss->private_hugetlb += huge_page_size(hstate_vma(vma));
	}
	return 0;
}
#endif /* HUGETLB_PAGE */

static int show_smap(struct seq_file *m, void *v, int is_pid)
{
	struct vm_area_struct *vma = v;
	struct mem_size_stats mss;
	struct mm_walk smaps_walk = {
		.pmd_entry = smaps_pte_range,
#ifdef CONFIG_HUGETLB_PAGE
		.hugetlb_entry = smaps_hugetlb_range,
#endif
		.mm = vma->vm_mm,
		.private = &mss,
	};

	memset(&mss, 0, sizeof mss);
	/* mmap_sem is held in m_start */
	walk_page_vma(vma, &smaps_walk);

	show_map_vma(m, vma, is_pid);

	seq_printf(m,
		   "Size:           %8lu kB\n"
		   "Rss:            %8lu kB\n"
		   "Pss:            %8lu kB\n"
		   "Shared_Clean:   %8lu kB\n"
		   "Shared_Dirty:   %8lu kB\n"
		   "Private_Clean:  %8lu kB\n"
		   "Private_Dirty:  %8lu kB\n"
		   "Referenced:     %8lu kB\n"
		   "Anonymous:      %8lu kB\n"
		   "AnonHugePages:  %8lu kB\n"
		   "Shared_Hugetlb: %8lu kB\n"
		   "Private_Hugetlb: %7lu kB\n"
		   "Swap:           %8lu kB\n"
		   "SwapPss:        %8lu kB\n"
		   "KernelPageSize: %8lu kB\n"
		   "MMUPageSize:    %8lu kB\n"
		   "Locked:         %8lu kB\n",
		   (vma->vm_end - vma->vm_start) >> 10,
		   mss.resident >> 10,
		   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
		   mss.shared_clean  >> 10,
		   mss.shared_dirty  >> 10,
		   mss.private_clean >> 10,
		   mss.private_dirty >> 10,
		   mss.referenced >> 10,
		   mss.anonymous >> 10,
		   mss.anonymous_thp >> 10,
		   mss.shared_hugetlb >> 10,
		   mss.private_hugetlb >> 10,
		   mss.swap >> 10,
		   (unsigned long)(mss.swap_pss >> (10 + PSS_SHIFT)),
		   vma_kernel_pagesize(vma) >> 10,
		   vma_mmu_pagesize(vma) >> 10,
		   (vma->vm_flags & VM_LOCKED) ?
			(unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);

	show_smap_vma_flags(m, vma);
	m_cache_vma(m, vma);
	return 0;
}

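/*
 * Illustration only, not part of the kernel build: a common consumer of
 * show_smap() output is a userspace tool that sums Pss across all
 * mappings of a process. A minimal sketch (the path handling and
 * parsing below are assumptions of the sketch):
 */
#if 0
#include <stdio.h>

static long total_pss_kb(const char *pid)
{
	char path[64], line[256];
	long pss = 0, total = 0;
	FILE *f;

	snprintf(path, sizeof(path), "/proc/%s/smaps", pid);
	f = fopen(path, "r");
	if (!f)
		return -1;
	while (fgets(line, sizeof(line), f))
		/* One "Pss:  N kB" line is printed per mapping. */
		if (sscanf(line, "Pss: %ld kB", &pss) == 1)
			total += pss;
	fclose(f);
	return total;
}
#endif
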
static int show_pid_smap(struct seq_file *m, void *v)
{
	return show_smap(m, v, 1);
}

static int show_tid_smap(struct seq_file *m, void *v)
{
	return show_smap(m, v, 0);
}

static const struct seq_operations proc_pid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_smap
};

static const struct seq_operations proc_tid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_smap
};

static int pid_smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_smaps_op);
}

static int tid_smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_tid_smaps_op);
}

const struct file_operations proc_pid_smaps_operations = {
	.open		= pid_smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};

const struct file_operations proc_tid_smaps_operations = {
	.open		= tid_smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};

enum clear_refs_types {
	CLEAR_REFS_ALL = 1,
	CLEAR_REFS_ANON,
	CLEAR_REFS_MAPPED,
	CLEAR_REFS_SOFT_DIRTY,
	CLEAR_REFS_MM_HIWATER_RSS,
	CLEAR_REFS_LAST,
};

struct clear_refs_private {
	enum clear_refs_types type;
};

#ifdef CONFIG_MEM_SOFT_DIRTY
static inline void clear_soft_dirty(struct vm_area_struct *vma,
		unsigned long addr, pte_t *pte)
{
	/*
	 * The soft-dirty tracker uses #PF-s to catch writes
	 * to pages, so write-protect the pte as well. See the
	 * Documentation/vm/soft-dirty.txt for full description
	 * of how soft-dirty works.
	 */
	pte_t ptent = *pte;

	if (pte_present(ptent)) {
		ptent = pte_wrprotect(ptent);
		ptent = pte_clear_soft_dirty(ptent);
	} else if (is_swap_pte(ptent)) {
		ptent = pte_swp_clear_soft_dirty(ptent);
	}

	set_pte_at(vma->vm_mm, addr, pte, ptent);
}

static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	pmd = pmd_wrprotect(pmd);
	pmd = pmd_clear_soft_dirty(pmd);

	if (vma->vm_flags & VM_SOFTDIRTY)
		vma->vm_flags &= ~VM_SOFTDIRTY;

	set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
}

#else

static inline void clear_soft_dirty(struct vm_area_struct *vma,
		unsigned long addr, pte_t *pte)
{
}

static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmdp)
{
}
#endif

static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	struct clear_refs_private *cp = walk->private;
	struct vm_area_struct *vma = walk->vma;
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;

	if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
			clear_soft_dirty_pmd(vma, addr, pmd);
			goto out;
		}

		page = pmd_page(*pmd);

		/* Clear accessed and referenced bits. */
		pmdp_test_and_clear_young(vma, addr, pmd);
		test_and_clear_page_young(page);
		ClearPageReferenced(page);
out:
		spin_unlock(ptl);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;

		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
			clear_soft_dirty(vma, addr, pte);
			continue;
		}

		if (!pte_present(ptent))
			continue;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/* Clear accessed and referenced bits. */
		ptep_test_and_clear_young(vma, addr, pte);
		test_and_clear_page_young(page);
		ClearPageReferenced(page);
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

static int clear_refs_test_walk(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct clear_refs_private *cp = walk->private;
	struct vm_area_struct *vma = walk->vma;

	if (vma->vm_flags & VM_PFNMAP)
		return 1;

	/*
	 * Writing 1 to /proc/pid/clear_refs affects all pages.
	 * Writing 2 to /proc/pid/clear_refs only affects anonymous pages.
	 * Writing 3 to /proc/pid/clear_refs only affects file mapped pages.
	 * Writing 4 to /proc/pid/clear_refs affects all pages.
	 */
	if (cp->type == CLEAR_REFS_ANON && vma->vm_file)
		return 1;
	if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file)
		return 1;
	return 0;
}

static ssize_t clear_refs_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct task_struct *task;
	char buffer[PROC_NUMBUF];
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	enum clear_refs_types type;
	int itype;
	int rv;

	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;
	if (copy_from_user(buffer, buf, count))
		return -EFAULT;
	rv = kstrtoint(strstrip(buffer), 10, &itype);
	if (rv < 0)
		return rv;
	type = (enum clear_refs_types)itype;
	if (type < CLEAR_REFS_ALL || type >= CLEAR_REFS_LAST)
		return -EINVAL;

	task = get_proc_task(file_inode(file));
	if (!task)
		return -ESRCH;
	mm = get_task_mm(task);
	if (mm) {
		struct clear_refs_private cp = {
			.type = type,
		};
		struct mm_walk clear_refs_walk = {
			.pmd_entry = clear_refs_pte_range,
			.test_walk = clear_refs_test_walk,
			.mm = mm,
			.private = &cp,
		};

		if (type == CLEAR_REFS_MM_HIWATER_RSS) {
			/*
			 * Writing 5 to /proc/pid/clear_refs resets the peak
			 * resident set size to this mm's current rss value.
			 */
			down_write(&mm->mmap_sem);
			reset_mm_hiwater_rss(mm);
			up_write(&mm->mmap_sem);
			goto out_mm;
		}

		down_read(&mm->mmap_sem);
		if (type == CLEAR_REFS_SOFT_DIRTY) {
			for (vma = mm->mmap; vma; vma = vma->vm_next) {
				if (!(vma->vm_flags & VM_SOFTDIRTY))
					continue;
				up_read(&mm->mmap_sem);
				down_write(&mm->mmap_sem);
				for (vma = mm->mmap; vma; vma = vma->vm_next) {
					vma->vm_flags &= ~VM_SOFTDIRTY;
					vma_set_page_prot(vma);
				}
				downgrade_write(&mm->mmap_sem);
				break;
			}
			mmu_notifier_invalidate_range_start(mm, 0, -1);
		}
		walk_page_range(0, ~0UL, &clear_refs_walk);
		if (type == CLEAR_REFS_SOFT_DIRTY)
			mmu_notifier_invalidate_range_end(mm, 0, -1);
		flush_tlb_mm(mm);
		up_read(&mm->mmap_sem);
out_mm:
		mmput(mm);
	}
	put_task_struct(task);

	return count;
}

const struct file_operations proc_clear_refs_operations = {
	.write		= clear_refs_write,
	.llseek		= noop_llseek,
};

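/*
 * Illustration only, not part of the kernel build: a typical soft-dirty
 * workflow from userspace is to write "4" (CLEAR_REFS_SOFT_DIRTY) here
 * and then check bit 55 of the pagemap entries (see the pagemap layout
 * documented below). A minimal sketch assuming 4K pages, with error
 * handling trimmed:
 */
#if 0
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int clear = open("/proc/self/clear_refs", O_WRONLY);
	int pagemap = open("/proc/self/pagemap", O_RDONLY);
	static char page[4096];
	uint64_t pme;

	write(clear, "4", 1);		/* CLEAR_REFS_SOFT_DIRTY */
	page[0] = 1;			/* dirty one page */

	/* One 8-byte entry per virtual page number. */
	pread(pagemap, &pme, sizeof(pme),
	      ((uintptr_t)page / 4096) * sizeof(pme));
	printf("soft-dirty: %d\n", (int)((pme >> 55) & 1));

	close(pagemap);
	close(clear);
	return 0;
}
#endif
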
Naoya Horiguchi092b50b2012-03-21 16:33:59 -0700990typedef struct {
991 u64 pme;
992} pagemap_entry_t;
993
Matt Mackall85863e42008-02-04 22:29:04 -0800994struct pagemapread {
yonghua zheng8c829622013-08-13 16:01:03 -0700995 int pos, len; /* units: PM_ENTRY_BYTES, not bytes */
Naoya Horiguchi092b50b2012-03-21 16:33:59 -0700996 pagemap_entry_t *buffer;
Konstantin Khlebnikov1c903082015-09-08 15:00:07 -0700997 bool show_pfn;
Matt Mackall85863e42008-02-04 22:29:04 -0800998};
999
Naoya Horiguchi5aaabe82012-03-21 16:33:57 -07001000#define PAGEMAP_WALK_SIZE (PMD_SIZE)
1001#define PAGEMAP_WALK_MASK (PMD_MASK)
1002
Konstantin Khlebnikovdeb94542015-09-08 15:00:02 -07001003#define PM_ENTRY_BYTES sizeof(pagemap_entry_t)
1004#define PM_PFRAME_BITS 55
1005#define PM_PFRAME_MASK GENMASK_ULL(PM_PFRAME_BITS - 1, 0)
1006#define PM_SOFT_DIRTY BIT_ULL(55)
Konstantin Khlebnikov77bb4992015-09-08 15:00:10 -07001007#define PM_MMAP_EXCLUSIVE BIT_ULL(56)
Konstantin Khlebnikovdeb94542015-09-08 15:00:02 -07001008#define PM_FILE BIT_ULL(61)
1009#define PM_SWAP BIT_ULL(62)
1010#define PM_PRESENT BIT_ULL(63)
Hans Rosenfeldf16278c2008-03-21 18:46:59 -05001011
Matt Mackall85863e42008-02-04 22:29:04 -08001012#define PM_END_OF_BUFFER 1
1013
Konstantin Khlebnikovdeb94542015-09-08 15:00:02 -07001014static inline pagemap_entry_t make_pme(u64 frame, u64 flags)
Naoya Horiguchi092b50b2012-03-21 16:33:59 -07001015{
Konstantin Khlebnikovdeb94542015-09-08 15:00:02 -07001016 return (pagemap_entry_t) { .pme = (frame & PM_PFRAME_MASK) | flags };
Naoya Horiguchi092b50b2012-03-21 16:33:59 -07001017}
1018
1019static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme,
Matt Mackall85863e42008-02-04 22:29:04 -08001020 struct pagemapread *pm)
1021{
Naoya Horiguchi092b50b2012-03-21 16:33:59 -07001022 pm->buffer[pm->pos++] = *pme;
KAMEZAWA Hiroyukid82ef022010-04-02 09:11:29 +09001023 if (pm->pos >= pm->len)
Thomas Tuttleaae86792008-06-05 22:46:31 -07001024 return PM_END_OF_BUFFER;
Matt Mackall85863e42008-02-04 22:29:04 -08001025 return 0;
1026}
1027
1028static int pagemap_pte_hole(unsigned long start, unsigned long end,
Dave Hansen21650092008-06-12 15:21:47 -07001029 struct mm_walk *walk)
Matt Mackall85863e42008-02-04 22:29:04 -08001030{
Dave Hansen21650092008-06-12 15:21:47 -07001031 struct pagemapread *pm = walk->private;
Peter Feiner68b5a652014-08-06 16:08:09 -07001032 unsigned long addr = start;
Matt Mackall85863e42008-02-04 22:29:04 -08001033 int err = 0;
Naoya Horiguchi092b50b2012-03-21 16:33:59 -07001034
Peter Feiner68b5a652014-08-06 16:08:09 -07001035 while (addr < end) {
1036 struct vm_area_struct *vma = find_vma(walk->mm, addr);
Konstantin Khlebnikovdeb94542015-09-08 15:00:02 -07001037 pagemap_entry_t pme = make_pme(0, 0);
Peter Feiner87e6d492014-09-25 16:05:18 -07001038 /* End of address space hole, which we mark as non-present. */
1039 unsigned long hole_end;
Peter Feiner68b5a652014-08-06 16:08:09 -07001040
Peter Feiner87e6d492014-09-25 16:05:18 -07001041 if (vma)
1042 hole_end = min(end, vma->vm_start);
1043 else
1044 hole_end = end;
1045
1046 for (; addr < hole_end; addr += PAGE_SIZE) {
1047 err = add_to_pagemap(addr, &pme, pm);
1048 if (err)
1049 goto out;
Peter Feiner68b5a652014-08-06 16:08:09 -07001050 }
1051
Peter Feiner87e6d492014-09-25 16:05:18 -07001052 if (!vma)
1053 break;
1054
1055 /* Addresses in the VMA. */
1056 if (vma->vm_flags & VM_SOFTDIRTY)
Konstantin Khlebnikovdeb94542015-09-08 15:00:02 -07001057 pme = make_pme(0, PM_SOFT_DIRTY);
Peter Feiner87e6d492014-09-25 16:05:18 -07001058 for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) {
Peter Feiner68b5a652014-08-06 16:08:09 -07001059 err = add_to_pagemap(addr, &pme, pm);
1060 if (err)
1061 goto out;
1062 }
Matt Mackall85863e42008-02-04 22:29:04 -08001063 }
Peter Feiner68b5a652014-08-06 16:08:09 -07001064out:
Matt Mackall85863e42008-02-04 22:29:04 -08001065 return err;
1066}
1067
Konstantin Khlebnikovdeb94542015-09-08 15:00:02 -07001068static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
Konstantin Khlebnikov052fb0d2012-05-31 16:26:19 -07001069 struct vm_area_struct *vma, unsigned long addr, pte_t pte)
Matt Mackall85863e42008-02-04 22:29:04 -08001070{
Konstantin Khlebnikovdeb94542015-09-08 15:00:02 -07001071 u64 frame = 0, flags = 0;
Konstantin Khlebnikov052fb0d2012-05-31 16:26:19 -07001072 struct page *page = NULL;
Matt Mackall85863e42008-02-04 22:29:04 -08001073
Konstantin Khlebnikov052fb0d2012-05-31 16:26:19 -07001074 if (pte_present(pte)) {
Konstantin Khlebnikov1c903082015-09-08 15:00:07 -07001075 if (pm->show_pfn)
1076 frame = pte_pfn(pte);
Konstantin Khlebnikovdeb94542015-09-08 15:00:02 -07001077 flags |= PM_PRESENT;
Konstantin Khlebnikov052fb0d2012-05-31 16:26:19 -07001078 page = vm_normal_page(vma, addr, pte);
Cyrill Gorcunove9cdd6e2013-10-16 13:46:53 -07001079 if (pte_soft_dirty(pte))
Konstantin Khlebnikovdeb94542015-09-08 15:00:02 -07001080 flags |= PM_SOFT_DIRTY;
Konstantin Khlebnikov052fb0d2012-05-31 16:26:19 -07001081 } else if (is_swap_pte(pte)) {
Cyrill Gorcunov179ef712013-08-13 16:00:49 -07001082 swp_entry_t entry;
1083 if (pte_swp_soft_dirty(pte))
Konstantin Khlebnikovdeb94542015-09-08 15:00:02 -07001084 flags |= PM_SOFT_DIRTY;
Cyrill Gorcunov179ef712013-08-13 16:00:49 -07001085 entry = pte_to_swp_entry(pte);
Konstantin Khlebnikov052fb0d2012-05-31 16:26:19 -07001086 frame = swp_type(entry) |
1087 (swp_offset(entry) << MAX_SWAPFILES_SHIFT);
Konstantin Khlebnikovdeb94542015-09-08 15:00:02 -07001088 flags |= PM_SWAP;
Konstantin Khlebnikov052fb0d2012-05-31 16:26:19 -07001089 if (is_migration_entry(entry))
1090 page = migration_entry_to_page(entry);
Konstantin Khlebnikov052fb0d2012-05-31 16:26:19 -07001091 }
1092
1093 if (page && !PageAnon(page))
1094 flags |= PM_FILE;
Konstantin Khlebnikov77bb4992015-09-08 15:00:10 -07001095 if (page && page_mapcount(page) == 1)
1096 flags |= PM_MMAP_EXCLUSIVE;
Konstantin Khlebnikovdeb94542015-09-08 15:00:02 -07001097 if (vma->vm_flags & VM_SOFTDIRTY)
1098 flags |= PM_SOFT_DIRTY;
Konstantin Khlebnikov052fb0d2012-05-31 16:26:19 -07001099
Konstantin Khlebnikovdeb94542015-09-08 15:00:02 -07001100 return make_pme(frame, flags);
Dave Hansenbcf80392008-06-12 15:21:48 -07001101}
1102
Konstantin Khlebnikov356515e2015-09-08 15:00:04 -07001103static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
Dave Hansen21650092008-06-12 15:21:47 -07001104 struct mm_walk *walk)
Matt Mackall85863e42008-02-04 22:29:04 -08001105{
Naoya Horiguchif995ece2015-02-11 15:27:48 -08001106 struct vm_area_struct *vma = walk->vma;
Dave Hansen21650092008-06-12 15:21:47 -07001107 struct pagemapread *pm = walk->private;
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08001108 spinlock_t *ptl;
Konstantin Khlebnikov05fbf352015-02-11 15:27:31 -08001109 pte_t *pte, *orig_pte;
Matt Mackall85863e42008-02-04 22:29:04 -08001110 int err = 0;
1111
Konstantin Khlebnikov356515e2015-09-08 15:00:04 -07001112#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1113 if (pmd_trans_huge_lock(pmdp, vma, &ptl) == 1) {
1114 u64 flags = 0, frame = 0;
1115 pmd_t pmd = *pmdp;
Pavel Emelyanov0f8975e2013-07-03 15:01:20 -07001116
Konstantin Khlebnikov356515e2015-09-08 15:00:04 -07001117 if ((vma->vm_flags & VM_SOFTDIRTY) || pmd_soft_dirty(pmd))
Konstantin Khlebnikovdeb94542015-09-08 15:00:02 -07001118 flags |= PM_SOFT_DIRTY;
Cyrill Gorcunovd9104d12013-09-11 14:22:24 -07001119
Konstantin Khlebnikov356515e2015-09-08 15:00:04 -07001120 /*
1121 * Currently pmd for thp is always present because thp
1122 * can not be swapped-out, migrated, or HWPOISONed
1123 * (split in such cases instead.)
1124 * This if-check is just to prepare for future implementation.
1125 */
1126 if (pmd_present(pmd)) {
Konstantin Khlebnikov77bb4992015-09-08 15:00:10 -07001127 struct page *page = pmd_page(pmd);
1128
1129 if (page_mapcount(page) == 1)
1130 flags |= PM_MMAP_EXCLUSIVE;
1131
Konstantin Khlebnikov356515e2015-09-08 15:00:04 -07001132 flags |= PM_PRESENT;
Konstantin Khlebnikov1c903082015-09-08 15:00:07 -07001133 if (pm->show_pfn)
1134 frame = pmd_pfn(pmd) +
1135 ((addr & ~PMD_MASK) >> PAGE_SHIFT);
Konstantin Khlebnikov356515e2015-09-08 15:00:04 -07001136 }
Naoya Horiguchi5aaabe82012-03-21 16:33:57 -07001137
Konstantin Khlebnikov356515e2015-09-08 15:00:04 -07001138 for (; addr != end; addr += PAGE_SIZE) {
1139 pagemap_entry_t pme = make_pme(frame, flags);
1140
Naoya Horiguchi092b50b2012-03-21 16:33:59 -07001141 err = add_to_pagemap(addr, &pme, pm);
Naoya Horiguchi025c5b22012-03-21 16:33:57 -07001142 if (err)
1143 break;
Konstantin Khlebnikov1c903082015-09-08 15:00:07 -07001144 if (pm->show_pfn && (flags & PM_PRESENT))
Konstantin Khlebnikov356515e2015-09-08 15:00:04 -07001145 frame++;
Naoya Horiguchi5aaabe82012-03-21 16:33:57 -07001146 }
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08001147 spin_unlock(ptl);
Naoya Horiguchi025c5b22012-03-21 16:33:57 -07001148 return err;
Naoya Horiguchi5aaabe82012-03-21 16:33:57 -07001149 }
1150
Konstantin Khlebnikov356515e2015-09-08 15:00:04 -07001151 if (pmd_trans_unstable(pmdp))
Andrea Arcangeli45f83ce2012-03-28 14:42:40 -07001152 return 0;
Konstantin Khlebnikov356515e2015-09-08 15:00:04 -07001153#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
Dave Hansenbcf80392008-06-12 15:21:48 -07001154
Naoya Horiguchif995ece2015-02-11 15:27:48 -08001155 /*
1156 * We can assume that @vma always points to a valid one and @end never
1157 * goes beyond vma->vm_end.
1158 */
Konstantin Khlebnikov356515e2015-09-08 15:00:04 -07001159 orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl);
Naoya Horiguchif995ece2015-02-11 15:27:48 -08001160 for (; addr < end; pte++, addr += PAGE_SIZE) {
1161 pagemap_entry_t pme;
Peter Feiner81d0fa62014-10-09 15:28:32 -07001162
Konstantin Khlebnikovdeb94542015-09-08 15:00:02 -07001163 pme = pte_to_pagemap_entry(pm, vma, addr, *pte);
Naoya Horiguchif995ece2015-02-11 15:27:48 -08001164 err = add_to_pagemap(addr, &pme, pm);
Konstantin Khlebnikov05fbf352015-02-11 15:27:31 -08001165 if (err)
Peter Feiner81d0fa62014-10-09 15:28:32 -07001166 break;
Matt Mackall85863e42008-02-04 22:29:04 -08001167 }
Naoya Horiguchif995ece2015-02-11 15:27:48 -08001168 pte_unmap_unlock(orig_pte, ptl);
Matt Mackall85863e42008-02-04 22:29:04 -08001169
1170 cond_resched();
1171
1172 return err;
1173}
1174
Naoya Horiguchi1a5cb812010-05-24 14:32:12 -07001175#ifdef CONFIG_HUGETLB_PAGE
Naoya Horiguchi116354d2010-04-06 14:35:04 -07001176/* This function walks within one hugetlb entry in the single call */
Konstantin Khlebnikov356515e2015-09-08 15:00:04 -07001177static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
Naoya Horiguchi116354d2010-04-06 14:35:04 -07001178 unsigned long addr, unsigned long end,
1179 struct mm_walk *walk)
Naoya Horiguchi5dc37642009-12-14 18:00:01 -08001180{
Naoya Horiguchi5dc37642009-12-14 18:00:01 -08001181 struct pagemapread *pm = walk->private;
Naoya Horiguchif995ece2015-02-11 15:27:48 -08001182 struct vm_area_struct *vma = walk->vma;
Konstantin Khlebnikov356515e2015-09-08 15:00:04 -07001183 u64 flags = 0, frame = 0;
Naoya Horiguchi5dc37642009-12-14 18:00:01 -08001184 int err = 0;
Konstantin Khlebnikov356515e2015-09-08 15:00:04 -07001185 pte_t pte;
Naoya Horiguchi5dc37642009-12-14 18:00:01 -08001186
Naoya Horiguchif995ece2015-02-11 15:27:48 -08001187 if (vma->vm_flags & VM_SOFTDIRTY)
Konstantin Khlebnikovdeb94542015-09-08 15:00:02 -07001188 flags |= PM_SOFT_DIRTY;
Cyrill Gorcunovd9104d12013-09-11 14:22:24 -07001189
Konstantin Khlebnikov356515e2015-09-08 15:00:04 -07001190 pte = huge_ptep_get(ptep);
1191 if (pte_present(pte)) {
1192 struct page *page = pte_page(pte);
1193
1194 if (!PageAnon(page))
1195 flags |= PM_FILE;
1196
Konstantin Khlebnikov77bb4992015-09-08 15:00:10 -07001197 if (page_mapcount(page) == 1)
1198 flags |= PM_MMAP_EXCLUSIVE;
1199
Konstantin Khlebnikov356515e2015-09-08 15:00:04 -07001200 flags |= PM_PRESENT;
Konstantin Khlebnikov1c903082015-09-08 15:00:07 -07001201 if (pm->show_pfn)
1202 frame = pte_pfn(pte) +
1203 ((addr & ~hmask) >> PAGE_SHIFT);
Konstantin Khlebnikov356515e2015-09-08 15:00:04 -07001204 }
1205
Naoya Horiguchi5dc37642009-12-14 18:00:01 -08001206 for (; addr != end; addr += PAGE_SIZE) {
Konstantin Khlebnikov356515e2015-09-08 15:00:04 -07001207 pagemap_entry_t pme = make_pme(frame, flags);
1208
Naoya Horiguchi092b50b2012-03-21 16:33:59 -07001209 err = add_to_pagemap(addr, &pme, pm);
Naoya Horiguchi5dc37642009-12-14 18:00:01 -08001210 if (err)
1211 return err;
Konstantin Khlebnikov1c903082015-09-08 15:00:07 -07001212 if (pm->show_pfn && (flags & PM_PRESENT))
Konstantin Khlebnikov356515e2015-09-08 15:00:04 -07001213 frame++;
Naoya Horiguchi5dc37642009-12-14 18:00:01 -08001214 }
1215
1216 cond_resched();
1217
1218 return err;
1219}
Naoya Horiguchi1a5cb812010-05-24 14:32:12 -07001220#endif /* HUGETLB_PAGE */
Naoya Horiguchi5dc37642009-12-14 18:00:01 -08001221
/*
 * /proc/pid/pagemap - an array mapping virtual pages to pfns
 *
 * For each page in the address space, this file contains one 64-bit entry
 * consisting of the following:
 *
 * Bits 0-54  page frame number (PFN) if present
 * Bits 0-4   swap type if swapped
 * Bits 5-54  swap offset if swapped
 * Bit  55    pte is soft-dirty (see Documentation/vm/soft-dirty.txt)
 * Bit  56    page exclusively mapped
 * Bits 57-60 zero
 * Bit  61    page is file-page or shared-anon
 * Bit  62    page swapped
 * Bit  63    page present
 *
 * If the page is not present but in swap, then the PFN contains an
 * encoding of the swap file number and the page's offset into the
 * swap. Unmapped pages return a null PFN. This allows determining
 * precisely which pages are mapped (or in swap) and comparing mapped
 * pages between processes.
 *
 * Efficient users of this interface will use /proc/pid/maps to
 * determine which areas of memory are actually mapped and llseek to
 * skip over unmapped regions (see the sketch after pagemap_read()).
 */
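
/*
 * A minimal userspace sketch (illustrative only, not part of this
 * file) of decoding one pagemap entry using the layout above. The
 * helper name and the terse error handling are ours. Note that the
 * PFN field reads back as zero unless the opener had CAP_SYS_ADMIN
 * (pm.show_pfn below).
 */
#if 0	/* userspace example, never compiled into the kernel */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

static int dump_pagemap_entry(pid_t pid, unsigned long vaddr)
{
	char path[64];
	uint64_t entry;
	long pagesize = sysconf(_SC_PAGESIZE);
	int fd;

	snprintf(path, sizeof(path), "/proc/%d/pagemap", (int)pid);
	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;
	/* one 64-bit entry per virtual page */
	if (pread(fd, &entry, sizeof(entry),
		  (off_t)(vaddr / pagesize) * sizeof(entry)) != sizeof(entry)) {
		close(fd);
		return -1;
	}
	close(fd);

	printf("present=%d swapped=%d soft-dirty=%d exclusive=%d pfn=%#llx\n",
	       (int)(entry >> 63 & 1), (int)(entry >> 62 & 1),
	       (int)(entry >> 55 & 1), (int)(entry >> 56 & 1),
	       (unsigned long long)(entry & ((1ULL << 55) - 1)));
	return 0;
}
#endif
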
static ssize_t pagemap_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct mm_struct *mm = file->private_data;
	struct pagemapread pm;
	struct mm_walk pagemap_walk = {};
	unsigned long src;
	unsigned long svpfn;
	unsigned long start_vaddr;
	unsigned long end_vaddr;
	int ret = 0, copied = 0;

	if (!mm || !atomic_inc_not_zero(&mm->mm_users))
		goto out;

	ret = -EINVAL;
	/* file position must be aligned */
	if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
		goto out_mm;

	ret = 0;
	if (!count)
		goto out_mm;

	/* do not disclose physical addresses: attack vector */
	pm.show_pfn = file_ns_capable(file, &init_user_ns, CAP_SYS_ADMIN);

	pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
	pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_TEMPORARY);
	ret = -ENOMEM;
	if (!pm.buffer)
		goto out_mm;

	pagemap_walk.pmd_entry = pagemap_pmd_range;
	pagemap_walk.pte_hole = pagemap_pte_hole;
#ifdef CONFIG_HUGETLB_PAGE
	pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
#endif
	pagemap_walk.mm = mm;
	pagemap_walk.private = &pm;

	src = *ppos;
	svpfn = src / PM_ENTRY_BYTES;
	start_vaddr = svpfn << PAGE_SHIFT;
	end_vaddr = mm->task_size;

	/* watch out for wraparound */
	if (svpfn > mm->task_size >> PAGE_SHIFT)
		start_vaddr = end_vaddr;

	/*
	 * The odds are that this will stop walking way before end_vaddr,
	 * because the length of the user buffer is tracked in "pm", and
	 * the walk will stop when we hit the end of the buffer.
	 */
	ret = 0;
	while (count && (start_vaddr < end_vaddr)) {
		int len;
		unsigned long end;

		pm.pos = 0;
		end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
		/* overflow? */
		if (end < start_vaddr || end > end_vaddr)
			end = end_vaddr;
		down_read(&mm->mmap_sem);
		ret = walk_page_range(start_vaddr, end, &pagemap_walk);
		up_read(&mm->mmap_sem);
		start_vaddr = end;

		len = min(count, PM_ENTRY_BYTES * pm.pos);
		if (copy_to_user(buf, pm.buffer, len)) {
			ret = -EFAULT;
			goto out_free;
		}
		copied += len;
		buf += len;
		count -= len;
	}
	*ppos += copied;
	if (!ret || ret == PM_END_OF_BUFFER)
		ret = copied;

out_free:
	kfree(pm.buffer);
out_mm:
	mmput(mm);
out:
	return ret;
}

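/*
 * A hedged sketch of the "efficient user" pattern recommended above:
 * userspace parses /proc/<pid>/maps for the VMA boundaries, then
 * lseek()s the pagemap fd (mem_lseek, borrowed below) past the holes
 * instead of reading the unmapped gaps. The helper name is ours and
 * error handling is minimal.
 */
#if 0	/* userspace example, never compiled into the kernel */
#include <fcntl.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static void walk_mapped_pages(int pid)
{
	char path[64], line[256];
	long pagesize = sysconf(_SC_PAGESIZE);
	FILE *maps;
	int pagemap;

	snprintf(path, sizeof(path), "/proc/%d/maps", pid);
	maps = fopen(path, "r");
	snprintf(path, sizeof(path), "/proc/%d/pagemap", pid);
	pagemap = open(path, O_RDONLY);
	if (!maps || pagemap < 0)
		return;

	while (fgets(line, sizeof(line), maps)) {
		uint64_t start, end, entry;

		if (sscanf(line, "%" SCNx64 "-%" SCNx64, &start, &end) != 2)
			continue;
		/* seek straight to this VMA's first entry, skipping the gap */
		lseek(pagemap, start / pagesize * sizeof(entry), SEEK_SET);
		for (; start < end; start += pagesize) {
			if (read(pagemap, &entry, sizeof(entry)) != sizeof(entry))
				break;
			if (entry >> 63 & 1)	/* bit 63: page present */
				printf("%#" PRIx64 " present\n", start);
		}
	}
	fclose(maps);
	close(pagemap);
}
#endif
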
static int pagemap_open(struct inode *inode, struct file *file)
{
	struct mm_struct *mm;

	/* proc_mem_open() applies the usual ptrace access checks */
	mm = proc_mem_open(inode, PTRACE_MODE_READ);
	if (IS_ERR(mm))
		return PTR_ERR(mm);
	file->private_data = mm;
	return 0;
}

static int pagemap_release(struct inode *inode, struct file *file)
{
	struct mm_struct *mm = file->private_data;

	if (mm)
		mmdrop(mm);
	return 0;
}

const struct file_operations proc_pagemap_operations = {
	.llseek		= mem_lseek, /* borrow this */
	.read		= pagemap_read,
	.open		= pagemap_open,
	.release	= pagemap_release,
};
#endif /* CONFIG_PROC_PAGE_MONITOR */

#ifdef CONFIG_NUMA

struct numa_maps {
	unsigned long pages;
	unsigned long anon;
	unsigned long active;
	unsigned long writeback;
	unsigned long mapcount_max;
	unsigned long dirty;
	unsigned long swapcache;
	unsigned long node[MAX_NUMNODES];
};

struct numa_maps_private {
	struct proc_maps_private proc_maps;
	struct numa_maps md;
};

static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
		unsigned long nr_pages)
{
	int count = page_mapcount(page);

	md->pages += nr_pages;
	if (pte_dirty || PageDirty(page))
		md->dirty += nr_pages;

	if (PageSwapCache(page))
		md->swapcache += nr_pages;

	if (PageActive(page) || PageUnevictable(page))
		md->active += nr_pages;

	if (PageWriteback(page))
		md->writeback += nr_pages;

	if (PageAnon(page))
		md->anon += nr_pages;

	if (count > md->mapcount_max)
		md->mapcount_max = count;

	md->node[page_to_nid(page)] += nr_pages;
}

static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
		unsigned long addr)
{
	struct page *page;
	int nid;

	if (!pte_present(pte))
		return NULL;

	page = vm_normal_page(vma, addr, pte);
	if (!page)
		return NULL;

	if (PageReserved(page))
		return NULL;

	nid = page_to_nid(page);
	if (!node_isset(nid, node_states[N_MEMORY]))
		return NULL;

	return page;
}

static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
		unsigned long end, struct mm_walk *walk)
{
	struct numa_maps *md = walk->private;
	struct vm_area_struct *vma = walk->vma;
	spinlock_t *ptl;
	pte_t *orig_pte;
	pte_t *pte;

	if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
		pte_t huge_pte = *(pte_t *)pmd;
		struct page *page;

		page = can_gather_numa_stats(huge_pte, vma, addr);
		if (page)
			gather_stats(page, md, pte_dirty(huge_pte),
				     HPAGE_PMD_SIZE/PAGE_SIZE);
		spin_unlock(ptl);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	do {
		struct page *page = can_gather_numa_stats(*pte, vma, addr);
		if (!page)
			continue;
		gather_stats(page, md, pte_dirty(*pte), 1);
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	return 0;
}
#ifdef CONFIG_HUGETLB_PAGE
static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
		unsigned long addr, unsigned long end, struct mm_walk *walk)
{
	struct numa_maps *md;
	struct page *page;

	if (!pte_present(*pte))
		return 0;

	page = pte_page(*pte);
	if (!page)
		return 0;

	md = walk->private;
	gather_stats(page, md, pte_dirty(*pte), 1);
	return 0;
}

#else
static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
		unsigned long addr, unsigned long end, struct mm_walk *walk)
{
	return 0;
}
#endif

/*
 * Display pages allocated per node and memory policy via /proc; an
 * illustrative output line is sketched below.
 */
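/*
 * A made-up sample of one /proc/<pid>/numa_maps line, as assembled by
 * show_numa_map() below (the path and counts are invented):
 *
 *   00400000 default file=/usr/bin/cat mapped=11 mapmax=3 N0=11 kernelpagesize_kB=4
 */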
static int show_numa_map(struct seq_file *m, void *v, int is_pid)
{
	struct numa_maps_private *numa_priv = m->private;
	struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
	struct vm_area_struct *vma = v;
	struct numa_maps *md = &numa_priv->md;
	struct file *file = vma->vm_file;
	struct mm_struct *mm = vma->vm_mm;
	struct mm_walk walk = {
		.hugetlb_entry = gather_hugetlb_stats,
		.pmd_entry = gather_pte_stats,
		.private = md,
		.mm = mm,
	};
	struct mempolicy *pol;
	char buffer[64];
	int nid;

	if (!mm)
		return 0;

	/* Ensure we start with an empty set of numa_maps statistics. */
	memset(md, 0, sizeof(*md));

	pol = __get_vma_policy(vma, vma->vm_start);
	if (pol) {
		mpol_to_str(buffer, sizeof(buffer), pol);
		mpol_cond_put(pol);
	} else {
		mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);
	}

	seq_printf(m, "%08lx %s", vma->vm_start, buffer);

	if (file) {
		seq_puts(m, " file=");
		seq_file_path(m, file, "\n\t= ");
	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
		seq_puts(m, " heap");
	} else {
		pid_t tid = pid_of_stack(proc_priv, vma, is_pid);
		if (tid != 0) {
			/*
			 * Thread stack in /proc/PID/task/TID/maps or
			 * the main process stack.
			 */
			if (!is_pid || (vma->vm_start <= mm->start_stack &&
			    vma->vm_end >= mm->start_stack))
				seq_puts(m, " stack");
			else
				seq_printf(m, " stack:%d", tid);
		}
	}

	if (is_vm_hugetlb_page(vma))
		seq_puts(m, " huge");

	/* mmap_sem is held by m_start */
	walk_page_vma(vma, &walk);

	if (!md->pages)
		goto out;

	if (md->anon)
		seq_printf(m, " anon=%lu", md->anon);

	if (md->dirty)
		seq_printf(m, " dirty=%lu", md->dirty);

	if (md->pages != md->anon && md->pages != md->dirty)
		seq_printf(m, " mapped=%lu", md->pages);

	if (md->mapcount_max > 1)
		seq_printf(m, " mapmax=%lu", md->mapcount_max);

	if (md->swapcache)
		seq_printf(m, " swapcache=%lu", md->swapcache);

	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
		seq_printf(m, " active=%lu", md->active);

	if (md->writeback)
		seq_printf(m, " writeback=%lu", md->writeback);

	for_each_node_state(nid, N_MEMORY)
		if (md->node[nid])
			seq_printf(m, " N%d=%lu", nid, md->node[nid]);

	seq_printf(m, " kernelpagesize_kB=%lu", vma_kernel_pagesize(vma) >> 10);
out:
	seq_putc(m, '\n');
	m_cache_vma(m, vma);
	return 0;
}

static int show_pid_numa_map(struct seq_file *m, void *v)
{
	return show_numa_map(m, v, 1);
}

static int show_tid_numa_map(struct seq_file *m, void *v)
{
	return show_numa_map(m, v, 0);
}

static const struct seq_operations proc_pid_numa_maps_op = {
	.start = m_start,
	.next = m_next,
	.stop = m_stop,
	.show = show_pid_numa_map,
};

static const struct seq_operations proc_tid_numa_maps_op = {
	.start = m_start,
	.next = m_next,
	.stop = m_stop,
	.show = show_tid_numa_map,
};

static int numa_maps_open(struct inode *inode, struct file *file,
			  const struct seq_operations *ops)
{
	return proc_maps_open(inode, file, ops,
				sizeof(struct numa_maps_private));
}

static int pid_numa_maps_open(struct inode *inode, struct file *file)
{
	return numa_maps_open(inode, file, &proc_pid_numa_maps_op);
}

static int tid_numa_maps_open(struct inode *inode, struct file *file)
{
	return numa_maps_open(inode, file, &proc_tid_numa_maps_op);
}

const struct file_operations proc_pid_numa_maps_operations = {
	.open		= pid_numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};

const struct file_operations proc_tid_numa_maps_operations = {
	.open		= tid_numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};
#endif /* CONFIG_NUMA */