#include <linux/debugfs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/bootmem.h>
#include <linux/stacktrace.h>
#include <linux/page_owner.h>
#include "internal.h"

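/* Disabled by default; enabled with the "page_owner=on" boot parameter. */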
static bool page_owner_disabled = true;
bool page_owner_inited __read_mostly;

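/*
 * Early handler for the "page_owner=" boot option; anything other than
 * "on" leaves tracking disabled.
 */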
static int early_page_owner_param(char *buf)
{
	if (!buf)
		return -EINVAL;

	if (strcmp(buf, "on") == 0)
		page_owner_disabled = false;

	return 0;
}
early_param("page_owner", early_page_owner_param);

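/*
 * Tell the page_ext core whether page_owner needs per-page storage;
 * space is only reserved when "page_owner=on" was given.
 */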
static bool need_page_owner(void)
{
	if (page_owner_disabled)
		return false;

	return true;
}

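/*
 * Runs once page_ext setup completes; flags page_owner as ready so the
 * tracking hooks and the debugfs file become active.
 */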
static void init_page_owner(void)
{
	if (page_owner_disabled)
		return;

	page_owner_inited = true;
}

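/* Hooks registered with the page_ext subsystem. */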
struct page_ext_operations page_owner_ops = {
	.need = need_page_owner,
	.init = init_page_owner,
};

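/*
 * Free-side hook: clear the owner bit on every page in the 2^order
 * block so stale owner data is not reported for freed pages.
 */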
void __reset_page_owner(struct page *page, unsigned int order)
{
	int i;
	struct page_ext *page_ext;

	for (i = 0; i < (1 << order); i++) {
		page_ext = lookup_page_ext(page + i);
		__clear_bit(PAGE_EXT_OWNER, &page_ext->flags);
	}
}

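/*
 * Allocation-side hook: record the order, gfp mask and the allocation
 * call stack in the page's page_ext.  skip = 3 drops the innermost
 * frames (the stack-saving and allocator entry code) so the stored
 * trace begins near the real caller.
 */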
void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
{
	struct page_ext *page_ext;
	struct stack_trace *trace;

	page_ext = lookup_page_ext(page);

	trace = &page_ext->trace;
	trace->nr_entries = 0;
	trace->max_entries = ARRAY_SIZE(page_ext->trace_entries);
	trace->entries = &page_ext->trace_entries[0];
	trace->skip = 3;
	save_stack_trace(trace);

	page_ext->order = order;
	page_ext->gfp_mask = gfp_mask;

	__set_bit(PAGE_EXT_OWNER, &page_ext->flags);
}

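/*
 * Format one page_owner record into a kernel buffer and copy it to
 * userspace.  Single-letter flags: K=Locked E=Error R=Referenced
 * U=Uptodate D=Dirty L=LRU A=Active S=Slab W=Writeback C=Compound
 * B=SwapCache M=MappedToDisk; "Fallback" marks a page whose migratetype
 * differs from that of its pageblock.
 */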
static ssize_t
print_page_owner(char __user *buf, size_t count, unsigned long pfn,
		struct page *page, struct page_ext *page_ext)
{
	int ret;
	int pageblock_mt, page_mt;
	char *kbuf;

	kbuf = kmalloc(count, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	ret = snprintf(kbuf, count,
			"Page allocated via order %u, mask 0x%x\n",
			page_ext->order, page_ext->gfp_mask);

	if (ret >= count)
		goto err;

	/* Print information relevant to grouping pages by mobility */
	pageblock_mt = get_pfnblock_migratetype(page, pfn);
	page_mt = gfpflags_to_migratetype(page_ext->gfp_mask);
	ret += snprintf(kbuf + ret, count - ret,
			"PFN %lu Block %lu type %d %s Flags %s%s%s%s%s%s%s%s%s%s%s%s\n",
			pfn,
			pfn >> pageblock_order,
			pageblock_mt,
			pageblock_mt != page_mt ? "Fallback" : " ",
			PageLocked(page) ? "K" : " ",
			PageError(page) ? "E" : " ",
			PageReferenced(page) ? "R" : " ",
			PageUptodate(page) ? "U" : " ",
			PageDirty(page) ? "D" : " ",
			PageLRU(page) ? "L" : " ",
			PageActive(page) ? "A" : " ",
			PageSlab(page) ? "S" : " ",
			PageWriteback(page) ? "W" : " ",
			PageCompound(page) ? "C" : " ",
			PageSwapCache(page) ? "B" : " ",
			PageMappedToDisk(page) ? "M" : " ");

	if (ret >= count)
		goto err;

	ret += snprint_stack_trace(kbuf + ret, count - ret,
			&page_ext->trace, 0);
	if (ret >= count)
		goto err;

	ret += snprintf(kbuf + ret, count - ret, "\n");
	if (ret >= count)
		goto err;

	if (copy_to_user(buf, kbuf, ret))
		ret = -EFAULT;

	kfree(kbuf);
	return ret;

err:
	kfree(kbuf);
	return -ENOMEM;
}

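/*
 * debugfs read handler.  *ppos is used as a PFN cursor relative to
 * min_low_pfn, not as a byte offset: each call scans forward for the
 * next page with owner info and returns a single record for it.
 * drain_all_pages() flushes the per-cpu free lists first so recently
 * freed pages are seen as buddy pages and skipped.
 */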
static ssize_t
read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	unsigned long pfn;
	struct page *page;
	struct page_ext *page_ext;

	if (!page_owner_inited)
		return -EINVAL;

	page = NULL;
	pfn = min_low_pfn + *ppos;

	/* Find a valid PFN or the start of a MAX_ORDER_NR_PAGES area */
	while (!pfn_valid(pfn) && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
		pfn++;

	drain_all_pages(NULL);

	/* Find an allocated page */
	for (; pfn < max_pfn; pfn++) {
		/*
		 * If the new page is in a new MAX_ORDER_NR_PAGES area,
		 * validate the area as existing, skip it if not
		 */
		if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0 && !pfn_valid(pfn)) {
			pfn += MAX_ORDER_NR_PAGES - 1;
			continue;
		}

		/* Check for holes within a MAX_ORDER area */
		if (!pfn_valid_within(pfn))
			continue;

		page = pfn_to_page(pfn);
		if (PageBuddy(page)) {
			unsigned long freepage_order = page_order_unsafe(page);

			if (freepage_order < MAX_ORDER)
				pfn += (1UL << freepage_order) - 1;
			continue;
		}

		page_ext = lookup_page_ext(page);

		/*
		 * Pages allocated before initialization of page_owner are
		 * non-buddy and have no page_owner info.
		 */
		if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
			continue;

		/* Record the next PFN to read in the file offset */
		*ppos = (pfn - min_low_pfn) + 1;

		return print_page_owner(buf, count, pfn, page, page_ext);
	}

	return 0;
}

static const struct file_operations proc_page_owner_operations = {
	.read = read_page_owner,
};

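/*
 * Create the debugfs interface.  Example usage, assuming debugfs is
 * mounted at the conventional /sys/kernel/debug and the kernel was
 * booted with "page_owner=on":
 *
 *	cat /sys/kernel/debug/page_owner > page_owner_full.txt
 */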
static int __init pageowner_init(void)
{
	struct dentry *dentry;

	if (!page_owner_inited) {
		pr_info("page_owner is disabled\n");
		return 0;
	}

	dentry = debugfs_create_file("page_owner", S_IRUSR, NULL,
			NULL, &proc_page_owner_operations);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	return 0;
}
module_init(pageowner_init)