#include <linux/debugfs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/bootmem.h>
#include <linux/stacktrace.h>
#include <linux/page_owner.h>
#include "internal.h"

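/*
 * page_owner is compiled in but stays off until "page_owner=on" is
 * passed on the kernel command line (parsed below). page_owner_inited
 * flips to true once init_page_owner() has run.
 */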
static bool page_owner_disabled = true;
bool page_owner_inited __read_mostly;

static void init_early_allocated_pages(void);

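/* Handler for the "page_owner=on" early boot parameter. */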
static int early_page_owner_param(char *buf)
{
	if (!buf)
		return -EINVAL;

	if (strcmp(buf, "on") == 0)
		page_owner_disabled = false;

	return 0;
}
early_param("page_owner", early_page_owner_param);

static bool need_page_owner(void)
{
	if (page_owner_disabled)
		return false;

	return true;
}

static void init_page_owner(void)
{
	if (page_owner_disabled)
		return;

	page_owner_inited = true;
	init_early_allocated_pages();
}

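/*
 * Hook into the page_ext framework: ->need tells it whether to reserve
 * per-page space for owner data, ->init runs once that space is ready.
 * (Presumably registered in the page_ext core, outside this file.)
 */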
struct page_ext_operations page_owner_ops = {
	.need = need_page_owner,
	.init = init_page_owner,
};

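/*
 * Clear the owner bit on every page of a (1 << order) block so that
 * stale ownership is not reported once the pages have been freed.
 */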
void __reset_page_owner(struct page *page, unsigned int order)
{
	int i;
	struct page_ext *page_ext;

	for (i = 0; i < (1 << order); i++) {
		page_ext = lookup_page_ext(page + i);
		__clear_bit(PAGE_EXT_OWNER, &page_ext->flags);
	}
}

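/*
 * Record the current allocation in the page's page_ext entry: capture
 * the stack trace (.skip = 3 drops the three innermost frames), store
 * order and gfp_mask, then mark the entry valid via PAGE_EXT_OWNER.
 */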
void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
{
	struct page_ext *page_ext = lookup_page_ext(page);
	struct stack_trace trace = {
		.nr_entries = 0,
		.max_entries = ARRAY_SIZE(page_ext->trace_entries),
		.entries = &page_ext->trace_entries[0],
		.skip = 3,
	};

	save_stack_trace(&trace);

	page_ext->order = order;
	page_ext->gfp_mask = gfp_mask;
	page_ext->nr_entries = trace.nr_entries;

	__set_bit(PAGE_EXT_OWNER, &page_ext->flags);
}

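/* Return the gfp_mask that was recorded when this page was allocated. */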
gfp_t __get_page_owner_gfp(struct page *page)
{
	struct page_ext *page_ext = lookup_page_ext(page);

	return page_ext->gfp_mask;
}

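/*
 * Format a single owner record (order, gfp_mask, migratetype info, page
 * flags, saved stack trace) into a kernel buffer and copy it to
 * userspace. Returns the number of bytes written, or -ENOMEM when the
 * record does not fit in the user-supplied buffer size.
 */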
static ssize_t
print_page_owner(char __user *buf, size_t count, unsigned long pfn,
		struct page *page, struct page_ext *page_ext)
{
	int ret;
	int pageblock_mt, page_mt;
	char *kbuf;
	struct stack_trace trace = {
		.nr_entries = page_ext->nr_entries,
		.entries = &page_ext->trace_entries[0],
	};

	kbuf = kmalloc(count, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	ret = snprintf(kbuf, count,
			"Page allocated via order %u, mask 0x%x\n",
			page_ext->order, page_ext->gfp_mask);

	if (ret >= count)
		goto err;

	/* Print information relevant to grouping pages by mobility */
	pageblock_mt = get_pfnblock_migratetype(page, pfn);
	page_mt = gfpflags_to_migratetype(page_ext->gfp_mask);
	ret += snprintf(kbuf + ret, count - ret,
			"PFN %lu Block %lu type %d %s Flags %s%s%s%s%s%s%s%s%s%s%s%s\n",
			pfn,
			pfn >> pageblock_order,
			pageblock_mt,
			pageblock_mt != page_mt ? "Fallback" : "        ",
			PageLocked(page)	? "K" : " ",
			PageError(page)		? "E" : " ",
			PageReferenced(page)	? "R" : " ",
			PageUptodate(page)	? "U" : " ",
			PageDirty(page)		? "D" : " ",
			PageLRU(page)		? "L" : " ",
			PageActive(page)	? "A" : " ",
			PageSlab(page)		? "S" : " ",
			PageWriteback(page)	? "W" : " ",
			PageCompound(page)	? "C" : " ",
			PageSwapCache(page)	? "B" : " ",
			PageMappedToDisk(page)	? "M" : " ");

	if (ret >= count)
		goto err;

	ret += snprint_stack_trace(kbuf + ret, count - ret, &trace, 0);
	if (ret >= count)
		goto err;

	ret += snprintf(kbuf + ret, count - ret, "\n");
	if (ret >= count)
		goto err;

	if (copy_to_user(buf, kbuf, ret))
		ret = -EFAULT;

	kfree(kbuf);
	return ret;

err:
	kfree(kbuf);
	return -ENOMEM;
}

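/*
 * debugfs read handler. The file offset is used as a PFN cursor
 * relative to min_low_pfn; each read scans forward to the next page
 * with valid owner info and emits exactly one record.
 */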
static ssize_t
read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	unsigned long pfn;
	struct page *page;
	struct page_ext *page_ext;

	if (!page_owner_inited)
		return -EINVAL;

	page = NULL;
	pfn = min_low_pfn + *ppos;

	/* Find a valid PFN or the start of a MAX_ORDER_NR_PAGES area */
	while (!pfn_valid(pfn) && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
		pfn++;

	drain_all_pages(NULL);

	/* Find an allocated page */
	for (; pfn < max_pfn; pfn++) {
		/*
		 * If the new page is in a new MAX_ORDER_NR_PAGES area,
		 * validate the area as existing, skip it if not
		 */
		if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0 && !pfn_valid(pfn)) {
			pfn += MAX_ORDER_NR_PAGES - 1;
			continue;
		}

		/* Check for holes within a MAX_ORDER area */
		if (!pfn_valid_within(pfn))
			continue;

		page = pfn_to_page(pfn);
		if (PageBuddy(page)) {
			unsigned long freepage_order = page_order_unsafe(page);

			if (freepage_order < MAX_ORDER)
				pfn += (1UL << freepage_order) - 1;
			continue;
		}

		page_ext = lookup_page_ext(page);

		/*
		 * Some pages could be missed by concurrent allocation or free,
		 * because we don't hold the zone lock.
		 */
		if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
			continue;

		/* Record the next PFN to read in the file offset */
		*ppos = (pfn - min_low_pfn) + 1;

		return print_page_owner(buf, count, pfn, page, page_ext);
	}

	return 0;
}

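/*
 * Pages allocated before page_owner was initialized carry no owner
 * info. Scan the zone and give each such page a default record via
 * set_page_owner(page, 0, 0) so it is at least accounted for.
 */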
static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
{
	struct page *page;
	struct page_ext *page_ext;
	unsigned long pfn = zone->zone_start_pfn, block_end_pfn;
	unsigned long end_pfn = pfn + zone->spanned_pages;
	unsigned long count = 0;

	/* Scan block by block. First and last block may be incomplete */
	pfn = zone->zone_start_pfn;

	/*
	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
	 * a zone boundary, it will be double counted between zones. This does
	 * not matter as the mixed block count will still be correct
	 */
	for (; pfn < end_pfn; ) {
		if (!pfn_valid(pfn)) {
			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
			continue;
		}

		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
		block_end_pfn = min(block_end_pfn, end_pfn);

		page = pfn_to_page(pfn);

		for (; pfn < block_end_pfn; pfn++) {
			if (!pfn_valid_within(pfn))
				continue;

			page = pfn_to_page(pfn);

			/*
			 * Checking the buddy flag and order is safe here:
			 * this is the init stage and only a single thread
			 * is running.
			 */
			if (PageBuddy(page)) {
				pfn += (1UL << page_order(page)) - 1;
				continue;
			}

			if (PageReserved(page))
				continue;

			page_ext = lookup_page_ext(page);

			/* Maybe an overlapping zone */
			if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
				continue;

			/* Found an early allocated page */
			set_page_owner(page, 0, 0);
			count++;
		}
	}

	pr_info("Node %d, zone %8s: page owner found early allocated %lu pages\n",
		pgdat->node_id, zone->name, count);
}

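/* Run init_pages_in_zone() on each populated zone, under zone->lock. */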
static void init_zones_in_node(pg_data_t *pgdat)
{
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;
	unsigned long flags;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (!populated_zone(zone))
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		init_pages_in_zone(pgdat, zone);
		spin_unlock_irqrestore(&zone->lock, flags);
	}
}

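/*
 * Drain per-cpu page lists so freed pages are back in the buddy
 * allocator, then scan every online node for early allocated pages.
 */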
static void init_early_allocated_pages(void)
{
	pg_data_t *pgdat;

	drain_all_pages(NULL);
	for_each_online_pgdat(pgdat)
		init_zones_in_node(pgdat);
}

static const struct file_operations proc_page_owner_operations = {
	.read = read_page_owner,
};

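/*
 * Expose the records through debugfs; with debugfs mounted at the
 * conventional /sys/kernel/debug, the file appears as
 * /sys/kernel/debug/page_owner (root-readable only, S_IRUSR).
 */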
static int __init pageowner_init(void)
{
	struct dentry *dentry;

	if (!page_owner_inited) {
		pr_info("page_owner is disabled\n");
		return 0;
	}

	dentry = debugfs_create_file("page_owner", S_IRUSR, NULL,
			NULL, &proc_page_owner_operations);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	return 0;
}
late_initcall(pageowner_init)