#include <linux/debugfs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/bootmem.h>
#include <linux/stacktrace.h>
#include <linux/page_owner.h>
#include <linux/jump_label.h>
#include <linux/migrate.h>
#include <linux/stackdepot.h>
#include <linux/seq_file.h>

#include "internal.h"

/*
 * TODO: teach PAGE_OWNER_STACK_DEPTH (__dump_page_owner and save_stack)
 * to use off-stack temporary storage
 */
#define PAGE_OWNER_STACK_DEPTH (16)

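/*
 * Per-page allocation metadata kept by page_owner. One instance is stored
 * in each page's page_ext area and records the allocation order, the gfp
 * mask used for the allocation, the reason for the last migration (or -1
 * if the page has not been migrated) and a stackdepot handle referencing
 * the allocation stack trace.
 */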
struct page_owner {
	unsigned int order;
	gfp_t gfp_mask;
	int last_migrate_reason;
	depot_stack_handle_t handle;
};

static bool page_owner_disabled = true;
DEFINE_STATIC_KEY_FALSE(page_owner_inited);

static depot_stack_handle_t dummy_handle;
static depot_stack_handle_t failure_handle;

static void init_early_allocated_pages(void);

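/*
 * page_owner is disabled by default and is enabled by passing
 * "page_owner=on" on the kernel command line.
 */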
static int early_page_owner_param(char *buf)
{
	if (!buf)
		return -EINVAL;

	if (strcmp(buf, "on") == 0)
		page_owner_disabled = false;

	return 0;
}
early_param("page_owner", early_page_owner_param);

static bool need_page_owner(void)
{
	if (page_owner_disabled)
		return false;

	return true;
}

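/*
 * Two sentinel stack traces are registered with stackdepot up front:
 * dummy_handle is returned when saving the real stack would recurse back
 * into the allocator, and failure_handle is returned when
 * depot_save_stack() cannot record the trace (e.g. out of memory).
 */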
static noinline void register_dummy_stack(void)
{
	unsigned long entries[4];
	struct stack_trace dummy;

	dummy.nr_entries = 0;
	dummy.max_entries = ARRAY_SIZE(entries);
	dummy.entries = &entries[0];
	dummy.skip = 0;

	save_stack_trace(&dummy);
	dummy_handle = depot_save_stack(&dummy, GFP_KERNEL);
}

static noinline void register_failure_stack(void)
{
	unsigned long entries[4];
	struct stack_trace failure;

	failure.nr_entries = 0;
	failure.max_entries = ARRAY_SIZE(entries);
	failure.entries = &entries[0];
	failure.skip = 0;

	save_stack_trace(&failure);
	failure_handle = depot_save_stack(&failure, GFP_KERNEL);
}

static void init_page_owner(void)
{
	if (page_owner_disabled)
		return;

	register_dummy_stack();
	register_failure_stack();
	static_branch_enable(&page_owner_inited);
	init_early_allocated_pages();
}

struct page_ext_operations page_owner_ops = {
	.size = sizeof(struct page_owner),
	.need = need_page_owner,
	.init = init_page_owner,
};

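/*
 * The struct page_owner data lives inside the page_ext area; the page_ext
 * core reserves .size bytes for this feature and publishes their location
 * through page_owner_ops.offset.
 */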
static inline struct page_owner *get_page_owner(struct page_ext *page_ext)
{
	return (void *)page_ext + page_owner_ops.offset;
}

void __reset_page_owner(struct page *page, unsigned int order)
{
	int i;
	struct page_ext *page_ext;

	for (i = 0; i < (1 << order); i++) {
		page_ext = lookup_page_ext(page + i);
		if (unlikely(!page_ext))
			continue;
		__clear_bit(PAGE_EXT_OWNER, &page_ext->flags);
	}
}

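/*
 * Return true if the passed-in return address appears more than once in
 * the captured stack trace, i.e. the allocation was triggered recursively
 * from save_stack() itself (see the comment in save_stack() below).
 */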
static inline bool check_recursive_alloc(struct stack_trace *trace,
					unsigned long ip)
{
	int i, count;

	if (!trace->nr_entries)
		return false;

	for (i = 0, count = 0; i < trace->nr_entries; i++) {
		if (trace->entries[i] == ip && ++count == 2)
			return true;
	}

	return false;
}

static noinline depot_stack_handle_t save_stack(gfp_t flags)
{
	unsigned long entries[PAGE_OWNER_STACK_DEPTH];
	struct stack_trace trace = {
		.nr_entries = 0,
		.entries = entries,
		.max_entries = PAGE_OWNER_STACK_DEPTH,
		.skip = 0
	};
	depot_stack_handle_t handle;

	save_stack_trace(&trace);
	if (trace.nr_entries != 0 &&
	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
		trace.nr_entries--;

	/*
	 * We need to check for recursion here because our request to
	 * stackdepot could trigger a memory allocation to save the new
	 * entry. That allocation would reach this point and call
	 * depot_save_stack() again if we don't catch it. Since stackdepot
	 * would still be short of memory, it would try to allocate again
	 * and loop forever.
	 */
	if (check_recursive_alloc(&trace, _RET_IP_))
		return dummy_handle;

	handle = depot_save_stack(&trace, flags);
	if (!handle)
		handle = failure_handle;

	return handle;
}

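/*
 * Record the owner information for a newly allocated page: capture the
 * allocation stack, remember the order and gfp mask, and mark the
 * page_ext as owned.
 */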
noinline void __set_page_owner(struct page *page, unsigned int order,
					gfp_t gfp_mask)
{
	struct page_ext *page_ext = lookup_page_ext(page);
	struct page_owner *page_owner;

	if (unlikely(!page_ext))
		return;

	page_owner = get_page_owner(page_ext);
	page_owner->handle = save_stack(gfp_mask);
	page_owner->order = order;
	page_owner->gfp_mask = gfp_mask;
	page_owner->last_migrate_reason = -1;

	__set_bit(PAGE_EXT_OWNER, &page_ext->flags);
}

void __set_page_owner_migrate_reason(struct page *page, int reason)
{
	struct page_ext *page_ext = lookup_page_ext(page);
	struct page_owner *page_owner;

	if (unlikely(!page_ext))
		return;

	page_owner = get_page_owner(page_ext);
	page_owner->last_migrate_reason = reason;
}

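/*
 * When a high-order page is split, reset the recorded order of the head
 * page to 0 and give every following page its own copy of the owner
 * information, so each resulting base page is accounted for individually.
 */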
void __split_page_owner(struct page *page, unsigned int order)
{
	int i;
	struct page_ext *page_ext = lookup_page_ext(page);
	struct page_owner *page_owner;

	if (unlikely(!page_ext))
		return;

	page_owner = get_page_owner(page_ext);
	page_owner->order = 0;
	for (i = 1; i < (1 << order); i++)
		__copy_page_owner(page, page + i);
}

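/*
 * Copy the owner information from oldpage to newpage during page
 * migration so that the new page reports the original allocation.
 */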
void __copy_page_owner(struct page *oldpage, struct page *newpage)
{
	struct page_ext *old_ext = lookup_page_ext(oldpage);
	struct page_ext *new_ext = lookup_page_ext(newpage);
	struct page_owner *old_page_owner, *new_page_owner;

	if (unlikely(!old_ext || !new_ext))
		return;

	old_page_owner = get_page_owner(old_ext);
	new_page_owner = get_page_owner(new_ext);
	new_page_owner->order = old_page_owner->order;
	new_page_owner->gfp_mask = old_page_owner->gfp_mask;
	new_page_owner->last_migrate_reason =
		old_page_owner->last_migrate_reason;
	new_page_owner->handle = old_page_owner->handle;

	/*
	 * We don't clear the bit on the oldpage as it's going to be freed
	 * after migration. Until then, the info can be useful in case of
	 * a bug, and the overall stats will be off only a bit and only
	 * temporarily. Also, migrate_misplaced_transhuge_page() can still
	 * fail the migration and then we want the oldpage to retain the
	 * info. But in that case we also don't need to explicitly clear
	 * the info from the new page, which will be freed.
	 */
	__set_bit(PAGE_EXT_OWNER, &new_ext->flags);
}

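/*
 * Count, per migratetype, the pageblocks that contain at least one page
 * whose allocation migratetype differs from the pageblock's, and emit one
 * line per zone. This backs the mixed-block counts shown in
 * /proc/pagetypeinfo when page_owner is enabled.
 */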
void pagetypeinfo_showmixedcount_print(struct seq_file *m,
				       pg_data_t *pgdat, struct zone *zone)
{
	struct page *page;
	struct page_ext *page_ext;
	struct page_owner *page_owner;
	unsigned long pfn = zone->zone_start_pfn, block_end_pfn;
	unsigned long end_pfn = pfn + zone->spanned_pages;
	unsigned long count[MIGRATE_TYPES] = { 0, };
	int pageblock_mt, page_mt;
	int i;

	/* Scan block by block. First and last block may be incomplete */
	pfn = zone->zone_start_pfn;

	/*
	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
	 * a zone boundary, it will be double counted between zones. This does
	 * not matter as the mixed block count will still be correct
	 */
	for (; pfn < end_pfn; ) {
		if (!pfn_valid(pfn)) {
			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
			continue;
		}

		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
		block_end_pfn = min(block_end_pfn, end_pfn);

		page = pfn_to_page(pfn);
		pageblock_mt = get_pageblock_migratetype(page);

		for (; pfn < block_end_pfn; pfn++) {
			if (!pfn_valid_within(pfn))
				continue;

			page = pfn_to_page(pfn);

			if (page_zone(page) != zone)
				continue;

			if (PageBuddy(page)) {
				pfn += (1UL << page_order(page)) - 1;
				continue;
			}

			if (PageReserved(page))
				continue;

			page_ext = lookup_page_ext(page);
			if (unlikely(!page_ext))
				continue;

			if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
				continue;

			page_owner = get_page_owner(page_ext);
			page_mt = gfpflags_to_migratetype(
					page_owner->gfp_mask);
			if (pageblock_mt != page_mt) {
				if (is_migrate_cma(pageblock_mt))
					count[MIGRATE_MOVABLE]++;
				else
					count[pageblock_mt]++;

				pfn = block_end_pfn;
				break;
			}
			pfn += (1UL << page_owner->order) - 1;
		}
	}

	/* Print counts */
	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (i = 0; i < MIGRATE_TYPES; i++)
		seq_printf(m, "%12lu ", count[i]);
	seq_putc(m, '\n');
}

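/*
 * Format one page_owner record into a kernel buffer and copy it to the
 * user buffer supplied by read_page_owner(). Returns the number of bytes
 * written, -EFAULT if the copy to userspace fails, or -ENOMEM if the
 * record does not fit in "count" bytes.
 */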
static ssize_t
print_page_owner(char __user *buf, size_t count, unsigned long pfn,
		struct page *page, struct page_owner *page_owner,
		depot_stack_handle_t handle)
{
	int ret;
	int pageblock_mt, page_mt;
	char *kbuf;
	unsigned long entries[PAGE_OWNER_STACK_DEPTH];
	struct stack_trace trace = {
		.nr_entries = 0,
		.entries = entries,
		.max_entries = PAGE_OWNER_STACK_DEPTH,
		.skip = 0
	};

	kbuf = kmalloc(count, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	ret = snprintf(kbuf, count,
			"Page allocated via order %u, mask %#x(%pGg)\n",
			page_owner->order, page_owner->gfp_mask,
			&page_owner->gfp_mask);

	if (ret >= count)
		goto err;

	/* Print information relevant to grouping pages by mobility */
	pageblock_mt = get_pageblock_migratetype(page);
	page_mt = gfpflags_to_migratetype(page_owner->gfp_mask);
	ret += snprintf(kbuf + ret, count - ret,
			"PFN %lu type %s Block %lu type %s Flags %#lx(%pGp)\n",
			pfn,
			migratetype_names[page_mt],
			pfn >> pageblock_order,
			migratetype_names[pageblock_mt],
			page->flags, &page->flags);

	if (ret >= count)
		goto err;

	depot_fetch_stack(handle, &trace);
	ret += snprint_stack_trace(kbuf + ret, count - ret, &trace, 0);
	if (ret >= count)
		goto err;

	if (page_owner->last_migrate_reason != -1) {
		ret += snprintf(kbuf + ret, count - ret,
			"Page has been migrated, last migrate reason: %s\n",
			migrate_reason_names[page_owner->last_migrate_reason]);
		if (ret >= count)
			goto err;
	}

	ret += snprintf(kbuf + ret, count - ret, "\n");
	if (ret >= count)
		goto err;

	if (copy_to_user(buf, kbuf, ret))
		ret = -EFAULT;

	kfree(kbuf);
	return ret;

err:
	kfree(kbuf);
	return -ENOMEM;
}

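/*
 * Dump the owner information for a single page to the kernel log; used
 * from dump_page() when a problem with a page is reported.
 */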
void __dump_page_owner(struct page *page)
{
	struct page_ext *page_ext = lookup_page_ext(page);
	struct page_owner *page_owner;
	unsigned long entries[PAGE_OWNER_STACK_DEPTH];
	struct stack_trace trace = {
		.nr_entries = 0,
		.entries = entries,
		.max_entries = PAGE_OWNER_STACK_DEPTH,
		.skip = 0
	};
	depot_stack_handle_t handle;
	gfp_t gfp_mask;
	int mt;

	if (unlikely(!page_ext)) {
		pr_alert("There is no page extension available.\n");
		return;
	}

	page_owner = get_page_owner(page_ext);
	gfp_mask = page_owner->gfp_mask;
	mt = gfpflags_to_migratetype(gfp_mask);

	if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
		pr_alert("page_owner info is not active (free page?)\n");
		return;
	}

	handle = READ_ONCE(page_owner->handle);
	if (!handle) {
		pr_alert("page_owner info is not active (free page?)\n");
		return;
	}

	depot_fetch_stack(handle, &trace);
	pr_alert("page allocated via order %u, migratetype %s, gfp_mask %#x(%pGg)\n",
		 page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask);
	print_stack_trace(&trace, 0);

	if (page_owner->last_migrate_reason != -1)
		pr_alert("page has been migrated, last migrate reason: %s\n",
			migrate_reason_names[page_owner->last_migrate_reason]);
}

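/*
 * debugfs read handler: walk the PFN range starting from the file offset,
 * find the next allocated page with owner information, print its record,
 * and advance *ppos so the next read resumes after it.
 */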
static ssize_t
read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	unsigned long pfn;
	struct page *page;
	struct page_ext *page_ext;
	struct page_owner *page_owner;
	depot_stack_handle_t handle;

	if (!static_branch_unlikely(&page_owner_inited))
		return -EINVAL;

	page = NULL;
	pfn = min_low_pfn + *ppos;

	/* Find a valid PFN or the start of a MAX_ORDER_NR_PAGES area */
	while (!pfn_valid(pfn) && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
		pfn++;

	drain_all_pages(NULL);

	/* Find an allocated page */
	for (; pfn < max_pfn; pfn++) {
		/*
		 * If the new page is in a new MAX_ORDER_NR_PAGES area,
		 * validate the area as existing, skip it if not
		 */
		if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0 && !pfn_valid(pfn)) {
			pfn += MAX_ORDER_NR_PAGES - 1;
			continue;
		}

		/* Check for holes within a MAX_ORDER area */
		if (!pfn_valid_within(pfn))
			continue;

		page = pfn_to_page(pfn);
		if (PageBuddy(page)) {
			unsigned long freepage_order = page_order_unsafe(page);

			if (freepage_order < MAX_ORDER)
				pfn += (1UL << freepage_order) - 1;
			continue;
		}

		page_ext = lookup_page_ext(page);
		if (unlikely(!page_ext))
			continue;

		/*
		 * Some pages could be missed by concurrent allocation or free,
		 * because we don't hold the zone lock.
		 */
		if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
			continue;

		page_owner = get_page_owner(page_ext);

		/*
		 * Access to page_owner->handle isn't synchronized, so we
		 * have to be careful when reading it.
		 */
		handle = READ_ONCE(page_owner->handle);
		if (!handle)
			continue;

		/* Record the next PFN to read in the file offset */
		*ppos = (pfn - min_low_pfn) + 1;

		return print_page_owner(buf, count, pfn, page,
				page_owner, handle);
	}

	return 0;
}

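/*
 * Pages that were allocated before page_owner was initialized have no
 * owner information. Walk each zone and mark such pages with a zero order
 * and a zero gfp mask so they are at least accounted for.
 */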
static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
{
	struct page *page;
	struct page_ext *page_ext;
	unsigned long pfn = zone->zone_start_pfn, block_end_pfn;
	unsigned long end_pfn = pfn + zone->spanned_pages;
	unsigned long count = 0;

	/* Scan block by block. First and last block may be incomplete */
	pfn = zone->zone_start_pfn;

	/*
	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
	 * a zone boundary, it will be double counted between zones. This does
	 * not matter as the mixed block count will still be correct
	 */
	for (; pfn < end_pfn; ) {
		if (!pfn_valid(pfn)) {
			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
			continue;
		}

		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
		block_end_pfn = min(block_end_pfn, end_pfn);

		page = pfn_to_page(pfn);

		for (; pfn < block_end_pfn; pfn++) {
			if (!pfn_valid_within(pfn))
				continue;

			page = pfn_to_page(pfn);

			if (page_zone(page) != zone)
				continue;

			/*
			 * It is safe to check the buddy flag and order here
			 * because this is the init stage and only a single
			 * thread runs.
			 */
			if (PageBuddy(page)) {
				pfn += (1UL << page_order(page)) - 1;
				continue;
			}

			if (PageReserved(page))
				continue;

			page_ext = lookup_page_ext(page);
			if (unlikely(!page_ext))
				continue;

			/* Maybe overlapping zone */
			if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
				continue;

			/* Found early allocated page */
			set_page_owner(page, 0, 0);
			count++;
		}
	}

	pr_info("Node %d, zone %8s: page owner found early allocated %lu pages\n",
		pgdat->node_id, zone->name, count);
}

static void init_zones_in_node(pg_data_t *pgdat)
{
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;
	unsigned long flags;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (!populated_zone(zone))
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		init_pages_in_zone(pgdat, zone);
		spin_unlock_irqrestore(&zone->lock, flags);
	}
}

static void init_early_allocated_pages(void)
{
	pg_data_t *pgdat;

	drain_all_pages(NULL);
	for_each_online_pgdat(pgdat)
		init_zones_in_node(pgdat);
}

static const struct file_operations proc_page_owner_operations = {
	.read = read_page_owner,
};

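/*
 * Expose the records through a root-only "page_owner" file in debugfs
 * (typically /sys/kernel/debug/page_owner).
 */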
static int __init pageowner_init(void)
{
	struct dentry *dentry;

	if (!static_branch_unlikely(&page_owner_inited)) {
		pr_info("page_owner is disabled\n");
		return 0;
	}

	dentry = debugfs_create_file("page_owner", S_IRUSR, NULL,
			NULL, &proc_page_owner_operations);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	return 0;
}
late_initcall(pageowner_init)