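/*
 * page_owner: track the allocation order, gfp mask and allocation stack
 * trace of every allocated page, exposed to userspace through debugfs.
 * Enabled at boot with the "page_owner=on" kernel parameter.
 */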
#include <linux/debugfs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/bootmem.h>
#include <linux/stacktrace.h>
#include <linux/page_owner.h>
#include <linux/jump_label.h>
#include <linux/migrate.h>
#include "internal.h"

static bool page_owner_disabled = true;
DEFINE_STATIC_KEY_FALSE(page_owner_inited);

static void init_early_allocated_pages(void);

static int early_page_owner_param(char *buf)
{
	if (!buf)
		return -EINVAL;

	if (strcmp(buf, "on") == 0)
		page_owner_disabled = false;

	return 0;
}
early_param("page_owner", early_page_owner_param);

static bool need_page_owner(void)
{
	if (page_owner_disabled)
		return false;

	return true;
}

static void init_page_owner(void)
{
	if (page_owner_disabled)
		return;

	static_branch_enable(&page_owner_inited);
	init_early_allocated_pages();
}

struct page_ext_operations page_owner_ops = {
	.need = need_page_owner,
	.init = init_page_owner,
};

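/*
 * Clear the owner bit on every page of the freed block, so stale ownership
 * information is not reported for pages that are no longer allocated.
 */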
void __reset_page_owner(struct page *page, unsigned int order)
{
	int i;
	struct page_ext *page_ext;

	for (i = 0; i < (1 << order); i++) {
		page_ext = lookup_page_ext(page + i);
		__clear_bit(PAGE_EXT_OWNER, &page_ext->flags);
	}
}

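/*
 * Record the allocation order, gfp mask and the allocator's stack trace in
 * the page's page_ext, then mark the page as tracked.
 */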
void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
{
	struct page_ext *page_ext = lookup_page_ext(page);
	struct stack_trace trace = {
		.nr_entries = 0,
		.max_entries = ARRAY_SIZE(page_ext->trace_entries),
		.entries = &page_ext->trace_entries[0],
		.skip = 3,
	};

	save_stack_trace(&trace);

	page_ext->order = order;
	page_ext->gfp_mask = gfp_mask;
	page_ext->nr_entries = trace.nr_entries;
	page_ext->last_migrate_reason = -1;

	__set_bit(PAGE_EXT_OWNER, &page_ext->flags);
}

void __set_page_owner_migrate_reason(struct page *page, int reason)
{
	struct page_ext *page_ext = lookup_page_ext(page);

	page_ext->last_migrate_reason = reason;
}

gfp_t __get_page_owner_gfp(struct page *page)
{
	struct page_ext *page_ext = lookup_page_ext(page);

	return page_ext->gfp_mask;
}

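/*
 * Copy the owner information from a page that is being migrated to its
 * replacement page, so the tracking survives migration.
 */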
void __copy_page_owner(struct page *oldpage, struct page *newpage)
{
	struct page_ext *old_ext = lookup_page_ext(oldpage);
	struct page_ext *new_ext = lookup_page_ext(newpage);
	int i;

	new_ext->order = old_ext->order;
	new_ext->gfp_mask = old_ext->gfp_mask;
	new_ext->nr_entries = old_ext->nr_entries;

	for (i = 0; i < ARRAY_SIZE(new_ext->trace_entries); i++)
		new_ext->trace_entries[i] = old_ext->trace_entries[i];

	/*
	 * We don't clear the bit on the oldpage as it's going to be freed
	 * after migration. Until then, the info can be useful in case of
	 * a bug, and the overall stats will be off a bit only temporarily.
	 * Also, migrate_misplaced_transhuge_page() can still fail the
	 * migration and then we want the oldpage to retain the info. But
	 * in that case we also don't need to explicitly clear the info from
	 * the new page, which will be freed.
	 */
	__set_bit(PAGE_EXT_OWNER, &new_ext->flags);
}

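/*
 * Format one page's owner information (allocation mask, migratetype, stack
 * trace and last migrate reason) into a kernel buffer and copy it to
 * userspace.
 */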
static ssize_t
print_page_owner(char __user *buf, size_t count, unsigned long pfn,
		struct page *page, struct page_ext *page_ext)
{
	int ret;
	int pageblock_mt, page_mt;
	char *kbuf;
	struct stack_trace trace = {
		.nr_entries = page_ext->nr_entries,
		.entries = &page_ext->trace_entries[0],
	};

	kbuf = kmalloc(count, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	ret = snprintf(kbuf, count,
			"Page allocated via order %u, mask %#x(%pGg)\n",
			page_ext->order, page_ext->gfp_mask,
			&page_ext->gfp_mask);

	if (ret >= count)
		goto err;

	/* Print information relevant to grouping pages by mobility */
	pageblock_mt = get_pfnblock_migratetype(page, pfn);
	page_mt = gfpflags_to_migratetype(page_ext->gfp_mask);
	ret += snprintf(kbuf + ret, count - ret,
			"PFN %lu type %s Block %lu type %s Flags %#lx(%pGp)\n",
			pfn,
			migratetype_names[page_mt],
			pfn >> pageblock_order,
			migratetype_names[pageblock_mt],
			page->flags, &page->flags);

	if (ret >= count)
		goto err;

	ret += snprint_stack_trace(kbuf + ret, count - ret, &trace, 0);
	if (ret >= count)
		goto err;

	if (page_ext->last_migrate_reason != -1) {
		ret += snprintf(kbuf + ret, count - ret,
			"Page has been migrated, last migrate reason: %s\n",
			migrate_reason_names[page_ext->last_migrate_reason]);
		if (ret >= count)
			goto err;
	}

	ret += snprintf(kbuf + ret, count - ret, "\n");
	if (ret >= count)
		goto err;

	if (copy_to_user(buf, kbuf, ret))
		ret = -EFAULT;

	kfree(kbuf);
	return ret;

err:
	kfree(kbuf);
	return -ENOMEM;
}

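/*
 * debugfs read handler. The file offset encodes the next PFN to scan; each
 * read() emits the record of at most one tracked page.
 */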
static ssize_t
read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	unsigned long pfn;
	struct page *page;
	struct page_ext *page_ext;

	if (!static_branch_unlikely(&page_owner_inited))
		return -EINVAL;

	page = NULL;
	pfn = min_low_pfn + *ppos;

	/* Find a valid PFN or the start of a MAX_ORDER_NR_PAGES area */
	while (!pfn_valid(pfn) && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
		pfn++;

	drain_all_pages(NULL);

	/* Find an allocated page */
	for (; pfn < max_pfn; pfn++) {
		/*
		 * If the new page is in a new MAX_ORDER_NR_PAGES area,
		 * validate the area as existing, skip it if not
		 */
		if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0 && !pfn_valid(pfn)) {
			pfn += MAX_ORDER_NR_PAGES - 1;
			continue;
		}

		/* Check for holes within a MAX_ORDER area */
		if (!pfn_valid_within(pfn))
			continue;

		page = pfn_to_page(pfn);
		if (PageBuddy(page)) {
			unsigned long freepage_order = page_order_unsafe(page);

			if (freepage_order < MAX_ORDER)
				pfn += (1UL << freepage_order) - 1;
			continue;
		}

		page_ext = lookup_page_ext(page);

		/*
		 * Some pages could be missed by concurrent allocation or free,
		 * because we don't hold the zone lock.
		 */
		if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
			continue;

		/* Record the next PFN to read in the file offset */
		*ppos = (pfn - min_low_pfn) + 1;

		return print_page_owner(buf, count, pfn, page, page_ext);
	}

	return 0;
}

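/*
 * Walk a zone and mark pages that were allocated before page_owner was
 * initialized (e.g. by the boot allocator), so they show up as allocated
 * instead of being silently skipped.
 */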
static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
{
	struct page *page;
	struct page_ext *page_ext;
	unsigned long pfn = zone->zone_start_pfn, block_end_pfn;
	unsigned long end_pfn = pfn + zone->spanned_pages;
	unsigned long count = 0;

	/* Scan block by block. First and last block may be incomplete */
	pfn = zone->zone_start_pfn;

	/*
	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
	 * a zone boundary, it will be double counted between zones. This does
	 * not matter as the mixed block count will still be correct
	 */
	for (; pfn < end_pfn; ) {
		if (!pfn_valid(pfn)) {
			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
			continue;
		}

		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
		block_end_pfn = min(block_end_pfn, end_pfn);

		page = pfn_to_page(pfn);

		for (; pfn < block_end_pfn; pfn++) {
			if (!pfn_valid_within(pfn))
				continue;

			page = pfn_to_page(pfn);

			/*
			 * It is safe to check the buddy flag and order here,
			 * because this is the init stage and only a single
			 * thread runs.
			 */
			if (PageBuddy(page)) {
				pfn += (1UL << page_order(page)) - 1;
				continue;
			}

			if (PageReserved(page))
				continue;

			page_ext = lookup_page_ext(page);

			/* Maybe overlapping zone */
			if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
				continue;

			/* Found early allocated page */
			set_page_owner(page, 0, 0);
			count++;
		}
	}

	pr_info("Node %d, zone %8s: page owner found early allocated %lu pages\n",
		pgdat->node_id, zone->name, count);
}

static void init_zones_in_node(pg_data_t *pgdat)
{
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;
	unsigned long flags;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (!populated_zone(zone))
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		init_pages_in_zone(pgdat, zone);
		spin_unlock_irqrestore(&zone->lock, flags);
	}
}

static void init_early_allocated_pages(void)
{
	pg_data_t *pgdat;

	drain_all_pages(NULL);
	for_each_online_pgdat(pgdat)
		init_zones_in_node(pgdat);
}

static const struct file_operations proc_page_owner_operations = {
	.read		= read_page_owner,
};

static int __init pageowner_init(void)
{
	struct dentry *dentry;

	if (!static_branch_unlikely(&page_owner_inited)) {
		pr_info("page_owner is disabled\n");
		return 0;
	}

	dentry = debugfs_create_file("page_owner", S_IRUSR, NULL,
			NULL, &proc_page_owner_operations);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	return 0;
}
late_initcall(pageowner_init)
Paul Gortmaker44c5af92015-05-01 21:57:34 -0400353late_initcall(pageowner_init)