#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>

static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pte_t *pte;
	int err = 0;

	pte = pte_offset_map(pmd, addr);
	for (;;) {
		err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
		if (err)
			break;
		addr += PAGE_SIZE;
		if (addr == end)
			break;
		pte++;
	}

	pte_unmap(pte);
	return err;
}

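/*
 * Example (illustrative sketch, not part of the original file): a
 * minimal ->pte_entry callback.  walk_pte_range() above hands it one
 * mapped PTE at a time, and ->private is used here to accumulate a
 * count of present pages.  The helper name is hypothetical, chosen
 * for this example only.
 */
static int count_present_pte(pte_t *pte, unsigned long addr,
			     unsigned long end, struct mm_walk *walk)
{
	unsigned long *nr_present = walk->private;

	if (pte_present(*pte))
		(*nr_present)++;
	return 0;	/* a non-zero return would abort the walk */
}
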
static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pmd_t *pmd;
	unsigned long next;
	int err = 0;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd)) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			continue;
		}
		if (walk->pmd_entry)
			err = walk->pmd_entry(pmd, addr, next, walk);
		if (!err && walk->pte_entry)
			err = walk_pte_range(pmd, addr, next, walk);
		if (err)
			break;
	} while (pmd++, addr = next, addr != end);

	return err;
}

static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pud_t *pud;
	unsigned long next;
	int err = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud)) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			continue;
		}
		if (walk->pud_entry)
			err = walk->pud_entry(pud, addr, next, walk);
		if (!err && (walk->pmd_entry || walk->pte_entry))
			err = walk_pmd_range(pud, addr, next, walk);
		if (err)
			break;
	} while (pud++, addr = next, addr != end);

	return err;
}

#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long addr,
				       unsigned long end)
{
	unsigned long boundary = (addr & huge_page_mask(h)) + huge_page_size(h);
	return boundary < end ? boundary : end;
}

static int walk_hugetlb_range(struct vm_area_struct *vma,
			      unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	struct hstate *h = hstate_vma(vma);
	unsigned long next;
	unsigned long hmask = huge_page_mask(h);
	pte_t *pte;
	int err = 0;

	do {
		next = hugetlb_entry_end(h, addr, end);
		pte = huge_pte_offset(walk->mm, addr & hmask);
		if (pte && walk->hugetlb_entry)
			err = walk->hugetlb_entry(pte, hmask, addr, next, walk);
		if (err)
			return err;
	} while (addr = next, addr != end);

	return 0;
}
#endif

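#ifdef CONFIG_HUGETLB_PAGE
/*
 * Example (illustrative sketch, not part of the original file): a
 * ->hugetlb_entry callback matching the invocation in
 * walk_hugetlb_range() above.  @hmask is the huge page mask, so each
 * call covers one huge page; the counter wiring through ->private is
 * hypothetical, mirroring the pte_entry example earlier.
 */
static int count_present_hugetlb(pte_t *pte, unsigned long hmask,
				 unsigned long addr, unsigned long next,
				 struct mm_walk *walk)
{
	unsigned long *nr_huge = walk->private;

	if (pte_present(*pte))
		(*nr_huge)++;
	return 0;
}
#endif
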
/**
 * walk_page_range - walk a memory map's page tables with a callback
 * @addr: starting address
 * @end: ending address
 * @walk: set of callbacks to invoke for each level of the tree
 *
 * Recursively walk the page table for the memory area in a VMA,
 * calling supplied callbacks. Callbacks are called in-order (first
 * PGD, first PUD, first PMD, first PTE, second PTE... second PMD,
 * etc.). If lower-level callbacks are omitted, walking depth is reduced.
 *
 * Each callback receives an entry pointer, the start and end of the
 * associated range, and the mm_walk itself, for access to its
 * ->private or ->mm fields.
 *
 * No locks are taken, but the bottom level iterator will map PTE
 * directories from highmem if necessary.
 *
 * If any callback returns a non-zero value, the walk is aborted and
 * the return value is propagated back to the caller. Otherwise 0 is returned.
 */
int walk_page_range(unsigned long addr, unsigned long end,
		    struct mm_walk *walk)
{
	pgd_t *pgd;
	unsigned long next;
	int err = 0;
	struct vm_area_struct *vma;

	if (addr >= end)
		return err;

	if (!walk->mm)
		return -EINVAL;

	pgd = pgd_offset(walk->mm, addr);
	do {
		next = pgd_addr_end(addr, end);

		/*
		 * Handle hugetlb vmas individually, because the page table
		 * walk for a hugetlb page is architecture-dependent and we
		 * can't handle it in the same manner as non-huge pages.
		 */
		vma = find_vma(walk->mm, addr);
#ifdef CONFIG_HUGETLB_PAGE
		if (vma && is_vm_hugetlb_page(vma)) {
			if (vma->vm_end < next)
				next = vma->vm_end;
			/*
			 * A hugepage is very tightly coupled with its vma,
			 * so walk through hugetlb entries within a given vma.
			 */
			err = walk_hugetlb_range(vma, addr, next, walk);
			if (err)
				break;
			pgd = pgd_offset(walk->mm, next);
			continue;
		}
#endif
		if (pgd_none_or_clear_bad(pgd)) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			pgd++;
			continue;
		}
		if (walk->pgd_entry)
			err = walk->pgd_entry(pgd, addr, next, walk);
		if (!err &&
		    (walk->pud_entry || walk->pmd_entry || walk->pte_entry))
			err = walk_pud_range(pgd, addr, next, walk);
		if (err)
			break;
		pgd++;
	} while (addr = next, addr != end);

	return err;
}
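
/*
 * Example (illustrative sketch, not part of the original file):
 * driving the walk with the hypothetical count_present_pte() callback
 * sketched above.  The caller is assumed to hold mm->mmap_sem for
 * reading, since walk_page_range() calls find_vma().
 */
static unsigned long count_resident_pages(struct mm_struct *mm,
					  unsigned long start,
					  unsigned long end)
{
	unsigned long nr_present = 0;
	struct mm_walk count_walk = {
		.pte_entry	= count_present_pte,
		.mm		= mm,
		.private	= &nr_present,
	};

	down_read(&mm->mmap_sem);
	walk_page_range(start, end, &count_walk);
	up_read(&mm->mmap_sem);

	return nr_present;
}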