#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>

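/*
 * Walk the ptes in [addr, end) under one pmd entry, invoking ->pte_entry
 * for each page-sized slot.  Only reached when ->pte_entry is set.
 */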
static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pte_t *pte;
	int err = 0;

	pte = pte_offset_map(pmd, addr);
	for (;;) {
		err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
		if (err)
			break;
		addr += PAGE_SIZE;
		if (addr == end)
			break;
		pte++;
	}

	pte_unmap(pte);
	return err;
}

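/*
 * Walk the pmd entries under one pud entry: report holes via ->pte_hole,
 * hand populated entries to ->pmd_entry, and descend into walk_pte_range()
 * when a ->pte_entry callback is present.  Transparent huge pages are
 * split first so that the pte level is always walkable.
 */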
static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pmd_t *pmd;
	unsigned long next;
	int err = 0;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		split_huge_page_pmd(walk->mm, pmd);
		if (pmd_none_or_clear_bad(pmd)) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			continue;
		}
		if (walk->pmd_entry)
			err = walk->pmd_entry(pmd, addr, next, walk);
		if (!err && walk->pte_entry)
			err = walk_pte_range(pmd, addr, next, walk);
		if (err)
			break;
	} while (pmd++, addr = next, addr != end);

	return err;
}

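/*
 * Walk the pud entries under one pgd entry, reporting holes via
 * ->pte_hole and descending into walk_pmd_range() when lower-level
 * callbacks exist.
 */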
static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pud_t *pud;
	unsigned long next;
	int err = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud)) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			continue;
		}
		if (walk->pud_entry)
			err = walk->pud_entry(pud, addr, next, walk);
		if (!err && (walk->pmd_entry || walk->pte_entry))
			err = walk_pmd_range(pud, addr, next, walk);
		if (err)
			break;
	} while (pud++, addr = next, addr != end);

	return err;
}

#ifdef CONFIG_HUGETLB_PAGE
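/*
 * Return the end of the range covered by the huge page containing @addr,
 * clamped to @end: e.g. with 2MB huge pages, addr 0x201000 yields 0x400000.
 */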
static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long addr,
				       unsigned long end)
{
	unsigned long boundary = (addr & huge_page_mask(h)) + huge_page_size(h);
	return boundary < end ? boundary : end;
}

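/*
 * Iterate over the huge ptes of a hugetlb vma, calling ->hugetlb_entry
 * once per huge page whose page table slot exists.
 */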
static int walk_hugetlb_range(struct vm_area_struct *vma,
			      unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	struct hstate *h = hstate_vma(vma);
	unsigned long next;
	unsigned long hmask = huge_page_mask(h);
	pte_t *pte;
	int err = 0;

	do {
		next = hugetlb_entry_end(h, addr, end);
		pte = huge_pte_offset(walk->mm, addr & hmask);
		if (pte && walk->hugetlb_entry)
			err = walk->hugetlb_entry(pte, hmask, addr, next, walk);
		if (err)
			return err;
	} while (addr = next, addr != end);

	return 0;
}
#endif

/**
 * walk_page_range - walk a memory map's page tables with a callback
 * @addr: starting address
 * @end: ending address
 * @walk: set of callbacks to invoke for each level of the tree; the
 *        memory map to walk is given by @walk->mm
 *
 * Recursively walk the page table for the memory area in a VMA,
 * calling supplied callbacks. Callbacks are called in-order (first
 * PGD, first PUD, first PMD, first PTE, second PTE... second PMD,
 * etc.). If lower-level callbacks are omitted, walking depth is reduced.
 *
 * Each callback receives an entry pointer, the start and end of the
 * associated range, and a copy of the original mm_walk for access to
 * the ->private or ->mm fields.
 *
 * No locks are taken, but the bottom level iterator will map PTE
 * directories from highmem if necessary.
 *
 * If any callback returns a non-zero value, the walk is aborted and
 * the return value is propagated back to the caller. Otherwise 0 is
 * returned.  (An illustrative usage sketch follows the function body.)
 */
int walk_page_range(unsigned long addr, unsigned long end,
		    struct mm_walk *walk)
{
	pgd_t *pgd;
	unsigned long next;
	int err = 0;

	if (addr >= end)
		return err;

	if (!walk->mm)
		return -EINVAL;

	pgd = pgd_offset(walk->mm, addr);
	do {
		struct vm_area_struct *uninitialized_var(vma);

		next = pgd_addr_end(addr, end);

#ifdef CONFIG_HUGETLB_PAGE
		/*
		 * Handle hugetlb vmas individually: the page table walk
		 * for hugetlb pages is architecture-dependent, so we
		 * can't handle them in the same manner as non-huge pages.
		 */
		vma = find_vma(walk->mm, addr);
		if (vma && is_vm_hugetlb_page(vma)) {
			if (vma->vm_end < next)
				next = vma->vm_end;
			/*
			 * Huge pages are tightly coupled to the vma,
			 * so walk the hugetlb entries within this vma.
			 */
			err = walk_hugetlb_range(vma, addr, next, walk);
			if (err)
				break;
			pgd = pgd_offset(walk->mm, next);
			continue;
		}
#endif
		if (pgd_none_or_clear_bad(pgd)) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			pgd++;
			continue;
		}
		if (walk->pgd_entry)
			err = walk->pgd_entry(pgd, addr, next, walk);
		if (!err &&
		    (walk->pud_entry || walk->pmd_entry || walk->pte_entry))
			err = walk_pud_range(pgd, addr, next, walk);
		if (err)
			break;
		pgd++;
	} while (addr = next, addr != end);

	return err;
}
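
/*
 * Illustrative usage sketch (not part of the original file): one way a
 * caller might count present ptes in a range using a ->pte_entry
 * callback.  The names pte_count, count_present_ptes() and count_range()
 * are hypothetical; only walk_page_range(), struct mm_walk and the
 * locking convention (the walk takes no locks, so callers are expected
 * to serialize against the mm themselves, e.g. via mmap_sem) come from
 * the API above.  Kept under #if 0 so it is never compiled.
 */
#if 0
struct pte_count {
	unsigned long present;
};

/* ->pte_entry callback: invoked once per page-sized slot in the range. */
static int count_present_ptes(pte_t *pte, unsigned long addr,
			      unsigned long end, struct mm_walk *walk)
{
	struct pte_count *pc = walk->private;

	if (pte_present(*pte))
		pc->present++;
	return 0;	/* a non-zero return would abort the walk */
}

static unsigned long count_range(struct mm_struct *mm,
				 unsigned long start, unsigned long end)
{
	struct pte_count pc = { .present = 0 };
	struct mm_walk count_walk = {
		.pte_entry	= count_present_ptes,
		.mm		= mm,
		.private	= &pc,
	};

	down_read(&mm->mmap_sem);
	walk_page_range(start, end, &count_walk);
	up_read(&mm->mmap_sem);

	return pc.present;
}
#endif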