#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>

static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        pte_t *pte;
        int err = 0;

        pte = pte_offset_map(pmd, addr);
        for (;;) {
                err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
                if (err)
                        break;
                addr += PAGE_SIZE;
                if (addr == end)
                        break;
                pte++;
        }

        pte_unmap(pte);
        return err;
}

static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        pmd_t *pmd;
        unsigned long next;
        int err = 0;

        pmd = pmd_offset(pud, addr);
        do {
again:
                next = pmd_addr_end(addr, end);
                if (pmd_none(*pmd)) {
                        if (walk->pte_hole)
                                err = walk->pte_hole(addr, next, walk);
                        if (err)
                                break;
                        continue;
                }
                /*
                 * This implies that each ->pmd_entry() handler
                 * needs to know about pmd_trans_huge() pmds.
                 */
                if (walk->pmd_entry)
                        err = walk->pmd_entry(pmd, addr, next, walk);
                if (err)
                        break;

                /*
                 * Check this here so we only break down trans_huge
                 * pages when we _need_ to.
                 */
                if (!walk->pte_entry)
                        continue;

                split_huge_page_pmd(walk->mm, pmd);
                if (pmd_none_or_trans_huge_or_clear_bad(pmd))
                        goto again;
                err = walk_pte_range(pmd, addr, next, walk);
                if (err)
                        break;
        } while (pmd++, addr = next, addr != end);

        return err;
}
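
/*
 * Because splitting only happens after ->pmd_entry() returns (and only
 * when ->pte_entry is also set), a ->pmd_entry() handler can observe
 * pmd_trans_huge() pmds and must be prepared for them.  A minimal,
 * hypothetical handler that counts transparent huge pmds through the
 * ->private field might look like:
 *
 *	static int count_thp_pmd(pmd_t *pmd, unsigned long addr,
 *				 unsigned long end, struct mm_walk *walk)
 *	{
 *		unsigned long *thp_count = walk->private;
 *
 *		if (pmd_trans_huge(*pmd))
 *			(*thp_count)++;
 *		return 0;
 *	}
 */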

static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        pud_t *pud;
        unsigned long next;
        int err = 0;

        pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
                if (pud_none_or_clear_bad(pud)) {
                        if (walk->pte_hole)
                                err = walk->pte_hole(addr, next, walk);
                        if (err)
                                break;
                        continue;
                }
                if (walk->pud_entry)
                        err = walk->pud_entry(pud, addr, next, walk);
                if (!err && (walk->pmd_entry || walk->pte_entry))
                        err = walk_pmd_range(pud, addr, next, walk);
                if (err)
                        break;
        } while (pud++, addr = next, addr != end);

        return err;
}

#ifdef CONFIG_HUGETLB_PAGE
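/*
 * Return the end of the huge page that contains @addr, or @end if that
 * comes first.  Lets walk_hugetlb_range() step through a range one huge
 * page at a time.
 */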
static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long addr,
                                       unsigned long end)
{
        unsigned long boundary = (addr & huge_page_mask(h)) + huge_page_size(h);
        return boundary < end ? boundary : end;
}

static int walk_hugetlb_range(struct vm_area_struct *vma,
                              unsigned long addr, unsigned long end,
                              struct mm_walk *walk)
{
        struct hstate *h = hstate_vma(vma);
        unsigned long next;
        unsigned long hmask = huge_page_mask(h);
        pte_t *pte;
        int err = 0;

        do {
                next = hugetlb_entry_end(h, addr, end);
                pte = huge_pte_offset(walk->mm, addr & hmask);
                if (pte && walk->hugetlb_entry)
                        err = walk->hugetlb_entry(pte, hmask, addr, next, walk);
                if (err)
                        return err;
        } while (addr = next, addr != end);

        return 0;
}

#else /* CONFIG_HUGETLB_PAGE */
static int walk_hugetlb_range(struct vm_area_struct *vma,
                              unsigned long addr, unsigned long end,
                              struct mm_walk *walk)
{
        return 0;
}

#endif /* CONFIG_HUGETLB_PAGE */

/**
 * walk_page_range - walk a memory map's page tables with a callback
 * @addr: starting address
 * @end: ending address
 * @walk: set of callbacks to invoke for each level of the tree
 *
 * Recursively walk the page table for the memory area in a VMA,
 * calling supplied callbacks. Callbacks are called in-order (first
 * PGD, first PUD, first PMD, first PTE, second PTE... second PMD,
 * etc.). If lower-level callbacks are omitted, walking depth is reduced.
 *
 * Each callback receives an entry pointer, the start and end of the
 * associated range, and a copy of the original mm_walk for access to
 * the ->private or ->mm fields.
 *
 * Usually no locks are taken, but splitting a transparent huge page may
 * take the page table lock, and the bottom-level iterator will map PTE
 * directories from highmem if necessary.
 *
 * If any callback returns a non-zero value, the walk is aborted and
 * the return value is propagated back to the caller. Otherwise 0 is returned.
 *
 * walk->mm->mmap_sem must be held for at least read if walk->hugetlb_entry
 * is !NULL.
 */
int walk_page_range(unsigned long addr, unsigned long end,
                    struct mm_walk *walk)
{
        pgd_t *pgd;
        unsigned long next;
        int err = 0;

        if (addr >= end)
                return err;

        if (!walk->mm)
                return -EINVAL;

        VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));

        pgd = pgd_offset(walk->mm, addr);
        do {
                struct vm_area_struct *vma = NULL;

                next = pgd_addr_end(addr, end);

                /*
                 * This function was not intended to be vma-based,
                 * but there are vma special cases to handle:
                 * - hugetlb vmas
                 * - VM_PFNMAP vmas
                 */
                vma = find_vma(walk->mm, addr);
                if (vma) {
                        /*
                         * There are no page structures backing a VM_PFNMAP
                         * range, so do not allow split_huge_page_pmd().
                         */
                        if ((vma->vm_start <= addr) &&
                            (vma->vm_flags & VM_PFNMAP)) {
                                next = vma->vm_end;
                                pgd = pgd_offset(walk->mm, next);
                                continue;
                        }
                        /*
                         * Handle a hugetlb vma individually because the
                         * pagetable walk for a hugetlb page depends on the
                         * architecture and we can't handle it in the same
                         * manner as non-huge pages.
                         */
                        if (walk->hugetlb_entry && (vma->vm_start <= addr) &&
                            is_vm_hugetlb_page(vma)) {
                                if (vma->vm_end < next)
                                        next = vma->vm_end;
                                /*
                                 * A hugepage is very tightly coupled with
                                 * its vma, so walk through hugetlb entries
                                 * within a given vma.
                                 */
                                err = walk_hugetlb_range(vma, addr, next, walk);
                                if (err)
                                        break;
                                pgd = pgd_offset(walk->mm, next);
                                continue;
                        }
                }

                if (pgd_none_or_clear_bad(pgd)) {
                        if (walk->pte_hole)
                                err = walk->pte_hole(addr, next, walk);
                        if (err)
                                break;
                        pgd++;
                        continue;
                }
                if (walk->pgd_entry)
                        err = walk->pgd_entry(pgd, addr, next, walk);
                if (!err &&
                    (walk->pud_entry || walk->pmd_entry || walk->pte_entry))
                        err = walk_pud_range(pgd, addr, next, walk);
                if (err)
                        break;
                pgd++;
        } while (addr = next, addr != end);

        return err;
}
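
/*
 * A minimal, hypothetical caller (sketch only; the function and callback
 * names are illustrative, the mm_walk fields used are the ones consumed
 * above): count the present pages in a range with just a ->pte_entry
 * callback, passing the result back through ->private.  Per the kernel-doc
 * above, mmap_sem is held for read around the walk.
 *
 *	static int count_present_pte(pte_t *pte, unsigned long addr,
 *				     unsigned long end, struct mm_walk *walk)
 *	{
 *		unsigned long *count = walk->private;
 *
 *		if (pte_present(*pte))
 *			(*count)++;
 *		return 0;
 *	}
 *
 *	static unsigned long count_present(struct mm_struct *mm,
 *					   unsigned long start,
 *					   unsigned long end)
 *	{
 *		unsigned long count = 0;
 *		struct mm_walk walk = {
 *			.pte_entry	= count_present_pte,
 *			.mm		= mm,
 *			.private	= &count,
 *		};
 *
 *		down_read(&mm->mmap_sem);
 *		walk_page_range(start, end, &walk);
 *		up_read(&mm->mmap_sem);
 *		return count;
 *	}
 */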