#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>

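/*
 * Walk the PTEs covering [addr, end) and call ->pte_entry for each one.
 * The caller guarantees the range lies within a single page table, so a
 * plain pte++ scan is sufficient.
 */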
static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pte_t *pte;
	int err = 0;

	pte = pte_offset_map(pmd, addr);
	for (;;) {
		err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
		if (err)
			break;
		addr += PAGE_SIZE;
		if (addr == end)
			break;
		pte++;
	}

	pte_unmap(pte);
	return err;
}

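/*
 * Walk the PMDs covering [addr, end).  Transparent huge page pmds are
 * passed to ->pmd_entry as-is; they are only split when a ->pte_entry
 * callback actually needs to see the individual PTEs.
 */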
static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pmd_t *pmd;
	unsigned long next;
	int err = 0;

	pmd = pmd_offset(pud, addr);
	do {
again:
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd)) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			continue;
		}
		/*
		 * This implies that each ->pmd_entry() handler
		 * needs to know about pmd_trans_huge() pmds
		 */
		if (walk->pmd_entry)
			err = walk->pmd_entry(pmd, addr, next, walk);
		if (err)
			break;

		/*
		 * Check this here so we only break down trans_huge
		 * pages when we _need_ to
		 */
		if (!walk->pte_entry)
			continue;

		split_huge_page_pmd_mm(walk->mm, addr, pmd);
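		/*
		 * The split may not have left a regular page table behind
		 * (the pmd can be none or still huge if it changed under
		 * us), so re-evaluate this entry before descending into it.
		 */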
		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
			goto again;
		err = walk_pte_range(pmd, addr, next, walk);
		if (err)
			break;
	} while (pmd++, addr = next, addr != end);

	return err;
}

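/*
 * Walk the PUDs covering [addr, end), reporting holes via ->pte_hole and
 * descending into walk_pmd_range() when a lower-level callback is set.
 */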
static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pud_t *pud;
	unsigned long next;
	int err = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud)) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			continue;
		}
		if (walk->pmd_entry || walk->pte_entry)
			err = walk_pmd_range(pud, addr, next, walk);
		if (err)
			break;
	} while (pud++, addr = next, addr != end);

	return err;
}

#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long addr,
				       unsigned long end)
{
	unsigned long boundary = (addr & huge_page_mask(h)) + huge_page_size(h);
	return boundary < end ? boundary : end;
}

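/*
 * Walk a hugetlb vma one huge page at a time, calling ->hugetlb_entry
 * for each huge pte found by huge_pte_offset().
 */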
static int walk_hugetlb_range(struct vm_area_struct *vma,
			      unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	struct hstate *h = hstate_vma(vma);
	unsigned long next;
	unsigned long hmask = huge_page_mask(h);
	pte_t *pte;
	int err = 0;

	do {
		next = hugetlb_entry_end(h, addr, end);
		pte = huge_pte_offset(walk->mm, addr & hmask);
		if (pte && walk->hugetlb_entry)
			err = walk->hugetlb_entry(pte, hmask, addr, next, walk);
		if (err)
			return err;
	} while (addr = next, addr != end);

	return 0;
}

#else /* CONFIG_HUGETLB_PAGE */
static int walk_hugetlb_range(struct vm_area_struct *vma,
			      unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	return 0;
}

#endif /* CONFIG_HUGETLB_PAGE */

/**
 * walk_page_range - walk a memory map's page tables with a callback
 * @addr: starting address
 * @end: ending address
 * @walk: set of callbacks to invoke for each level of the tree
 *
 * Recursively walk the page table for the memory area in a VMA,
 * calling supplied callbacks. Callbacks are called in-order (first
 * PGD, first PUD, first PMD, first PTE, second PTE... second PMD,
 * etc.). If lower-level callbacks are omitted, walking depth is reduced.
 *
 * Each callback receives an entry pointer and the start and end of the
 * associated range, and a copy of the original mm_walk for access to
 * the ->private or ->mm fields.
 *
 * Usually no locks are taken, but splitting a transparent huge page may
 * take the page table lock. The bottom-level iterator will map PTE
 * directories from highmem if necessary.
 *
 * If any callback returns a non-zero value, the walk is aborted and
 * the return value is propagated back to the caller. Otherwise 0 is returned.
 *
 * walk->mm->mmap_sem must be held for at least read if walk->hugetlb_entry
 * is !NULL.
 */
int walk_page_range(unsigned long addr, unsigned long end,
		    struct mm_walk *walk)
{
	pgd_t *pgd;
	unsigned long next;
	int err = 0;

	if (addr >= end)
		return err;

	if (!walk->mm)
		return -EINVAL;

	VM_BUG_ON_MM(!rwsem_is_locked(&walk->mm->mmap_sem), walk->mm);

	pgd = pgd_offset(walk->mm, addr);
	do {
		struct vm_area_struct *vma = NULL;

		next = pgd_addr_end(addr, end);

		/*
		 * This function was not intended to be vma based.
		 * But there are vma special cases to be handled:
		 * - hugetlb vmas
		 * - VM_PFNMAP vmas
		 */
		vma = find_vma(walk->mm, addr);
		if (vma) {
			/*
			 * There are no page structures backing a VM_PFNMAP
			 * range, so do not allow split_huge_page_pmd().
			 */
			if ((vma->vm_start <= addr) &&
			    (vma->vm_flags & VM_PFNMAP)) {
				if (walk->pte_hole)
					err = walk->pte_hole(addr, next, walk);
				if (err)
					break;
				pgd = pgd_offset(walk->mm, next);
				continue;
			}
			/*
			 * Handle hugetlb vmas individually because the page
			 * table walk for a hugetlb page depends on the
			 * architecture and we can't handle it in the same
			 * manner as non-huge pages.
			 */
			if (walk->hugetlb_entry && (vma->vm_start <= addr) &&
			    is_vm_hugetlb_page(vma)) {
				if (vma->vm_end < next)
					next = vma->vm_end;
				/*
				 * Hugepages are tightly coupled with the vma,
				 * so walk through hugetlb entries within the
				 * given vma.
				 */
				err = walk_hugetlb_range(vma, addr, next, walk);
				if (err)
					break;
				pgd = pgd_offset(walk->mm, next);
				continue;
			}
		}

		if (pgd_none_or_clear_bad(pgd)) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			pgd++;
			continue;
		}
		if (walk->pmd_entry || walk->pte_entry)
			err = walk_pud_range(pgd, addr, next, walk);
		if (err)
			break;
		pgd++;
	} while (addr = next, addr < end);

	return err;
}
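
/*
 * Illustrative sketch (not part of this file, compiled out below): a
 * minimal caller of walk_page_range(), assuming the mm_walk callback
 * signatures used by the walkers above and the ->mm/->private fields
 * mentioned in the kernel-doc.  The callback and helper names
 * (count_pte, count_present_ptes) are made up for this example.
 */
#if 0
static int count_pte(pte_t *pte, unsigned long addr, unsigned long next,
		     struct mm_walk *walk)
{
	unsigned long *count = walk->private;

	if (pte_present(*pte))
		(*count)++;
	return 0;	/* a non-zero return would abort the walk */
}

static unsigned long count_present_ptes(struct mm_struct *mm,
					unsigned long start, unsigned long end)
{
	unsigned long count = 0;
	struct mm_walk count_walk = {
		.pte_entry	= count_pte,
		.mm		= mm,
		.private	= &count,
	};

	down_read(&mm->mmap_sem);	/* walk_page_range() asserts this */
	walk_page_range(start, end, &count_walk);
	up_read(&mm->mmap_sem);

	return count;
}
#endif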