#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>

static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pte_t *pte;
	int err = 0;

	pte = pte_offset_map(pmd, addr);
	for (;;) {
		err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
		if (err)
			break;
		addr += PAGE_SIZE;
		if (addr == end)
			break;
		pte++;
	}

	pte_unmap(pte);
	return err;
}

static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pmd_t *pmd;
	unsigned long next;
	int err = 0;

	pmd = pmd_offset(pud, addr);
	do {
again:
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd) || !walk->vma) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			continue;
		}
		/*
		 * This implies that each ->pmd_entry() handler
		 * needs to know about pmd_trans_huge() pmds
		 */
		if (walk->pmd_entry)
			err = walk->pmd_entry(pmd, addr, next, walk);
		if (err)
			break;

		/*
		 * Check this here so we only break down trans_huge
		 * pages when we _need_ to
		 */
		if (!walk->pte_entry)
			continue;

		split_huge_pmd(walk->vma, pmd, addr);
		if (pmd_trans_unstable(pmd))
			goto again;
		err = walk_pte_range(pmd, addr, next, walk);
		if (err)
			break;
	} while (pmd++, addr = next, addr != end);

	return err;
}
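
/*
 * Editorial sketch (not part of the original file): a minimal ->pmd_entry()
 * handler. As the comment in walk_pmd_range() notes, the handler runs before
 * any split, so it must recognize pmd_trans_huge() pmds itself; here it does
 * so under pmd_trans_huge_lock(). The function name and the counting done
 * via walk->private are hypothetical.
 */
static int __maybe_unused example_pmd_entry(pmd_t *pmd, unsigned long addr,
					    unsigned long next,
					    struct mm_walk *walk)
{
	unsigned long *nr_huge = walk->private;
	spinlock_t *ptl;

	ptl = pmd_trans_huge_lock(pmd, walk->vma);
	if (ptl) {
		/* one trans_huge pmd maps the whole [addr, next) range */
		(*nr_huge)++;
		spin_unlock(ptl);
	}
	/* 0: keep walking; a ->pte_entry() callback still sees split pmds */
	return 0;
}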

static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pud_t *pud;
	unsigned long next;
	int err = 0;

	pud = pud_offset(p4d, addr);
	do {
 again:
		next = pud_addr_end(addr, end);
		if (pud_none(*pud) || !walk->vma) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			continue;
		}

		if (walk->pud_entry) {
			spinlock_t *ptl = pud_trans_huge_lock(pud, walk->vma);

			if (ptl) {
				err = walk->pud_entry(pud, addr, next, walk);
				spin_unlock(ptl);
				if (err)
					break;
				continue;
			}
		}

		split_huge_pud(walk->vma, pud, addr);
		if (pud_none(*pud))
			goto again;

		if (walk->pmd_entry || walk->pte_entry)
			err = walk_pmd_range(pud, addr, next, walk);
		if (err)
			break;
	} while (pud++, addr = next, addr != end);

	return err;
}

static int walk_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	p4d_t *p4d;
	unsigned long next;
	int err = 0;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(p4d)) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			continue;
		}
		if (walk->pmd_entry || walk->pte_entry)
			err = walk_pud_range(p4d, addr, next, walk);
		if (err)
			break;
	} while (p4d++, addr = next, addr != end);

	return err;
}

static int walk_pgd_range(unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pgd_t *pgd;
	unsigned long next;
	int err = 0;

	pgd = pgd_offset(walk->mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd)) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			continue;
		}
		if (walk->pmd_entry || walk->pte_entry)
			err = walk_p4d_range(pgd, addr, next, walk);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);

	return err;
}

#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long addr,
				       unsigned long end)
{
	unsigned long boundary = (addr & huge_page_mask(h)) + huge_page_size(h);
	return boundary < end ? boundary : end;
}

static int walk_hugetlb_range(unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct hstate *h = hstate_vma(vma);
	unsigned long next;
	unsigned long hmask = huge_page_mask(h);
	pte_t *pte;
	int err = 0;

	do {
		next = hugetlb_entry_end(h, addr, end);
		pte = huge_pte_offset(walk->mm, addr & hmask);
		if (pte && walk->hugetlb_entry)
			err = walk->hugetlb_entry(pte, hmask, addr, next, walk);
		if (err)
			break;
	} while (addr = next, addr != end);

	return err;
}

#else /* CONFIG_HUGETLB_PAGE */
static int walk_hugetlb_range(unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	return 0;
}

#endif /* CONFIG_HUGETLB_PAGE */
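
/*
 * Editorial sketch (not part of the original file): a minimal
 * ->hugetlb_entry() callback. @hmask is huge_page_mask() for the vma's
 * hstate, so a single entry covers the whole [addr, next) range. The
 * function name and the accounting via walk->private are hypothetical.
 */
static int __maybe_unused example_hugetlb_entry(pte_t *pte, unsigned long hmask,
						unsigned long addr,
						unsigned long next,
						struct mm_walk *walk)
{
	unsigned long *nr_present = walk->private;
	pte_t entry = huge_ptep_get(pte);

	if (pte_present(entry))
		(*nr_present)++;
	return 0;	/* continue the walk */
}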

/*
 * Decide whether we really walk over the current vma on [@start, @end)
 * or skip it via the returned value. Return 0 if we do walk over the
 * current vma, and return 1 if we skip the vma. Negative values mean an
 * error, in which case we abort the current walk.
 */
static int walk_page_test(unsigned long start, unsigned long end,
			  struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;

	if (walk->test_walk)
		return walk->test_walk(start, end, walk);

	/*
	 * A VM_PFNMAP vma has no valid struct pages behind its range, so we
	 * don't walk over it as we do for normal vmas. However, some callers
	 * are interested in handling hole ranges and don't want to just
	 * ignore any single address range. Such callers define their own
	 * ->pte_hole() callbacks, so let's delegate VM_PFNMAP vmas to them.
	 */
	if (vma->vm_flags & VM_PFNMAP) {
		int err = 1;
		if (walk->pte_hole)
			err = walk->pte_hole(start, end, walk);
		return err ? err : 1;
	}
	return 0;
}
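
/*
 * Editorial sketch (not part of the original file): a ->test_walk()
 * callback that restricts the walk to mlock()ed vmas. Returning 1 skips
 * the vma without aborting the walk; the VM_LOCKED filter is purely
 * illustrative.
 */
static int __maybe_unused example_test_walk(unsigned long start,
					    unsigned long end,
					    struct mm_walk *walk)
{
	if (walk->vma->vm_flags & VM_LOCKED)
		return 0;	/* walk this vma */
	return 1;		/* skip it, but keep walking */
}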

static int __walk_page_range(unsigned long start, unsigned long end,
			     struct mm_walk *walk)
{
	int err = 0;
	struct vm_area_struct *vma = walk->vma;

	if (vma && is_vm_hugetlb_page(vma)) {
		if (walk->hugetlb_entry)
			err = walk_hugetlb_range(start, end, walk);
	} else
		err = walk_pgd_range(start, end, walk);

	return err;
}

/**
 * walk_page_range - walk page table with caller specific callbacks
 *
 * Recursively walk the page table tree of the process represented by @walk->mm
 * within the virtual address range [@start, @end). During the walk, we can do
 * caller-specific work for each entry by setting up pmd_entry(), pte_entry(),
 * and/or hugetlb_entry(). If you don't set up one of these callbacks, the
 * associated entries/pages are just ignored.
 * The return values of these callbacks are commonly defined as below:
 *  - 0  : the current entry was handled successfully; if the end address has
 *         not been reached yet, continue to walk.
 *  - >0 : the current entry was handled successfully; return to the caller
 *         with a caller-specific value.
 *  - <0 : handling the current entry failed; return to the caller with an
 *         error code.
 *
 * Before starting to walk page tables, some callers want to check whether
 * they really want to walk over the current vma, typically by checking
 * its vm_flags. walk_page_test() and @walk->test_walk() are used for this
 * purpose.
 *
 * struct mm_walk keeps the current values of some common data like vma and
 * pmd, which are useful for access from the callbacks. If you want to pass
 * some caller-specific data to the callbacks, @walk->private should be
 * helpful.
 *
 * Locking:
 *   Callers of walk_page_range() and walk_page_vma() should hold
 *   @walk->mm->mmap_sem, because these functions traverse the vma list
 *   and/or access vma fields.
 */
int walk_page_range(unsigned long start, unsigned long end,
		    struct mm_walk *walk)
{
	int err = 0;
	unsigned long next;
	struct vm_area_struct *vma;

	if (start >= end)
		return -EINVAL;

	if (!walk->mm)
		return -EINVAL;

	VM_BUG_ON_MM(!rwsem_is_locked(&walk->mm->mmap_sem), walk->mm);

	vma = find_vma(walk->mm, start);
	do {
		if (!vma) { /* after the last vma */
			walk->vma = NULL;
			next = end;
		} else if (start < vma->vm_start) { /* outside vma */
			walk->vma = NULL;
			next = min(end, vma->vm_start);
		} else { /* inside vma */
			walk->vma = vma;
			next = min(end, vma->vm_end);
			vma = vma->vm_next;

			err = walk_page_test(start, next, walk);
			if (err > 0) {
				/*
				 * positive return values are purely for
				 * controlling the pagewalk, so should never
				 * be passed to the callers.
				 */
				err = 0;
				continue;
			}
			if (err < 0)
				break;
		}
		if (walk->vma || walk->pte_hole)
			err = __walk_page_range(start, next, walk);
		if (err)
			break;
	} while (start = next, start < end);
	return err;
}
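
/*
 * Editorial sketch (not part of the original file): typical usage. A caller
 * fills in struct mm_walk with the callbacks it needs, takes mmap_sem, and
 * calls walk_page_range(). Both function names and the present-pte
 * accounting below are hypothetical.
 */
static int __maybe_unused count_pte_entry(pte_t *pte, unsigned long addr,
					  unsigned long next,
					  struct mm_walk *walk)
{
	unsigned long *count = walk->private;

	if (pte_present(*pte))
		(*count)++;
	return 0;
}

static unsigned long __maybe_unused count_present_ptes(struct mm_struct *mm,
						       unsigned long start,
						       unsigned long end)
{
	unsigned long count = 0;
	struct mm_walk count_walk = {
		.pte_entry	= count_pte_entry,
		.mm		= mm,
		.private	= &count,
	};

	down_read(&mm->mmap_sem);
	walk_page_range(start, end, &count_walk);
	up_read(&mm->mmap_sem);
	return count;
}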

int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk)
{
	int err;

	if (!walk->mm)
		return -EINVAL;

	VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));
	VM_BUG_ON(!vma);
	walk->vma = vma;
	err = walk_page_test(vma->vm_start, vma->vm_end, walk);
	if (err > 0)
		return 0;
	if (err < 0)
		return err;
	return __walk_page_range(vma->vm_start, vma->vm_end, walk);
}
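
/*
 * Editorial sketch (not part of the original file): the same hypothetical
 * callback applied to a single vma via walk_page_vma(). Unlike the sketch
 * above, the caller here is assumed to already hold mmap_sem.
 */
static unsigned long __maybe_unused count_present_ptes_vma(struct vm_area_struct *vma)
{
	unsigned long count = 0;
	struct mm_walk count_walk = {
		.pte_entry	= count_pte_entry,
		.mm		= vma->vm_mm,
		.private	= &count,
	};

	walk_page_vma(vma, &count_walk);
	return count;
}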