#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>

static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pte_t *pte;
	int err = 0;

	pte = pte_offset_map(pmd, addr);
	for (;;) {
		err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
		if (err)
			break;
		addr += PAGE_SIZE;
		if (addr == end)
			break;
		pte++;
	}

	pte_unmap(pte);
	return err;
}

static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pmd_t *pmd;
	unsigned long next;
	int err = 0;

	pmd = pmd_offset(pud, addr);
	do {
again:
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd)) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			continue;
		}
		/*
		 * This implies that each ->pmd_entry() handler
		 * needs to know about pmd_trans_huge() pmds
		 */
		if (walk->pmd_entry)
			err = walk->pmd_entry(pmd, addr, next, walk);
		if (err)
			break;

		/*
		 * Check this here so we only break down trans_huge
		 * pages when we _need_ to
		 */
		if (!walk->pte_entry)
			continue;

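		/*
		 * Split the trans-huge pmd before descending to ptes.
		 * Once the split returns, the pmd may have been cleared
		 * or remapped huge again by a racing thread (or the
		 * split may have found nothing to do), so re-check it
		 * and retry this entry from the top unless it now
		 * points to a normal pte table.
		 */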
		split_huge_page_pmd_mm(walk->mm, addr, pmd);
		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
			goto again;
		err = walk_pte_range(pmd, addr, next, walk);
		if (err)
			break;
	} while (pmd++, addr = next, addr != end);

	return err;
}
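
/*
 * Illustrative sketch, not part of the original file: per the comment in
 * walk_pmd_range(), a ->pmd_entry() handler runs before any huge-pmd split
 * and so must recognize pmd_trans_huge() pmds itself.  All names below are
 * hypothetical, and a real handler would also take the page table lock
 * before inspecting a huge pmd:
 *
 *	static int example_pmd_entry(pmd_t *pmd, unsigned long addr,
 *				     unsigned long next, struct mm_walk *walk)
 *	{
 *		unsigned long *nr_huge = walk->private;
 *
 *		// One huge pmd maps the whole of [addr, next).
 *		if (pmd_trans_huge(*pmd))
 *			(*nr_huge)++;
 *
 *		// A non-zero return would abort the whole walk.
 *		return 0;
 *	}
 *
 * Because this handler returns 0 without installing a ->pte_entry
 * callback, the walker never needs to split the huge page.
 */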

static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pud_t *pud;
	unsigned long next;
	int err = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud)) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			continue;
		}
		if (walk->pud_entry)
			err = walk->pud_entry(pud, addr, next, walk);
		if (!err && (walk->pmd_entry || walk->pte_entry))
			err = walk_pmd_range(pud, addr, next, walk);
		if (err)
			break;
	} while (pud++, addr = next, addr != end);

	return err;
}

#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long addr,
				       unsigned long end)
{
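	/*
	 * Round addr down to the start of its huge page and step one
	 * huge page forward.  For example, with 2MB huge pages, an
	 * addr of 0x00a01000 gives a boundary of 0x00c00000, which is
	 * then clamped to 'end'.
	 */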
	unsigned long boundary = (addr & huge_page_mask(h)) + huge_page_size(h);

	return boundary < end ? boundary : end;
}

static int walk_hugetlb_range(struct vm_area_struct *vma,
			      unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	struct hstate *h = hstate_vma(vma);
	unsigned long next;
	unsigned long hmask = huge_page_mask(h);
	pte_t *pte;
	int err = 0;

	do {
		next = hugetlb_entry_end(h, addr, end);
		pte = huge_pte_offset(walk->mm, addr & hmask);
		if (pte && walk->hugetlb_entry)
			err = walk->hugetlb_entry(pte, hmask, addr, next, walk);
		if (err)
			return err;
	} while (addr = next, addr != end);

	return 0;
}

#else /* CONFIG_HUGETLB_PAGE */
static int walk_hugetlb_range(struct vm_area_struct *vma,
			      unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	return 0;
}

#endif /* CONFIG_HUGETLB_PAGE */

/**
 * walk_page_range - walk a memory map's page tables with a callback
 * @addr: starting address
 * @end: ending address
 * @walk: set of callbacks to invoke for each level of the tree
 *
 * Recursively walk the page table for the memory area in a VMA,
 * calling supplied callbacks. Callbacks are called in-order (first
 * PGD, first PUD, first PMD, first PTE, second PTE... second PMD,
 * etc.). If lower-level callbacks are omitted, walking depth is reduced.
 *
 * Each callback receives an entry pointer, the start and end of the
 * associated range, and a pointer to the original mm_walk for access
 * to the ->private or ->mm fields.
 *
 * Usually no locks are taken, but splitting a transparent huge page
 * may take the page table lock, and the bottom-level iterator will map
 * PTE directories from highmem if necessary.
 *
 * If any callback returns a non-zero value, the walk is aborted and
 * the return value is propagated back to the caller. Otherwise 0 is returned.
 *
 * walk->mm->mmap_sem must be held for at least read if walk->hugetlb_entry
 * is !NULL.
 */
int walk_page_range(unsigned long addr, unsigned long end,
		    struct mm_walk *walk)
{
	pgd_t *pgd;
	unsigned long next;
	int err = 0;

	if (addr >= end)
		return err;

	if (!walk->mm)
		return -EINVAL;

	VM_BUG_ON_MM(!rwsem_is_locked(&walk->mm->mmap_sem), walk->mm);

	pgd = pgd_offset(walk->mm, addr);
	do {
		struct vm_area_struct *vma = NULL;

		next = pgd_addr_end(addr, end);

		/*
		 * This function was not intended to be vma-based, but
		 * there are vma special cases that must be handled:
		 * - hugetlb vmas
		 * - VM_PFNMAP vmas
		 */
		vma = find_vma(walk->mm, addr);
		if (vma) {
			/*
			 * There are no page structures backing a VM_PFNMAP
			 * range, so do not allow split_huge_page_pmd().
			 */
			if ((vma->vm_start <= addr) &&
			    (vma->vm_flags & VM_PFNMAP)) {
				next = vma->vm_end;
				pgd = pgd_offset(walk->mm, next);
				continue;
			}
			/*
			 * Handle hugetlb vmas individually because the
			 * page table walk for a hugetlb page depends on
			 * the architecture and cannot be handled in the
			 * same manner as non-huge pages.
			 */
			if (walk->hugetlb_entry && (vma->vm_start <= addr) &&
			    is_vm_hugetlb_page(vma)) {
				if (vma->vm_end < next)
					next = vma->vm_end;
				/*
				 * Hugepages are tightly coupled with the
				 * vma, so walk through the hugetlb entries
				 * within this vma here.
				 */
				err = walk_hugetlb_range(vma, addr, next, walk);
				if (err)
					break;
				pgd = pgd_offset(walk->mm, next);
				continue;
			}
		}

		if (pgd_none_or_clear_bad(pgd)) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			pgd++;
			continue;
		}
		if (walk->pgd_entry)
			err = walk->pgd_entry(pgd, addr, next, walk);
		if (!err &&
		    (walk->pud_entry || walk->pmd_entry || walk->pte_entry))
			err = walk_pud_range(pgd, addr, next, walk);
		if (err)
			break;
		pgd++;
	} while (addr = next, addr < end);

	return err;
}
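
/*
 * Illustrative sketch, not part of the original file: a minimal caller of
 * walk_page_range().  All names below are hypothetical.  It counts the
 * present ptes in [start, end), relying on the walker to map each pte
 * page (from highmem if needed) and to descend only because a ->pte_entry
 * callback is installed.  mmap_sem is held for read around the call, per
 * the locking comment above:
 *
 *	static int count_pte_entry(pte_t *pte, unsigned long addr,
 *				   unsigned long end, struct mm_walk *walk)
 *	{
 *		unsigned long *present = walk->private;
 *
 *		if (pte_present(*pte))
 *			(*present)++;
 *		return 0;
 *	}
 *
 *	static unsigned long count_present(struct mm_struct *mm,
 *					   unsigned long start,
 *					   unsigned long end)
 *	{
 *		unsigned long present = 0;
 *		struct mm_walk count_walk = {
 *			.pte_entry	= count_pte_entry,
 *			.mm		= mm,
 *			.private	= &present,
 *		};
 *
 *		down_read(&mm->mmap_sem);
 *		walk_page_range(start, end, &count_walk);
 *		up_read(&mm->mmap_sem);
 *		return present;
 *	}
 */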