#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"

static inline bool check_pmd(struct page_vma_mapped_walk *pvmw)
{
	pmd_t pmde;
	/*
	 * Make sure we don't re-load the pmd between the present and
	 * !trans_huge checks: we need a consistent view.
	 */
	pmde = READ_ONCE(*pvmw->pmd);
	return pmd_present(pmde) && !pmd_trans_huge(pmde);
}

static inline bool not_found(struct page_vma_mapped_walk *pvmw)
{
	page_vma_mapped_walk_done(pvmw);
	return false;
}

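/*
 * map_pte() maps the PTE covering pvmw->address and, unless PVMW_SYNC
 * asks for a synchronous walk, returns false early when the PTE cannot
 * possibly match: no swap entry when looking for a migration entry, or
 * a non-present PTE otherwise. On success the PTE lock is taken.
 */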
static bool map_pte(struct page_vma_mapped_walk *pvmw)
{
	pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
	if (!(pvmw->flags & PVMW_SYNC)) {
		if (pvmw->flags & PVMW_MIGRATION) {
			if (!is_swap_pte(*pvmw->pte))
				return false;
		} else {
			if (!pte_present(*pvmw->pte))
				return false;
		}
	}
	pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
	spin_lock(pvmw->ptl);
	return true;
}

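/*
 * check_pte() reports whether the PTE at pvmw->pte maps a subpage of
 * pvmw->page: either a migration entry pointing into the page (with
 * PVMW_MIGRATION) or a present PTE pointing into it. The two-sided
 * comparison accepts any subpage of a compound page.
 */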
static bool check_pte(struct page_vma_mapped_walk *pvmw)
{
	if (pvmw->flags & PVMW_MIGRATION) {
#ifdef CONFIG_MIGRATION
		swp_entry_t entry;
		if (!is_swap_pte(*pvmw->pte))
			return false;
		entry = pte_to_swp_entry(*pvmw->pte);
		if (!is_migration_entry(entry))
			return false;
		if (migration_entry_to_page(entry) - pvmw->page >=
				hpage_nr_pages(pvmw->page)) {
			return false;
		}
		if (migration_entry_to_page(entry) < pvmw->page)
			return false;
#else
		WARN_ON_ONCE(1);
#endif
	} else {
		if (!pte_present(*pvmw->pte))
			return false;

		/* THP can be referenced by any subpage */
		if (pte_page(*pvmw->pte) - pvmw->page >=
				hpage_nr_pages(pvmw->page)) {
			return false;
		}
		if (pte_page(*pvmw->pte) < pvmw->page)
			return false;
	}

	return true;
}

/**
 * page_vma_mapped_walk - check if @pvmw->page is mapped in @pvmw->vma at
 * @pvmw->address
 * @pvmw: pointer to struct page_vma_mapped_walk. page, vma, address and flags
 * must be set. pmd, pte and ptl must be NULL.
 *
 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte
 * point to the relevant page table entries. @pvmw->ptl is locked.
 * @pvmw->address is adjusted if needed (for PTE-mapped THPs).
 *
 * If @pvmw->pmd is set but @pvmw->pte is not, you have found a PMD-mapped
 * page (usually a THP). For a PTE-mapped THP, you should run
 * page_vma_mapped_walk() in a loop to find all PTEs that map the THP.
 *
 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
 * regardless of which page table level the page is mapped at. @pvmw->pmd is
 * NULL.
 *
 * Returns false if there are no more page table entries for the page in
 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
 *
 * If you need to stop the walk before page_vma_mapped_walk() returns false,
 * use page_vma_mapped_walk_done(). It will do the housekeeping.
 */
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{
	struct mm_struct *mm = pvmw->vma->vm_mm;
	struct page *page = pvmw->page;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	/* The only possible pmd mapping has been handled on last iteration */
	if (pvmw->pmd && !pvmw->pte)
		return not_found(pvmw);

	if (pvmw->pte)
		goto next_pte;

	if (unlikely(PageHuge(pvmw->page))) {
		/* when pud is not present, pte will be NULL */
		pvmw->pte = huge_pte_offset(mm, pvmw->address,
					    PAGE_SIZE << compound_order(page));
		if (!pvmw->pte)
			return false;

		pvmw->ptl = huge_pte_lockptr(page_hstate(page), mm, pvmw->pte);
		spin_lock(pvmw->ptl);
		if (!check_pte(pvmw))
			return not_found(pvmw);
		return true;
	}
restart:
	pgd = pgd_offset(mm, pvmw->address);
	if (!pgd_present(*pgd))
		return false;
	p4d = p4d_offset(pgd, pvmw->address);
	if (!p4d_present(*p4d))
		return false;
	pud = pud_offset(p4d, pvmw->address);
	if (!pud_present(*pud))
		return false;
	pvmw->pmd = pmd_offset(pud, pvmw->address);
	if (pmd_trans_huge(*pvmw->pmd) || is_pmd_migration_entry(*pvmw->pmd)) {
		pvmw->ptl = pmd_lock(mm, pvmw->pmd);
		if (likely(pmd_trans_huge(*pvmw->pmd))) {
			if (pvmw->flags & PVMW_MIGRATION)
				return not_found(pvmw);
			if (pmd_page(*pvmw->pmd) != page)
				return not_found(pvmw);
			return true;
		} else if (!pmd_present(*pvmw->pmd)) {
			if (thp_migration_supported()) {
				if (!(pvmw->flags & PVMW_MIGRATION))
					return not_found(pvmw);
				if (is_migration_entry(pmd_to_swp_entry(*pvmw->pmd))) {
					swp_entry_t entry = pmd_to_swp_entry(*pvmw->pmd);

					if (migration_entry_to_page(entry) != page)
						return not_found(pvmw);
					return true;
				}
			} else
				WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");
			return not_found(pvmw);
		} else {
			/* THP pmd was split under us: handle on pte level */
			spin_unlock(pvmw->ptl);
			pvmw->ptl = NULL;
		}
	} else {
		if (!check_pmd(pvmw))
			return false;
	}
	if (!map_pte(pvmw))
		goto next_pte;
	while (1) {
		if (check_pte(pvmw))
			return true;
next_pte:
		/* Seek to next pte only makes sense for THP */
		if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
			return not_found(pvmw);
		do {
			pvmw->address += PAGE_SIZE;
			if (pvmw->address >= pvmw->vma->vm_end ||
			    pvmw->address >=
					__vma_address(pvmw->page, pvmw->vma) +
					hpage_nr_pages(pvmw->page) * PAGE_SIZE)
				return not_found(pvmw);
			/* Did we cross page table boundary? */
			if (pvmw->address % PMD_SIZE == 0) {
				pte_unmap(pvmw->pte);
				if (pvmw->ptl) {
					spin_unlock(pvmw->ptl);
					pvmw->ptl = NULL;
				}
				goto restart;
			} else {
				pvmw->pte++;
			}
		} while (pte_none(*pvmw->pte));

		if (!pvmw->ptl) {
			pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
			spin_lock(pvmw->ptl);
		}
	}
}
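
/*
 * Usage sketch (illustrative only, not part of this file): the kernel-doc
 * above says PTE-mapped THPs require calling page_vma_mapped_walk() in a
 * loop. A minimal caller, with hypothetical names (example_page_mapped_once
 * is not a kernel symbol), could look like this:
 */
static bool __maybe_unused example_page_mapped_once(struct page *page,
						    struct vm_area_struct *vma,
						    unsigned long address)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
		.flags = 0,
	};
	bool found = false;

	while (page_vma_mapped_walk(&pvmw)) {
		/*
		 * Either pvmw.pte is valid (PTE-level mapping) or pvmw.pmd
		 * alone is (PMD-mapped THP); pvmw.ptl is held either way.
		 * A real caller would inspect or modify the entry here.
		 */
		found = true;
	}
	/* The final false return has already unlocked and unmapped. */
	return found;
}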

/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA. Only
 * valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.flags = PVMW_SYNC,
	};
	unsigned long start, end;

	start = __vma_address(page, vma);
	end = start + PAGE_SIZE * (hpage_nr_pages(page) - 1);

	if (unlikely(end < vma->vm_start || start >= vma->vm_end))
		return 0;
	pvmw.address = max(start, vma->vm_start);
	if (!page_vma_mapped_walk(&pvmw))
		return 0;
	page_vma_mapped_walk_done(&pvmw);
	return 1;
}
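
/*
 * Usage sketch (illustrative only): page_mapped_in_vma() is the one-shot,
 * synchronous (PVMW_SYNC) form of the walk above; memory-failure style
 * code uses such a check to skip VMAs that merely cover the page's address
 * range without actually mapping it. The caller below is hypothetical.
 */
static bool __maybe_unused example_vma_really_maps(struct page *page,
						   struct vm_area_struct *vma)
{
	return page_mapped_in_vma(page, vma) == 1;
}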