// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"

static inline bool not_found(struct page_vma_mapped_walk *pvmw)
{
	page_vma_mapped_walk_done(pvmw);
	return false;
}

static bool map_pte(struct page_vma_mapped_walk *pvmw)
{
	pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
	if (!(pvmw->flags & PVMW_SYNC)) {
		if (pvmw->flags & PVMW_MIGRATION) {
			if (!is_swap_pte(*pvmw->pte))
				return false;
		} else {
			if (!pte_present(*pvmw->pte))
				return false;
		}
	}
	pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
	spin_lock(pvmw->ptl);
	return true;
}

/**
 * check_pte - check if @pvmw->page is mapped at the @pvmw->pte
 *
 * page_vma_mapped_walk() found a place where @pvmw->page is *potentially*
 * mapped. check_pte() has to validate this.
 *
 * @pvmw->pte may point to an empty PTE, a swap PTE or a PTE pointing to an
 * arbitrary page.
 *
 * If the PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains a
 * migration entry that points to @pvmw->page, or to any of its subpages in
 * case of THP.
 *
 * If the PVMW_MIGRATION flag is not set, returns true if @pvmw->pte points to
 * @pvmw->page, or to any of its subpages in case of THP.
 *
 * Otherwise, returns false.
 */
static bool check_pte(struct page_vma_mapped_walk *pvmw)
{
	unsigned long pfn;

	if (pvmw->flags & PVMW_MIGRATION) {
		swp_entry_t entry;
		if (!is_swap_pte(*pvmw->pte))
			return false;
		entry = pte_to_swp_entry(*pvmw->pte);

		if (!is_migration_entry(entry))
			return false;

		pfn = migration_entry_to_pfn(entry);
	} else if (is_swap_pte(*pvmw->pte)) {
		swp_entry_t entry;

		/* Handle un-addressable ZONE_DEVICE memory */
		entry = pte_to_swp_entry(*pvmw->pte);
		if (!is_device_private_entry(entry))
			return false;

		pfn = device_private_entry_to_pfn(entry);
	} else {
		if (!pte_present(*pvmw->pte))
			return false;

		pfn = pte_pfn(*pvmw->pte);
	}

	if (pfn < page_to_pfn(pvmw->page))
		return false;

	/* THP can be referenced by any subpage */
	if (pfn - page_to_pfn(pvmw->page) >= hpage_nr_pages(pvmw->page))
		return false;

	return true;
}
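
/*
 * Illustrative note, not from the original source: assuming a 2 MB THP with
 * 4 KiB base pages (hpage_nr_pages() == 512), if the THP's head page sits at
 * pfn P, the two range checks above accept exactly the pfns P .. P + 511.
 * Any PTE, migration entry or device-private entry resolving to a pfn in
 * that window therefore makes check_pte() return true.
 */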

/**
 * page_vma_mapped_walk - check if @pvmw->page is mapped in @pvmw->vma at
 * @pvmw->address
 * @pvmw: pointer to struct page_vma_mapped_walk. page, vma, address and flags
 * must be set. pmd, pte and ptl must be NULL.
 *
 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte
 * point to relevant page table entries. @pvmw->ptl is locked. @pvmw->address
 * is adjusted if needed (for PTE-mapped THPs).
 *
 * If @pvmw->pmd is set but @pvmw->pte is not, you have found a PMD-mapped
 * page (usually a THP). For a PTE-mapped THP, you should run
 * page_vma_mapped_walk() in a loop to find all PTEs that map the THP; a
 * typical caller loop is sketched in the comment after this function.
 *
 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
 * regardless of which page table level the page is mapped at. @pvmw->pmd is
 * NULL.
 *
 * Returns false if there are no more page table entries for the page in
 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
 *
 * If you need to stop the walk before page_vma_mapped_walk() returned false,
 * use page_vma_mapped_walk_done(). It will do the housekeeping.
 */
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{
	struct mm_struct *mm = pvmw->vma->vm_mm;
	struct page *page = pvmw->page;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t pmde;

	/* The only possible pmd mapping has been handled on last iteration */
	if (pvmw->pmd && !pvmw->pte)
		return not_found(pvmw);

	if (pvmw->pte)
		goto next_pte;

	if (unlikely(PageHuge(pvmw->page))) {
		/* when pud is not present, pte will be NULL */
		pvmw->pte = huge_pte_offset(mm, pvmw->address,
					    PAGE_SIZE << compound_order(page));
		if (!pvmw->pte)
			return false;

		pvmw->ptl = huge_pte_lockptr(page_hstate(page), mm, pvmw->pte);
		spin_lock(pvmw->ptl);
		if (!check_pte(pvmw))
			return not_found(pvmw);
		return true;
	}
restart:
	pgd = pgd_offset(mm, pvmw->address);
	if (!pgd_present(*pgd))
		return false;
	p4d = p4d_offset(pgd, pvmw->address);
	if (!p4d_present(*p4d))
		return false;
	pud = pud_offset(p4d, pvmw->address);
	if (!pud_present(*pud))
		return false;
	pvmw->pmd = pmd_offset(pud, pvmw->address);
	/*
	 * Make sure the pmd value isn't cached in a register by the
	 * compiler and used as a stale value after we've observed a
	 * subsequent update.
	 */
	pmde = READ_ONCE(*pvmw->pmd);
	if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
		pvmw->ptl = pmd_lock(mm, pvmw->pmd);
		if (likely(pmd_trans_huge(*pvmw->pmd))) {
			if (pvmw->flags & PVMW_MIGRATION)
				return not_found(pvmw);
			if (pmd_page(*pvmw->pmd) != page)
				return not_found(pvmw);
			return true;
		} else if (!pmd_present(*pvmw->pmd)) {
			if (thp_migration_supported()) {
				if (!(pvmw->flags & PVMW_MIGRATION))
					return not_found(pvmw);
				if (is_migration_entry(pmd_to_swp_entry(*pvmw->pmd))) {
					swp_entry_t entry = pmd_to_swp_entry(*pvmw->pmd);

					if (migration_entry_to_page(entry) != page)
						return not_found(pvmw);
					return true;
				}
			}
			return not_found(pvmw);
		} else {
			/* THP pmd was split under us: handle on pte level */
			spin_unlock(pvmw->ptl);
			pvmw->ptl = NULL;
		}
	} else if (!pmd_present(pmde)) {
		return false;
	}
	if (!map_pte(pvmw))
		goto next_pte;
	while (1) {
		if (check_pte(pvmw))
			return true;
next_pte:
		/* Seek to next pte only makes sense for THP */
		if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
			return not_found(pvmw);
		do {
			pvmw->address += PAGE_SIZE;
			if (pvmw->address >= pvmw->vma->vm_end ||
			    pvmw->address >=
					__vma_address(pvmw->page, pvmw->vma) +
					hpage_nr_pages(pvmw->page) * PAGE_SIZE)
				return not_found(pvmw);
			/* Did we cross page table boundary? */
			if (pvmw->address % PMD_SIZE == 0) {
				pte_unmap(pvmw->pte);
				if (pvmw->ptl) {
					spin_unlock(pvmw->ptl);
					pvmw->ptl = NULL;
				}
				goto restart;
			} else {
				pvmw->pte++;
			}
		} while (pte_none(*pvmw->pte));

		if (!pvmw->ptl) {
			pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
			spin_lock(pvmw->ptl);
		}
	}
}
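
/*
 * Illustrative caller sketch, not code from this file: the kernel-doc above
 * says a PTE-mapped THP should be walked in a loop.  A caller in the style
 * of the rmap walkers might look roughly like this; the "referenced" counter
 * is hypothetical and only for illustration.
 *
 *	struct page_vma_mapped_walk pvmw = {
 *		.page = page,
 *		.vma = vma,
 *		.address = address,
 *	};
 *	int referenced = 0;
 *
 *	while (page_vma_mapped_walk(&pvmw)) {
 *		// pvmw.pte (or pvmw.pmd for a PMD mapping) points to the
 *		// entry and pvmw.ptl is held here; inspect or update it.
 *		referenced++;
 *	}
 *	// Once false is returned the lock is dropped and the pte unmapped;
 *	// call page_vma_mapped_walk_done() only to stop the walk early.
 */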

/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA.  Only
 * valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.flags = PVMW_SYNC,
	};
	unsigned long start, end;

	start = __vma_address(page, vma);
	end = start + PAGE_SIZE * (hpage_nr_pages(page) - 1);

	if (unlikely(end < vma->vm_start || start >= vma->vm_end))
		return 0;
	pvmw.address = max(start, vma->vm_start);
	if (!page_vma_mapped_walk(&pvmw))
		return 0;
	page_vma_mapped_walk_done(&pvmw);
	return 1;
}