[PATCH] mbind: fix verify_pages pte_page
Strict mbind's check that already-mapped pages are on the right node has
been using pte_page without first checking pfn_valid, and without holding
the page_table_lock needed to prevent spurious failures when
try_to_unmap_one intervenes between the pte_present test and the pte_page
lookup.
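
If a pte is unmapped in that window it may already have been turned into
a swap entry, and pte_page then manufactures a struct page out of bits
that are no longer a pfn, so the node check fails spuriously.  The
pattern the patch installs, flattened out of the hunk below with
editorial comments (the elisions stand for the surrounding page-table
walk):

	spin_lock(&mm->page_table_lock);	/* keep try_to_unmap_one out */
	...
	p = NULL;
	pte = pte_offset_map(pmd, addr);
	if (pte_present(*pte)) {
		unsigned long pfn = pte_pfn(*pte);
		if (pfn_valid(pfn))	/* pfn may have no struct page, e.g. a raw device mapping */
			p = pfn_to_page(pfn);
	}
	pte_unmap(pte);
	...
	spin_unlock(&mm->page_table_lock);

Both guards matter: the lock keeps the pte stable from the test to the
lookup, and pfn_valid covers present ptes whose pfn is not backed by a
struct page at all.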
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Cc: Andi Kleen <ak@muc.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 39252c7..c512cc9 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -242,6 +242,9 @@
 verify_pages(struct mm_struct *mm,
 	unsigned long addr, unsigned long end, unsigned long *nodes)
 {
+	int err = 0;
+
+	spin_lock(&mm->page_table_lock);
 	while (addr < end) {
 		struct page *p;
 		pte_t *pte;
@@ -268,17 +271,23 @@
 		}
 		p = NULL;
 		pte = pte_offset_map(pmd, addr);
-		if (pte_present(*pte))
-			p = pte_page(*pte);
+		if (pte_present(*pte)) {
+			unsigned long pfn = pte_pfn(*pte);
+			if (pfn_valid(pfn))
+				p = pfn_to_page(pfn);
+		}
 		pte_unmap(pte);
 		if (p) {
 			unsigned nid = page_to_nid(p);
-			if (!test_bit(nid, nodes))
-				return -EIO;
+			if (!test_bit(nid, nodes)) {
+				err = -EIO;
+				break;
+			}
 		}
 		addr += PAGE_SIZE;
 	}
-	return 0;
+	spin_unlock(&mm->page_table_lock);
+	return err;
 }
 
 /* Step 1: check the range */
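
For context: verify_pages() is reached from sys_mbind() via check_range()
only when userspace asks for MPOL_MF_STRICT, and the -EIO propagates
straight back to the caller.  A minimal userspace exercise of that path,
sketched under the assumption of a NUMA kernel and libnuma's <numaif.h>
wrapper for mbind() (the node number is hypothetical; build with -lnuma):

	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>
	#include <numaif.h>

	int main(void)
	{
		size_t len = 16 * 4096;
		char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (buf == MAP_FAILED) {
			perror("mmap");
			return 1;
		}
		memset(buf, 0, len);	/* fault the pages in, on whatever node */

		unsigned long nodemask = 1UL << 0;	/* allow node 0 only */
		if (mbind(buf, len, MPOL_BIND, &nodemask,
			  8 * sizeof(nodemask), MPOL_MF_STRICT) < 0) {
			/* EIO: some already-mapped page sits outside the mask */
			perror("mbind(MPOL_MF_STRICT)");
			return 1;
		}
		printf("all existing pages already on node 0\n");
		return 0;
	}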