KVM: MMU: Fix SMP shadow instantiation race

There is a race where VCPU0 is shadowing a pagetable entry while VCPU1
is updating it, which results in a stale shadow copy.

Fix that by comparing the contents of the cached guest pte with the
current guest pte after write-protecting the guest pagetable.
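
Roughly, the consumer of the new new_page flag in the shadow fetch path
is expected to look like the sketch below. This is illustrative only:
the paging_tmpl.h side is not part of the hunks that follow, and
pt_element_t, the walker fields (ptes[], pte_gpa[]) and the
kvm_read_guest()-style accessor are assumptions, not code introduced by
this patch.

	/*
	 * Illustrative sketch: "walker" caches the guest ptes read
	 * during the guest page-table walk; pte_gpa[] holds their
	 * guest physical addresses.  Names are assumptions.
	 */
	if (new_page && !metaphysical) {
		pt_element_t curr_pte;
		int r;

		/*
		 * The shadow page was just instantiated, so
		 * kvm_mmu_get_page() has already write-protected the
		 * guest pagetable.  Re-read the guest pte and compare
		 * it with the copy cached during the walk; a mismatch
		 * means another VCPU updated the entry before the
		 * write-protect took effect, so the cached value is
		 * stale and the fault must be retried.
		 */
		r = kvm_read_guest(vcpu->kvm, walker->pte_gpa[level - 2],
				   &curr_pte, sizeof(curr_pte));
		if (r || curr_pte != walker->ptes[level - 2])
			return NULL;	/* retry with fresh guest state */
	}
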
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>

diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index ba71e8d..92ac0d1 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -681,7 +681,8 @@
unsigned level,
int metaphysical,
unsigned access,
- u64 *parent_pte)
+ u64 *parent_pte,
+ bool *new_page)
{
union kvm_mmu_page_role role;
unsigned index;
@@ -720,6 +721,8 @@
vcpu->mmu.prefetch_page(vcpu, sp);
if (!metaphysical)
rmap_write_protect(vcpu->kvm, gfn);
+ if (new_page)
+ *new_page = 1;
return sp;
}
@@ -993,7 +996,8 @@
>> PAGE_SHIFT;
new_table = kvm_mmu_get_page(vcpu, pseudo_gfn,
v, level - 1,
- 1, ACC_ALL, &table[index]);
+ 1, ACC_ALL, &table[index],
+ NULL);
if (!new_table) {
pgprintk("nonpaging_map: ENOMEM\n");
return -ENOMEM;
@@ -1059,7 +1063,7 @@
ASSERT(!VALID_PAGE(root));
sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
- PT64_ROOT_LEVEL, 0, ACC_ALL, NULL);
+ PT64_ROOT_LEVEL, 0, ACC_ALL, NULL, NULL);
root = __pa(sp->spt);
++sp->root_count;
vcpu->mmu.root_hpa = root;
@@ -1080,7 +1084,7 @@
root_gfn = 0;
sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
PT32_ROOT_LEVEL, !is_paging(vcpu),
- ACC_ALL, NULL);
+ ACC_ALL, NULL, NULL);
root = __pa(sp->spt);
++sp->root_count;
vcpu->mmu.pae_root[i] = root | PT_PRESENT_MASK;