KVM: MMU: When updating the dirty bit, inform the mmu about it

Since the mmu uses different shadow pages for dirty large pages and clean
large pages, informing the mmu when the dirty bit is set allows it to drop
ptes that are now invalid.

Signed-off-by: Avi Kivity <avi@qumranet.com>
diff --git a/drivers/kvm/paging_tmpl.h b/drivers/kvm/paging_tmpl.h
index a0f84a5..a9e687b 100644
--- a/drivers/kvm/paging_tmpl.h
+++ b/drivers/kvm/paging_tmpl.h
@@ -74,9 +74,14 @@
 				    pt_element_t *ptep,
 				    gfn_t table_gfn)
 {
+	gpa_t pte_gpa;
+
 	if (write_fault && !is_dirty_pte(*ptep)) {
 		mark_page_dirty(vcpu->kvm, table_gfn);
 		*ptep |= PT_DIRTY_MASK;
+		pte_gpa = ((gpa_t)table_gfn << PAGE_SHIFT);
+		pte_gpa += offset_in_page(ptep);
+		kvm_mmu_pte_write(vcpu, pte_gpa, (u8 *)ptep, sizeof(*ptep));
 	}
 }