KVM: MMU: Avoid calling gfn_to_page() in mmu_set_spte()

Since gfn_to_page() is a function that can sleep, and we want to convert the
core MMU to use a spinlock, the page must be resolved in the walker context
(which can sleep) and passed down to the shadow context (which cannot).

[marcelo: avoid recursive locking of mmap_sem]
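
The resulting calling pattern, condensed from the hunks below into a single
hypothetical helper (a sketch only: map_gfn() does not exist in the tree, and
the walk over the shadow page table levels and the error handling are
omitted):

	static int map_gfn(struct kvm_vcpu *vcpu, u64 *shadow_pte,
			   gfn_t gfn, int write)
	{
		struct page *page;
		int pt_write = 0;

		/* Walker context: may sleep, so resolve the gfn here,
		 * holding mmap_sem for read only around this one call. */
		down_read(&current->mm->mmap_sem);
		page = gfn_to_page(vcpu->kvm, gfn);
		up_read(&current->mm->mmap_sem);

		/* Shadow context: may not sleep; the page reference is
		 * passed in instead of being looked up via gfn_to_page(),
		 * and must be released (kvm_release_page_clean()) on
		 * failure paths. */
		mmu_set_spte(vcpu, shadow_pte, ACC_ALL, ACC_ALL,
			     0, write, 1, &pt_write, gfn, page);
		return pt_write;
	}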

Signed-off-by: Avi Kivity <avi@qumranet.com>
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 3b91227..c0b757b 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -890,11 +890,10 @@
 static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 			 unsigned pt_access, unsigned pte_access,
 			 int user_fault, int write_fault, int dirty,
-			 int *ptwrite, gfn_t gfn)
+			 int *ptwrite, gfn_t gfn, struct page *page)
 {
 	u64 spte;
 	int was_rmapped = is_rmap_pte(*shadow_pte);
-	struct page *page;
 
 	pgprintk("%s: spte %llx access %x write_fault %d"
 		 " user_fault %d gfn %lx\n",
@@ -912,8 +911,6 @@
 	if (!(pte_access & ACC_EXEC_MASK))
 		spte |= PT64_NX_MASK;
 
-	page = gfn_to_page(vcpu->kvm, gfn);
-
 	spte |= PT_PRESENT_MASK;
 	if (pte_access & ACC_USER_MASK)
 		spte |= PT_USER_MASK;
@@ -979,6 +976,11 @@
 	int level = PT32E_ROOT_LEVEL;
 	hpa_t table_addr = vcpu->arch.mmu.root_hpa;
 	int pt_write = 0;
+	struct page *page;
+
+	down_read(&current->mm->mmap_sem);
+	page = gfn_to_page(vcpu->kvm, gfn);
+	up_read(&current->mm->mmap_sem);
 
 	for (; ; level--) {
 		u32 index = PT64_INDEX(v, level);
@@ -989,7 +991,7 @@
 
 		if (level == 1) {
 			mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL,
-				     0, write, 1, &pt_write, gfn);
+				     0, write, 1, &pt_write, gfn, page);
 			return pt_write || is_io_pte(table[index]);
 		}
 
@@ -1005,6 +1007,7 @@
 						     NULL);
 			if (!new_table) {
 				pgprintk("nonpaging_map: ENOMEM\n");
+				kvm_release_page_clean(page);
 				return -ENOMEM;
 			}
 
@@ -1347,6 +1350,43 @@
 	return !!(spte && (*spte & PT_ACCESSED_MASK));
 }
 
+static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
+					  const u8 *new, int bytes)
+{
+	gfn_t gfn;
+	int r;
+	u64 gpte = 0;
+
+	if (bytes != 4 && bytes != 8)
+		return;
+
+	/*
+	 * Assume that the pte write is on a page table of the same type
+	 * as the current vcpu paging mode.  This is nearly always true
+	 * (might be false while changing modes).  Note it is verified later
+	 * by update_pte().
+	 */
+	if (is_pae(vcpu)) {
+		/* Handle a 32-bit guest writing two halves of a 64-bit gpte */
+		if ((bytes == 4) && (gpa % 4 == 0)) {
+			r = kvm_read_guest(vcpu->kvm, gpa & ~(u64)7, &gpte, 8);
+			if (r)
+				return;
+			memcpy((void *)&gpte + (gpa % 8), new, 4);
+		} else if ((bytes == 8) && (gpa % 8 == 0)) {
+			memcpy((void *)&gpte, new, 8);
+		}
+	} else {
+		if ((bytes == 4) && (gpa % 4 == 0))
+			memcpy((void *)&gpte, new, 4);
+	}
+	if (!is_present_pte(gpte))
+		return;
+	gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
+	vcpu->arch.update_pte.gfn = gfn;
+	vcpu->arch.update_pte.page = gfn_to_page(vcpu->kvm, gfn);
+}
+
 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 		       const u8 *new, int bytes)
 {
@@ -1367,6 +1407,7 @@
 	int npte;
 
 	pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
+	mmu_guess_page_from_pte_write(vcpu, gpa, new, bytes);
 	mutex_lock(&vcpu->kvm->lock);
 	++vcpu->kvm->stat.mmu_pte_write;
 	kvm_mmu_audit(vcpu, "pre pte write");
@@ -1437,6 +1478,10 @@
 	}
 	kvm_mmu_audit(vcpu, "post pte write");
 	mutex_unlock(&vcpu->kvm->lock);
+	if (vcpu->arch.update_pte.page) {
+		kvm_release_page_clean(vcpu->arch.update_pte.page);
+		vcpu->arch.update_pte.page = NULL;
+	}
 }
 
 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)