KVM: Use page_private()/set_page_private() APIs

Besides using an established API, this allows using KVM in older kernels.
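
For background, page_private() and set_page_private() are thin accessors for
struct page's private field; on kernels that predate them, a compatibility
header can supply equivalent macros, which is what makes the older-kernel
builds possible. A rough sketch of the mainline include/linux/mm.h
definitions (not taken verbatim from this tree):

	/* accessor macros around struct page's private field */
	#define page_private(page)		((page)->private)
	#define set_page_private(page, v)	((page)->private = (v))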

Signed-off-by: Markus Rechberger <markus.rechberger@amd.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index be79377..a1a9336 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -298,18 +298,18 @@
 	if (!is_rmap_pte(*spte))
 		return;
 	page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
-	if (!page->private) {
+	if (!page_private(page)) {
 		rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
-		page->private = (unsigned long)spte;
-	} else if (!(page->private & 1)) {
+		set_page_private(page, (unsigned long)spte);
+	} else if (!(page_private(page) & 1)) {
 		rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
 		desc = mmu_alloc_rmap_desc(vcpu);
-		desc->shadow_ptes[0] = (u64 *)page->private;
+		desc->shadow_ptes[0] = (u64 *)page_private(page);
 		desc->shadow_ptes[1] = spte;
-		page->private = (unsigned long)desc | 1;
+		set_page_private(page, (unsigned long)desc | 1);
 	} else {
 		rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
-		desc = (struct kvm_rmap_desc *)(page->private & ~1ul);
+		desc = (struct kvm_rmap_desc *)(page_private(page) & ~1ul);
 		while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
 			desc = desc->more;
 		if (desc->shadow_ptes[RMAP_EXT-1]) {
@@ -337,12 +337,12 @@
 	if (j != 0)
 		return;
 	if (!prev_desc && !desc->more)
-		page->private = (unsigned long)desc->shadow_ptes[0];
+		set_page_private(page, (unsigned long)desc->shadow_ptes[0]);
 	else
 		if (prev_desc)
 			prev_desc->more = desc->more;
 		else
-			page->private = (unsigned long)desc->more | 1;
+			set_page_private(page, (unsigned long)desc->more | 1);
 	mmu_free_rmap_desc(vcpu, desc);
 }
 
@@ -356,20 +356,20 @@
 	if (!is_rmap_pte(*spte))
 		return;
 	page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
-	if (!page->private) {
+	if (!page_private(page)) {
 		printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
 		BUG();
-	} else if (!(page->private & 1)) {
+	} else if (!(page_private(page) & 1)) {
 		rmap_printk("rmap_remove:  %p %llx 1->0\n", spte, *spte);
-		if ((u64 *)page->private != spte) {
+		if ((u64 *)page_private(page) != spte) {
 			printk(KERN_ERR "rmap_remove:  %p %llx 1->BUG\n",
 			       spte, *spte);
 			BUG();
 		}
-		page->private = 0;
+		set_page_private(page, 0);
 	} else {
 		rmap_printk("rmap_remove:  %p %llx many->many\n", spte, *spte);
-		desc = (struct kvm_rmap_desc *)(page->private & ~1ul);
+		desc = (struct kvm_rmap_desc *)(page_private(page) & ~1ul);
 		prev_desc = NULL;
 		while (desc) {
 			for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
@@ -398,11 +398,11 @@
 	BUG_ON(!slot);
 	page = gfn_to_page(slot, gfn);
 
-	while (page->private) {
-		if (!(page->private & 1))
-			spte = (u64 *)page->private;
+	while (page_private(page)) {
+		if (!(page_private(page) & 1))
+			spte = (u64 *)page_private(page);
 		else {
-			desc = (struct kvm_rmap_desc *)(page->private & ~1ul);
+			desc = (struct kvm_rmap_desc *)(page_private(page) & ~1ul);
 			spte = desc->shadow_ptes[0];
 		}
 		BUG_ON(!spte);
@@ -1218,7 +1218,7 @@
 		INIT_LIST_HEAD(&page_header->link);
 		if ((page = alloc_page(GFP_KERNEL)) == NULL)
 			goto error_1;
-		page->private = (unsigned long)page_header;
+		set_page_private(page, (unsigned long)page_header);
 		page_header->page_hpa = (hpa_t)page_to_pfn(page) << PAGE_SHIFT;
 		memset(__va(page_header->page_hpa), 0, PAGE_SIZE);
 		list_add(&page_header->link, &vcpu->free_pages);