[PATCH] KVM: Replace __x86_64__ with CONFIG_X86_64

Switch the ifdef checks in the VMX code from the compiler-defined __x86_64__
macro to the kernel configuration symbol CONFIG_X86_64, as per akpm's request.
CONFIG_X86_64 comes from Kconfig and is the usual way kernel code tests for a
64-bit x86 build, keeping these checks consistent with the rest of the kernel.
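
A minimal sketch (not part of the patch, identifiers are illustrative only) of
the difference between the two guards: __x86_64__ is predefined by the compiler
whenever it targets x86-64, while CONFIG_X86_64 is defined by the kernel
configuration and is pulled in for every kernel source file.

	/* Compiler-provided: gcc defines this for any x86-64 target,
	 * independent of the kernel configuration.
	 */
	#ifdef __x86_64__
	static int compiler_says_64bit = 1;
	#endif

	/* Kconfig-provided: defined when the kernel is configured for
	 * 64-bit x86, which is what the rest of the kernel keys off.
	 */
	#ifdef CONFIG_X86_64
	static int kconfig_says_64bit = 1;
	#endif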

Signed-off-by: Avi Kivity <avi@qumranet.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index fa8f729..ad97014 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -34,7 +34,7 @@
 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
 static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
 
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 #define HOST_IS_64 1
 #else
 #define HOST_IS_64 0
@@ -71,7 +71,7 @@
 };
 
 static const u32 vmx_msr_index[] = {
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, MSR_KERNEL_GS_BASE,
 #endif
 	MSR_EFER, MSR_K6_STAR,
@@ -138,7 +138,7 @@
 
 static u64 vmcs_read64(unsigned long field)
 {
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	return vmcs_readl(field);
 #else
 	return vmcs_readl(field) | ((u64)vmcs_readl(field+1) << 32);
@@ -168,7 +168,7 @@
 
 static void vmcs_write64(unsigned long field, u64 value)
 {
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	vmcs_writel(field, value);
 #else
 	vmcs_writel(field, value);
@@ -297,7 +297,7 @@
 
 static void reload_tss(void)
 {
-#ifndef __x86_64__
+#ifndef CONFIG_X86_64
 
 	/*
 	 * VT restores TR but not its size.  Useless.
@@ -328,7 +328,7 @@
 	}
 
 	switch (msr_index) {
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	case MSR_FS_BASE:
 		data = vmcs_readl(GUEST_FS_BASE);
 		break;
@@ -391,7 +391,7 @@
 {
 	struct vmx_msr_entry *msr;
 	switch (msr_index) {
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	case MSR_FS_BASE:
 		vmcs_writel(GUEST_FS_BASE, data);
 		break;
@@ -726,7 +726,7 @@
 	fix_rmode_seg(VCPU_SREG_FS, &vcpu->rmode.fs);
 }
 
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 
 static void enter_lmode(struct kvm_vcpu *vcpu)
 {
@@ -768,7 +768,7 @@
 	if (!vcpu->rmode.active && !(cr0 & CR0_PE_MASK))
 		enter_rmode(vcpu);
 
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	if (vcpu->shadow_efer & EFER_LME) {
 		if (!is_paging(vcpu) && (cr0 & CR0_PG_MASK))
 			enter_lmode(vcpu);
@@ -809,7 +809,7 @@
 	vcpu->cr4 = cr4;
 }
 
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 
 static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
@@ -1096,7 +1096,7 @@
 	vmcs_write16(HOST_FS_SELECTOR, read_fs());    /* 22.2.4 */
 	vmcs_write16(HOST_GS_SELECTOR, read_gs());    /* 22.2.4 */
 	vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	rdmsrl(MSR_FS_BASE, a);
 	vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */
 	rdmsrl(MSR_GS_BASE, a);
@@ -1174,7 +1174,7 @@
 	vcpu->cr0 = 0x60000010;
 	vmx_set_cr0(vcpu, vcpu->cr0); // enter rmode
 	vmx_set_cr4(vcpu, 0);
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	vmx_set_efer(vcpu, 0);
 #endif
 
@@ -1690,7 +1690,7 @@
 		vmcs_write16(HOST_GS_SELECTOR, 0);
 	}
 
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
 	vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
 #else
@@ -1714,7 +1714,7 @@
 	asm (
 		/* Store host registers */
 		"pushf \n\t"
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 		"push %%rax; push %%rbx; push %%rdx;"
 		"push %%rsi; push %%rdi; push %%rbp;"
 		"push %%r8;  push %%r9;  push %%r10; push %%r11;"
@@ -1728,7 +1728,7 @@
 		/* Check if vmlaunch of vmresume is needed */
 		"cmp $0, %1 \n\t"
 		/* Load guest registers.  Don't clobber flags. */
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 		"mov %c[cr2](%3), %%rax \n\t"
 		"mov %%rax, %%cr2 \n\t"
 		"mov %c[rax](%3), %%rax \n\t"
@@ -1765,7 +1765,7 @@
 		".globl kvm_vmx_return \n\t"
 		"kvm_vmx_return: "
 		/* Save guest registers, load host registers, keep flags */
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 		"xchg %3,     0(%%rsp) \n\t"
 		"mov %%rax, %c[rax](%3) \n\t"
 		"mov %%rbx, %c[rbx](%3) \n\t"
@@ -1817,7 +1817,7 @@
 		[rsi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RSI])),
 		[rdi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDI])),
 		[rbp]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBP])),
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 		[r8 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R8 ])),
 		[r9 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R9 ])),
 		[r10]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R10])),
@@ -1838,7 +1838,7 @@
 	fx_save(vcpu->guest_fx_image);
 	fx_restore(vcpu->host_fx_image);
 
-#ifndef __x86_64__
+#ifndef CONFIG_X86_64
 	asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
 #endif
 
@@ -1856,7 +1856,7 @@
 			 */
 			local_irq_disable();
 			load_gs(gs_sel);
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 			wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
 #endif
 			local_irq_enable();
@@ -1966,7 +1966,7 @@
 	.set_cr0_no_modeswitch = vmx_set_cr0_no_modeswitch,
 	.set_cr3 = vmx_set_cr3,
 	.set_cr4 = vmx_set_cr4,
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	.set_efer = vmx_set_efer,
 #endif
 	.get_idt = vmx_get_idt,