[POWERPC] Replace open-coded inline asm eieio with the eieio() inline function

Use the eieio() function instead of direct inline asm so that what eieio
does can be redefined in one place.  This is partly code cleanup and
partly because not all PPCs have eieio (Book-E has mbar, which maps to
eieio).
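
For reference, a minimal sketch of the kind of inline helper this patch
assumes (the real definition lives in the powerpc headers; the Book-E
branch below is illustrative only and is not part of this change):

	static inline void eieio(void)
	{
	#ifdef CONFIG_BOOKE
		/* Book-E parts have no eieio; mbar gives equivalent ordering */
		__asm__ __volatile__ ("mbar" : : : "memory");
	#else
		__asm__ __volatile__ ("eieio" : : : "memory");
	#endif
	}

Centralising the barrier in a helper like this lets callers such as
smp_wmb() and the I/O string routines stay unchanged when the underlying
instruction differs per core.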

Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
diff --git a/arch/powerpc/kernel/io.c b/arch/powerpc/kernel/io.c
index 34ae114..e31aca92 100644
--- a/arch/powerpc/kernel/io.c
+++ b/arch/powerpc/kernel/io.c
@@ -35,7 +35,7 @@
 	asm volatile("sync");
 	do {
 		tmp = *port;
-		asm volatile("eieio");
+		eieio();
 		*tbuf++ = tmp;
 	} while (--count != 0);
 	asm volatile("twi 0,%0,0; isync" : : "r" (tmp));
@@ -66,7 +66,7 @@
 	asm volatile("sync");
 	do {
 		tmp = *port;
-		asm volatile("eieio");
+		eieio();
 		*tbuf++ = tmp;
 	} while (--count != 0);
 	asm volatile("twi 0,%0,0; isync" : : "r" (tmp));
@@ -97,7 +97,7 @@
 	asm volatile("sync");
 	do {
 		tmp = *port;
-		asm volatile("eieio");
+		eieio();
 		*tbuf++ = tmp;
 	} while (--count != 0);
 	asm volatile("twi 0,%0,0; isync" : : "r" (tmp));
@@ -155,21 +155,21 @@
 	__asm__ __volatile__ ("sync" : : : "memory");
 	while(n && (!IO_CHECK_ALIGN(vsrc, 4) || !IO_CHECK_ALIGN(dest, 4))) {
 		*((u8 *)dest) = *((volatile u8 *)vsrc);
-		__asm__ __volatile__ ("eieio" : : : "memory");
+		eieio();
 		vsrc++;
 		dest++;
 		n--;
 	}
 	while(n > 4) {
 		*((u32 *)dest) = *((volatile u32 *)vsrc);
-		__asm__ __volatile__ ("eieio" : : : "memory");
+		eieio();
 		vsrc += 4;
 		dest += 4;
 		n -= 4;
 	}
 	while(n) {
 		*((u8 *)dest) = *((volatile u8 *)vsrc);
-		__asm__ __volatile__ ("eieio" : : : "memory");
+		eieio();
 		vsrc++;
 		dest++;
 		n--;
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 823fa63..6ba9b47 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -163,7 +163,7 @@
 
 	hptep->r = hpte_r;
 	/* Guarantee the second dword is visible before the valid bit */
-	__asm__ __volatile__ ("eieio" : : : "memory");
+	eieio();
 	/*
 	 * Now set the first dword including the valid bit
 	 * NOTE: this also unlocks the hpte
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
index 132c6bc..28492bb 100644
--- a/arch/powerpc/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
@@ -55,7 +55,7 @@
 		for (entry = 0; entry < 8; entry++, ste++) {
 			if (!(ste->esid_data & STE_ESID_V)) {
 				ste->vsid_data = vsid_data;
-				asm volatile("eieio":::"memory");
+				eieio();
 				ste->esid_data = esid_data;
 				return (global_entry | entry);
 			}
@@ -101,7 +101,7 @@
 	asm volatile("sync" : : : "memory");    /* Order update */
 
 	castout_ste->vsid_data = vsid_data;
-	asm volatile("eieio" : : : "memory");   /* Order update */
+	eieio();				/* Order update */
 	castout_ste->esid_data = esid_data;
 
 	asm volatile("slbie  %0" : : "r" (old_esid << SID_SHIFT));
diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h
index 09621f6..eff3de9 100644
--- a/include/asm-powerpc/system.h
+++ b/include/asm-powerpc/system.h
@@ -43,7 +43,7 @@
 #ifdef CONFIG_SMP
 #define smp_mb()	mb()
 #define smp_rmb()	rmb()
-#define smp_wmb()	__asm__ __volatile__ ("eieio" : : : "memory")
+#define smp_wmb()	eieio()
 #define smp_read_barrier_depends()	read_barrier_depends()
 #else
 #define smp_mb()	barrier()