[PATCH] FRV: Use the correct preemption primitives in kmap_atomic() and co

Use inc/dec_preempt_count() rather than preempt_disable/enable(), and manually
add the compiler barriers that the latter previously provided.  This makes FRV
consistent with other archs.
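
For reference, the generic primitives expand to roughly the following in this
era's <linux/preempt.h> (shown here for illustration, not part of the patch):

	#define preempt_disable() \
	do { \
		inc_preempt_count(); \
		barrier(); \
	} while (0)

	#define preempt_enable_no_resched() \
	do { \
		barrier(); \
		dec_preempt_count(); \
	} while (0)

	#define preempt_enable() \
	do { \
		preempt_enable_no_resched(); \
		preempt_check_resched(); \
	} while (0)

Calling inc_preempt_count() directly drops the implicit barrier(), which the
"memory" clobbers added below now supply; it is also why kunmap_atomic() now
ends with dec_preempt_count() followed by an explicit preempt_check_resched().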

Furthermore, the compiler barrier effect is now there unconditionally, at
least as far as preemption is concerned, because we don't want the compiler
moving memory accesses out of the section of code in which the mapping is in
force.  In effect, kmap_atomic() must imply a LOCK-class barrier and
kunmap_atomic() an UNLOCK-class barrier as far as the compiler is concerned.
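
Note that barrier() itself is just an empty asm with a "memory" clobber
(__asm__ __volatile__("" : : : "memory")), so adding "memory" to the real asm
statements gives the same compiler-barrier effect without emitting any extra
instructions.  As a hypothetical caller (for illustration only, not part of
the patch), the clobbers pin accesses through the mapping between the asm
that installs it and the asm that tears it down:

	/* hypothetical caller, for illustration only */
	static void zero_first_word(struct page *page)
	{
		u32 *p = kmap_atomic(page, KM_USER0);

		/* the "memory" clobbers keep this store between the
		 * movgs that installs the mapping and the movgs/tlbpr
		 * that tears it down; without them the compiler could
		 * sink it past kunmap_atomic()
		 */
		*p = 0;

		kunmap_atomic(p, KM_USER0);
	}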

Signed-off-by: David Howells <dhowells@redhat.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

diff --git a/include/asm-frv/highmem.h b/include/asm-frv/highmem.h
index e2247c2..0f390f4 100644
--- a/include/asm-frv/highmem.h
+++ b/include/asm-frv/highmem.h
@@ -82,11 +82,11 @@
 	dampr = paddr | xAMPRx_L | xAMPRx_M | xAMPRx_S | xAMPRx_SS_16Kb | xAMPRx_V;		\
 												\
 	if (type != __KM_CACHE)									\
-		asm volatile("movgs %0,dampr"#ampr :: "r"(dampr));				\
+		asm volatile("movgs %0,dampr"#ampr :: "r"(dampr) : "memory");			\
 	else											\
 		asm volatile("movgs %0,iampr"#ampr"\n"						\
 			     "movgs %0,dampr"#ampr"\n"						\
-			     :: "r"(dampr)							\
+			     :: "r"(dampr) : "memory"						\
 			     );									\
 												\
 	asm("movsg damlr"#ampr",%0" : "=r"(damlr));						\
@@ -104,7 +104,7 @@
 	asm volatile("movgs %0,tplr \n"								  \
 		     "movgs %1,tppr \n"								  \
 		     "tlbpr %0,gr0,#2,#1"							  \
-		     : : "r"(damlr), "r"(dampr));						  \
+		     : : "r"(damlr), "r"(dampr) : "memory");					  \
 												  \
 	/*printk("TLB: SECN sl=%d L=%08lx P=%08lx\n", slot, damlr, dampr);*/			  \
 												  \
@@ -115,7 +115,7 @@
 {
 	unsigned long paddr;
 
-	preempt_disable();
+	inc_preempt_count();
 	paddr = page_to_phys(page);
 
 	switch (type) {
@@ -138,16 +138,16 @@
 	}
 }
 
-#define __kunmap_atomic_primary(type, ampr)			\
-do {								\
-	asm volatile("movgs gr0,dampr"#ampr"\n");		\
-	if (type == __KM_CACHE)					\
-		asm volatile("movgs gr0,iampr"#ampr"\n");	\
+#define __kunmap_atomic_primary(type, ampr)				\
+do {									\
+	asm volatile("movgs gr0,dampr"#ampr"\n" ::: "memory");		\
+	if (type == __KM_CACHE)						\
+		asm volatile("movgs gr0,iampr"#ampr"\n" ::: "memory");	\
 } while(0)
 
-#define __kunmap_atomic_secondary(slot, vaddr)			\
-do {								\
-	asm volatile("tlbpr %0,gr0,#4,#1" : : "r"(vaddr));	\
+#define __kunmap_atomic_secondary(slot, vaddr)				\
+do {									\
+	asm volatile("tlbpr %0,gr0,#4,#1" : : "r"(vaddr) : "memory");	\
 } while(0)
 
 static inline void kunmap_atomic(void *kvaddr, enum km_type type)
@@ -170,7 +170,8 @@
 	default:
 		BUG();
 	}
-	preempt_enable();
+	dec_preempt_count();
+	preempt_check_resched();
 }
 
 #endif /* !__ASSEMBLY__ */