sh: Preparation for uncached jumps through PMB.

Presently most of the 29-bit physical parts do P1/P2 segmentation
with a 1:1 cached/uncached mapping, jumping between the two to
control the caching behaviour. This provides the basic infrastructure
to maintain this behaviour on 32-bit physical parts that don't map
P1/P2 at all, using a shiny new linker section and corresponding
fixmap entry.
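
With this in place, code which pokes at the caches or TLB gets tagged
with __uses_jump_to_uncached so that it lands in the new .uncached.text
section, and brackets the critical part with the renamed macro pair.
A minimal sketch of the intended usage (the function name here is
hypothetical, for illustration only):

	static void __uses_jump_to_uncached example_cache_op(void)
	{
		jump_to_uncached();	/* now running from the uncached alias */

		/* ... safely prod the cache address arrays here ... */

		back_to_cached();	/* resume running through the cache */
	}

The new jump_to_uncached() computes its target with a PC-relative mova
(hence the "=&z" constraint, since mova can only write r0) plus the
cached_to_uncached offset, rather than simply ORing in the old
hardcoded 0x20000000 P2 bit.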

Signed-off-by: Stuart Menefy <stuart.menefy@st.com>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
diff --git a/include/asm-sh/fixmap.h b/include/asm-sh/fixmap.h
index 09463cd..721fcc4 100644
--- a/include/asm-sh/fixmap.h
+++ b/include/asm-sh/fixmap.h
@@ -49,6 +49,7 @@
 #define FIX_N_COLOURS 16
 	FIX_CMAP_BEGIN,
 	FIX_CMAP_END = FIX_CMAP_BEGIN + FIX_N_COLOURS,
+	FIX_UNCACHED,
 #ifdef CONFIG_HIGHMEM
 	FIX_KMAP_BEGIN,	/* reserved pte's for temporary kernel mappings */
 	FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
diff --git a/include/asm-sh/sections.h b/include/asm-sh/sections.h
index bd9cbc9..8f8f4ad 100644
--- a/include/asm-sh/sections.h
+++ b/include/asm-sh/sections.h
@@ -4,6 +4,7 @@
 #include <asm-generic/sections.h>
 
 extern long __machvec_start, __machvec_end;
+extern char __uncached_start, __uncached_end;
 extern char _ebss[];
 
 #endif /* __ASM_SH_SECTIONS_H */
diff --git a/include/asm-sh/system.h b/include/asm-sh/system.h
index 969f3d4..9bda8d0 100644
--- a/include/asm-sh/system.h
+++ b/include/asm-sh/system.h
@@ -144,6 +144,8 @@
 #define instruction_size(insn)	(4)
 #endif
 
+extern unsigned long cached_to_uncached;
+
 /* XXX
  * disable hlt during certain critical i/o operations
  */
diff --git a/include/asm-sh/system_32.h b/include/asm-sh/system_32.h
index ad37e8d..e918bac 100644
--- a/include/asm-sh/system_32.h
+++ b/include/asm-sh/system_32.h
@@ -58,29 +58,31 @@
 	last = __last;						\
 } while (0)
 
+#define __uses_jump_to_uncached __attribute__ ((__section__ (".uncached.text")))
+
 /*
- * Jump to P2 area.
- * When handling TLB or caches, we need to do it from P2 area.
+ * Jump to uncached area.
+ * When handling TLB or caches, we need to do it from an uncached area.
  */
-#define jump_to_P2()			\
-do {					\
-	unsigned long __dummy;		\
-	__asm__ __volatile__(		\
-		"mov.l	1f, %0\n\t"	\
-		"or	%1, %0\n\t"	\
-		"jmp	@%0\n\t"	\
-		" nop\n\t"		\
-		".balign 4\n"		\
-		"1:	.long 2f\n"	\
-		"2:"			\
-		: "=&r" (__dummy)	\
-		: "r" (0x20000000));	\
+#define jump_to_uncached()			\
+do {						\
+	unsigned long __dummy;			\
+						\
+	__asm__ __volatile__(			\
+		"mova	1f, %0\n\t"		\
+		"add	%1, %0\n\t"		\
+		"jmp	@%0\n\t"		\
+		" nop\n\t"			\
+		".balign 4\n"			\
+		"1:"				\
+		: "=&z" (__dummy)		\
+		: "r" (cached_to_uncached));	\
 } while (0)
 
 /*
- * Back to P1 area.
+ * Back to cached area.
  */
-#define back_to_P1()					\
+#define back_to_cached()				\
 do {							\
 	unsigned long __dummy;				\
 	ctrl_barrier();					\
diff --git a/include/asm-sh/system_64.h b/include/asm-sh/system_64.h
index 0e466e9..943acf5 100644
--- a/include/asm-sh/system_64.h
+++ b/include/asm-sh/system_64.h
@@ -32,8 +32,9 @@
 			      &next->thread);			\
 } while (0)
 
-/* No segmentation.. */
-#define jump_to_P2()	do { } while (0)
-#define back_to_P1()	do { } while (0)
+#define __uses_jump_to_uncached
+
+#define jump_to_uncached()	do { } while (0)
+#define back_to_cached()	do { } while (0)
 
 #endif /* __ASM_SH_SYSTEM_64_H */
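
Not shown in this excerpt is the initialization of cached_to_uncached
itself. A sketch of how this might be wired up during paging init
(the placement and the CONFIG_29BIT guard are illustrative assumptions;
the fixmap slot, section symbol and segment constants are the existing
ones):

	/* Map the uncached section through the new fixmap entry. */
	set_fixmap_nocache(FIX_UNCACHED, __pa(&__uncached_start));

#ifdef CONFIG_29BIT
	/*
	 * With the 1:1 P1/P2 mapping the uncached alias sits at a
	 * fixed displacement, matching the old hardcoded 0x20000000.
	 */
	cached_to_uncached = P2SEG - P1SEG;
#endif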