sh: Prevent 64-bit pgprot clobbering across ioremap implementations.

Presently 'flags' gets passed around a lot between the various ioremap
helpers and implementations, and it is only 32 bits wide. In the X2TLB
case we use 64-bit pgprots, so the upper 32 bits get chopped off (which
handily include our read/write/exec permissions).
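
For illustration, roughly where the old interface went wrong.
'broken_example' is a made-up helper calling the pre-patch
__ioremap_caller() prototype; it is a sketch only:

	static void __iomem *broken_example(unsigned long offset,
					    unsigned long size)
	{
		pgprot_t prot = PAGE_KERNEL_NOCACHE;

		/*
		 * With X2TLB, pgprot_val() is 64 bits wide and the extended
		 * protection bits live above bit 31; assigning it to a
		 * 32-bit 'unsigned long' silently drops them.
		 */
		unsigned long flags = pgprot_val(prot);

		/* old prototype: read/write/exec bits are already gone here */
		return __ioremap_caller(offset, size, flags,
					__builtin_return_address(0));
	}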

As such, we convert everything internally to use pgprot_t directly and
only fold it down with pgprot_val() at the points where a raw value is
actually needed. With this in place, transparent fixmap utilization for
early ioremap works as expected.
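
After this change the calling convention looks roughly as below.
'ioremap_example' is purely illustrative (the segment checks done by the
real __ioremap_29bit() are omitted for brevity):

	static inline void __iomem *
	ioremap_example(unsigned long offset, unsigned long size)
	{
		pgprot_t prot = PAGE_KERNEL;	/* cached kernel mapping */

		/*
		 * Individual bit tests fold the (possibly 64-bit) value
		 * down explicitly, as __ioremap_29bit() now does ...
		 */
		if (unlikely(pgprot_val(prot) & _PAGE_CACHABLE))
			return (void __iomem *)P1SEGADDR(offset);

		/* ... while the full pgprot reaches the backend intact. */
		return __ioremap_caller(offset, size, prot,
					__builtin_return_address(0));
	}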

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
index 6a0dd8c..13696df 100644
--- a/arch/sh/include/asm/io.h
+++ b/arch/sh/include/asm/io.h
@@ -235,7 +235,7 @@
  */
 #ifdef CONFIG_MMU
 void __iomem *__ioremap_caller(unsigned long offset, unsigned long size,
-			       unsigned long flags, void *caller);
+			       pgprot_t prot, void *caller);
 void __iounmap(void __iomem *addr);
 
 #ifdef CONFIG_IOREMAP_FIXED
@@ -254,13 +254,13 @@
 #endif
 
 static inline void __iomem *
-__ioremap(unsigned long offset, unsigned long size, unsigned long flags)
+__ioremap(unsigned long offset, unsigned long size, pgprot_t prot)
 {
-	return __ioremap_caller(offset, size, flags, __builtin_return_address(0));
+	return __ioremap_caller(offset, size, prot, __builtin_return_address(0));
 }
 
 static inline void __iomem *
-__ioremap_29bit(unsigned long offset, unsigned long size, unsigned long flags)
+__ioremap_29bit(unsigned long offset, unsigned long size, pgprot_t prot)
 {
 #ifdef CONFIG_29BIT
 	unsigned long last_addr = offset + size - 1;
@@ -272,7 +272,7 @@
 	 * mapping must be done by the PMB or by using page tables.
 	 */
 	if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) {
-		if (unlikely(flags & _PAGE_CACHABLE))
+		if (unlikely(pgprot_val(prot) & _PAGE_CACHABLE))
 			return (void __iomem *)P1SEGADDR(offset);
 
 		return (void __iomem *)P2SEGADDR(offset);
@@ -287,7 +287,7 @@
 }
 
 static inline void __iomem *
-__ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags)
+__ioremap_mode(unsigned long offset, unsigned long size, pgprot_t prot)
 {
 	void __iomem *ret;
 
@@ -295,30 +295,39 @@
 	if (ret)
 		return ret;
 
-	ret = __ioremap_29bit(offset, size, flags);
+	ret = __ioremap_29bit(offset, size, prot);
 	if (ret)
 		return ret;
 
-	return __ioremap(offset, size, flags);
+	return __ioremap(offset, size, prot);
 }
 #else
-#define __ioremap(offset, size, flags)		((void __iomem *)(offset))
-#define __ioremap_mode(offset, size, flags)	((void __iomem *)(offset))
+#define __ioremap(offset, size, prot)		((void __iomem *)(offset))
+#define __ioremap_mode(offset, size, prot)	((void __iomem *)(offset))
 #define __iounmap(addr)				do { } while (0)
 #endif /* CONFIG_MMU */
 
-#define ioremap(offset, size)				\
-	__ioremap_mode((offset), (size), 0)
-#define ioremap_nocache(offset, size)			\
-	__ioremap_mode((offset), (size), 0)
-#define ioremap_cache(offset, size)			\
-	__ioremap_mode((offset), (size), _PAGE_CACHABLE)
-#define p3_ioremap(offset, size, flags)			\
-	__ioremap((offset), (size), (flags))
-#define ioremap_prot(offset, size, flags)		\
-	__ioremap_mode((offset), (size), (flags))
-#define iounmap(addr)					\
-	__iounmap((addr))
+static inline void __iomem *
+ioremap(unsigned long offset, unsigned long size)
+{
+	return __ioremap_mode(offset, size, PAGE_KERNEL_NOCACHE);
+}
+
+static inline void __iomem *
+ioremap_cache(unsigned long offset, unsigned long size)
+{
+	return __ioremap_mode(offset, size, PAGE_KERNEL);
+}
+
+static inline void __iomem *
+ioremap_prot(resource_size_t offset, unsigned long size, unsigned long flags)
+{
+	return __ioremap_mode(offset, size, __pgprot(flags));
+}
+
+#define ioremap_nocache	ioremap
+#define p3_ioremap	__ioremap
+#define iounmap		__iounmap
 
 #define maybebadio(port) \
 	printk(KERN_ERR "bad PC-like io %s:%u for port 0x%lx at 0x%08x\n", \