sh: Prevent 64-bit pgprot clobbering across ioremap implementations.

Presently 'flags' gets passed around a lot between the various ioremap
helpers and implementations, and it is only 32 bits wide. In the X2TLB
case we use 64-bit pgprots, so the upper 32 bits end up being chopped
off (which handily include our read/write/exec permissions).
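
As a standalone illustration of the truncation (a hedged sketch, not the
in-kernel call chain; the function name and value below are made up), on
a 32-bit sh build 'unsigned long' is 32 bits wide, so a 64-bit pgprot
loses its upper half the moment it is passed as 'flags':

    #include <stdint.h>
    #include <stdio.h>

    static void take_flags(unsigned long flags)     /* old-style 'flags' parameter */
    {
            printf("callee sees 0x%lx\n", flags);   /* upper half gone on 32-bit */
    }

    int main(void)
    {
            uint64_t prot = 0x0000000700000000ULL;  /* permission bits live up high */

            take_flags(prot);                       /* implicit conversion drops bits 32..63 */
            return 0;
    }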

As such, we convert everything internally to use pgprot_t directly and
simply convert back with pgprot_val() where needed. With this in place,
transparent fixmap utilization for early ioremap works as expected.
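
The shape of the change is visible in the hunk below: the prototype stops
taking a raw 'flags' word, and the value is only unwrapped at the one
place that still needs it (any remaining callers and the matching header
declaration are assumed to be converted the same way):

    /* old: 64-bit X2TLB pgprots were squeezed through a 32-bit 'flags' */
    void __iomem *__ioremap_caller(unsigned long phys_addr, unsigned long size,
                                   unsigned long flags, void *caller);

    /* new: the pgprot_t travels intact */
    void __iomem *__ioremap_caller(unsigned long phys_addr, unsigned long size,
                                   pgprot_t pgprot, void *caller);

    /* raw value extracted only where required */
    mapped = pmb_remap(addr, phys_addr, size, pgprot_val(pgprot));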

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c
index a130b22..85b420d 100644
--- a/arch/sh/mm/ioremap.c
+++ b/arch/sh/mm/ioremap.c
@@ -35,11 +35,10 @@
  */
 void __iomem * __init_refok
 __ioremap_caller(unsigned long phys_addr, unsigned long size,
-		 unsigned long flags, void *caller)
+		 pgprot_t pgprot, void *caller)
 {
 	struct vm_struct *area;
 	unsigned long offset, last_addr, addr, orig_addr;
-	pgprot_t pgprot;
 
 	/* Don't allow wraparound or zero size */
 	last_addr = phys_addr + size - 1;
@@ -69,7 +68,7 @@
 	 * If we can't yet use the regular approach, go the fixmap route.
 	 */
 	if (!mem_init_done)
-		return ioremap_fixed(phys_addr, size, __pgprot(flags));
+		return ioremap_fixed(phys_addr, size, pgprot);
 
 	/*
 	 * Ok, go for it..
@@ -91,8 +90,9 @@
 	 * PMB entries are all pre-faulted.
 	 */
 	if (unlikely(phys_addr >= P1SEG)) {
-		unsigned long mapped = pmb_remap(addr, phys_addr, size, flags);
+		unsigned long mapped;
 
+		mapped = pmb_remap(addr, phys_addr, size, pgprot_val(pgprot));
 		if (likely(mapped)) {
 			addr		+= mapped;
 			phys_addr	+= mapped;
@@ -101,7 +101,6 @@
 	}
 #endif
 
-	pgprot = __pgprot(pgprot_val(PAGE_KERNEL_NOCACHE) | flags);
 	if (likely(size))
 		if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) {
 			vunmap((void *)orig_addr);