sh: Fix up more 64-bit pgprot truncation on SH-X2 TLB.

Both the store queue API and the PMB remapping take unsigned long for
their pgprot flags, which truncates the extended protection bits. In the
case of the PMB this isn't really a problem, since the cache attribute
bits that we care about are all in the lower 32 bits, but we convert to
pgprot_t anyway just to be safe. The store queue remapping, on the other
hand, depends on the extended prot bits for enabling userspace access to
the mappings.
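
As a minimal sketch of the failure mode (illustrative only, not part of
this patch; assumes the SH-X2 definitions in pgtable_32.h, where
_PAGE_EXT() shifts its argument into the upper 32 bits of the 64-bit
PTE value):

	pgprot_t prot = __pgprot(_PAGE_PRESENT |
				 _PAGE_EXT(_PAGE_EXT_USER_READ));

	/*
	 * On sh32, unsigned long is 32 bits wide while pgprot_val()
	 * yields a 64-bit value under X2TLB, so this narrowing
	 * assignment silently drops the extended protection bits.
	 */
	unsigned long flags = pgprot_val(prot);

Passing pgprot_t through the API and only extracting pgprot_val() at
the point of use avoids the narrowing entirely.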

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
diff --git a/arch/sh/include/asm/mmu.h b/arch/sh/include/asm/mmu.h
index 2fcbedb..151bc92 100644
--- a/arch/sh/include/asm/mmu.h
+++ b/arch/sh/include/asm/mmu.h
@@ -33,6 +33,7 @@
 #ifndef __ASSEMBLY__
 #include <linux/errno.h>
 #include <linux/threads.h>
+#include <asm/page.h>
 
 /* Default "unsigned long" context */
 typedef unsigned long mm_context_id_t[NR_CPUS];
@@ -71,13 +72,13 @@
 #ifdef CONFIG_PMB
 /* arch/sh/mm/pmb.c */
 long pmb_remap(unsigned long virt, unsigned long phys,
-	       unsigned long size, unsigned long flags);
+	       unsigned long size, pgprot_t prot);
 void pmb_unmap(unsigned long addr);
 int pmb_init(void);
 bool __in_29bit_mode(void);
 #else
 static inline long pmb_remap(unsigned long virt, unsigned long phys,
-			     unsigned long size, unsigned long flags)
+			     unsigned long size, pgprot_t prot)
 {
 	return -EINVAL;
 }