sh: Clean up places that make 29-bit physical assumptions.

A number of places in the SH code assume that a physical address
always fits in 29 bits, which no longer holds in 32-bit (PMB) mode.
Switch virt_to_phys()/phys_to_virt() over to plain __pa()/__va() and
make those account for the __MEMORY_START offset under CONFIG_32BIT,
introduce PHYS_ADDR_MASK and use it for PTE_PHYS_MASK,
_PAGE_FLAGS_HARDWARE_MASK and ISA_DMA_THRESHOLD, restrict the
PnSEGADDR() macros to 29-bit configurations and add an IS_29BIT()
helper, set MMUCR.SE for ST40 in 32-bit mode, link the 32-bit kernel
at CONFIG_PAGE_OFFSET + CONFIG_ZERO_PAGE_OFFSET, and only OR P2SEG
into the decompressor's output pointer when running in 29-bit mode.

Signed-off-by: Stuart Menefy <stuart.menefy@st.com>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
---
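To illustrate the two translation variants in isolation, here is a
user-space sketch with assumed example values (PAGE_OFFSET 0x80000000
and MEMORY_START 0x08000000, not taken from any particular board
configuration):

  #include <stdio.h>

  /* Assumed example values, not mandated by this patch. */
  #define EX_PAGE_OFFSET   0x80000000UL
  #define EX_MEMORY_START  0x08000000UL

  /* 29-bit: RAM's offset from physical 0 is visible in the virtual address. */
  static unsigned long pa_29bit(unsigned long va) { return va - EX_PAGE_OFFSET; }
  static unsigned long va_29bit(unsigned long pa) { return pa + EX_PAGE_OFFSET; }

  /*
   * 32-bit (PMB): PAGE_OFFSET maps the start of RAM directly, so the
   * offset has to be added or subtracted explicitly.
   */
  static unsigned long pa_32bit(unsigned long va) { return va - EX_PAGE_OFFSET + EX_MEMORY_START; }
  static unsigned long va_32bit(unsigned long pa) { return pa + EX_PAGE_OFFSET - EX_MEMORY_START; }

  int main(void)
  {
          printf("29-bit: RAM starts at virtual %#lx, physical %#lx\n",
                 va_29bit(EX_MEMORY_START), pa_29bit(va_29bit(EX_MEMORY_START)));
          printf("32-bit: RAM starts at virtual %#lx, physical %#lx\n",
                 va_32bit(EX_MEMORY_START), pa_32bit(va_32bit(EX_MEMORY_START)));
          return 0;
  }

In both modes the first byte of RAM sits at physical MEMORY_START;
only the kernel virtual address it appears at differs.
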
diff --git a/arch/sh/boot/compressed/misc_32.c b/arch/sh/boot/compressed/misc_32.c
index df65e30..adcea31 100644
--- a/arch/sh/boot/compressed/misc_32.c
+++ b/arch/sh/boot/compressed/misc_32.c
@@ -230,7 +230,10 @@
 void decompress_kernel(void)
 {
 	output_data = 0;
-	output_ptr = P2SEGADDR((unsigned long)&_text+PAGE_SIZE);
+	output_ptr = PHYSADDR((unsigned long)&_text+PAGE_SIZE);
+#ifdef CONFIG_29BIT
+	output_ptr |= P2SEG;
+#endif
 	free_mem_ptr = (unsigned long)&_end;
 	free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
 
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index 7eb7fdc..f48ce8e 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -82,7 +82,7 @@
 {
 	unsigned long size;
 
-	memory_start = (unsigned long)PAGE_OFFSET+__MEMORY_START;
+	memory_start = (unsigned long)__va(__MEMORY_START);
 	size = memparse(p, &p);
 
 	if (size > __MEMORY_SIZE) {
@@ -254,7 +254,7 @@
 	data_resource.start = virt_to_phys(_etext);
 	data_resource.end = virt_to_phys(_edata)-1;
 
-	memory_start = (unsigned long)PAGE_OFFSET+__MEMORY_START;
+	memory_start = (unsigned long)__va(__MEMORY_START);
 	if (!memory_end)
 		memory_end = memory_start + __MEMORY_SIZE;
 
diff --git a/arch/sh/kernel/vmlinux_32.lds.S b/arch/sh/kernel/vmlinux_32.lds.S
index 50c69c1..d549fac 100644
--- a/arch/sh/kernel/vmlinux_32.lds.S
+++ b/arch/sh/kernel/vmlinux_32.lds.S
@@ -15,7 +15,12 @@
 ENTRY(_start)
 SECTIONS
 {
+#ifdef CONFIG_32BIT
+	. = CONFIG_PAGE_OFFSET + CONFIG_ZERO_PAGE_OFFSET;
+#else
 	. = CONFIG_PAGE_OFFSET + CONFIG_MEMORY_START + CONFIG_ZERO_PAGE_OFFSET;
+#endif
+
 	_text = .;			/* Text and read-only data */
 
 	.empty_zero_page : {
diff --git a/include/asm-sh/addrspace.h b/include/asm-sh/addrspace.h
index e7f2deb..fa544fc 100644
--- a/include/asm-sh/addrspace.h
+++ b/include/asm-sh/addrspace.h
@@ -31,6 +31,7 @@
 /* Returns the physical address of a PnSEG (n=1,2) address   */
 #define PHYSADDR(a)	(((unsigned long)(a)) & 0x1fffffff)
 
+#ifdef CONFIG_29BIT
 /*
  * Map an address to a certain privileged segment
  */
@@ -42,8 +43,11 @@
 	((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | P3SEG))
 #define P4SEGADDR(a)	\
 	((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | P4SEG))
-
+#endif /* 29BIT */
 #endif /* P1SEG */
 
+/* Check if an address can be reached in 29 bits */
+#define IS_29BIT(a)	(((unsigned long)(a)) < 0x20000000)
+
 #endif /* __KERNEL__ */
 #endif /* __ASM_SH_ADDRSPACE_H */
diff --git a/include/asm-sh/cpu-sh4/mmu_context.h b/include/asm-sh/cpu-sh4/mmu_context.h
index fdd56e3..9ea8eb2 100644
--- a/include/asm-sh/cpu-sh4/mmu_context.h
+++ b/include/asm-sh/cpu-sh4/mmu_context.h
@@ -30,6 +30,12 @@
 #define MMUCR_ME		(0)
 #endif
 
+#if defined(CONFIG_32BIT) && defined(CONFIG_CPU_SUBTYPE_ST40)
+#define MMUCR_SE		(1 << 4)
+#else
+#define MMUCR_SE		(0)
+#endif
+
 #ifdef CONFIG_SH_STORE_QUEUES
 #define MMUCR_SQMD		(1 << 9)
 #else
@@ -37,7 +43,7 @@
 #endif
 
 #define MMU_NTLB_ENTRIES	64
-#define MMU_CONTROL_INIT	(0x05|MMUCR_SQMD|MMUCR_ME)
+#define MMU_CONTROL_INIT	(0x05|MMUCR_SQMD|MMUCR_ME|MMUCR_SE)
 
 #define MMU_ITLB_DATA_ARRAY	0xF3000000
 #define MMU_UTLB_DATA_ARRAY	0xF7000000
diff --git a/include/asm-sh/io.h b/include/asm-sh/io.h
index a4e5f55..94900c0 100644
--- a/include/asm-sh/io.h
+++ b/include/asm-sh/io.h
@@ -273,23 +273,9 @@
 #if !defined(CONFIG_MMU)
 #define virt_to_phys(address)	((unsigned long)(address))
 #define phys_to_virt(address)	((void *)(address))
-#elif defined(CONFIG_SUPERH64)
+#else
 #define virt_to_phys(address)	(__pa(address))
 #define phys_to_virt(address)	(__va(address))
-#else
-/*
- * Change virtual addresses to physical addresses and vv.
- * These are trivial on the 1:1 Linux/SuperH mapping
- */
-static inline unsigned long virt_to_phys(volatile void *address)
-{
-	return PHYSADDR(address);
-}
-
-static inline void *phys_to_virt(unsigned long address)
-{
-	return (void *)P1SEGADDR(address);
-}
 #endif
 
 /*
diff --git a/include/asm-sh/page.h b/include/asm-sh/page.h
index bff635a..002e64a 100644
--- a/include/asm-sh/page.h
+++ b/include/asm-sh/page.h
@@ -5,6 +5,8 @@
  * Copyright (C) 1999  Niibe Yutaka
  */
 
+#include <linux/const.h>
+
 #ifdef __KERNEL__
 
 /* PAGE_SHIFT determines the page size */
@@ -18,15 +20,13 @@
 # error "Bogus kernel page size?"
 #endif
 
-#ifdef __ASSEMBLY__
-#define PAGE_SIZE	(1 << PAGE_SHIFT)
-#else
-#define PAGE_SIZE	(1UL << PAGE_SHIFT)
-#endif
-
+#define PAGE_SIZE	(_AC(1, UL) << PAGE_SHIFT)
 #define PAGE_MASK	(~(PAGE_SIZE-1))
 #define PTE_MASK	PAGE_MASK
 
+/* to align the pointer to the (next) page boundary */
+#define PAGE_ALIGN(addr)	(((addr)+PAGE_SIZE-1)&PAGE_MASK)
+
 #if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
 #define HPAGE_SHIFT	16
 #elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
@@ -104,20 +104,44 @@
 
 #endif /* !__ASSEMBLY__ */
 
-/* to align the pointer to the (next) page boundary */
-#define PAGE_ALIGN(addr)	(((addr)+PAGE_SIZE-1)&PAGE_MASK)
-
+/*
+ * __MEMORY_START and SIZE are the physical addresses and size of RAM.
+ */
 #define __MEMORY_START		CONFIG_MEMORY_START
 #define __MEMORY_SIZE		CONFIG_MEMORY_SIZE
 
+/*
+ * PAGE_OFFSET is the virtual address of the start of kernel address
+ * space.
+ */
 #define PAGE_OFFSET		CONFIG_PAGE_OFFSET
-#define __pa(x)			((unsigned long)(x)-PAGE_OFFSET)
-#define __va(x)			((void *)((unsigned long)(x)+PAGE_OFFSET))
-#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
 
+/*
+ * Virtual to physical RAM address translation.
+ *
+ * In 29 bit mode, the physical offset of RAM from address 0 is visible in
+ * the kernel virtual address space, and thus we don't have to take
+ * this into account when translating. However in 32 bit mode this offset
+ * is not visible (it is part of the PMB mapping) and so needs to be
+ * added or subtracted as required.
+ */
+#ifdef CONFIG_32BIT
+#define __pa(x)	((unsigned long)(x)-PAGE_OFFSET+__MEMORY_START)
+#define __va(x)	((void *)((unsigned long)(x)+PAGE_OFFSET-__MEMORY_START))
+#else
+#define __pa(x)	((unsigned long)(x)-PAGE_OFFSET)
+#define __va(x)	((void *)((unsigned long)(x)+PAGE_OFFSET))
+#endif
+
+#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
 #define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
 
-/* PFN start number, because of __MEMORY_START */
+/*
+ * PFN = physical frame number (ie PFN 0 == physical address 0)
+ * PFN_START is the PFN of the first page of RAM. By defining this we
+ * don't have struct page entries for the portion of address space
+ * between physical address 0 and the start of RAM.
+ */
 #define PFN_START		(__MEMORY_START >> PAGE_SHIFT)
 #define ARCH_PFN_OFFSET		(PFN_START)
 #define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
diff --git a/include/asm-sh/pgtable.h b/include/asm-sh/pgtable.h
index b4d7561..3df90f0 100644
--- a/include/asm-sh/pgtable.h
+++ b/include/asm-sh/pgtable.h
@@ -69,7 +69,13 @@
 #define USER_PTRS_PER_PGD	(TASK_SIZE/PGDIR_SIZE)
 #define FIRST_USER_ADDRESS	0
 
-#define PTE_PHYS_MASK		(0x20000000 - PAGE_SIZE)
+#ifdef CONFIG_32BIT
+#define PHYS_ADDR_MASK		0xffffffff
+#else
+#define PHYS_ADDR_MASK		0x1fffffff
+#endif
+
+#define PTE_PHYS_MASK		(PHYS_ADDR_MASK & PAGE_MASK)
 
 #ifdef CONFIG_SUPERH32
 #define VMALLOC_START	(P3SEG)
diff --git a/include/asm-sh/pgtable_32.h b/include/asm-sh/pgtable_32.h
index 7030360..7efc954 100644
--- a/include/asm-sh/pgtable_32.h
+++ b/include/asm-sh/pgtable_32.h
@@ -98,7 +98,7 @@
 #define _PAGE_CLEAR_FLAGS	(_PAGE_PROTNONE | _PAGE_ACCESSED | _PAGE_FILE)
 #endif
 
-#define _PAGE_FLAGS_HARDWARE_MASK	(0x1fffffff & ~(_PAGE_CLEAR_FLAGS))
+#define _PAGE_FLAGS_HARDWARE_MASK	(PHYS_ADDR_MASK & ~(_PAGE_CLEAR_FLAGS))
 
 /* Hardware flags, page size encoding */
 #if defined(CONFIG_X2TLB)
diff --git a/include/asm-sh/scatterlist.h b/include/asm-sh/scatterlist.h
index a7d0d18..35ef9c3 100644
--- a/include/asm-sh/scatterlist.h
+++ b/include/asm-sh/scatterlist.h
@@ -1,6 +1,7 @@
 #ifndef __ASM_SH_SCATTERLIST_H
 #define __ASM_SH_SCATTERLIST_H
 
+#include <asm/pgtable.h>
 #include <asm/types.h>
 
 struct scatterlist {
@@ -13,7 +14,7 @@
     unsigned int length;
 };
 
-#define ISA_DMA_THRESHOLD (0x1fffffff)
+#define ISA_DMA_THRESHOLD	PHYS_ADDR_MASK
 
 /* These macros should be used after a pci_map_sg call has been done
  * to get bus addresses of each of the SG entries and their lengths.