[S390] 4level-fixup cleanup

Make the s390 page table walk code independent of asm-generic/4level-fixup.h:
step through the pud level explicitly (pgd -> pud -> pmd -> pte) in
follow_table(), setup_ro_region() and the walkers in mm/vmem.c, instead of
stepping directly from pgd to pmd.
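
For reference, the walk that uaccess_pt.c, mm/init.c and mm/vmem.c now share
looks roughly like the sketch below. It is illustrative only: it reuses the
accessors already visible in the diff (pgd_offset, pud_offset, pmd_offset,
pte_offset_kernel), and the function name walk_to_pte() is made up for this
note; nothing in the tree uses it.

	/* Sketch of the explicit four-level walk (illustration only). */
	#include <linux/mm.h>
	#include <asm/pgtable.h>

	static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long addr)
	{
		pgd_t *pgd = pgd_offset(mm, addr);	/* top-level entry */
		pud_t *pud;
		pmd_t *pmd;

		if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
			return NULL;
		pud = pud_offset(pgd, addr);	/* level added by this patch */
		if (pud_none(*pud) || unlikely(pud_bad(*pud)))
			return NULL;
		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
			return NULL;
		return pte_offset_kernel(pmd, addr);	/* page table entry */
	}

Where the pud level is folded away by the generic headers, pud_offset()
collapses to a cast, so the extra step should not add run-time cost.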

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index dc37ea8..7e8efaa 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -18,13 +18,18 @@
 static inline pte_t *follow_table(struct mm_struct *mm, unsigned long addr)
 {
 	pgd_t *pgd;
+	pud_t *pud;
 	pmd_t *pmd;
 
 	pgd = pgd_offset(mm, addr);
 	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
 		return NULL;
 
-	pmd = pmd_offset(pgd, addr);
+	pud = pud_offset(pgd, addr);
+	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
+		return NULL;
+
+	pmd = pmd_offset(pud, addr);
 	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
 		return NULL;
 
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 90ec058..b234bb4 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -81,6 +81,7 @@
 static void __init setup_ro_region(void)
 {
 	pgd_t *pgd;
+	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
 	pte_t new_pte;
@@ -91,7 +92,8 @@
 
 	for (; address < end; address += PAGE_SIZE) {
 		pgd = pgd_offset_k(address);
-		pmd = pmd_offset(pgd, address);
+		pud = pud_offset(pgd, address);
+		pmd = pmd_offset(pud, address);
 		pte = pte_offset_kernel(pmd, address);
 		new_pte = mk_pte_phys(address, __pgprot(_PAGE_RO));
 		*pte = new_pte;
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 1bd51d8..fb9c5a8 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -73,6 +73,8 @@
 	return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
 }
 
+#define vmem_pud_alloc()	({ BUG(); ((pud_t *) NULL); })
+
 static inline pmd_t *vmem_pmd_alloc(void)
 {
 	pmd_t *pmd = NULL;
@@ -103,6 +105,7 @@
 {
 	unsigned long address;
 	pgd_t *pg_dir;
+	pud_t *pu_dir;
 	pmd_t *pm_dir;
 	pte_t *pt_dir;
 	pte_t  pte;
@@ -111,13 +114,21 @@
 	for (address = start; address < start + size; address += PAGE_SIZE) {
 		pg_dir = pgd_offset_k(address);
 		if (pgd_none(*pg_dir)) {
+			pu_dir = vmem_pud_alloc();
+			if (!pu_dir)
+				goto out;
+			pgd_populate_kernel(&init_mm, pg_dir, pu_dir);
+		}
+
+		pu_dir = pud_offset(pg_dir, address);
+		if (pud_none(*pu_dir)) {
 			pm_dir = vmem_pmd_alloc();
 			if (!pm_dir)
 				goto out;
-			pgd_populate_kernel(&init_mm, pg_dir, pm_dir);
+			pud_populate_kernel(&init_mm, pu_dir, pm_dir);
 		}
 
-		pm_dir = pmd_offset(pg_dir, address);
+		pm_dir = pmd_offset(pu_dir, address);
 		if (pmd_none(*pm_dir)) {
 			pt_dir = vmem_pte_alloc();
 			if (!pt_dir)
@@ -143,6 +154,7 @@
 {
 	unsigned long address;
 	pgd_t *pg_dir;
+	pud_t *pu_dir;
 	pmd_t *pm_dir;
 	pte_t *pt_dir;
 	pte_t  pte;
@@ -150,9 +162,10 @@
 	pte_val(pte) = _PAGE_TYPE_EMPTY;
 	for (address = start; address < start + size; address += PAGE_SIZE) {
 		pg_dir = pgd_offset_k(address);
-		if (pgd_none(*pg_dir))
+		pu_dir = pud_offset(pg_dir, address);
+		if (pud_none(*pu_dir))
 			continue;
-		pm_dir = pmd_offset(pg_dir, address);
+		pm_dir = pmd_offset(pu_dir, address);
 		if (pmd_none(*pm_dir))
 			continue;
 		pt_dir = pte_offset_kernel(pm_dir, address);
@@ -169,6 +182,7 @@
 	unsigned long address, start_addr, end_addr;
 	struct page *map_start, *map_end;
 	pgd_t *pg_dir;
+	pud_t *pu_dir;
 	pmd_t *pm_dir;
 	pte_t *pt_dir;
 	pte_t  pte;
@@ -183,13 +197,21 @@
 	for (address = start_addr; address < end_addr; address += PAGE_SIZE) {
 		pg_dir = pgd_offset_k(address);
 		if (pgd_none(*pg_dir)) {
+			pu_dir = vmem_pud_alloc();
+			if (!pu_dir)
+				goto out;
+			pgd_populate_kernel(&init_mm, pg_dir, pu_dir);
+		}
+
+		pu_dir = pud_offset(pg_dir, address);
+		if (pud_none(*pu_dir)) {
 			pm_dir = vmem_pmd_alloc();
 			if (!pm_dir)
 				goto out;
-			pgd_populate_kernel(&init_mm, pg_dir, pm_dir);
+			pud_populate_kernel(&init_mm, pu_dir, pm_dir);
 		}
 
-		pm_dir = pmd_offset(pg_dir, address);
+		pm_dir = pmd_offset(pu_dir, address);
 		if (pmd_none(*pm_dir)) {
 			pt_dir = vmem_pte_alloc();
 			if (!pt_dir)