ARM: ensure C page table setup code follows assembly code (part II)

This does the same as the previous commit, but for the S bit, which
also needs to match the initial value that the assembly code used, for
the same reasons.  Again, we add an SMP check to ensure that the page
tables are set up correctly for SMP.
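
For illustration, a standalone userspace sketch of the mechanism (the
helper names here are hypothetical stand-ins for the kernel code, and
printf() stands in for pr_warn()):

	#include <stdio.h>

	#define PMD_SECT_S	(1UL << 16)	/* "shared" bit in an ARMv6+ section descriptor */

	static unsigned long initial_pmd_value;

	/* Record the PMD flags the assembly boot code used for the
	 * initial section mappings (stands in for the recording done
	 * in the first hunk below). */
	static void record_initial_pmd(unsigned long pmd)
	{
		initial_pmd_value = pmd;
	}

	/* Mirror of the new SMP check: if the boot tables were not
	 * marked shared, force the S bit so the mappings created by C
	 * code agree with what SMP coherency requires. */
	static void fixup_smp_mappings(int smp)
	{
		if (smp && !(initial_pmd_value & PMD_SECT_S)) {
			printf("Forcing shared mappings for SMP\n");
			initial_pmd_value |= PMD_SECT_S;
		}
	}

	int main(void)
	{
		record_initial_pmd(0);		/* boot code omitted the S bit */
		fixup_smp_mappings(1);		/* SMP kernel: S bit forced on */
		return 0;
	}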

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 92df149..df875c4 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -117,6 +117,8 @@
 };
 
 #ifdef CONFIG_CPU_CP15
+static unsigned long initial_pmd_value __initdata = 0;
+
 /*
  * Initialise the cache_policy variable with the initial state specified
  * via the "pmd" value.  This is used to ensure that on ARMv6 and later,
@@ -128,6 +130,8 @@
 {
 	int i;
 
+	initial_pmd_value = pmd;
+
 	pmd &= PMD_SECT_TEX(1) | PMD_SECT_BUFFERABLE | PMD_SECT_CACHEABLE;
 
 	for (i = 0; i < ARRAY_SIZE(cache_policies); i++)
@@ -414,9 +418,15 @@
 		ecc_mask = 0;
 	}
 
-	if (is_smp() && cachepolicy != CPOLICY_WRITEALLOC) {
-		pr_warn("Forcing write-allocate cache policy for SMP\n");
-		cachepolicy = CPOLICY_WRITEALLOC;
+	if (is_smp()) {
+		if (cachepolicy != CPOLICY_WRITEALLOC) {
+			pr_warn("Forcing write-allocate cache policy for SMP\n");
+			cachepolicy = CPOLICY_WRITEALLOC;
+		}
+		if (!(initial_pmd_value & PMD_SECT_S)) {
+			pr_warn("Forcing shared mappings for SMP\n");
+			initial_pmd_value |= PMD_SECT_S;
+		}
 	}
 
 	/*
@@ -541,11 +551,12 @@
 		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 #endif
 
-		if (is_smp()) {
-			/*
-			 * Mark memory with the "shared" attribute
-			 * for SMP systems
-			 */
+		/*
+		 * If the initial page tables were created with the S bit
+		 * set, then we need to do the same here for the same
+		 * reasons given in early_cachepolicy().
+		 */
+		if (initial_pmd_value & PMD_SECT_S) {
 			user_pgprot |= L_PTE_SHARED;
 			kern_pgprot |= L_PTE_SHARED;
 			vecs_pgprot |= L_PTE_SHARED;
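
For context, the two shareability encodings involved, as defined for
ARMv6+ in the kernel's pgtable headers (a reference sketch; treat the
exact values as assumptions and check pgtable-hwdef.h and
pgtable-2level.h for the authoritative definitions):

	#define PMD_SECT_S	(1UL << 16)	/* S bit in a first-level section descriptor */
	#define L_PTE_SHARED	(1UL << 10)	/* shared (v6) / coherent (xsc3) in a Linux PTE */

The user, kernel and vectors protections gain L_PTE_SHARED together:
mapping the same memory with mismatched shareability attributes is
architecturally unpredictable on ARMv6 and later, so the PTE-level
mappings must agree with the S bit the initial section mappings used.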