Merge "target: msm8996: Do not switch to legacy mmc mode during lk exit"
diff --git a/app/aboot/aboot.c b/app/aboot/aboot.c
index 07a8d76..d7c9786 100644
--- a/app/aboot/aboot.c
+++ b/app/aboot/aboot.c
@@ -90,6 +90,7 @@
 extern int get_target_boot_params(const char *cmdline, const char *part,
 				  char *buf, int buflen);
 
+void *info_buf;
 void write_device_info_mmc(device_info *dev);
 void write_device_info_flash(device_info *dev);
 static int aboot_save_boot_hash_mmc(uint32_t image_addr, uint32_t image_size);
@@ -1472,7 +1473,6 @@
 	return 0;
 }
 
-BUF_DMA_ALIGN(info_buf, BOOT_IMG_MAX_PAGE_SIZE);
 void write_device_info_mmc(device_info *dev)
 {
 	unsigned long long ptn = 0;
@@ -1551,10 +1551,15 @@
 
 void write_device_info_flash(device_info *dev)
 {
-	struct device_info *info = (void *) info_buf;
+	struct device_info *info = memalign(PAGE_SIZE, ROUNDUP(BOOT_IMG_MAX_PAGE_SIZE, PAGE_SIZE));
 	struct ptentry *ptn;
 	struct ptable *ptable;
-
+	if(info == NULL)
+	{
+		dprintf(CRITICAL, "Failed to allocate memory for device info struct\n");
+		ASSERT(0);
+	}
+	info_buf = info;
 	ptable = flash_get_ptable();
 	if (ptable == NULL)
 	{
@@ -1576,6 +1581,7 @@
 		dprintf(CRITICAL, "ERROR: Cannot write device info\n");
 			return;
 	}
+	free(info);
 }
 
 static int read_allow_oem_unlock(device_info *dev)
@@ -1651,10 +1657,15 @@
 
 void read_device_info_flash(device_info *dev)
 {
-	struct device_info *info = (void*) info_buf;
+	struct device_info *info = memalign(PAGE_SIZE, ROUNDUP(BOOT_IMG_MAX_PAGE_SIZE, PAGE_SIZE));
 	struct ptentry *ptn;
 	struct ptable *ptable;
-
+	if(info == NULL)
+	{
+		dprintf(CRITICAL, "Failed to allocate memory for device info struct\n");
+		ASSERT(0);
+	}
+	info_buf = info;
 	ptable = flash_get_ptable();
 	if (ptable == NULL)
 	{
@@ -1683,23 +1694,31 @@
 		write_device_info_flash(info);
 	}
 	memcpy(dev, info, sizeof(device_info));
+	free(info);
 }
 
 void write_device_info(device_info *dev)
 {
 	if(target_is_emmc_boot())
 	{
-		struct device_info *info = (void*) info_buf;
+		struct device_info *info = memalign(PAGE_SIZE, ROUNDUP(BOOT_IMG_MAX_PAGE_SIZE, PAGE_SIZE));
+		if(info == NULL)
+		{
+			dprintf(CRITICAL, "Failed to allocate memory for device info struct\n");
+			ASSERT(0);
+		}
+		info_buf = info;
 		memcpy(info, dev, sizeof(struct device_info));
 
 #if USE_RPMB_FOR_DEVINFO
 		if (is_secure_boot_enable())
-			write_device_info_rpmb((void*) info, mmc_get_device_blocksize());
+			write_device_info_rpmb((void*) info, PAGE_SIZE);
 		else
 			write_device_info_mmc(info);
 #else
 		write_device_info_mmc(info);
 #endif
+		free(info);
 	}
 	else
 	{
@@ -1711,11 +1730,17 @@
 {
 	if(target_is_emmc_boot())
 	{
-		struct device_info *info = (void*) info_buf;
+		struct device_info *info = memalign(PAGE_SIZE, ROUNDUP(BOOT_IMG_MAX_PAGE_SIZE, PAGE_SIZE));
+		if(info == NULL)
+		{
+			dprintf(CRITICAL, "Failed to allocate memory for device info struct\n");
+			ASSERT(0);
+		}
+		info_buf = info;
 
 #if USE_RPMB_FOR_DEVINFO
 		if (is_secure_boot_enable())
-			read_device_info_rpmb((void*) info, mmc_get_device_blocksize());
+			read_device_info_rpmb((void*) info, PAGE_SIZE);
 		else
 			read_device_info_mmc(info);
 #else
@@ -1725,8 +1750,10 @@
 		if (memcmp(info->magic, DEVICE_MAGIC, DEVICE_MAGIC_SIZE))
 		{
 			memcpy(info->magic, DEVICE_MAGIC, DEVICE_MAGIC_SIZE);
-			info->is_unlocked = 0;
-			info->is_verified = 0;
+			if (is_secure_boot_enable())
+				info->is_unlocked = 0;
+			else
+				info->is_verified = 1;
 			info->is_tampered = 0;
 #if USER_BUILD_VARIANT
 			info->charger_screen_enabled = 1;
@@ -1736,6 +1763,7 @@
 			write_device_info(info);
 		}
 		memcpy(dev, info, sizeof(device_info));
+		free(info);
 	}
 	else
 	{
diff --git a/app/mmutest/mmu_test.c b/app/mmutest/mmu_test.c
new file mode 100644
index 0000000..216e73b
--- /dev/null
+++ b/app/mmutest/mmu_test.c
@@ -0,0 +1,94 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/types.h>
+#include <debug.h>
+#include <arch/arm/mmu.h>
+#include <mmu.h>
+#include <string.h>
+
+/* COMMON memory - cacheable, write through */
+#define COMMON_MEMORY       (MMU_MEMORY_TYPE_NORMAL_WRITE_THROUGH | \
+                           MMU_MEMORY_AP_READ_WRITE | MMU_MEMORY_XN)
+
+#define MB (1024 * 1024)
+static mmu_section_t ramdump_mmu_section_table[] =
+{
+  /*        Physical addr,    Virtual addr,     Mapping type ,              Size (in MB),            Flags */
+	{    0xC0000000,        0xC0000000,       MMU_L2_NS_SECTION_MAPPING,  512,            COMMON_MEMORY},
+	{    0x100000000,       0xC0000000,       MMU_L2_NS_SECTION_MAPPING,  1024,            COMMON_MEMORY},
+	{    0x140000000,       0xC0000000,       MMU_L2_NS_SECTION_MAPPING,  484,            COMMON_MEMORY},
+};
+
+uint32_t vaddr[] = {0xc2300000, 0xcd000000, 0xde000000};
+uint64_t paddr[] = {0xc2300000, 0x10d000000, 0x15e000000};
+
+void ramdump_table_map()
+{
+	uint32_t i, j;
+	uint32_t table_sz = ARRAY_SIZE(ramdump_mmu_section_table);
+	char *ptr = NULL;
+	bool pass_access = true;
+	bool pass_conversion = true;
+	uint64_t paddr_v;
+	uint32_t vaddr_v;
+
+	for (i = 0 ; i < table_sz; i++)
+	{
+		arm_mmu_map_entry(&ramdump_mmu_section_table[i]);
+		vaddr_v = physical_to_virtual_mapping(paddr[i]);
+		if (vaddr_v != vaddr[i])
+			pass_conversion = false;
+		paddr_v = virtual_to_physical_mapping(vaddr[i]);
+		if (paddr_v != paddr[i])
+			pass_conversion = false;
+		ptr = (char *)(uintptr_t)ramdump_mmu_section_table[i].vaddress;
+
+		for (j = 0 ; j < (ramdump_mmu_section_table[i].size * MB)/5; j++)
+		{
+			strcpy(ptr, "hello");
+			ptr+=5;
+		}
+
+		ptr = (char *)(uintptr_t)ramdump_mmu_section_table[i].vaddress;
+
+		for (j = 0 ; j < (ramdump_mmu_section_table[i].size * MB)/5; j++)
+		{
+			if (memcmp((void *)ptr, "hello", 5))
+			{
+				pass_access = false;
+				break;
+			}
+			ptr+=5;
+		}
+		if (pass_access)
+			dprintf(CRITICAL, "LAPE TEST PASS for addr: 0x%llx\n", ramdump_mmu_section_table[i].paddress);
+	}
+		if (pass_conversion)
+			dprintf(CRITICAL, "Physical to virtual conversion TEST PASS\n");
+}
diff --git a/app/mmutest/rules.mk b/app/mmutest/rules.mk
new file mode 100644
index 0000000..78918a1
--- /dev/null
+++ b/app/mmutest/rules.mk
@@ -0,0 +1,6 @@
+LOCAL_DIR := $(GET_LOCAL_DIR)
+
+INCLUDES += -I$(LK_TOP_DIR)/platform/msm_shared/include
+
+OBJS += \
+	$(LOCAL_DIR)/mmu_test.o
diff --git a/arch/arm/include/arch/arm.h b/arch/arm/include/arch/arm.h
index e056628..d5c3c5d 100644
--- a/arch/arm/include/arch/arm.h
+++ b/arch/arm/include/arch/arm.h
@@ -86,7 +86,9 @@
 void arm_write_ttbr(uint32_t val);
 void arm_write_dacr(uint32_t val);
 void arm_invalidate_tlb(void);
-
+void arm_write_mair0(uint32_t);
+void arm_write_mair1(uint32_t);
+void arm_write_ttbcr(uint32_t);
 void dump_fault_frame(struct arm_fault_frame *frame);
 
 #if defined(__cplusplus)
diff --git a/arch/arm/include/arch/arm/mmu.h b/arch/arm/include/arch/arm/mmu.h
index d5067eb..12bcd23 100644
--- a/arch/arm/include/arch/arm/mmu.h
+++ b/arch/arm/include/arch/arm/mmu.h
@@ -1,6 +1,8 @@
 /*
  * Copyright (c) 2008 Travis Geiselbrecht
  *
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
  * Permission is hereby granted, free of charge, to any person obtaining
  * a copy of this software and associated documentation files
  * (the "Software"), to deal in the Software without restriction,
@@ -33,6 +35,7 @@
 
 #if defined(ARM_ISA_ARMV6) | defined(ARM_ISA_ARMV7)
 
+#ifndef LPAE
 /* C, B and TEX[2:0] encodings without TEX remap */
                                                        /* TEX      |    CB    */
 #define MMU_MEMORY_TYPE_STRONGLY_ORDERED              ((0x0 << 12) | (0x0 << 2))
@@ -48,6 +51,68 @@
 #define MMU_MEMORY_AP_READ_WRITE    (0x3 << 10)
 
 #define MMU_MEMORY_XN               (0x1 << 4)
+#else /* LPAE */
+
+typedef enum
+{
+ /* Secure L1 section */
+  MMU_L1_SECTION_MAPPING = 0,
+
+  /* Non-secure L1 section */
+  MMU_L1_NS_SECTION_MAPPING,
+
+  /* Secure L2 section */
+  MMU_L2_SECTION_MAPPING,
+
+  /* Non-secure L2 section */
+  MMU_L2_NS_SECTION_MAPPING,
+
+  /* Secure L3 section */
+  MMU_L3_SECTION_MAPPING,
+
+  /* Non-secure L3 section */
+  MMU_L3_NS_SECTION_MAPPING,
+
+}mapping_type;
+
+
+#define ATTR_INDEX(x)         (x << 2) /* [4:2] - AttrIndx[2:0] of block descriptor */
+
+/* define the memory attributes for the block descriptors */
+
+#define MMU_MEMORY_TYPE_STRONGLY_ORDERED               ATTR_INDEX(0)
+#define MMU_MEMORY_TYPE_DEVICE_SHARED                  ATTR_INDEX(1) | (3 << 8) /*[9:8] - SH[1:0] - Shareability */
+#define MMU_MEMORY_TYPE_NORMAL                         ATTR_INDEX(2)
+#define MMU_MEMORY_TYPE_NORMAL_WRITE_THROUGH           ATTR_INDEX(7)
+#define MMU_MEMORY_TYPE_NORMAL_WRITE_BACK_ALLOCATE     ATTR_INDEX(4)
+#define MMU_MEMORY_TYPE_NORMAL_WRITE_BACK_NO_ALLOCATE  ATTR_INDEX(5)
+
+#define MMU_MEMORY_AP_READ_WRITE                       (1 << 6) /* Read/Write at any privilege */
+#define MMU_MEMORY_XN                                  (1ULL << 54)
+#define MMU_MEMORY_PXN                                 (1ULL << 53)
+
+/* define the memory attributes:
+ * For LPAE, the block descriptor contains index into the MAIR registers.
+ * MAIR registers define the memory attributes. Below configuration is arrived based on
+ * the arm v7 manual section B4.1.104
+ */
+/* MAIR is 64 bit, with attrm[0..7], where m is 0..7
+ * Fill attrm[0..7] as below
+ * STRONG ORDERED                                        0000.0000
+ * DEVICE MEMORY                                         0000.0100
+ * normal memory, non cacheable                          0100.0100
+ * normal memory, WB, RA, nWA                            1110.1110
+ * normal memory, WB, RA, WA                             1111.1111
+ * normal memory, WB, nRA, nWA                           1100.1100
+ * normal memory, WT, RA, nWA                            1010.1010
+ * normal memory , WT, RA, WA                            1011.1011
+ */
+
+#define MAIR0                  0xee440400
+#define MAIR1                  0xbbaaccff
+#include <mmu.h>
+void arm_mmu_map_entry(mmu_section_t *entry);
+#endif /* LPAE */
 
 #else
 
@@ -56,7 +121,8 @@
 #endif
 
 void arm_mmu_map_section(addr_t paddr, addr_t vaddr, uint flags);
-
+uint64_t virtual_to_physical_mapping(uint32_t vaddr);
+uint32_t physical_to_virtual_mapping(uint64_t paddr);
 
 #if defined(__cplusplus)
 }
diff --git a/arch/arm/mmu_lpae.c b/arch/arm/mmu_lpae.c
new file mode 100644
index 0000000..ddf66fd
--- /dev/null
+++ b/arch/arm/mmu_lpae.c
@@ -0,0 +1,323 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+      copyright notice, this list of conditions and the following
+      disclaimer in the documentation and/or other materials provided
+      with the distribution.
+    * Neither the name of The Linux Foundation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include <debug.h>
+#include <sys/types.h>
+#include <compiler.h>
+#include <arch.h>
+#include <arch/arm.h>
+#include <arch/defines.h>
+#include <arch/arm/mmu.h>
+#include <mmu.h>
+#include <platform.h>
+
+#if ARM_WITH_MMU
+
+#define LPAE_SIZE               (1ULL << 40)
+#define LPAE_MASK               (LPAE_SIZE - 1)
+#define L1_PT_INDEX             0x7FC0000000
+#define PT_TABLE_DESC_BIT       0x2
+#define SIZE_1GB                (0x400000000UL)
+#define SIZE_2MB                (0x200000)
+#define MMU_L2_PT_SIZE          512
+#define MMU_PT_BLOCK_DESCRIPTOR 0x1
+#define MMU_PT_TABLE_DESCRIPTOR 0x3
+#define MMU_AP_FLAG             (0x1 << 10)
+#define L2_PT_MASK              0xFFFFE00000
+#define L2_INDEX_MASK           0x3FE00000
+
+uint64_t mmu_l1_pagetable[L1_PT_SZ] __attribute__ ((aligned(4096))); /* Max is 8 */
+uint64_t mmu_l2_pagetable[L2_PT_SZ*MMU_L2_PT_SIZE] __attribute__ ((aligned(4096))); /* Macro from target code * 512 */
+uint64_t avail_l2_pt = L2_PT_SZ;
+uint64_t *empty_l2_pt = mmu_l2_pagetable;
+
+/************************************************************/
+/* MAP 2MB granules in 1GB section in L2 page table */
+/***********************************************************/
+
+static void mmu_map_l2_entry(mmu_section_t *block)
+{
+	uint64_t *l2_pt = NULL;
+	uint64_t  address_start;
+	uint64_t  address_end;
+	uint64_t  p_addr;
+
+	/* First initialize the first level descriptor for each 1 GB
+	 * Bits[47:12] provide the physical base address of the level 2 page table
+	 *
+	 *    ________________________________________________________________________________
+	 *   |  |     |  |   |       |        |                            |       |          |
+	 *   |63|62-61|60| 59|58---52|51----40|39------------------------12|11----2|1------- 0|
+	 *   |NS| AP  |XN|PXN|Ignored|UNK|SBZP|Next-level table addr[39:12]|Ignored|Descriptor|
+	 *   |__|_____|__|___|_______|________|____________________________|_______|__________|
+	 * NS: Used only in secure state
+	 * AP: Access protection
+	 */
+
+	/* Convert the virtual address[38:30] into an index of the L1 page table */
+	address_start = (block->vaddress & LPAE_MASK) >> 30;
+
+	/* Check if this 1GB entry has an L2 page table mapped already;
+	 * if the L1 entry hasn't mapped any L2 page table, allocate one for it
+	 */
+
+	if((mmu_l1_pagetable[address_start] & PT_TABLE_DESC_BIT) == 0)
+	{
+		ASSERT(avail_l2_pt);
+
+		/* Get the first l2 empty page table and fill in the L1 PTE with a table descriptor,
+		 * The l2_pt address bits 12:39 are used for L1 PTE entry
+		 */
+		l2_pt = empty_l2_pt;
+
+		/* Bits 39:12 of the page table address are mapped into the L1 PTE entry */
+		mmu_l1_pagetable[address_start] = ((uint64_t)(uintptr_t)l2_pt & 0x0FFFFFFF000) | MMU_PT_TABLE_DESCRIPTOR;
+
+		/* Advance pointer to next empty l2 page table */
+		empty_l2_pt += MMU_L2_PT_SIZE;
+		avail_l2_pt--;
+	}
+	else
+	{
+		/* Entry has L2 page table mapped already, so just get the existing L2 page table address */
+		l2_pt = (uint64_t *) (uintptr_t)(mmu_l1_pagetable[address_start] & 0xFFFFFFF000);
+    }
+
+	/* Get the physical address of 2MB sections, bits 21:39 are used to populate the L2 entry */
+	p_addr = block->paddress & L2_PT_MASK;
+
+	/* Start index into the L2 page table for this section using the virtual address[29:21]*/
+	address_start  = (block->vaddress & L2_INDEX_MASK) >> 21;
+
+	/* The end index for the given section. Size is given in MB; convert it to the number of 2MB segments */
+	address_end = address_start + ((block->size) >> 1);
+
+	/*
+	 *      ___________________________________________________________________________________________________________________
+	 *     |       |        |  |   |    |        |                  |        |  |  |       |       |  |             |          |
+	 *     |63---59|58----55|54|53 |52  |51----40|39--------------21|20----12|11|10|9     8|7     6|5 |4-----------2|  1   0   |
+	 *     |Ignored|Reserved|XN|PXN|Cont|UNK|SBZP|Output addr[39:21]|UNK|SBZP|nG|AF|SH[1:0]|AP[2:1]|NS|AttrIndx[2:0]|Descriptor|
+	 *     |_______|________|__|___|____|________|__________________|________|__|__|_______|_______|__|_____________|__________|
+	 */
+
+	/* Map all the 2MB segments in the 1GB section */
+	while (address_start < address_end)
+	{
+		l2_pt[address_start] =  (p_addr) | MMU_PT_BLOCK_DESCRIPTOR | MMU_AP_FLAG | block->flags;
+		address_start++;
+		/* Increment to the next 2MB segment in current L2 page table*/
+		p_addr += SIZE_2MB;
+		arm_invalidate_tlb();
+	}
+}
+
+/************************************************************/
+/* MAP 1GB granules in L1 page table */
+/***********************************************************/
+static void mmu_map_l1_entry(mmu_section_t *block)
+{
+	uint64_t address_start;
+	uint64_t address_end;
+	uint64_t p_addr;
+
+	/* Convert our base address into an index into the page table */
+	address_start = (block->vaddress & LPAE_MASK) >> 30;
+
+	/* Get the end address into the page table, size is in MB, convert to GB for L1 mapping */
+	address_end = address_start + ((block->size) >> 10);
+
+	/* bits 38:30 provide the physical base address of the section */
+	p_addr = block->paddress & L1_PT_INDEX;
+
+	while(address_start < address_end)
+	{
+	 /*
+	 *    A Block descriptor for first stage, level one is as follows (Descriptor = 0b01):
+	 *         ___________________________________________________________________________________________________________________
+	 *        |       |        |  |	  |    |        |                  |        |  |  |       |       |  |             |          |
+	 *        |63---59|58----55|54|53 |52  |51----40|39--------------30|n-1 --12|11|10|9     8|7     6|5 |4-----------2|  1   0   |
+	 *        |Ignored|Reserved|XN|PXN|Cont|UNK/SBZP|Output addr[47:30]|UNK/SBZP|nG|AF|SH[1:0]|AP[2:1]|NS|AttrIndx[2:0]|Descriptor|
+	 *        |_______|________|__|___|____|________|__________________|________|__|__|_______|_______|__|_____________|__________|
+	 */
+
+		mmu_l1_pagetable[address_start] =  (p_addr) | block->flags | MMU_AP_FLAG | MMU_PT_BLOCK_DESCRIPTOR;
+
+		p_addr += SIZE_1GB; /* Point to next level */
+		address_start++;
+		arm_invalidate_tlb();
+	}
+}
+
+void arm_mmu_map_entry(mmu_section_t *entry)
+{
+	ASSERT(entry);
+
+	if (entry->type == MMU_L1_NS_SECTION_MAPPING)
+		mmu_map_l1_entry(entry);
+	else if(entry->type == MMU_L2_NS_SECTION_MAPPING)
+		mmu_map_l2_entry(entry);
+	else
+		dprintf(CRITICAL, "Invalid mapping type in the mmu table: %d\n", entry->type);
+}
+
+void arm_mmu_init(void)
+{
+	/* set some mmu specific control bits:
+	 * access flag disabled, TEX remap disabled, mmu disabled
+	 */
+	arm_write_cr1(arm_read_cr1() & ~((1<<29)|(1<<28)|(1<<0)));
+
+	platform_init_mmu_mappings();
+
+	/* set up the translation table base */
+	arm_write_ttbr((uint32_t)mmu_l1_pagetable);
+
+	/* set up the Memory Attribute Indirection Registers 0 and 1 */
+	arm_write_mair0(MAIR0);
+	arm_write_mair1(MAIR1);
+
+	/* TTBCR.EAE = 1 */
+	arm_write_ttbcr(0x80000000);
+
+	/* Enable TRE */
+	arm_write_cr1(arm_read_cr1() | (1<<28));
+
+	/* turn on the mmu */
+	arm_write_cr1(arm_read_cr1() | 0x1);
+}
+
+void arch_disable_mmu(void)
+{
+	/* Ensure all memory access are complete
+	 * before disabling MMU
+	 */
+	dsb();
+	arm_write_cr1(arm_read_cr1() & ~(1<<0));
+	arm_invalidate_tlb();
+}
+
+uint64_t virtual_to_physical_mapping(uint32_t vaddr)
+{
+	uint32_t l1_index;
+	uint64_t *l2_pt = NULL;
+	uint32_t l2_index;
+	uint32_t offset = 0;
+	uint64_t paddr = 0;
+
+	/* Find the L1 index from virtual address */
+	l1_index = (vaddr & LPAE_MASK) >> 30;
+
+	if ((mmu_l1_pagetable[l1_index] & MMU_PT_TABLE_DESCRIPTOR) == MMU_PT_TABLE_DESCRIPTOR)
+	{
+		/* Get the l2 page table address */
+		l2_pt = (uint64_t *) (uintptr_t) (mmu_l1_pagetable[l1_index] & 0x0FFFFFFF000);
+		/* Get the l2 index from virtual address */
+		l2_index = (vaddr & L2_INDEX_MASK) >> 21;
+		/* Calculate the offset from vaddr. */
+		offset = vaddr & 0x1FFFFF;
+		/* Get the physical address bits from 21 to 39 */
+		paddr = (l2_pt[l2_index] & L2_PT_MASK) + offset;
+	} else if ((mmu_l1_pagetable[l1_index] & MMU_PT_TABLE_DESCRIPTOR) == MMU_PT_BLOCK_DESCRIPTOR)
+	{
+		/* Calculate the offset from bits 0 to 30 */
+		offset = vaddr & 0x3FFFFFFF;
+		/* Return the entry from l1 page table */
+		paddr = (mmu_l1_pagetable[l1_index] & L1_PT_INDEX) + offset;
+	} else
+	{
+		ASSERT(0);
+	}
+
+	return paddr;
+}
+
+uint32_t physical_to_virtual_mapping(uint64_t paddr)
+{
+	uint32_t i, j;
+	uint32_t vaddr_index = 0;
+	uint32_t vaddr = 0;
+	uint64_t paddr_base_l1;
+	uint64_t paddr_base_l2;
+	uint64_t *l2_pt = NULL;
+	bool l1_index_found = false;
+	uint32_t l1_index = 0;
+	uint32_t offset = 0;
+
+	/* Traverse through the L1 page table to look for block descriptor */
+	for (i = 0; i < L1_PT_SZ; i++)
+	{
+		/* Find the L1 page table index */
+		paddr_base_l1 = paddr & L1_PT_INDEX;
+
+		/* If the L1 index is unused continue */
+		if ((mmu_l1_pagetable[i] & MMU_PT_TABLE_DESCRIPTOR) == 0)
+			continue;
+
+		/* If Its a block entry, find the matching entry and return the index */
+		if ((mmu_l1_pagetable[i] & MMU_PT_TABLE_DESCRIPTOR) == MMU_PT_BLOCK_DESCRIPTOR)
+		{
+			if ((mmu_l1_pagetable[i] & L1_PT_INDEX) == paddr_base_l1)
+			{
+				offset = paddr - paddr_base_l1;
+				vaddr_index = i;
+				l1_index_found = true;
+				goto end;
+			} /* If the entry is table, extract table address */
+		} else if ((mmu_l1_pagetable[i] & MMU_PT_TABLE_DESCRIPTOR) == MMU_PT_TABLE_DESCRIPTOR)
+		{
+			l1_index = i;
+			l2_pt = (uint64_t *) (uintptr_t) (mmu_l1_pagetable[l1_index] & 0x0FFFFFFF000);
+			paddr_base_l2 = paddr & L2_PT_MASK;
+			/* Search the table to find index in L2 page table */
+			for (j = 0; j < MMU_L2_PT_SIZE; j++)
+			{
+				if (paddr_base_l2 == (l2_pt[j] & L2_PT_MASK))
+				{
+					vaddr_index = j;
+					offset = paddr - paddr_base_l2;
+					goto end;
+				}
+			}
+		}
+	}
+	/* If we reach here, the address is either not mapped or this is an invalid request */
+	dprintf(CRITICAL, "The address %llx is not mapped\n", paddr);
+	ASSERT(0);
+
+end:
+	/* Convert the index into the virtual address */
+	if (l1_index_found)
+		vaddr = (vaddr_index << 30);
+	else
+		vaddr = ((vaddr_index << 21) & L2_INDEX_MASK) + (l1_index << 30);
+
+	vaddr += offset;
+
+	return vaddr;
+}
+#endif // ARM_WITH_MMU
diff --git a/arch/arm/ops.S b/arch/arm/ops.S
index 4cfd180..c97a4d6 100644
--- a/arch/arm/ops.S
+++ b/arch/arm/ops.S
@@ -1,7 +1,7 @@
 /*
  * Copyright (c) 2008 Travis Geiselbrecht
  *
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining
  * a copy of this software and associated documentation files
@@ -245,6 +245,8 @@
 
 /* void arm_invalidate_tlb(void) */
 FUNCTION(arm_invalidate_tlb)
+	dsb
+	isb
 	mov		r0, #0
 	mcr	p15, 0, r0, c8, c7, 0
 #if ARM_CPU_CORTEX_A8
@@ -287,3 +289,15 @@
 	mov		r0, #0
 #endif
 	bx		lr
+
+FUNCTION(arm_write_mair0)
+	mcr p15, 0, r0, c10, c2, 0
+	bx	lr
+
+FUNCTION(arm_write_mair1)
+	mcr p15, 0, r0, c10, c2, 1
+	bx lr
+
+FUNCTION(arm_write_ttbcr)
+	mcr p15, 0, r0, c2, c0, 2
+	bx lr
diff --git a/arch/arm/rules.mk b/arch/arm/rules.mk
index f45a6eb..7e3ace0 100644
--- a/arch/arm/rules.mk
+++ b/arch/arm/rules.mk
@@ -94,10 +94,15 @@
 	$(LOCAL_DIR)/ops.o \
 	$(LOCAL_DIR)/exceptions.o \
 	$(LOCAL_DIR)/faults.o \
-	$(LOCAL_DIR)/mmu.o \
 	$(LOCAL_DIR)/thread.o \
 	$(LOCAL_DIR)/dcc.o
 
+ifeq ($(ENABLE_LPAE_SUPPORT), 1)
+OBJS +=  $(LOCAL_DIR)/mmu_lpae.o
+else
+OBJS +=  $(LOCAL_DIR)/mmu.o
+endif
+
 # set the default toolchain to arm eabi and set a #define
 TOOLCHAIN_PREFIX ?= arm-eabi-
 ifeq ($(TOOLCHAIN_PREFIX),arm-none-linux-gnueabi-)
diff --git a/platform/msm8996/include/platform/iomap.h b/platform/msm8996/include/platform/iomap.h
index a4e7d54..2244632 100644
--- a/platform/msm8996/include/platform/iomap.h
+++ b/platform/msm8996/include/platform/iomap.h
@@ -191,8 +191,8 @@
  * as device memory, define the start address
  * and size in MB
  */
-#define RPMB_SND_RCV_BUF            0x90D00000
-#define RPMB_SND_RCV_BUF_SZ         0x1
+#define RPMB_SND_RCV_BUF            0x90F00000
+#define RPMB_SND_RCV_BUF_SZ         0x2
 
 #define TCSR_BOOT_MISC_DETECT       0x007B3000
 
diff --git a/platform/msm8996/platform.c b/platform/msm8996/platform.c
index d0ab86d..6e04abf 100644
--- a/platform/msm8996/platform.c
+++ b/platform/msm8996/platform.c
@@ -32,8 +32,8 @@
 #include <qgic.h>
 #include <qtimer.h>
 #include <platform/clock.h>
-#include <mmu.h>
 #include <arch/arm/mmu.h>
+#include <mmu.h>
 #include <smem.h>
 #include <board.h>
 
@@ -46,20 +46,31 @@
 
 /* Peripherals - non-shared device */
 #define IOMAP_MEMORY      (MMU_MEMORY_TYPE_DEVICE_SHARED | \
-                           MMU_MEMORY_AP_READ_WRITE | MMU_MEMORY_XN)
+                           MMU_MEMORY_AP_READ_WRITE | MMU_MEMORY_XN | MMU_MEMORY_PXN)
 
 /* SCRATCH memory - cacheable, write through */
 #define SCRATCH_MEMORY       (MMU_MEMORY_TYPE_NORMAL_WRITE_THROUGH | \
                            MMU_MEMORY_AP_READ_WRITE | MMU_MEMORY_XN)
 
-static mmu_section_t mmu_section_table[] = {
-/*       Physical addr,    Virtual addr,     Size (in MB),       Flags */
-	{    MEMBASE,           MEMBASE,          (MEMSIZE / MB),    LK_MEMORY},
-	{    MSM_IOMAP_BASE,    MSM_IOMAP_BASE,    MSM_IOMAP_SIZE,   IOMAP_MEMORY},
-	{    KERNEL_ADDR,       KERNEL_ADDR,       KERNEL_SIZE,      SCRATCH_MEMORY},
-	{    SCRATCH_ADDR,      SCRATCH_ADDR,      SCRATCH_SIZE,     SCRATCH_MEMORY},
-	{    MSM_SHARED_BASE,   MSM_SHARED_BASE,   MSM_SHARED_SIZE,  SCRATCH_MEMORY},
-	{    RPMB_SND_RCV_BUF,  RPMB_SND_RCV_BUF,  RPMB_SND_RCV_BUF_SZ,    IOMAP_MEMORY},
+/* COMMON memory - cacheable, write through */
+#define COMMON_MEMORY       (MMU_MEMORY_TYPE_NORMAL_WRITE_THROUGH | \
+                           MMU_MEMORY_AP_READ_WRITE | MMU_MEMORY_XN)
+
+
+static mmu_section_t default_mmu_section_table[] =
+{
+/*        Physical addr,    Virtual addr,     Mapping type ,              Size (in MB),            Flags */
+    {    0x00000000,        0x00000000,       MMU_L1_NS_SECTION_MAPPING,  1024,                IOMAP_MEMORY},
+    {    KERNEL_ADDR,       KERNEL_ADDR,      MMU_L2_NS_SECTION_MAPPING,  KERNEL_SIZE,         COMMON_MEMORY},
+    {    MEMBASE,           MEMBASE,          MMU_L2_NS_SECTION_MAPPING,  (MEMSIZE / MB),      LK_MEMORY},
+    {    SCRATCH_ADDR,      SCRATCH_ADDR,     MMU_L2_NS_SECTION_MAPPING,  SCRATCH_SIZE,        SCRATCH_MEMORY},
+    {    MSM_SHARED_BASE,   MSM_SHARED_BASE,  MMU_L2_NS_SECTION_MAPPING,  MSM_SHARED_SIZE,     COMMON_MEMORY},
+    {    RPMB_SND_RCV_BUF,  RPMB_SND_RCV_BUF, MMU_L2_NS_SECTION_MAPPING,  RPMB_SND_RCV_BUF_SZ, IOMAP_MEMORY},
+};
+
+static mmu_section_t dload_mmu_section_table[] =
+{
+    { 0x85800000, 0x85800000, MMU_L2_NS_SECTION_MAPPING, 178, COMMON_MEMORY},
 };
 
 void platform_early_init(void)
@@ -94,37 +105,30 @@
 /* Setup memory for this platform */
 void platform_init_mmu_mappings(void)
 {
-	uint32_t i;
-	uint32_t sections;
-	uint32_t table_size = ARRAY_SIZE(mmu_section_table);
+	int i;
+	int table_sz = ARRAY_SIZE(default_mmu_section_table);
 
-	/* Configure the MMU page entries for memory read from the
-	   mmu_section_table */
-	for (i = 0; i < table_size; i++)
+	for (i = 0 ; i < table_sz; i++)
+		arm_mmu_map_entry(&default_mmu_section_table[i]);
+
+	if (scm_device_enter_dload())
 	{
-		sections = mmu_section_table[i].num_of_sections;
+		/* TZ & Hyp memory can be mapped only while entering the download mode */
+		table_sz = ARRAY_SIZE(dload_mmu_section_table);
 
-		while (sections--)
-		{
-			arm_mmu_map_section(mmu_section_table[i].paddress +
-								sections * MB,
-								mmu_section_table[i].vaddress +
-								sections * MB,
-								mmu_section_table[i].flags);
-		}
+		for (i = 0 ; i < table_sz; i++)
+			arm_mmu_map_entry(&dload_mmu_section_table[i]);
 	}
 }
 
 addr_t platform_get_virt_to_phys_mapping(addr_t virt_addr)
 {
-	/* Using 1-1 mapping on this platform. */
-	return virt_addr;
+	return virtual_to_physical_mapping(virt_addr);
 }
 
 addr_t platform_get_phys_to_virt_mapping(addr_t phys_addr)
 {
-	/* Using 1-1 mapping on this platform. */
-	return phys_addr;
+	return physical_to_virtual_mapping(phys_addr);
 }
 
 uint32_t platform_get_sclk_count(void)
diff --git a/platform/msm_shared/include/mmu.h b/platform/msm_shared/include/mmu.h
index 6d001e7..d70872e 100644
--- a/platform/msm_shared/include/mmu.h
+++ b/platform/msm_shared/include/mmu.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011, 2015 The Linux Foundation. All rights reserved.
 
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are
@@ -30,12 +30,23 @@
 #define __MSM_SHARED_MMU_H__
 
 #include <sys/types.h>
+#include <arch/arm/mmu.h>
 
+#ifdef LPAE
+typedef struct {
+	uint64_t paddress;
+	uint64_t vaddress;
+	mapping_type type;
+	uint64_t size;
+	uint64_t flags;
+} mmu_section_t;
+#else
 typedef struct {
 	addr_t paddress;
 	addr_t vaddress;
 	uint32_t num_of_sections;
 	uint32_t flags;
 } mmu_section_t;
+#endif
 
 #endif
diff --git a/platform/msm_shared/include/scm.h b/platform/msm_shared/include/scm.h
index 2cdca66..95f4f46 100644
--- a/platform/msm_shared/include/scm.h
+++ b/platform/msm_shared/include/scm.h
@@ -235,6 +235,7 @@
 #define SVC_MEMORY_PROTECTION       0x0C
 #define TZ_SVC_CRYPTO               0x0A
 #define SCM_SVC_INFO                0x06
+#define TZ_SVC_DLOAD_MODE           0x3
 
 /*Service specific command IDs */
 #define ERR_FATAL_ENABLE            0x0
@@ -461,4 +462,5 @@
 bool is_scm_armv8_support();
 
 int scm_dload_mode(int mode);
+int scm_device_enter_dload();
 #endif
diff --git a/platform/msm_shared/rpmb/rpmb.c b/platform/msm_shared/rpmb/rpmb.c
index de67c46..36bacab 100644
--- a/platform/msm_shared/rpmb/rpmb.c
+++ b/platform/msm_shared/rpmb/rpmb.c
@@ -140,7 +140,9 @@
 	read_rsp.cmd_id = CLIENT_CMD_READ_LK_DEVICE_STATE;
 
 	/* Read the device info */
+	arch_clean_invalidate_cache_range((addr_t) info, sz);
 	ret = qseecom_send_command(app_handle, (void*) &read_req, sizeof(read_req), (void*) &read_rsp, sizeof(read_rsp));
+	arch_invalidate_cache_range((addr_t) info, sz);
 
 	if (ret < 0 || read_rsp.status < 0)
 	{
@@ -165,7 +167,9 @@
 	write_rsp.cmd_id = CLIENT_CMD_WRITE_LK_DEVICE_STATE;
 
 	/* Write the device info */
+	arch_clean_invalidate_cache_range((addr_t) info, sz);
 	ret = qseecom_send_command(app_handle, (void *)&write_req, sizeof(write_req), (void *)&write_rsp, sizeof(write_rsp));
+	arch_invalidate_cache_range((addr_t) info, sz);
 
 	if (ret < 0 || write_rsp.status < 0)
 	{
diff --git a/platform/msm_shared/scm.c b/platform/msm_shared/scm.c
index 5179e8e..cdb0282 100644
--- a/platform/msm_shared/scm.c
+++ b/platform/msm_shared/scm.c
@@ -1334,4 +1334,22 @@
 
 	return ret;
 }
+
+bool scm_device_enter_dload()
+{
+	uint32_t ret = 0;
+
+	scmcall_arg scm_arg = {0};
+	scmcall_ret scm_ret = {0};
+
+	scm_arg.x0 = MAKE_SIP_SCM_CMD(TZ_SVC_DLOAD_MODE, SCM_DLOAD_CMD);
+	ret = scm_call2(&scm_arg, &scm_ret);
+	if (ret)
+		dprintf(CRITICAL, "SCM call to check dload mode failed: %x\n", ret);
+
+	if (!ret && (scm_io_read(TCSR_BOOT_MISC_DETECT) == SCM_DLOAD_MODE))
+		return true;
+
+	return false;
+}
 #endif
diff --git a/project/msm8952.mk b/project/msm8952.mk
index 4cdcf6e..2a88469 100644
--- a/project/msm8952.mk
+++ b/project/msm8952.mk
@@ -66,7 +66,7 @@
 DEFINES += MDTP_EFUSE_START=0
 endif
 
-ENABLE_WDOG_SUPPORT := 1
+ENABLE_WDOG_SUPPORT := 0
 ifeq ($(ENABLE_WDOG_SUPPORT),1)
 DEFINES += WDOG_SUPPORT=1
 endif
diff --git a/project/msm8996.mk b/project/msm8996.mk
index 1eaf598..eecf740 100644
--- a/project/msm8996.mk
+++ b/project/msm8996.mk
@@ -5,6 +5,7 @@
 TARGET := msm8996
 
 MODULES += app/aboot
+MODULES += app/mmutest
 
 ifeq ($(TARGET_BUILD_VARIANT),user)
 DEBUG := 0
@@ -22,6 +23,7 @@
 ENABLE_RPMB_SUPPORT := 1
 ENABLE_GLINK_SUPPORT := 1
 ENABLE_PWM_SUPPORT := true
+ENABLE_LPAE_SUPPORT := 1
 
 DEFINES +=VIRTIO=1
 
@@ -80,6 +82,10 @@
 DEFINES += WDOG_SUPPORT=1
 endif
 
+ifeq ($(ENABLE_LPAE_SUPPORT),1)
+DEFINES += LPAE=1
+endif
+
 CFLAGS += -Werror
 
 #SCM call before entering DLOAD mode
diff --git a/target/msm8996/rules.mk b/target/msm8996/rules.mk
index 3eb7f9e..57e3c4a 100644
--- a/target/msm8996/rules.mk
+++ b/target/msm8996/rules.mk
@@ -6,14 +6,18 @@
 PLATFORM := msm8996
 
 MEMBASE := 0x90B00000 # SDRAM
-MEMSIZE := 0x00300000 # 3MB
+MEMSIZE := 0x00400000 # 4MB
 
 BASE_ADDR    := 0x0000000
 
-SCRATCH_ADDR := 0x90E00000
-SCRATCH_SIZE := 512
+SCRATCH_ADDR := 0x91100000
+SCRATCH_SIZE := 750
 KERNEL_ADDR  := 0x80000000
-KERNEL_SIZE  := 62
+KERNEL_SIZE  := 88
+# LPAE supports only 32 virtual address, L1 pt size is 4
+L1_PT_SZ     := 4
+L2_PT_SZ     := 2
+
 
 DEFINES += DISPLAY_SPLASH_SCREEN=1
 DEFINES += DISPLAY_TYPE_MIPI=1
@@ -37,7 +41,9 @@
 	KERNEL_SIZE=$(KERNEL_SIZE) \
 	RAMDISK_ADDR=$(RAMDISK_ADDR) \
 	SCRATCH_ADDR=$(SCRATCH_ADDR) \
-	SCRATCH_SIZE=$(SCRATCH_SIZE)
+	SCRATCH_SIZE=$(SCRATCH_SIZE) \
+	L1_PT_SZ=$(L1_PT_SZ) \
+	L2_PT_SZ=$(L2_PT_SZ)
 
 
 OBJS += \