Merge pull request #1887 from ambroise-arm/av/a76-cve

Cortex-A76: Optimize CVE_2018_3639 workaround
diff --git a/Makefile b/Makefile
index 8656da5..11d0d7a 100644
--- a/Makefile
+++ b/Makefile
@@ -478,6 +478,12 @@
 ifeq ($(ENABLE_PAUTH),1)
     ifeq ($(CTX_INCLUDE_PAUTH_REGS),0)
         $(error ENABLE_PAUTH=1 requires CTX_INCLUDE_PAUTH_REGS=1)
+    else
+        $(info ENABLE_PAUTH and CTX_INCLUDE_PAUTH_REGS are experimental features)
+    endif
+else
+    ifeq ($(CTX_INCLUDE_PAUTH_REGS),1)
+        $(info CTX_INCLUDE_PAUTH_REGS is an experimental feature)
     endif
 endif
 
diff --git a/bl31/aarch64/runtime_exceptions.S b/bl31/aarch64/runtime_exceptions.S
index aa9d007..c295176 100644
--- a/bl31/aarch64/runtime_exceptions.S
+++ b/bl31/aarch64/runtime_exceptions.S
@@ -419,6 +419,9 @@
 	 */
 	mov	x0, #SMC_UNK
 	str	x0, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+#if CTX_INCLUDE_PAUTH_REGS
+	bl	pauth_context_save
+#endif
 	b	restore_gp_registers_eret
 
 smc_prohibited:
diff --git a/docs/firmware-design.rst b/docs/firmware-design.rst
index 52520ea..66b16fa 100644
--- a/docs/firmware-design.rst
+++ b/docs/firmware-design.rst
@@ -2565,11 +2565,16 @@
    must be set to 1. This will add all pointer authentication system registers
    to the context that is saved when doing a world switch.
 
-   The Trusted Firmware itself has support for pointer authentication at runtime
+   TF-A itself has support for pointer authentication at runtime
    that can be enabled by setting both options ``ENABLE_PAUTH`` and
    ``CTX_INCLUDE_PAUTH_REGS`` to 1. This enables pointer authentication in BL1,
    BL2, BL31, and the TSP if it is used.
 
+   These options are experimental features.
+
+   Note that Pointer Authentication is enabled for the Non-secure world
+   irrespective of the value of these build flags if the CPU supports it.
+
    If ``ARM_ARCH_MAJOR == 8`` and ``ARM_ARCH_MINOR >= 3`` the code footprint of
    enabling PAuth is lower because the compiler will use the optimized
    PAuth instructions rather than the backwards-compatible ones.
diff --git a/docs/plat/imx8m.rst b/docs/plat/imx8m.rst
index ab33a8a..a69f022 100644
--- a/docs/plat/imx8m.rst
+++ b/docs/plat/imx8m.rst
@@ -31,6 +31,7 @@
        CROSS_COMPILE=aarch64-linux-gnu- make PLAT=<Target_SoC> bl31
 
    Target_SoC should be "imx8mq" for i.MX8MQ SoC.
+   Target_SoC should be "imx8mm" for i.MX8MM SoC.
 
 Deploy TF-A Images
 -----------------
diff --git a/docs/user-guide.rst b/docs/user-guide.rst
index d3c63c7..c88b1f6 100644
--- a/docs/user-guide.rst
+++ b/docs/user-guide.rst
@@ -358,11 +358,12 @@
    registers to be included when saving and restoring the CPU context. Default
    is 0.
 
--  ``CTX_INCLUDE_PAUTH_REGS``: Boolean option that, when set to 1, will cause
-   the ARMv8.3-PAuth registers to be included when saving and restoring the CPU
-   context. Note that if the hardware supports this extension and this option is
-   set to 0 the value of the registers will be leaked between Secure and
-   Non-secure worlds if PAuth is used on both sides. The default is 0.
+-  ``CTX_INCLUDE_PAUTH_REGS``: Boolean option that, when set to 1, enables
+   Pointer Authentication for the Secure world. This will cause the ARMv8.3-PAuth
+   registers to be included when saving and restoring the CPU context as part
+   of a world switch. The default value is 0 and this is an experimental feature.
+   Note that Pointer Authentication is enabled for the Non-secure world
+   irrespective of the value of this flag if the CPU supports it.
 
 -  ``DEBUG``: Chooses between a debug and release build. It can take either 0
    (release) or 1 (debug) as values. 0 is the default.
@@ -412,11 +413,11 @@
    and use partitions in EL3 as required. This option defaults to ``0``.
 
 -  ``ENABLE_PAUTH``: Boolean option to enable ARMv8.3 Pointer Authentication
-   (``ARMv8.3-PAuth``) support in the Trusted Firmware itself. Note that this
-   option doesn't affect the saving of the registers introduced with this
-   extension, they are always saved if they are detected regardless of the value
-   of this option. If enabled, it is needed to use a compiler that supports the
-   option ``-msign-return-address``. It defaults to 0.
+   support in the TF-A BL images themselves. If enabled, a compiler that
+   supports the option ``-msign-return-address`` must be used. This flag
+   defaults to 0 and is an experimental feature.
+   Note that Pointer Authentication is enabled for the Non-secure world
+   irrespective of the value of this flag if the CPU supports it.
 
 -  ``ENABLE_PIE``: Boolean option to enable Position Independent Executable(PIE)
    support within generic code in TF-A. This option is currently only supported
@@ -1049,7 +1050,7 @@
 
     ./tools/fiptool/fiptool
 
-Invoking the tool with ``--help`` will print a help message with all available
+Invoking the tool with ``help`` will print a help message with all available
 options.
 
 Example 1: create a new Firmware package ``fip.bin`` that contains BL2 and BL31:
@@ -1114,7 +1115,7 @@
    is important to use a version that is compatible with TF-A and fixes any
    known security vulnerabilities. See `mbed TLS Security Center`_ for more
    information. The latest version of TF-A is tested with tag
-   ``mbedtls-2.12.0``.
+   ``mbedtls-2.16.0``.
 
    The ``drivers/auth/mbedtls/mbedtls_*.mk`` files contain the list of mbed TLS
    source files the modules depend upon.
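
As a concrete illustration of the ``-msign-return-address`` requirement mentioned
above, here is a hedged, stand-alone example (not TF-A source; the function name
is made up):

    /* Compile with e.g. -march=armv8.3-a -msign-return-address=non-leaf and
     * inspect the assembly: the prologue gains PACIASP and the epilogue gains
     * AUTIASP (or a combined RETAA), which is what ENABLE_PAUTH relies on
     * inside the BL images.
     */
    long pauth_demo(long (*fn)(long), long v)
    {
        return fn(v) + 1;   /* non-leaf, so its return address is signed */
    }
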
diff --git a/include/arch/aarch64/arch.h b/include/arch/aarch64/arch.h
index debe872..d3c5bea 100644
--- a/include/arch/aarch64/arch.h
+++ b/include/arch/aarch64/arch.h
@@ -843,4 +843,9 @@
 #define DIT			S3_3_C4_C2_5
 #define DIT_BIT			BIT(24)
 
+/*******************************************************************************
+ * Armv8.5 - new MSR encoding to directly access PSTATE.SSBS field
+ ******************************************************************************/
+#define SSBS			S3_3_C4_C2_6
+
 #endif /* ARCH_H */
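
The new ``SSBS`` encoding is consumed further down in this patch by the
Neoverse N1 reset handler (``msr SSBS, xzr``): clearing PSTATE.SSBS forbids
speculative store bypass, which is the cheap, always-on form of the
CVE-2018-3639 mitigation on cores that implement the SSBS extension. A minimal
C-level sketch of the same write, assuming a GCC/Clang toolchain and an
illustrative helper name:

    #include <stdint.h>

    /* Write zero to PSTATE.SSBS through its raw encoding (S3_3_C4_C2_6),
     * i.e. the SSBS define above. Illustrative only; the firmware does
     * this directly in assembly in the CPU reset handler.
     */
    static inline void disable_spec_store_bypass(void)
    {
        __asm__ volatile("msr S3_3_C4_C2_6, xzr" ::: "memory");
        __asm__ volatile("isb" ::: "memory");
    }
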
diff --git a/include/lib/cpus/aarch64/neoverse_e1.h b/include/lib/cpus/aarch64/neoverse_e1.h
index 7084604..96b4661 100644
--- a/include/lib/cpus/aarch64/neoverse_e1.h
+++ b/include/lib/cpus/aarch64/neoverse_e1.h
@@ -9,7 +9,7 @@
 
 #include <lib/utils_def.h>
 
-#define NEOVERSE_E1_MIDR		U(0x410FD060)
+#define NEOVERSE_E1_MIDR		U(0x410FD4A0)
 
 /*******************************************************************************
  * CPU Extended Control register specific definitions.
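
As a sanity check on the corrected value, the standard MIDR_EL1 field layout
decomposes it as follows (illustrative macros, not part of the patch):
0x410FD4A0 gives implementer 0x41 (Arm), variant 0x0, architecture 0xF, part
number 0xD4A (Neoverse E1) and revision 0x0, whereas the old value carried part
number 0xD06, which is not the E1.

    /* Illustrative MIDR_EL1 field extraction used for the breakdown above. */
    #define MIDR_IMPL(midr)      (((midr) >> 24) & 0xffU)
    #define MIDR_VARIANT(midr)   (((midr) >> 20) & 0xfU)
    #define MIDR_ARCH(midr)      (((midr) >> 16) & 0xfU)
    #define MIDR_PARTNUM(midr)   (((midr) >> 4) & 0xfffU)
    #define MIDR_REVISION(midr)  ((midr) & 0xfU)
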
diff --git a/include/lib/cpus/aarch64/neoverse_zeus.h b/include/lib/cpus/aarch64/neoverse_zeus.h
new file mode 100644
index 0000000..f094727
--- /dev/null
+++ b/include/lib/cpus/aarch64/neoverse_zeus.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2019, ARM Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef NEOVERSE_ZEUS_H
+#define NEOVERSE_ZEUS_H
+
+#define NEOVERSE_ZEUS_MIDR					U(0x410FD400)
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions.
+ ******************************************************************************/
+#define NEOVERSE_ZEUS_CPUECTLR_EL1				S3_0_C15_C1_4
+
+/*******************************************************************************
+ * CPU Power Control register specific definitions
+ ******************************************************************************/
+#define NEOVERSE_ZEUS_CPUPWRCTLR_EL1				S3_0_C15_C2_7
+#define NEOVERSE_ZEUS_CPUPWRCTLR_EL1_CORE_PWRDN_BIT		U(1)
+
+#endif /* NEOVERSE_ZEUS_H */
diff --git a/lib/cpus/aarch64/neoverse_n1.S b/lib/cpus/aarch64/neoverse_n1.S
index c6a5c08..060c625 100644
--- a/lib/cpus/aarch64/neoverse_n1.S
+++ b/lib/cpus/aarch64/neoverse_n1.S
@@ -46,6 +46,10 @@
 
 func neoverse_n1_reset_func
 	mov	x19, x30
+
+	/* Disable speculative loads: clear PSTATE.SSBS (CVE-2018-3639 mitigation) */
+	msr	SSBS, xzr
+
 	bl	cpu_get_rev_var
 	mov	x18, x0
 
diff --git a/lib/cpus/aarch64/neoverse_zeus.S b/lib/cpus/aarch64/neoverse_zeus.S
new file mode 100644
index 0000000..79c8b2f
--- /dev/null
+++ b/lib/cpus/aarch64/neoverse_zeus.S
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2019, ARM Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <common/bl_common.h>
+#include <neoverse_zeus.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+
+	/* ---------------------------------------------
+	 * HW will do the cache maintenance while powering down
+	 * ---------------------------------------------
+	 */
+func neoverse_zeus_core_pwr_dwn
+	/* ---------------------------------------------
+	 * Enable CPU power down bit in power control register
+	 * ---------------------------------------------
+	 */
+	mrs	x0, NEOVERSE_ZEUS_CPUPWRCTLR_EL1
+	orr	x0, x0, #NEOVERSE_ZEUS_CPUPWRCTLR_EL1_CORE_PWRDN_BIT
+	msr	NEOVERSE_ZEUS_CPUPWRCTLR_EL1, x0
+	isb
+	ret
+endfunc neoverse_zeus_core_pwr_dwn
+
+	/*
+	 * Errata printing function for Neoverse Zeus. Must follow AAPCS.
+	 */
+#if REPORT_ERRATA
+func neoverse_zeus_errata_report
+	ret
+endfunc neoverse_zeus_errata_report
+#endif
+
+	/* ---------------------------------------------
+	 * This function provides Neoverse-Zeus specific
+	 * register information for crash reporting.
+	 * It needs to return with x6 pointing to
+	 * a list of register names in ascii and
+	 * x8 - x15 having values of registers to be
+	 * reported.
+	 * ---------------------------------------------
+	 */
+.section .rodata.neoverse_zeus_regs, "aS"
+neoverse_zeus_regs:  /* The ascii list of register names to be reported */
+	.asciz	"cpuectlr_el1", ""
+
+func neoverse_zeus_cpu_reg_dump
+	adr	x6, neoverse_zeus_regs
+	mrs	x8, NEOVERSE_ZEUS_CPUECTLR_EL1
+	ret
+endfunc neoverse_zeus_cpu_reg_dump
+
+declare_cpu_ops neoverse_zeus, NEOVERSE_ZEUS_MIDR, \
+	CPU_NO_RESET_FUNC, \
+	neoverse_zeus_core_pwr_dwn
diff --git a/lib/el3_runtime/aarch64/context.S b/lib/el3_runtime/aarch64/context.S
index 4371cb2..e6ab19b 100644
--- a/lib/el3_runtime/aarch64/context.S
+++ b/lib/el3_runtime/aarch64/context.S
@@ -356,7 +356,7 @@
 	msr	APIAKeyLo_EL1, x9
 	msr	APIAKeyHi_EL1, x10
 
-	ldp	x9, x10, [x11, #CTX_PACIAKEY_LO]
+	ldp	x9, x10, [x11, #CTX_PACIBKEY_LO]
 	msr	APIBKeyLo_EL1, x9
 	msr	APIBKeyHi_EL1, x10
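
This one-identifier fix matters because the A and B instruction keys sit side by
side in the saved CPU context: before the change the APIB key registers were
reloaded from the APIA slots, so the B key programmed on a world switch no
longer matched the one that had been saved. A rough, illustrative mirror of the
layout being indexed (the _HI names are assumed to pair with the _LO offsets
used above):

    #include <stdint.h>

    /* Illustration only -- the real offsets are the CTX_PACI*KEY_* macros. */
    struct pauth_instr_keys {
        uint64_t apiakey_lo;    /* CTX_PACIAKEY_LO                */
        uint64_t apiakey_hi;    /* CTX_PACIAKEY_HI (assumed name) */
        uint64_t apibkey_lo;    /* CTX_PACIBKEY_LO                */
        uint64_t apibkey_hi;    /* CTX_PACIBKEY_HI (assumed name) */
        /* ...data and generic keys follow in the real context... */
    };
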
 
diff --git a/lib/xlat_tables_v2/xlat_tables_core.c b/lib/xlat_tables_v2/xlat_tables_core.c
index 7957b61..0e6a6fa 100644
--- a/lib/xlat_tables_v2/xlat_tables_core.c
+++ b/lib/xlat_tables_v2/xlat_tables_core.c
@@ -325,9 +325,8 @@
 
 	return action;
 }
-
 /*
- * Function that writes to the translation tables and unmaps the
+ * Recursive function that writes to the translation tables and unmaps the
  * specified region.
  */
 static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
@@ -338,137 +337,70 @@
 {
 	assert((level >= ctx->base_level) && (level <= XLAT_TABLE_LEVEL_MAX));
 
-	/*
-	 * data structure to track DESC_TABLE entry before iterate into subtable
-	 * of next translation level. it will be used to restore previous level
-	 * after finish subtable iteration.
-	 */
-	struct desc_table_unmap {
-		uint64_t *table_base;
-		uintptr_t table_idx_va;
-		unsigned int idx;
-	} desc_tables[XLAT_TABLE_LEVEL_MAX + 1] = {
-		{NULL, 0U, XLAT_TABLE_ENTRIES}, };
+	uint64_t *subtable;
+	uint64_t desc;
 
-	unsigned int this_level = level;
-	uint64_t *this_base = table_base;
-	unsigned int max_entries = table_entries;
-	size_t level_size = XLAT_BLOCK_SIZE(this_level);
-	unsigned int table_idx;
 	uintptr_t table_idx_va;
+	uintptr_t table_idx_end_va; /* End VA of this entry */
 
 	uintptr_t region_end_va = mm->base_va + mm->size - 1U;
 
+	unsigned int table_idx;
+
 	table_idx_va = xlat_tables_find_start_va(mm, table_base_va, level);
 	table_idx = xlat_tables_va_to_index(table_base_va, table_idx_va, level);
 
-	while (this_base != NULL) {
+	while (table_idx < table_entries) {
 
-		uint64_t desc;
-		uint64_t desc_type;
-		uintptr_t table_idx_end_va; /* End VA of this entry */
-		action_t action;
+		table_idx_end_va = table_idx_va + XLAT_BLOCK_SIZE(level) - 1U;
 
-		/* finish current xlat level iteration. */
-		if (table_idx >= max_entries) {
-			if (this_level > ctx->base_level) {
-				xlat_table_dec_regions_count(ctx, this_base);
-			}
+		desc = table_base[table_idx];
+		uint64_t desc_type = desc & DESC_MASK;
 
-			if (this_level > level) {
-				uint64_t *subtable;
-
-				/* back from subtable iteration, restore
-				 * previous DESC_TABLE entry.
-				 */
-				this_level--;
-				this_base = desc_tables[this_level].table_base;
-				table_idx = desc_tables[this_level].idx;
-				table_idx_va =
-					desc_tables[this_level].table_idx_va;
-				level_size = XLAT_BLOCK_SIZE(this_level);
-
-				if (this_level == level) {
-					max_entries = table_entries;
-				} else {
-					max_entries = XLAT_TABLE_ENTRIES;
-				}
-
-				desc = this_base[table_idx];
-				subtable =  (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
-				/*
-				 * If the subtable is now empty, remove its reference.
-				 */
-				if (xlat_table_is_empty(ctx, subtable)) {
-					this_base[table_idx] = INVALID_DESC;
-					xlat_arch_tlbi_va(table_idx_va,
-							ctx->xlat_regime);
-				}
-				table_idx++;
-				table_idx_va += level_size;
-
-			} else {
-				/* reached end of top level, exit.*/
-				this_base = NULL;
-				break;
-			}
-
-		}
-
-		/* If reached the end of the region, stop iterating entries in
-		 * current xlat level.
-		 */
-		if (region_end_va <= table_idx_va) {
-			table_idx = max_entries;
-			continue;
-		}
-
-
-		table_idx_end_va = table_idx_va + XLAT_BLOCK_SIZE(this_level) - 1U;
-
-		desc = this_base[table_idx];
-		desc_type = desc & DESC_MASK;
-
-		action = xlat_tables_unmap_region_action(mm, table_idx_va,
-							 table_idx_end_va,
-							 this_level,
-							 desc_type);
+		action_t action = xlat_tables_unmap_region_action(mm,
+				table_idx_va, table_idx_end_va, level,
+				desc_type);
 
 		if (action == ACTION_WRITE_BLOCK_ENTRY) {
-			this_base[table_idx] = INVALID_DESC;
+
+			table_base[table_idx] = INVALID_DESC;
 			xlat_arch_tlbi_va(table_idx_va, ctx->xlat_regime);
 
-			table_idx++;
-			table_idx_va += level_size;
 		} else if (action == ACTION_RECURSE_INTO_TABLE) {
 
-			uint64_t *subtable;
-			uintptr_t base_va;
-
 			subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
 
-			desc_tables[this_level].table_base = this_base;
-			desc_tables[this_level].table_idx_va = table_idx_va;
-			base_va = table_idx_va;
-			desc_tables[this_level].idx = table_idx;
+			/* Recurse to write into subtable */
+			xlat_tables_unmap_region(ctx, mm, table_idx_va,
+						 subtable, XLAT_TABLE_ENTRIES,
+						 level + 1U);
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+			xlat_clean_dcache_range((uintptr_t)subtable,
+				XLAT_TABLE_ENTRIES * sizeof(uint64_t));
+#endif
+			/*
+			 * If the subtable is now empty, remove its reference.
+			 */
+			if (xlat_table_is_empty(ctx, subtable)) {
+				table_base[table_idx] = INVALID_DESC;
+				xlat_arch_tlbi_va(table_idx_va,
+						  ctx->xlat_regime);
+			}
 
-			this_base = subtable;
-			this_level++;
-
-			max_entries = XLAT_TABLE_ENTRIES;
-			level_size = XLAT_BLOCK_SIZE(this_level);
-
-			table_idx_va = xlat_tables_find_start_va(mm,
-					base_va, this_level);
-			table_idx = xlat_tables_va_to_index(base_va,
-					table_idx_va, this_level);
 		} else {
 			assert(action == ACTION_NONE);
-
-			table_idx++;
-			table_idx_va += level_size;
 		}
+
+		table_idx++;
+		table_idx_va += XLAT_BLOCK_SIZE(level);
+
+		/* If reached the end of the region, exit */
+		if (region_end_va <= table_idx_va)
+			break;
 	}
+
+	if (level > ctx->base_level)
+		xlat_table_dec_regions_count(ctx, table_base);
 }
 
 #endif /* PLAT_XLAT_TABLES_DYNAMIC */
@@ -605,169 +537,105 @@
 }
 
 /*
- * Function that writes to the translation tables and maps the
+ * Recursive function that writes to the translation tables and maps the
  * specified region. On success, it returns the VA of the last byte that was
  * successfully mapped. On error, it returns the VA of the next entry that
  * should have been mapped.
  */
 static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
-				   const uintptr_t table_base_va,
+				   uintptr_t table_base_va,
 				   uint64_t *const table_base,
 				   unsigned int table_entries,
 				   unsigned int level)
 {
-
 	assert((level >= ctx->base_level) && (level <= XLAT_TABLE_LEVEL_MAX));
 
-	/*
-	 * data structure to track DESC_TABLE entry before iterate into subtable
-	 * of next translation level. it will be used to restore previous level
-	 * after finish subtable iteration.
-	 */
-	struct desc_table_map {
-		uint64_t *table_base;
-		uintptr_t table_idx_va;
-		unsigned int idx;
-	} desc_tables[XLAT_TABLE_LEVEL_MAX + 1] = {
-		{NULL, 0U, XLAT_TABLE_ENTRIES}, };
-
-	unsigned int this_level = level;
-	uint64_t *this_base = table_base;
-	unsigned int max_entries = table_entries;
-	size_t level_size = XLAT_BLOCK_SIZE(this_level);
 	uintptr_t mm_end_va = mm->base_va + mm->size - 1U;
 
 	uintptr_t table_idx_va;
+	unsigned long long table_idx_pa;
+
+	uint64_t *subtable;
+	uint64_t desc;
+
 	unsigned int table_idx;
 
 	table_idx_va = xlat_tables_find_start_va(mm, table_base_va, level);
 	table_idx = xlat_tables_va_to_index(table_base_va, table_idx_va, level);
 
-	while (this_base != NULL) {
-
-		uint64_t desc;
-		uint64_t desc_type;
-		unsigned long long table_idx_pa;
-		action_t action;
-
-		/* finish current xlat level iteration. */
-		if (table_idx >= max_entries) {
-			if (this_level <= level) {
-				this_base = NULL;
-				break;
-			} else {
-
-				/* back from subtable iteration, restore
-				 * previous DESC_TABLE entry.
-				 */
-				this_level--;
-				level_size = XLAT_BLOCK_SIZE(this_level);
-				this_base = desc_tables[this_level].table_base;
-				table_idx = desc_tables[this_level].idx;
-				if (this_level == level) {
-					max_entries = table_entries;
-				} else {
-					max_entries = XLAT_TABLE_ENTRIES;
-				}
-#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
-				uintptr_t subtable;
-				desc = this_base[table_idx];
-				subtable = (uintptr_t)(desc & TABLE_ADDR_MASK);
-				xlat_clean_dcache_range(subtable,
-					XLAT_TABLE_ENTRIES * sizeof(uint64_t));
+#if PLAT_XLAT_TABLES_DYNAMIC
+	if (level > ctx->base_level)
+		xlat_table_inc_regions_count(ctx, table_base);
 #endif
 
-				table_idx++;
-				table_idx_va =
-					desc_tables[this_level].table_idx_va +
-					level_size;
-			}
-		}
+	while (table_idx < table_entries) {
 
-		desc = this_base[table_idx];
-		desc_type = desc & DESC_MASK;
+		desc = table_base[table_idx];
 
 		table_idx_pa = mm->base_pa + table_idx_va - mm->base_va;
 
-		/* If reached the end of the region, simply exit since we
-		 * already write all BLOCK entries and create all required
-		 * subtables.
-		 */
-		if (mm_end_va <= table_idx_va) {
-			this_base = NULL;
-			break;
-		}
-
-		action = xlat_tables_map_region_action(mm, desc_type,
-				table_idx_pa, table_idx_va, this_level);
+		action_t action = xlat_tables_map_region_action(mm,
+			(uint32_t)(desc & DESC_MASK), table_idx_pa,
+			table_idx_va, level);
 
 		if (action == ACTION_WRITE_BLOCK_ENTRY) {
-			this_base[table_idx] = xlat_desc(ctx, mm->attr,
-					table_idx_pa, this_level);
-			table_idx++;
-			table_idx_va += level_size;
+
+			table_base[table_idx] =
+				xlat_desc(ctx, (uint32_t)mm->attr, table_idx_pa,
+					  level);
+
 		} else if (action == ACTION_CREATE_NEW_TABLE) {
+			uintptr_t end_va;
 
-			uintptr_t base_va;
-
-			uint64_t *subtable = xlat_table_get_empty(ctx);
+			subtable = xlat_table_get_empty(ctx);
 			if (subtable == NULL) {
-				/* Not enough free tables to map this region. */
+				/* Not enough free tables to map this region */
 				return table_idx_va;
 			}
 
 			/* Point to new subtable from this one. */
-			this_base[table_idx] = TABLE_DESC | (unsigned long)subtable;
+			table_base[table_idx] = TABLE_DESC | (unsigned long)subtable;
 
-			desc_tables[this_level].table_base = this_base;
-			desc_tables[this_level].table_idx_va = table_idx_va;
-			desc_tables[this_level].idx = table_idx;
-			base_va = table_idx_va;
-
-			this_level++;
-			this_base = subtable;
-			level_size = XLAT_BLOCK_SIZE(this_level);
-			table_idx_va = xlat_tables_find_start_va(mm, base_va,
-					this_level);
-			table_idx = xlat_tables_va_to_index(base_va,
-					table_idx_va, this_level);
-			max_entries = XLAT_TABLE_ENTRIES;
-
-#if PLAT_XLAT_TABLES_DYNAMIC
-			if (this_level > ctx->base_level) {
-				xlat_table_inc_regions_count(ctx, subtable);
-			}
+			/* Recurse to write into subtable */
+			end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
+					       subtable, XLAT_TABLE_ENTRIES,
+					       level + 1U);
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+			xlat_clean_dcache_range((uintptr_t)subtable,
+				XLAT_TABLE_ENTRIES * sizeof(uint64_t));
 #endif
+			if (end_va !=
+				(table_idx_va + XLAT_BLOCK_SIZE(level) - 1U))
+				return end_va;
 
 		} else if (action == ACTION_RECURSE_INTO_TABLE) {
+			uintptr_t end_va;
 
-			uintptr_t base_va;
-			uint64_t *subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
-
-			desc_tables[this_level].table_base = this_base;
-			desc_tables[this_level].table_idx_va = table_idx_va;
-			desc_tables[this_level].idx = table_idx;
-			base_va = table_idx_va;
-
-			this_level++;
-			level_size = XLAT_BLOCK_SIZE(this_level);
-			table_idx_va = xlat_tables_find_start_va(mm, base_va,
-					this_level);
-			table_idx = xlat_tables_va_to_index(base_va,
-					table_idx_va, this_level);
-			this_base = subtable;
-			max_entries = XLAT_TABLE_ENTRIES;
-
-#if PLAT_XLAT_TABLES_DYNAMIC
-			if (this_level > ctx->base_level) {
-				xlat_table_inc_regions_count(ctx, subtable);
-			}
+			subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
+			/* Recurse to write into subtable */
+			end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
+					       subtable, XLAT_TABLE_ENTRIES,
+					       level + 1U);
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+			xlat_clean_dcache_range((uintptr_t)subtable,
+				XLAT_TABLE_ENTRIES * sizeof(uint64_t));
 #endif
+			if (end_va !=
+				(table_idx_va + XLAT_BLOCK_SIZE(level) - 1U))
+				return end_va;
+
 		} else {
+
 			assert(action == ACTION_NONE);
-			table_idx++;
-			table_idx_va += level_size;
+
 		}
+
+		table_idx++;
+		table_idx_va += XLAT_BLOCK_SIZE(level);
+
+		/* If reached the end of the region, exit */
+		if (mm_end_va <= table_idx_va)
+			break;
 	}
 
 	return table_idx_va - 1U;
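
The return-value convention restored above (VA of the last byte mapped on
success, VA of the next entry that should have been mapped on error) is what
lets callers notice that the table pool ran dry. A paraphrased caller pattern,
as a hedged sketch rather than a verbatim quote of the library's own callers (it
assumes the private ``xlat_ctx_t``/``mmap_region_t`` definitions are in scope):

    #include <errno.h>
    #include <stdint.h>

    static int map_one_region(xlat_ctx_t *ctx, mmap_region_t *mm)
    {
        uintptr_t end_va = xlat_tables_map_region(ctx, mm, 0U,
                                                  ctx->base_table,
                                                  ctx->base_table_entries,
                                                  ctx->base_level);

        if (end_va != (mm->base_va + mm->size - 1U)) {
            /* Ran out of free translation tables before finishing. */
            return -ENOMEM;
        }
        return 0;
    }
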
diff --git a/lib/xlat_tables_v2/xlat_tables_utils.c b/lib/xlat_tables_v2/xlat_tables_utils.c
index 7d0449a..f5848a2 100644
--- a/lib/xlat_tables_v2/xlat_tables_utils.c
+++ b/lib/xlat_tables_v2/xlat_tables_utils.c
@@ -109,7 +109,7 @@
 		"%s(%d invalid descriptors omitted)\n";
 
 /*
- * Function that reads the translation tables passed as an argument
+ * Recursive function that reads the translation tables passed as an argument
  * and prints their status.
  */
 static void xlat_tables_print_internal(xlat_ctx_t *ctx, uintptr_t table_base_va,
@@ -118,23 +118,10 @@
 {
 	assert(level <= XLAT_TABLE_LEVEL_MAX);
 
-	/*
-	 * data structure to track DESC_TABLE entry before iterate into subtable
-	 * of next translation level. it will be restored after return from
-	 * subtable iteration.
-	 */
-	struct desc_table {
-		const uint64_t *table_base;
-		uintptr_t table_idx_va;
-		unsigned int idx;
-	} desc_tables[XLAT_TABLE_LEVEL_MAX + 1] = {
-		{NULL, 0U, XLAT_TABLE_ENTRIES}, };
-	unsigned int this_level = level;
-	const uint64_t *this_base = table_base;
-	unsigned int max_entries = table_entries;
-	size_t level_size = XLAT_BLOCK_SIZE(this_level);
-	unsigned int table_idx = 0U;
+	uint64_t desc;
 	uintptr_t table_idx_va = table_base_va;
+	unsigned int table_idx = 0U;
+	size_t level_size = XLAT_BLOCK_SIZE(level);
 
 	/*
 	 * Keep track of how many invalid descriptors are counted in a row.
@@ -144,110 +131,67 @@
 	 */
 	int invalid_row_count = 0;
 
-	while (this_base != NULL) {
-		/* finish current xlat level */
-		if (table_idx >= max_entries) {
+	while (table_idx < table_entries) {
+
+		desc = table_base[table_idx];
+
+		if ((desc & DESC_MASK) == INVALID_DESC) {
+
+			if (invalid_row_count == 0) {
+				printf("%sVA:0x%lx size:0x%zx\n",
+				       level_spacers[level],
+				       table_idx_va, level_size);
+			}
+			invalid_row_count++;
+
+		} else {
+
 			if (invalid_row_count > 1) {
 				printf(invalid_descriptors_ommited,
-					  level_spacers[this_level],
-					  invalid_row_count - 1);
+				       level_spacers[level],
+				       invalid_row_count - 1);
 			}
 			invalid_row_count = 0;
 
-			/* no parent level to iterate. */
-			if (this_level <= level) {
-				this_base = NULL;
-				table_idx = max_entries + 1;
-			} else {
-				/* retore previous DESC_TABLE entry and start
-				 * to iterate.
-				 */
-				this_level--;
-				level_size = XLAT_BLOCK_SIZE(this_level);
-				this_base = desc_tables[this_level].table_base;
-				table_idx = desc_tables[this_level].idx;
-				table_idx_va =
-					desc_tables[this_level].table_idx_va;
-				if (this_level == level) {
-					max_entries = table_entries;
-				} else {
-					max_entries = XLAT_TABLE_ENTRIES;
-				}
-
-				assert(this_base != NULL);
-			}
-		} else {
-			uint64_t desc = this_base[table_idx];
-
-			if ((desc & DESC_MASK) == INVALID_DESC) {
-				if (invalid_row_count == 0) {
-					printf("%sVA:0x%lx size:0x%zx\n",
-						  level_spacers[this_level],
-						  table_idx_va, level_size);
-				}
-				invalid_row_count++;
-				table_idx++;
-				table_idx_va += level_size;
-			} else {
-				if (invalid_row_count > 1) {
-					printf(invalid_descriptors_ommited,
-						  level_spacers[this_level],
-						  invalid_row_count - 1);
-				}
-				invalid_row_count = 0;
+			/*
+			 * Check if this is a table or a block. Tables are only
+			 * allowed in levels other than 3, but DESC_PAGE has the
+			 * same value as DESC_TABLE, so we need to check.
+			 */
+			if (((desc & DESC_MASK) == TABLE_DESC) &&
+					(level < XLAT_TABLE_LEVEL_MAX)) {
 				/*
-				 * Check if this is a table or a block. Tables
-				 * are only allowed in levels other than 3, but
-				 * DESC_PAGE has the same value as DESC_TABLE,
-				 * so we need to check.
+				 * Do not print any PA for a table descriptor,
+				 * as it doesn't directly map physical memory
+				 * but instead points to the next translation
+				 * table in the translation table walk.
 				 */
+				printf("%sVA:0x%lx size:0x%zx\n",
+				       level_spacers[level],
+				       table_idx_va, level_size);
 
-				if (((desc & DESC_MASK) == TABLE_DESC) &&
-				    (this_level < XLAT_TABLE_LEVEL_MAX)) {
-					uintptr_t addr_inner;
+				uintptr_t addr_inner = desc & TABLE_ADDR_MASK;
 
-					/*
-					 * Do not print any PA for a table
-					 * descriptor, as it doesn't directly
-					 * map physical memory but instead
-					 * points to the next translation
-					 * table in the translation table walk.
-					 */
-					printf("%sVA:0x%lx size:0x%zx\n",
-					       level_spacers[this_level],
-					       table_idx_va, level_size);
-
-					addr_inner = desc & TABLE_ADDR_MASK;
-					/* save current xlat level */
-					desc_tables[this_level].table_base =
-						this_base;
-					desc_tables[this_level].idx =
-						table_idx + 1;
-					desc_tables[this_level].table_idx_va =
-						table_idx_va + level_size;
-
-					/* start iterating next level entries */
-					this_base = (uint64_t *)addr_inner;
-					max_entries = XLAT_TABLE_ENTRIES;
-					this_level++;
-					level_size =
-						XLAT_BLOCK_SIZE(this_level);
-					table_idx = 0U;
-				} else {
-					printf("%sVA:0x%lx PA:0x%llx size:0x%zx ",
-					       level_spacers[this_level],
-					       table_idx_va,
-					       (uint64_t)(desc & TABLE_ADDR_MASK),
-					       level_size);
-					xlat_desc_print(ctx, desc);
-					printf("\n");
-
-					table_idx++;
-					table_idx_va += level_size;
-
-				}
+				xlat_tables_print_internal(ctx, table_idx_va,
+					(uint64_t *)addr_inner,
+					XLAT_TABLE_ENTRIES, level + 1U);
+			} else {
+				printf("%sVA:0x%lx PA:0x%llx size:0x%zx ",
+				       level_spacers[level], table_idx_va,
+				       (uint64_t)(desc & TABLE_ADDR_MASK),
+				       level_size);
+				xlat_desc_print(ctx, desc);
+				printf("\n");
 			}
 		}
+
+		table_idx++;
+		table_idx_va += level_size;
+	}
+
+	if (invalid_row_count > 1) {
+		printf(invalid_descriptors_ommited,
+		       level_spacers[level], invalid_row_count - 1);
 	}
 }
 
diff --git a/plat/arm/board/fvp/include/platform_def.h b/plat/arm/board/fvp/include/platform_def.h
index 780e6fc..6856a28 100644
--- a/plat/arm/board/fvp/include/platform_def.h
+++ b/plat/arm/board/fvp/include/platform_def.h
@@ -146,13 +146,7 @@
 #elif defined(IMAGE_BL2U)
 # define PLATFORM_STACK_SIZE		UL(0x400)
 #elif defined(IMAGE_BL31)
-# if ENABLE_SPM
-#  define PLATFORM_STACK_SIZE		UL(0x600)
-# elif PLAT_XLAT_TABLES_DYNAMIC
 #  define PLATFORM_STACK_SIZE		UL(0x800)
-# else
-#  define PLATFORM_STACK_SIZE		UL(0x400)
-# endif
 #elif defined(IMAGE_BL32)
 # define PLATFORM_STACK_SIZE		UL(0x440)
 #endif
diff --git a/plat/arm/board/fvp/platform.mk b/plat/arm/board/fvp/platform.mk
index 8e69399..61a3734 100644
--- a/plat/arm/board/fvp/platform.mk
+++ b/plat/arm/board/fvp/platform.mk
@@ -104,7 +104,9 @@
 				lib/cpus/aarch64/cortex_a75.S			\
 				lib/cpus/aarch64/cortex_a76.S			\
 				lib/cpus/aarch64/neoverse_n1.S			\
-				lib/cpus/aarch64/cortex_deimos.S
+				lib/cpus/aarch64/cortex_deimos.S		\
+				lib/cpus/aarch64/neoverse_zeus.S
+
 else
 FVP_CPU_LIBS		+=	lib/cpus/aarch32/cortex_a32.S
 endif
diff --git a/plat/arm/common/aarch64/arm_pauth.c b/plat/arm/common/aarch64/arm_pauth.c
index c847119..a685c31 100644
--- a/plat/arm/common/aarch64/arm_pauth.c
+++ b/plat/arm/common/aarch64/arm_pauth.c
@@ -9,11 +9,9 @@
 
 /*
  * Instruction pointer authentication key A. The low 64-bit are at [0], and the
- * high bits at [1]. They are run-time constants so they are placed in the
- * rodata section. They are written before MMU is turned on and the permissions
- * are effective.
+ * high bits at [1].
  */
-uint64_t plat_apiakey[2] __section("rodata.apiakey");
+uint64_t plat_apiakey[2];
 
 /*
  * This is only a toy implementation to generate a seemingly random 128-bit key
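
For readers wondering what such a "toy" generator looks like, the sketch below
is illustrative only (the function name and mixing scheme are assumptions, not
the file's actual code): it folds two readily available values into 128 bits,
which is acceptable for bring-up but not a substitute for a real entropy source.

    #include <stdint.h>

    extern uint64_t plat_apiakey[2];

    /* Toy APIA key derivation, for illustration: mix the caller's return
     * address and frame address. Production platforms should use a proper
     * hardware random number generator instead.
     */
    uint64_t *plat_init_apiakey_sketch(void)
    {
        uint64_t ra = (uint64_t)__builtin_return_address(0U);
        uint64_t fa = (uint64_t)__builtin_frame_address(0U);

        plat_apiakey[0] = (ra << 13) ^ fa;
        plat_apiakey[1] = (fa << 15) ^ ra;

        return plat_apiakey;
    }
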
diff --git a/plat/imx/imx8m/gpc_common.c b/plat/imx/imx8m/gpc_common.c
new file mode 100644
index 0000000..eb2801c
--- /dev/null
+++ b/plat/imx/imx8m/gpc_common.c
@@ -0,0 +1,237 @@
+/*
+ * Copyright (c) 2018-2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdbool.h>
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <lib/mmio.h>
+#include <lib/psci/psci.h>
+
+#include <gpc.h>
+#include <imx8m_psci.h>
+#include <plat_imx8.h>
+
+static uint32_t gpc_imr_offset[] = { 0x30, 0x40, 0x1c0, 0x1d0, };
+
+#pragma weak imx_set_cpu_pwr_off
+#pragma weak imx_set_cpu_pwr_on
+#pragma weak imx_set_cpu_lpm
+#pragma weak imx_set_cluster_powerdown
+
+void imx_set_cpu_secure_entry(unsigned int core_id, uintptr_t sec_entrypoint)
+{
+	uint64_t temp_base;
+
+	temp_base = (uint64_t) sec_entrypoint;
+	temp_base >>= 2;
+
+	mmio_write_32(IMX_SRC_BASE + SRC_GPR1_OFFSET + (core_id << 3),
+		((uint32_t)(temp_base >> 22) & 0xffff));
+	mmio_write_32(IMX_SRC_BASE + SRC_GPR1_OFFSET + (core_id << 3) + 4,
+		((uint32_t)temp_base & 0x003fffff));
+}
+
+void imx_set_cpu_pwr_off(unsigned int core_id)
+{
+	/* enable the wfi power down of the core */
+	mmio_setbits_32(IMX_GPC_BASE + LPCR_A53_AD, COREx_WFI_PDN(core_id));
+	/* assert the pcg pcr bit of the core */
+	mmio_setbits_32(IMX_GPC_BASE + COREx_PGC_PCR(core_id), 0x1);
+}
+
+void imx_set_cpu_pwr_on(unsigned int core_id)
+{
+	/* clear the wfi power down bit of the core */
+	mmio_clrbits_32(IMX_GPC_BASE + LPCR_A53_AD, COREx_WFI_PDN(core_id));
+	/* assert the ncpuporeset */
+	mmio_clrbits_32(IMX_SRC_BASE + SRC_A53RCR1, (1 << core_id));
+	/* assert the pcg pcr bit of the core */
+	mmio_setbits_32(IMX_GPC_BASE + COREx_PGC_PCR(core_id), 0x1);
+	/* sw power up the core */
+	mmio_setbits_32(IMX_GPC_BASE + CPU_PGC_UP_TRG, (1 << core_id));
+
+	/* wait for the power up finished */
+	while ((mmio_read_32(IMX_GPC_BASE + CPU_PGC_UP_TRG) & (1 << core_id)) != 0)
+		;
+
+	/* deassert the pcg pcr bit of the core */
+	mmio_clrbits_32(IMX_GPC_BASE + COREx_PGC_PCR(core_id), 0x1);
+	/* deassert the ncpuporeset */
+	mmio_setbits_32(IMX_SRC_BASE + SRC_A53RCR1, (1 << core_id));
+}
+
+void imx_set_cpu_lpm(unsigned int core_id, bool pdn)
+{
+	if (pdn) {
+		/* enable the core WFI PDN & IRQ PUP */
+		mmio_setbits_32(IMX_GPC_BASE + LPCR_A53_AD, COREx_WFI_PDN(core_id) |
+				COREx_IRQ_WUP(core_id));
+		/* assert the pcg pcr bit of the core */
+		mmio_setbits_32(IMX_GPC_BASE + COREx_PGC_PCR(core_id), 0x1);
+	} else {
+		/* disable the core WFI PDN & IRQ PUP */
+		mmio_clrbits_32(IMX_GPC_BASE + LPCR_A53_AD, COREx_WFI_PDN(core_id) |
+				COREx_IRQ_WUP(core_id));
+		/* deassert the pcg pcr bit of the core */
+		mmio_clrbits_32(IMX_GPC_BASE + COREx_PGC_PCR(core_id), 0x1);
+	}
+}
+
+/*
+ * The plat and noc domains can only be powered up & down by the slot method:
+ * slot0: plat power down; slot1: noc power down; slot2: noc power up;
+ * slot3: plat power up. plat's pup & pdn ack is used by default. If the
+ * noc is configured to power down, then noc's pdn ack should be used.
+ */
+static void imx_a53_plat_slot_config(bool pdn)
+{
+	if (pdn) {
+		mmio_setbits_32(IMX_GPC_BASE + SLTx_CFG(0), PLAT_PDN_SLT_CTRL);
+		mmio_setbits_32(IMX_GPC_BASE + SLTx_CFG(3), PLAT_PUP_SLT_CTRL);
+		mmio_write_32(IMX_GPC_BASE + PGC_ACK_SEL_A53, A53_PLAT_PDN_ACK |
+			A53_PLAT_PUP_ACK);
+		mmio_setbits_32(IMX_GPC_BASE + PLAT_PGC_PCR, 0x1);
+	} else {
+		mmio_clrbits_32(IMX_GPC_BASE + SLTx_CFG(0), PLAT_PDN_SLT_CTRL);
+		mmio_clrbits_32(IMX_GPC_BASE + SLTx_CFG(3), PLAT_PUP_SLT_CTRL);
+		mmio_write_32(IMX_GPC_BASE + PGC_ACK_SEL_A53, A53_DUMMY_PUP_ACK |
+			A53_DUMMY_PDN_ACK);
+		mmio_clrbits_32(IMX_GPC_BASE + PLAT_PGC_PCR, 0x1);
+	}
+}
+
+void imx_set_cluster_standby(bool enter)
+{
+	/*
+	 * Enable BIT 6 of the A53 AD register to make sure the system
+	 * does not enter LPM mode.
+	 */
+	if (enter)
+		mmio_setbits_32(IMX_GPC_BASE + LPCR_A53_AD, (1 << 6));
+	else
+		mmio_clrbits_32(IMX_GPC_BASE + LPCR_A53_AD, (1 << 6));
+}
+
+/* i.MX8MQ needs to override it */
+void imx_set_cluster_powerdown(unsigned int last_core, uint8_t power_state)
+{
+	uint32_t val;
+
+	if (!is_local_state_run(power_state)) {
+		/* config C0~1's LPM, enable a53 clock off in LPM */
+		mmio_clrsetbits_32(IMX_GPC_BASE + LPCR_A53_BSC, A53_CLK_ON_LPM,
+			LPM_MODE(power_state));
+		/* config C2-3's LPM */
+		mmio_setbits_32(IMX_GPC_BASE + LPCR_A53_BSC2, LPM_MODE(power_state));
+
+		/* enable PLAT/SCU power down */
+		val = mmio_read_32(IMX_GPC_BASE + LPCR_A53_AD);
+		val &= ~EN_L2_WFI_PDN;
+		/* L2 cache memory is on in WAIT mode */
+		if (is_local_state_off(power_state))
+			val |= (L2PGE | EN_PLAT_PDN);
+		else
+			val |= EN_PLAT_PDN;
+
+		mmio_write_32(IMX_GPC_BASE + LPCR_A53_AD, val);
+
+		imx_a53_plat_slot_config(true);
+	} else {
+		/* clear the slot and ack for cluster power down */
+		imx_a53_plat_slot_config(false);
+		/* reverse the cluster level setting */
+		mmio_clrsetbits_32(IMX_GPC_BASE + LPCR_A53_BSC, 0xf, A53_CLK_ON_LPM);
+		mmio_clrbits_32(IMX_GPC_BASE + LPCR_A53_BSC2, 0xf);
+
+		/* clear PLAT/SCU power down */
+		mmio_clrsetbits_32(IMX_GPC_BASE + LPCR_A53_AD, (L2PGE | EN_PLAT_PDN),
+			EN_L2_WFI_PDN);
+	}
+}
+
+static unsigned int gicd_read_isenabler(uintptr_t base, unsigned int id)
+{
+	unsigned int n = id >> ISENABLER_SHIFT;
+
+	return mmio_read_32(base + GICD_ISENABLER + (n << 2));
+}
+
+/*
+ * The GIC's clock is gated in system suspend, so the GIC has no ability
+ * to wake up the system. We need to configure the IMR based on the IRQ
+ * enable status in the GIC, then the GPC will monitor the wakeup IRQs.
+ */
+void imx_set_sys_wakeup(unsigned int last_core, bool pdn)
+{
+	uint32_t irq_mask;
+	uintptr_t gicd_base = PLAT_GICD_BASE;
+
+	if (pdn)
+		mmio_clrsetbits_32(IMX_GPC_BASE + LPCR_A53_BSC, A53_CORE_WUP_SRC(last_core),
+			IRQ_SRC_A53_WUP);
+	else
+		mmio_clrsetbits_32(IMX_GPC_BASE + LPCR_A53_BSC, IRQ_SRC_A53_WUP,
+			A53_CORE_WUP_SRC(last_core));
+
+	/* clear last core's IMR based on GIC's mask setting */
+	for (int i = 0; i < IRQ_IMR_NUM; i++) {
+		if (pdn)
+			/* set the wakeup irq based on the GIC mask */
+			irq_mask = ~gicd_read_isenabler(gicd_base, 32 * (i + 1));
+		else
+			irq_mask = IMR_MASK_ALL;
+
+		mmio_write_32(IMX_GPC_BASE + gpc_imr_offset[last_core] + i * 4,
+			      irq_mask);
+	}
+}
+
+#pragma weak imx_noc_slot_config
+/*
+ * This function only needs to be overridden by platforms
+ * that support noc power down, for example: imx8mm.
+ * Otherwise, keep it empty.
+ */
+void imx_noc_slot_config(bool pdn)
+{
+
+}
+
+/* this is common for all imx8m soc */
+void imx_set_sys_lpm(unsigned int last_core, bool retention)
+{
+	uint32_t val;
+
+	val = mmio_read_32(IMX_GPC_BASE + SLPCR);
+	val &= ~(SLPCR_EN_DSM | SLPCR_VSTBY | SLPCR_SBYOS |
+		 SLPCR_BYPASS_PMIC_READY | SLPCR_A53_FASTWUP_STOP_MODE);
+
+	if (retention)
+		val |= (SLPCR_EN_DSM | SLPCR_VSTBY | SLPCR_SBYOS |
+			SLPCR_BYPASS_PMIC_READY | SLPCR_A53_FASTWUP_STOP_MODE);
+
+	mmio_write_32(IMX_GPC_BASE + SLPCR, val);
+
+	/* config the noc power down */
+	imx_noc_slot_config(retention);
+
+	/* config wakeup irqs' mask in gpc */
+	imx_set_sys_wakeup(last_core, retention);
+}
+
+void imx_set_rbc_count(void)
+{
+	mmio_setbits_32(IMX_GPC_BASE + SLPCR, SLPCR_RBC_EN |
+		(0x8 << SLPCR_RBC_COUNT_SHIFT));
+}
+
+void imx_clear_rbc_count(void)
+{
+	mmio_clrbits_32(IMX_GPC_BASE + SLPCR, SLPCR_RBC_EN |
+		(0x3f << SLPCR_RBC_COUNT_SHIFT));
+}
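
A note on ``imx_set_cpu_secure_entry()`` near the top of this file: it stores
``sec_entrypoint >> 2`` split across two SRC_GPR1 words per core (the upper 16
bits first, then the lower 22 bits). The inverse operation below makes the
packing easy to verify; the helper and its parameter names are made up for
illustration. For example, BL31_BASE = 0x920000 packs to hi = 0x0,
lo = 0x248000 and unpacks back to 0x920000.

    #include <stdint.h>

    /* Inverse of the packing done by imx_set_cpu_secure_entry(); 'hi' and
     * 'lo' are the two 32-bit SRC_GPR1 words written for the core.
     */
    static uintptr_t unpack_secure_entry(uint32_t hi, uint32_t lo)
    {
        uint64_t shifted = ((uint64_t)(hi & 0xffffU) << 22) |
                           (uint64_t)(lo & 0x003fffffU);

        return (uintptr_t)(shifted << 2);   /* undo the ">> 2" */
    }
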
diff --git a/plat/imx/imx8m/imx8m_psci_common.c b/plat/imx/imx8m/imx8m_psci_common.c
new file mode 100644
index 0000000..d641628
--- /dev/null
+++ b/plat/imx/imx8m/imx8m_psci_common.c
@@ -0,0 +1,202 @@
+/*
+ * Copyright (c) 2018-2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdbool.h>
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <drivers/delay_timer.h>
+#include <lib/mmio.h>
+#include <lib/psci/psci.h>
+
+#include <gpc.h>
+#include <imx8m_psci.h>
+#include <plat_imx8.h>
+
+/*
+ * The callback functions below need to be overridden by i.MX8MQ.
+ * Other i.MX8M SoCs can reuse them as-is if they have no
+ * special requirements.
+ */
+#pragma weak imx_validate_power_state
+#pragma weak imx_domain_suspend
+#pragma weak imx_domain_suspend_finish
+#pragma weak imx_get_sys_suspend_power_state
+
+int imx_validate_ns_entrypoint(uintptr_t ns_entrypoint)
+{
+	/* The non-secure entrypoint should be in RAM space */
+	if (ns_entrypoint < PLAT_NS_IMAGE_OFFSET)
+		return PSCI_E_INVALID_PARAMS;
+
+	return PSCI_E_SUCCESS;
+}
+
+int imx_pwr_domain_on(u_register_t mpidr)
+{
+	unsigned int core_id;
+	uint64_t base_addr = BL31_BASE;
+
+	core_id = MPIDR_AFFLVL0_VAL(mpidr);
+
+	imx_set_cpu_secure_entry(core_id, base_addr);
+	imx_set_cpu_pwr_on(core_id);
+
+	return PSCI_E_SUCCESS;
+}
+
+void imx_pwr_domain_on_finish(const psci_power_state_t *target_state)
+{
+	plat_gic_pcpu_init();
+	plat_gic_cpuif_enable();
+}
+
+void imx_pwr_domain_off(const psci_power_state_t *target_state)
+{
+	uint64_t mpidr = read_mpidr_el1();
+	unsigned int core_id = MPIDR_AFFLVL0_VAL(mpidr);
+
+	plat_gic_cpuif_disable();
+	imx_set_cpu_pwr_off(core_id);
+}
+
+int imx_validate_power_state(unsigned int power_state,
+			 psci_power_state_t *req_state)
+{
+	int pwr_lvl = psci_get_pstate_pwrlvl(power_state);
+	int pwr_type = psci_get_pstate_type(power_state);
+	int state_id = psci_get_pstate_id(power_state);
+
+	if (pwr_lvl > PLAT_MAX_PWR_LVL)
+		return PSCI_E_INVALID_PARAMS;
+
+	if (pwr_type == PSTATE_TYPE_STANDBY) {
+		CORE_PWR_STATE(req_state) = PLAT_MAX_RET_STATE;
+		CLUSTER_PWR_STATE(req_state) = PLAT_MAX_RET_STATE;
+	}
+
+	if (pwr_type == PSTATE_TYPE_POWERDOWN && state_id == 0x33) {
+		CORE_PWR_STATE(req_state) = PLAT_MAX_OFF_STATE;
+		CLUSTER_PWR_STATE(req_state) = PLAT_WAIT_RET_STATE;
+	}
+
+	return PSCI_E_SUCCESS;
+}
+
+void imx_cpu_standby(plat_local_state_t cpu_state)
+{
+	dsb();
+	write_scr_el3(read_scr_el3() | SCR_FIQ_BIT);
+	isb();
+
+	wfi();
+
+	write_scr_el3(read_scr_el3() & (~SCR_FIQ_BIT));
+	isb();
+}
+
+void imx_domain_suspend(const psci_power_state_t *target_state)
+{
+	uint64_t base_addr = BL31_BASE;
+	uint64_t mpidr = read_mpidr_el1();
+	unsigned int core_id = MPIDR_AFFLVL0_VAL(mpidr);
+
+	if (is_local_state_off(CORE_PWR_STATE(target_state))) {
+		plat_gic_cpuif_disable();
+		imx_set_cpu_secure_entry(core_id, base_addr);
+		imx_set_cpu_lpm(core_id, true);
+	} else {
+		dsb();
+		write_scr_el3(read_scr_el3() | SCR_FIQ_BIT);
+		isb();
+	}
+
+	if (!is_local_state_run(CLUSTER_PWR_STATE(target_state)))
+		imx_set_cluster_powerdown(core_id, CLUSTER_PWR_STATE(target_state));
+
+	if (is_local_state_off(SYSTEM_PWR_STATE(target_state)))
+		imx_set_sys_lpm(core_id, true);
+}
+
+void imx_domain_suspend_finish(const psci_power_state_t *target_state)
+{
+	uint64_t mpidr = read_mpidr_el1();
+	unsigned int core_id = MPIDR_AFFLVL0_VAL(mpidr);
+
+	if (is_local_state_off(SYSTEM_PWR_STATE(target_state)))
+		imx_set_sys_lpm(core_id, false);
+
+	if (!is_local_state_run(CLUSTER_PWR_STATE(target_state))) {
+		imx_clear_rbc_count();
+		imx_set_cluster_powerdown(core_id, PSCI_LOCAL_STATE_RUN);
+	}
+
+	if (is_local_state_off(CORE_PWR_STATE(target_state))) {
+		imx_set_cpu_lpm(core_id, false);
+		plat_gic_cpuif_enable();
+	} else {
+		write_scr_el3(read_scr_el3() & (~SCR_FIQ_BIT));
+		isb();
+	}
+}
+
+void imx_get_sys_suspend_power_state(psci_power_state_t *req_state)
+{
+	unsigned int i;
+
+	for (i = IMX_PWR_LVL0; i <= PLAT_MAX_PWR_LVL; i++)
+		req_state->pwr_domain_state[i] = PLAT_STOP_OFF_STATE;
+}
+
+void __dead2 imx_system_reset(void)
+{
+	uintptr_t wdog_base = IMX_WDOG_BASE;
+	unsigned int val;
+
+	/* WDOG_B reset */
+	val = mmio_read_16(wdog_base);
+#ifdef IMX_WDOG_B_RESET
+	val = (val & 0x00FF) | WDOG_WCR_WDZST | WDOG_WCR_WDE |
+		WDOG_WCR_WDT | WDOG_WCR_SRS;
+#else
+	val = (val & 0x00FF) | WDOG_WCR_WDZST | WDOG_WCR_SRS;
+#endif
+	mmio_write_16(wdog_base, val);
+
+	mmio_write_16(wdog_base + WDOG_WSR, 0x5555);
+	mmio_write_16(wdog_base + WDOG_WSR, 0xaaaa);
+	while (1)
+		;
+}
+
+void __dead2 imx_system_off(void)
+{
+	mmio_write_32(IMX_SNVS_BASE + SNVS_LPCR, SNVS_LPCR_SRTC_ENV |
+			SNVS_LPCR_DP_EN | SNVS_LPCR_TOP);
+
+	while (1)
+		;
+}
+
+void __dead2 imx_pwr_domain_pwr_down_wfi(const psci_power_state_t *target_state)
+{
+	/*
+	 * Before entering WAIT or STOP mode with PLAT(SCU) power down,
+	 * the RBC count needs to be enabled to make sure PLAT is
+	 * powered down successfully even if the wakeup IRQ is pending
+	 * early, before the power down sequence. The RBC counter is
+	 * driven by the 32K OSC, so delay 30us to make sure the counter
+	 * is really running.
+	 */
+	if (!is_local_state_run(CLUSTER_PWR_STATE(target_state))) {
+		imx_set_rbc_count();
+		udelay(30);
+	}
+
+	while (1)
+		wfi();
+}
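
On the two magic writes at the end of ``imx_system_reset()``: 0x5555 followed by
0xaaaa to WDOG_WSR is the i.MX watchdog service (reload) sequence, issued here
after WCR has been armed for the reset. A hedged helper spelling that out (a
hypothetical function, reusing the ``mmio_write_16``/``WDOG_WSR`` definitions
already present in this patch):

    #include <stdint.h>
    #include <lib/mmio.h>

    /* i.MX WDOG service sequence, as used by imx_system_reset() above. */
    static void imx_wdog_service(uintptr_t wdog_base)
    {
        mmio_write_16(wdog_base + WDOG_WSR, 0x5555);
        mmio_write_16(wdog_base + WDOG_WSR, 0xaaaa);
    }
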
diff --git a/plat/imx/imx8m/imx8mm/gpc.c b/plat/imx/imx8m/imx8mm/gpc.c
new file mode 100644
index 0000000..ab59292
--- /dev/null
+++ b/plat/imx/imx8m/imx8mm/gpc.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdlib.h>
+#include <stdint.h>
+#include <stdbool.h>
+
+#include <common/debug.h>
+#include <drivers/delay_timer.h>
+#include <lib/mmio.h>
+#include <lib/psci/psci.h>
+#include <lib/smccc.h>
+#include <platform_def.h>
+#include <services/std_svc.h>
+
+#include <gpc.h>
+#include <imx_sip_svc.h>
+
+void imx_gpc_init(void)
+{
+	unsigned int val;
+	int i;
+
+	/* mask all the wakeup irq by default */
+	for (i = 0; i < 4; i++) {
+		mmio_write_32(IMX_GPC_BASE + IMR1_CORE0_A53 + i * 4, ~0x0);
+		mmio_write_32(IMX_GPC_BASE + IMR1_CORE1_A53 + i * 4, ~0x0);
+		mmio_write_32(IMX_GPC_BASE + IMR1_CORE2_A53 + i * 4, ~0x0);
+		mmio_write_32(IMX_GPC_BASE + IMR1_CORE3_A53 + i * 4, ~0x0);
+		mmio_write_32(IMX_GPC_BASE + IMR1_CORE0_M4 + i * 4, ~0x0);
+	}
+
+	val = mmio_read_32(IMX_GPC_BASE + LPCR_A53_BSC);
+	/* use GIC wake_request to wakeup C0~C3 from LPM */
+	val |= 0x30c00000;
+	/* clear the MASTER0 LPM handshake */
+	val &= ~(1 << 6);
+	mmio_write_32(IMX_GPC_BASE + LPCR_A53_BSC, val);
+
+	/* clear MASTER1 & MASTER2 mapping in CPU0(A53) */
+	mmio_clrbits_32(IMX_GPC_BASE + MST_CPU_MAPPING, (MASTER1_MAPPING |
+		MASTER2_MAPPING));
+
+	/* set all mix/PU in A53 domain */
+	mmio_write_32(IMX_GPC_BASE + PGC_CPU_0_1_MAPPING, 0xffff);
+
+	/*
+	 * Set the CORE & SCU power up timing:
+	 * SW = 0x1, SW2ISO = 0x1;
+	 * the CPU CORE and SCU power up timing counter
+	 * is driven by the 32K OSC, so each domain's power up
+	 * latency is (SW + SW2ISO) / 32768 seconds.
+	 */
+	mmio_write_32(IMX_GPC_BASE + COREx_PGC_PCR(0) + 0x4, 0x81);
+	mmio_write_32(IMX_GPC_BASE + COREx_PGC_PCR(1) + 0x4, 0x81);
+	mmio_write_32(IMX_GPC_BASE + COREx_PGC_PCR(2) + 0x4, 0x81);
+	mmio_write_32(IMX_GPC_BASE + COREx_PGC_PCR(3) + 0x4, 0x81);
+	mmio_write_32(IMX_GPC_BASE + PLAT_PGC_PCR + 0x4, 0x81);
+	mmio_write_32(IMX_GPC_BASE + PGC_SCU_TIMING,
+		      (0x59 << 10) | 0x5B | (0x2 << 20));
+
+	/* set DUMMY PDN/PUP ACK by default for A53 domain */
+	mmio_write_32(IMX_GPC_BASE + PGC_ACK_SEL_A53,
+		      A53_DUMMY_PUP_ACK | A53_DUMMY_PDN_ACK);
+
+	/* clear DSM by default */
+	val = mmio_read_32(IMX_GPC_BASE + SLPCR);
+	val &= ~SLPCR_EN_DSM;
+	/* enable the fast wakeup wait mode */
+	val |= SLPCR_A53_FASTWUP_WAIT_MODE;
+	/* clear the RBC */
+	val &= ~(0x3f << SLPCR_RBC_COUNT_SHIFT);
+	/* set the STBY_COUNT to 0x5, (128 * 30)us */
+	val &= ~(0x7 << SLPCR_STBY_COUNT_SHFT);
+	val |= (0x5 << SLPCR_STBY_COUNT_SHFT);
+	mmio_write_32(IMX_GPC_BASE + SLPCR, val);
+
+	/*
+	 * USB PHY power up needs to make sure the RESET bit in SRC is clear,
+	 * otherwise the PU power up bit in GPC will NOT self-clear.
+	 * This only needs to be done once.
+	 */
+	mmio_clrbits_32(IMX_SRC_BASE + SRC_OTG1PHY_SCR, 0x1);
+	mmio_clrbits_32(IMX_SRC_BASE + SRC_OTG2PHY_SCR, 0x1);
+
+	/* enable all the power domain by default */
+	mmio_write_32(IMX_GPC_BASE + PU_PGC_UP_TRG, 0x3fcf);
+}
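
To make the ``(SW + SW2ISO) / 32768`` latency formula in the comment above
concrete: the 0x81 values program SW = 0x1 and SW2ISO = 0x1 (taking the
comment's field interpretation on trust), so each core or SCU power-up takes
roughly 2 / 32768 s, about 61 microseconds. A small illustrative calculation:

    #define OSC32K_HZ       32768U
    #define PGC_SW          1U      /* from the 0x81 writes above */
    #define PGC_SW2ISO      1U

    /* (1 + 1) / 32768 s == ~61 microseconds per domain power-up. */
    #define CORE_PUP_LATENCY_US \
        (((PGC_SW + PGC_SW2ISO) * 1000000U) / OSC32K_HZ)
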
diff --git a/plat/imx/imx8m/imx8mm/imx8mm_bl31_setup.c b/plat/imx/imx8m/imx8mm/imx8mm_bl31_setup.c
new file mode 100644
index 0000000..a541ed3
--- /dev/null
+++ b/plat/imx/imx8m/imx8mm/imx8mm_bl31_setup.c
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+
+#include <platform_def.h>
+
+#include <arch_helpers.h>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <context.h>
+#include <drivers/arm/tzc380.h>
+#include <drivers/console.h>
+#include <drivers/generic_delay_timer.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include <lib/mmio.h>
+#include <lib/xlat_tables/xlat_tables.h>
+#include <plat/common/platform.h>
+
+#include <gpc.h>
+#include <imx_uart.h>
+#include <plat_imx8.h>
+
+static const mmap_region_t imx_mmap[] = {
+	MAP_REGION_FLAT(IMX_GIC_BASE, IMX_GIC_SIZE, MT_DEVICE | MT_RW),
+	MAP_REGION_FLAT(IMX_AIPS_BASE, IMX_AIPS_SIZE, MT_DEVICE | MT_RW), /* AIPS map */
+	{0},
+};
+
+static entry_point_info_t bl32_image_ep_info;
+static entry_point_info_t bl33_image_ep_info;
+
+/* get SPSR for BL33 entry */
+static uint32_t get_spsr_for_bl33_entry(void)
+{
+	unsigned long el_status;
+	unsigned long mode;
+	uint32_t spsr;
+
+	/* figure out in which mode we will enter the non-secure world */
+	el_status = read_id_aa64pfr0_el1() >> ID_AA64PFR0_EL2_SHIFT;
+	el_status &= ID_AA64PFR0_ELX_MASK;
+
+	mode = (el_status) ? MODE_EL2 : MODE_EL1;
+
+	spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
+	return spsr;
+}
+
+void bl31_tzc380_setup(void)
+{
+	unsigned int val;
+
+	val = mmio_read_32(IMX_IOMUX_GPR_BASE + 0x28);
+	if ((val & GPR_TZASC_EN) != GPR_TZASC_EN)
+		return;
+
+	tzc380_init(IMX_TZASC_BASE);
+
+	/*
+	 * Need to subtract the offset 0x40000000 from the CPU address when
+	 * programming the tzasc region for i.mx8mm.
+	 */
+
+	/* Enable 1G-5G S/NS RW */
+	tzc380_configure_region(0, 0x00000000, TZC_ATTR_REGION_SIZE(TZC_REGION_SIZE_4G) |
+		TZC_ATTR_REGION_EN_MASK | TZC_ATTR_SP_ALL);
+}
+
+void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
+		u_register_t arg2, u_register_t arg3)
+{
+	static console_uart_t console;
+	int i;
+
+	/* Enable CSU NS access permission */
+	for (i = 0; i < 64; i++) {
+		mmio_write_32(IMX_CSU_BASE + i * 4, 0x00ff00ff);
+	}
+
+
+	console_imx_uart_register(IMX_BOOT_UART_BASE, IMX_BOOT_UART_CLK_IN_HZ,
+		IMX_CONSOLE_BAUDRATE, &console);
+	/* This console is only used for boot stage */
+	console_set_scope(&console.console, CONSOLE_FLAG_BOOT);
+
+	/*
+	 * tell BL3-1 where the non-secure software image is located
+	 * and the entry state information.
+	 */
+	bl33_image_ep_info.pc = PLAT_NS_IMAGE_OFFSET;
+	bl33_image_ep_info.spsr = get_spsr_for_bl33_entry();
+	SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE);
+
+	bl31_tzc380_setup();
+}
+
+void bl31_plat_arch_setup(void)
+{
+	mmap_add_region(BL31_BASE, BL31_BASE, (BL31_LIMIT - BL31_BASE),
+		MT_MEMORY | MT_RW | MT_SECURE);
+	mmap_add_region(BL_CODE_BASE, BL_CODE_BASE, (BL_CODE_END - BL_CODE_BASE),
+		MT_MEMORY | MT_RO | MT_SECURE);
+#if USE_COHERENT_MEM
+	mmap_add_region(BL_COHERENT_RAM_BASE, BL_COHERENT_RAM_BASE,
+		(BL_COHERENT_RAM_END - BL_COHERENT_RAM_BASE),
+		MT_DEVICE | MT_RW | MT_SECURE);
+#endif
+	mmap_add(imx_mmap);
+
+	init_xlat_tables();
+
+	enable_mmu_el3(0);
+}
+
+void bl31_platform_setup(void)
+{
+	generic_delay_timer_init();
+
+	/* select the CKIL source to 32K OSC */
+	mmio_write_32(IMX_ANAMIX_BASE + ANAMIX_MISC_CTL, 0x1);
+
+	plat_gic_driver_init();
+	plat_gic_init();
+
+	imx_gpc_init();
+}
+
+entry_point_info_t *bl31_plat_get_next_image_ep_info(unsigned int type)
+{
+	if (type == NON_SECURE)
+		return &bl33_image_ep_info;
+	if (type == SECURE)
+		return &bl32_image_ep_info;
+
+	return NULL;
+}
+
+unsigned int plat_get_syscnt_freq2(void)
+{
+	return COUNTER_FREQUENCY;
+}
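
Regarding the 0x40000000 note in ``bl31_tzc380_setup()``: per that comment, the
TZASC on i.MX8MM sees DRAM addresses with the DDR offset already removed, so the
single 4 GiB region programmed at TZASC address 0 covers the CPU's "1G-5G" DRAM
window (0x40000000 upwards). A hedged helper expressing that translation; the
name and the DRAM base constant are assumptions for the sketch:

    #include <stdint.h>

    #define IMX8MM_DRAM_CPU_BASE    0x40000000ULL   /* assumed for the sketch */

    /* Translate a CPU view of a DRAM address into the address space the
     * TZASC region registers expect, per the comment in bl31_tzc380_setup().
     */
    static inline uint64_t cpu_to_tzasc(uint64_t cpu_addr)
    {
        return cpu_addr - IMX8MM_DRAM_CPU_BASE;
    }
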
diff --git a/plat/imx/imx8m/imx8mm/imx8mm_psci.c b/plat/imx/imx8m/imx8mm/imx8mm_psci.c
new file mode 100644
index 0000000..e558724
--- /dev/null
+++ b/plat/imx/imx8m/imx8mm/imx8mm_psci.c
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdbool.h>
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <lib/mmio.h>
+#include <lib/psci/psci.h>
+
+#include <gpc.h>
+#include <imx8m_psci.h>
+#include <plat_imx8.h>
+
+static const plat_psci_ops_t imx_plat_psci_ops = {
+	.pwr_domain_on = imx_pwr_domain_on,
+	.pwr_domain_on_finish = imx_pwr_domain_on_finish,
+	.pwr_domain_off = imx_pwr_domain_off,
+	.validate_ns_entrypoint = imx_validate_ns_entrypoint,
+	.validate_power_state = imx_validate_power_state,
+	.cpu_standby = imx_cpu_standby,
+	.pwr_domain_suspend = imx_domain_suspend,
+	.pwr_domain_suspend_finish = imx_domain_suspend_finish,
+	.pwr_domain_pwr_down_wfi = imx_pwr_domain_pwr_down_wfi,
+	.get_sys_suspend_power_state = imx_get_sys_suspend_power_state,
+	.system_reset = imx_system_reset,
+	.system_off = imx_system_off,
+};
+
+/* export the platform specific psci ops */
+int plat_setup_psci_ops(uintptr_t sec_entrypoint,
+			const plat_psci_ops_t **psci_ops)
+{
+	/* sec_entrypoint is used for warm reset */
+	imx_mailbox_init(sec_entrypoint);
+
+	*psci_ops = &imx_plat_psci_ops;
+
+	return 0;
+}
diff --git a/plat/imx/imx8m/imx8mm/include/platform_def.h b/plat/imx/imx8m/imx8mm/include/platform_def.h
new file mode 100644
index 0000000..a95ab83
--- /dev/null
+++ b/plat/imx/imx8m/imx8mm/include/platform_def.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#define PLATFORM_LINKER_FORMAT		"elf64-littleaarch64"
+#define PLATFORM_LINKER_ARCH		aarch64
+
+#define PLATFORM_STACK_SIZE		0xB00
+#define CACHE_WRITEBACK_GRANULE		64
+
+#define PLAT_PRIMARY_CPU		0x0
+#define PLATFORM_MAX_CPU_PER_CLUSTER	4
+#define PLATFORM_CLUSTER_COUNT		1
+#define PLATFORM_CLUSTER0_CORE_COUNT	4
+#define PLATFORM_CLUSTER1_CORE_COUNT	0
+#define PLATFORM_CORE_COUNT		(PLATFORM_CLUSTER0_CORE_COUNT)
+
+#define IMX_PWR_LVL0			MPIDR_AFFLVL0
+#define IMX_PWR_LVL1			MPIDR_AFFLVL1
+#define IMX_PWR_LVL2			MPIDR_AFFLVL2
+
+#define PWR_DOMAIN_AT_MAX_LVL		U(1)
+#define PLAT_MAX_PWR_LVL		U(2)
+#define PLAT_MAX_OFF_STATE		U(4)
+#define PLAT_MAX_RET_STATE		U(2)
+
+#define PLAT_WAIT_RET_STATE		U(1)
+#define PLAT_STOP_OFF_STATE		U(3)
+
+#define BL31_BASE			U(0x920000)
+#define BL31_LIMIT			U(0x940000)
+#define BL32_BASE			U(0xbe000000)
+
+/* non-secure uboot base */
+#define PLAT_NS_IMAGE_OFFSET		U(0x40200000)
+
+/* GICv3 base address */
+#define PLAT_GICD_BASE			U(0x38800000)
+#define PLAT_GICR_BASE			U(0x38880000)
+
+#define PLAT_VIRT_ADDR_SPACE_SIZE	(1ull << 32)
+#define PLAT_PHY_ADDR_SPACE_SIZE	(1ull << 32)
+
+#define MAX_XLAT_TABLES			8
+#define MAX_MMAP_REGIONS		16
+
+#define HAB_RVT_BASE			U(0x00000900) /* HAB_RVT for i.MX8MM */
+
+#define IMX_BOOT_UART_BASE		U(0x30890000)
+#define IMX_BOOT_UART_CLK_IN_HZ		24000000 /* Select 24MHz oscillator */
+
+#define PLAT_CRASH_UART_BASE		IMX_BOOT_UART_BASE
+#define PLAT_CRASH_UART_CLK_IN_HZ	24000000
+#define IMX_CONSOLE_BAUDRATE		115200
+
+#define IMX_AIPSTZ1			U(0x301f0000)
+#define IMX_AIPSTZ2			U(0x305f0000)
+#define IMX_AIPSTZ3			U(0x309f0000)
+#define IMX_AIPSTZ4			U(0x32df0000)
+
+#define IMX_AIPS_BASE			U(0x30000000)
+#define IMX_AIPS_SIZE			U(0xC00000)
+#define IMX_GPV_BASE			U(0x32000000)
+#define IMX_GPV_SIZE			U(0x800000)
+#define IMX_AIPS1_BASE			U(0x30200000)
+#define IMX_AIPS4_BASE			U(0x32c00000)
+#define IMX_ANAMIX_BASE			U(0x30360000)
+#define IMX_CCM_BASE			U(0x30380000)
+#define IMX_SRC_BASE			U(0x30390000)
+#define IMX_GPC_BASE			U(0x303a0000)
+#define IMX_RDC_BASE			U(0x303d0000)
+#define IMX_CSU_BASE			U(0x303e0000)
+#define IMX_WDOG_BASE			U(0x30280000)
+#define IMX_SNVS_BASE			U(0x30370000)
+#define IMX_NOC_BASE			U(0x32700000)
+#define IMX_TZASC_BASE			U(0x32F80000)
+#define IMX_IOMUX_GPR_BASE		U(0x30340000)
+#define IMX_DDRC_BASE			U(0x3d400000)
+#define IMX_DDRPHY_BASE			U(0x3c000000)
+#define IMX_DDR_IPS_BASE		U(0x3d000000)
+#define IMX_ROM_BASE			U(0x0)
+
+#define GPV_BASE			U(0x32000000)
+#define GPV_SIZE			U(0x800000)
+#define IMX_GIC_BASE			PLAT_GICD_BASE
+#define IMX_GIC_SIZE			U(0x200000)
+
+#define WDOG_WSR			U(0x2)
+#define WDOG_WCR_WDZST			BIT(0)
+#define WDOG_WCR_WDBG			BIT(1)
+#define WDOG_WCR_WDE			BIT(2)
+#define WDOG_WCR_WDT			BIT(3)
+#define WDOG_WCR_SRS			BIT(4)
+#define WDOG_WCR_WDA			BIT(5)
+#define WDOG_WCR_SRE			BIT(6)
+#define WDOG_WCR_WDW			BIT(7)
+
+#define SRC_A53RCR0			U(0x4)
+#define SRC_A53RCR1			U(0x8)
+#define SRC_OTG1PHY_SCR			U(0x20)
+#define SRC_OTG2PHY_SCR			U(0x24)
+#define SRC_GPR1_OFFSET			U(0x74)
+
+#define SNVS_LPCR			U(0x38)
+#define SNVS_LPCR_SRTC_ENV		BIT(0)
+#define SNVS_LPCR_DP_EN			BIT(5)
+#define SNVS_LPCR_TOP			BIT(6)
+
+#define IOMUXC_GPR10			U(0x28)
+#define GPR_TZASC_EN			BIT(0)
+#define GPR_TZASC_EN_LOCK		BIT(16)
+
+#define ANAMIX_MISC_CTL			U(0x124)
+
+#define MAX_CSU_NUM			U(64)
+
+#define OCRAM_S_BASE			U(0x00180000)
+#define OCRAM_S_SIZE			U(0x8000)
+#define OCRAM_S_LIMIT			(OCRAM_S_BASE + OCRAM_S_SIZE)
+
+#define COUNTER_FREQUENCY		8000000 /* 8MHz */
+
+#define IMX_WDOG_B_RESET
diff --git a/plat/imx/imx8m/imx8mm/platform.mk b/plat/imx/imx8m/imx8mm/platform.mk
new file mode 100644
index 0000000..d50af20
--- /dev/null
+++ b/plat/imx/imx8m/imx8mm/platform.mk
@@ -0,0 +1,44 @@
+#
+# Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+PLAT_INCLUDES		:=	-Iplat/imx/common/include		\
+				-Iplat/imx/imx8m/include		\
+				-Iplat/imx/imx8m/imx8mm/include
+
+IMX_GIC_SOURCES		:=	drivers/arm/gic/v3/gicv3_helpers.c	\
+				drivers/arm/gic/v3/arm_gicv3_common.c   \
+				drivers/arm/gic/v3/gic500.c             \
+				drivers/arm/gic/v3/gicv3_main.c		\
+				drivers/arm/gic/common/gic_common.c	\
+				plat/common/plat_gicv3.c		\
+				plat/common/plat_psci_common.c		\
+				plat/imx/common/plat_imx8_gic.c
+
+BL31_SOURCES		+=	plat/imx/common/imx8_helpers.S			\
+				plat/imx/imx8m/gpc_common.c			\
+				plat/imx/imx8m/imx8m_psci_common.c		\
+				plat/imx/imx8m/imx8mm/imx8mm_bl31_setup.c	\
+				plat/imx/imx8m/imx8mm/imx8mm_psci.c		\
+				plat/imx/imx8m/imx8mm/gpc.c			\
+				plat/imx/common/imx8_topology.c			\
+				plat/imx/common/imx_uart_console.S		\
+				lib/xlat_tables/aarch64/xlat_tables.c		\
+				lib/xlat_tables/xlat_tables_common.c		\
+				lib/cpus/aarch64/cortex_a53.S			\
+				drivers/console/aarch64/console.S		\
+				drivers/arm/tzc/tzc380.c			\
+				drivers/delay_timer/delay_timer.c		\
+				drivers/delay_timer/generic_delay_timer.c	\
+				${IMX_GIC_SOURCES}
+
+USE_COHERENT_MEM	:=	1
+RESET_TO_BL31		:=	1
+A53_DISABLE_NON_TEMPORAL_HINT := 0
+MULTI_CONSOLE_API	:=	1
+
+ERRATA_A53_835769	:=	1
+ERRATA_A53_843419	:=	1
+ERRATA_A53_855873	:=	1
diff --git a/plat/imx/imx8m/imx8mq/gpc.c b/plat/imx/imx8m/imx8mq/gpc.c
index 7fa5a3d..02c640b 100644
--- a/plat/imx/imx8m/imx8mq/gpc.c
+++ b/plat/imx/imx8m/imx8mq/gpc.c
@@ -16,19 +16,6 @@
 
 #include <gpc.h>
 
-void imx_set_cpu_secure_entry(unsigned int core_id, uintptr_t sec_entrypoint)
-{
-	uint64_t temp_base;
-
-	temp_base = (uint64_t) sec_entrypoint;
-	temp_base >>= 2;
-
-	mmio_write_32(IMX_SRC_BASE + SRC_GPR1_OFFSET + (core_id << 3),
-		((uint32_t)(temp_base >> 22) & 0xffff));
-	mmio_write_32(IMX_SRC_BASE + SRC_GPR1_OFFSET + (core_id << 3) + 4,
-		((uint32_t)temp_base & 0x003fffff));
-}
-
 /* use wfi to power down the core */
 void imx_set_cpu_pwr_off(unsigned int core_id)
 {
@@ -39,28 +26,6 @@
 	mmio_setbits_32(IMX_GPC_BASE + COREx_PGC_PCR(core_id), 0x1);
 };
 
-/* use the sw method to power up the core */
-void imx_set_cpu_pwr_on(unsigned int core_id)
-{
-	/* clear the wfi power down bit of the core */
-	mmio_clrbits_32(IMX_GPC_BASE + LPCR_A53_AD, COREx_WFI_PDN(core_id));
-	/* assert the ncpuporeset */
-	mmio_clrbits_32(IMX_SRC_BASE + SRC_A53RCR1, (1 << core_id));
-	/* assert the pcg pcr bit of the core */
-	mmio_setbits_32(IMX_GPC_BASE + COREx_PGC_PCR(core_id), 0x1);
-	/* sw power up the core */
-	mmio_setbits_32(IMX_GPC_BASE + CPU_PGC_UP_TRG, (1 << core_id));
-
-	/* wait for the power up finished */
-	while ((mmio_read_32(IMX_GPC_BASE + CPU_PGC_UP_TRG) & (1 << core_id)) != 0)
-		;
-
-	/* deassert the pcg pcr bit of the core */
-	mmio_clrbits_32(IMX_GPC_BASE + COREx_PGC_PCR(core_id), 0x1);
-	/* deassert the ncpuporeset */
-	mmio_setbits_32(IMX_SRC_BASE + SRC_A53RCR1, (1 << core_id));
-}
-
 /* when coming out of lpm, reverse these steps */
 void imx_set_cpu_lpm(unsigned int core_id, bool pdn)
 {
@@ -79,11 +44,6 @@
 	}
 }
 
-void imx_set_sys_wakeup(unsigned int last_core, bool pdn)
-{
-	/* TODO */
-}
-
 void imx_pup_pdn_slot_config(int last_core, bool pdn)
 {
 	if (pdn) {
@@ -105,18 +65,6 @@
 	}
 }
 
-void imx_set_cluster_standby(bool retention)
-{
-	/*
-	 * Enable BIT 6 of A53 AD register to make sure system
-	 * don't enter LPM mode.
-	 */
-	if (retention)
-		mmio_setbits_32(IMX_GPC_BASE + LPCR_A53_AD, (1 << 6));
-	else
-		mmio_clrbits_32(IMX_GPC_BASE + LPCR_A53_AD, (1 << 6));
-}
-
 void imx_set_cluster_powerdown(unsigned int last_core, uint8_t power_state)
 {
 	uint32_t val;
@@ -166,34 +114,6 @@
 	}
 }
 
-/* config the system level power mode */
-void imx_set_sys_lpm(bool retention)
-{
-	uint32_t val;
-
-	/* set system DSM mode SLPCR(0x14) */
-	val = mmio_read_32(IMX_GPC_BASE + SLPCR);
-	val &= ~(SLPCR_EN_DSM | SLPCR_VSTBY | SLPCR_SBYOS |
-		 SLPCR_BYPASS_PMIC_READY | SLPCR_RBC_EN);
-
-	if (retention)
-		val |= (SLPCR_EN_DSM | SLPCR_VSTBY | SLPCR_SBYOS |
-			 SLPCR_BYPASS_PMIC_READY | SLPCR_RBC_EN |
-			 SLPCR_A53_FASTWUP_STOP_MODE);
-
-	mmio_write_32(IMX_GPC_BASE + SLPCR, val);
-}
-
-void imx_set_rbc_count(void)
-{
-	mmio_setbits_32(IMX_GPC_BASE + SLPCR, 0x3f << SLPCR_RBC_COUNT_SHIFT);
-}
-
-void imx_clear_rbc_count(void)
-{
-	mmio_clrbits_32(IMX_GPC_BASE + SLPCR, 0x3f << SLPCR_RBC_COUNT_SHIFT);
-}
-
 void imx_gpc_init(void)
 {
 	uint32_t val;
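The helpers removed from this file are, as the build changes in this patch indicate, provided instead by the shared plat/imx/imx8m sources (gpc_common.c, imx8m_psci_common.c) that both platform.mk files now list. As a standalone worked example of the SRC_GPR encoding used by the removed imx_set_cpu_secure_entry() (the entry address is shifted right by two, then split into a 16-bit high half and a 22-bit low half), using a hypothetical entry address:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t sec_entrypoint = 0x40000000ULL;	/* hypothetical BL31 base */
	uint64_t temp = sec_entrypoint >> 2;

	/* same split as the removed imx_set_cpu_secure_entry() */
	uint32_t gpr_hi = (uint32_t)(temp >> 22) & 0xffff;	/* -> 0x0040 */
	uint32_t gpr_lo = (uint32_t)temp & 0x003fffff;		/* -> 0x000000 */

	printf("SRC_GPR high 0x%04x, low 0x%06x\n", gpr_hi, gpr_lo);
	return 0;
}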
diff --git a/plat/imx/imx8m/imx8mq/imx8mq_bl31_setup.c b/plat/imx/imx8m/imx8mq/imx8mq_bl31_setup.c
index 99fa980..9263354 100644
--- a/plat/imx/imx8m/imx8mq/imx8mq_bl31_setup.c
+++ b/plat/imx/imx8m/imx8mq/imx8mq_bl31_setup.c
@@ -15,6 +15,7 @@
 #include <context.h>
 #include <drivers/arm/tzc380.h>
 #include <drivers/console.h>
+#include <drivers/generic_delay_timer.h>
 #include <lib/el3_runtime/context_mgmt.h>
 #include <lib/mmio.h>
 #include <lib/xlat_tables/xlat_tables.h>
@@ -128,6 +129,8 @@
 
 void bl31_platform_setup(void)
 {
+	generic_delay_timer_init();
+
 	/* init the GICv3 cpu and distributor interface */
 	plat_gic_driver_init();
 	plat_gic_init();
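Calling generic_delay_timer_init() during bl31_platform_setup() registers the system counter as the delay-timer backend; the corresponding drivers/delay_timer sources are added to both platform.mk files in this patch. A hypothetical consumer, assuming the standard TF-A delay-timer interface:

#include <drivers/delay_timer.h>
#include <drivers/generic_delay_timer.h>

static void example_delay_user(void)
{
	/* done once in bl31_platform_setup() by this patch */
	generic_delay_timer_init();

	/* busy-wait helpers are then available to platform code */
	udelay(50);	/* illustrative 50us wait */
}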
diff --git a/plat/imx/imx8m/imx8mq/imx8mq_psci.c b/plat/imx/imx8m/imx8mq/imx8mq_psci.c
index ee58865..04e191f 100644
--- a/plat/imx/imx8m/imx8mq/imx8mq_psci.c
+++ b/plat/imx/imx8m/imx8mq/imx8mq_psci.c
@@ -13,55 +13,9 @@
 #include <lib/psci/psci.h>
 
 #include <gpc.h>
+#include <imx8m_psci.h>
 #include <plat_imx8.h>
 
-#define CORE_PWR_STATE(state) ((state)->pwr_domain_state[MPIDR_AFFLVL0])
-#define CLUSTER_PWR_STATE(state) ((state)->pwr_domain_state[MPIDR_AFFLVL1])
-#define SYSTEM_PWR_STATE(state) ((state)->pwr_domain_state[PLAT_MAX_PWR_LVL])
-
-int imx_pwr_domain_on(u_register_t mpidr)
-{
-	unsigned int core_id;
-	uint64_t base_addr = BL31_BASE;
-
-	core_id = MPIDR_AFFLVL0_VAL(mpidr);
-
-	/* set the secure entrypoint */
-	imx_set_cpu_secure_entry(core_id, base_addr);
-	/* power up the core */
-	imx_set_cpu_pwr_on(core_id);
-
-	return PSCI_E_SUCCESS;
-}
-
-void imx_pwr_domain_on_finish(const psci_power_state_t *target_state)
-{
-	/* program the GIC per cpu dist and rdist interface */
-	plat_gic_pcpu_init();
-	/* enable the GICv3 cpu interface */
-	plat_gic_cpuif_enable();
-}
-
-void imx_pwr_domain_off(const psci_power_state_t *target_state)
-{
-	uint64_t mpidr = read_mpidr_el1();
-	unsigned int core_id = MPIDR_AFFLVL0_VAL(mpidr);
-
-	/* disable the GIC cpu interface first */
-	plat_gic_cpuif_disable();
-	/* config the core for power down */
-	imx_set_cpu_pwr_off(core_id);
-}
-
-int imx_validate_ns_entrypoint(uintptr_t ns_entrypoint)
-{
-	/* The non-secure entrypoint should be in RAM space */
-	if (ns_entrypoint < PLAT_NS_IMAGE_OFFSET)
-		return PSCI_E_INVALID_PARAMS;
-
-	return PSCI_E_SUCCESS;
-}
-
 int imx_validate_power_state(unsigned int power_state,
 			 psci_power_state_t *req_state)
 {
@@ -85,18 +39,6 @@
 	return PSCI_E_SUCCESS;
 }
 
-void imx_cpu_standby(plat_local_state_t cpu_state)
-{
-	dsb();
-	write_scr_el3(read_scr_el3() | SCR_FIQ_BIT);
-	isb();
-
-	wfi();
-
-	write_scr_el3(read_scr_el3() & (~SCR_FIQ_BIT));
-	isb();
-}
-
 void imx_domain_suspend(const psci_power_state_t *target_state)
 {
 	uint64_t base_addr = BL31_BASE;
@@ -120,7 +62,7 @@
 		imx_set_cluster_standby(true);
 
 	if (is_local_state_retn(SYSTEM_PWR_STATE(target_state))) {
-		imx_set_sys_lpm(true);
+		imx_set_sys_lpm(core_id, true);
 	}
 }
 
@@ -131,7 +73,7 @@
 
 	/* check the system level status */
 	if (is_local_state_retn(SYSTEM_PWR_STATE(target_state))) {
-		imx_set_sys_lpm(false);
+		imx_set_sys_lpm(core_id, false);
 		imx_clear_rbc_count();
 	}
 
@@ -163,47 +105,6 @@
 	req_state->pwr_domain_state[PLAT_MAX_PWR_LVL] = PLAT_MAX_RET_STATE;
 }
 
-void __dead2 imx_system_reset(void)
-{
-	uintptr_t wdog_base = IMX_WDOG_BASE;
-	unsigned int val;
-
-	/* WDOG_B reset */
-	val = mmio_read_16(wdog_base);
-#ifdef IMX_WDOG_B_RESET
-	val = (val & 0x00FF) | WDOG_WCR_WDZST | WDOG_WCR_WDE |
-		WDOG_WCR_WDT | WDOG_WCR_SRS;
-#else
-	val = (val & 0x00FF) | WDOG_WCR_WDZST | WDOG_WCR_SRS;
-#endif
-	mmio_write_16(wdog_base, val);
-
-	mmio_write_16(wdog_base + WDOG_WSR, 0x5555);
-	mmio_write_16(wdog_base + WDOG_WSR, 0xaaaa);
-	while (1)
-		;
-}
-
-
-
-void __dead2 imx_system_off(void)
-{
-	mmio_write_32(IMX_SNVS_BASE + SNVS_LPCR, SNVS_LPCR_SRTC_ENV |
-			SNVS_LPCR_DP_EN | SNVS_LPCR_TOP);
-
-	while (1)
-		;
-}
-
-void __dead2 imx_pwr_domain_pwr_down_wfi(const psci_power_state_t *target_state)
-{
-	if (is_local_state_off(CLUSTER_PWR_STATE(target_state)))
-		imx_set_rbc_count();
-
-	while (1)
-		wfi();
-}
-
 static const plat_psci_ops_t imx_plat_psci_ops = {
 	.pwr_domain_on = imx_pwr_domain_on,
 	.pwr_domain_on_finish = imx_pwr_domain_on_finish,
diff --git a/plat/imx/imx8m/imx8mq/include/platform_def.h b/plat/imx/imx8m/imx8mq/include/platform_def.h
index 5c5b0a5..b54201e 100644
--- a/plat/imx/imx8m/imx8mq/include/platform_def.h
+++ b/plat/imx/imx8m/imx8mq/include/platform_def.h
@@ -26,6 +26,7 @@
 #define PLAT_MAX_OFF_STATE		U(4)
 #define PLAT_MAX_RET_STATE		U(1)
 
+#define PLAT_WAIT_RET_STATE		PLAT_MAX_RET_STATE
 #define PLAT_WAIT_OFF_STATE		U(2)
 #define PLAT_STOP_OFF_STATE		U(3)
 
diff --git a/plat/imx/imx8m/imx8mq/platform.mk b/plat/imx/imx8m/imx8mq/platform.mk
index 2681e57..7004c56 100644
--- a/plat/imx/imx8m/imx8mq/platform.mk
+++ b/plat/imx/imx8m/imx8mq/platform.mk
@@ -20,6 +20,8 @@
 BL31_SOURCES		+=	plat/imx/common/imx8_helpers.S			\
 				plat/imx/imx8m/imx8mq/imx8mq_bl31_setup.c	\
 				plat/imx/imx8m/imx8mq/imx8mq_psci.c		\
+				plat/imx/imx8m/gpc_common.c			\
+				plat/imx/imx8m/imx8m_psci_common.c		\
 				plat/imx/imx8m/imx8mq/gpc.c			\
 				plat/imx/common/imx8_topology.c			\
 				plat/imx/common/imx_uart_console.S		\
@@ -28,6 +30,8 @@
 				lib/cpus/aarch64/cortex_a53.S			\
 				drivers/console/aarch64/console.S		\
 				drivers/arm/tzc/tzc380.c			\
+				drivers/delay_timer/delay_timer.c		\
+				drivers/delay_timer/generic_delay_timer.c	\
 				${IMX_GIC_SOURCES}
 
 USE_COHERENT_MEM	:=	1
diff --git a/plat/imx/imx8m/include/gpc.h b/plat/imx/imx8m/include/gpc.h
index 6fdf6ad..139132c 100644
--- a/plat/imx/imx8m/include/gpc.h
+++ b/plat/imx/imx8m/include/gpc.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2019, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -29,15 +29,18 @@
 #define PU_PGC_DN_TRG			0x104
 #define A53_CORE0_PGC			0x800
 #define A53_PLAT_PGC			0x900
+#define PLAT_PGC_PCR			0x900
 #define PGC_SCU_TIMING			0x910
 
 #define MASK_DSM_TRIGGER_A53		BIT(31)
 #define IRQ_SRC_A53_WUP			BIT(30)
+#define IRQ_SRC_A53_WUP_SHIFT		30
 #define IRQ_SRC_C1			BIT(29)
 #define IRQ_SRC_C0			BIT(28)
 #define IRQ_SRC_C3			BIT(23)
 #define IRQ_SRC_C2			BIT(22)
 #define CPU_CLOCK_ON_LPM		BIT(14)
+#define A53_CLK_ON_LPM			BIT(14)
 #define MASTER0_LPM_HSK			BIT(6)
 
 #define L2PGE				BIT(31)
@@ -52,19 +55,27 @@
 #define SLPCR_SBYOS			BIT(1)
 #define SLPCR_BYPASS_PMIC_READY		BIT(0)
 #define SLPCR_RBC_COUNT_SHIFT		24
+#define SLPCR_STBY_COUNT_SHFT		3
 
 #define A53_DUMMY_PDN_ACK		BIT(15)
 #define A53_DUMMY_PUP_ACK		BIT(31)
 #define A53_PLAT_PDN_ACK		BIT(2)
 #define A53_PLAT_PUP_ACK		BIT(18)
 
+#define PLAT_PUP_SLT_CTRL		BIT(9)
+#define PLAT_PDN_SLT_CTRL		BIT(8)
+
 #define SLT_PLAT_PDN			BIT(8)
 #define SLT_PLAT_PUP			BIT(9)
 
+#define MASTER1_MAPPING			BIT(1)
+#define MASTER2_MAPPING			BIT(2)
+
 /* helper macro */
 #define A53_LPM_MASK	U(0xF)
 #define A53_LPM_WAIT	U(0x5)
 #define A53_LPM_STOP	U(0xA)
+#define LPM_MODE(local_state)		((local_state) == PLAT_WAIT_RET_STATE ? A53_LPM_WAIT : A53_LPM_STOP)
 
 #define DSM_MODE_MASK	BIT(31)
 
@@ -76,6 +87,9 @@
 #define SLTx_CFG(n)			((SLT0_CFG + ((n) * 4)))
 #define SLT_COREx_PUP(core_id)		(0x2 << ((core_id) * 2))
 
+#define IRQ_IMR_NUM	4
+#define IMR_MASK_ALL	0xffffffff
+
 /* function declare */
 void imx_gpc_init(void);
 void imx_set_cpu_secure_entry(unsigned int core_index, uintptr_t sec_entrypoint);
@@ -84,7 +98,9 @@
 void imx_set_cpu_lpm(unsigned int core_index, bool pdn);
 void imx_set_cluster_standby(bool retention);
 void imx_set_cluster_powerdown(unsigned int last_core, uint8_t power_state);
-void imx_set_sys_lpm(bool retention);
+void imx_noc_slot_config(bool pdn);
+void imx_set_sys_wakeup(unsigned int last_core, bool pdn);
+void imx_set_sys_lpm(unsigned last_core, bool retention);
 void imx_set_rbc_count(void);
 void imx_clear_rbc_count(void);
 
diff --git a/plat/imx/imx8m/include/imx8m_psci.h b/plat/imx/imx8m/include/imx8m_psci.h
new file mode 100644
index 0000000..4966403
--- /dev/null
+++ b/plat/imx/imx8m/include/imx8m_psci.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef IMX8M_PSCI_H
+#define IMX8M_PSCI_H
+
+#define CORE_PWR_STATE(state) ((state)->pwr_domain_state[MPIDR_AFFLVL0])
+#define CLUSTER_PWR_STATE(state) ((state)->pwr_domain_state[MPIDR_AFFLVL1])
+#define SYSTEM_PWR_STATE(state) ((state)->pwr_domain_state[PLAT_MAX_PWR_LVL])
+
+int imx_pwr_domain_on(u_register_t mpidr);
+void imx_pwr_domain_on_finish(const psci_power_state_t *target_state);
+void imx_pwr_domain_off(const psci_power_state_t *target_state);
+int imx_validate_ns_entrypoint(uintptr_t ns_entrypoint);
+int imx_validate_power_state(unsigned int power_state, psci_power_state_t *rq_state);
+void imx_cpu_standby(plat_local_state_t cpu_state);
+void imx_domain_suspend(const psci_power_state_t *target_state);
+void imx_domain_suspend_finish(const psci_power_state_t *target_state);
+void imx_get_sys_suspend_power_state(psci_power_state_t *req_state);
+void __dead2 imx_system_reset(void);
+void __dead2 imx_system_off(void);
+void __dead2 imx_pwr_domain_pwr_down_wfi(const psci_power_state_t *target_state);
+
+#endif /* IMX8M_PSCI_H */
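With these declarations shared, each imx8m platform keeps only its SoC-specific handlers and fills plat_psci_ops_t with the common ones. A sketch of that wiring (only the first two fields are visible in the imx8mq table above; the remaining assignments are assumptions, not the literal per-SoC definition):

#include <lib/psci/psci.h>

#include <imx8m_psci.h>
#include <plat_imx8.h>

static const plat_psci_ops_t example_imx8m_psci_ops = {
	.pwr_domain_on = imx_pwr_domain_on,
	.pwr_domain_on_finish = imx_pwr_domain_on_finish,
	.pwr_domain_off = imx_pwr_domain_off,
	.validate_ns_entrypoint = imx_validate_ns_entrypoint,
	.validate_power_state = imx_validate_power_state,
	.cpu_standby = imx_cpu_standby,
	.pwr_domain_suspend = imx_domain_suspend,
	.pwr_domain_suspend_finish = imx_domain_suspend_finish,
	.get_sys_suspend_power_state = imx_get_sys_suspend_power_state,
	.system_reset = imx_system_reset,
	.system_off = imx_system_off,
	.pwr_domain_pwr_down_wfi = imx_pwr_domain_pwr_down_wfi,
};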
diff --git a/plat/rockchip/common/params_setup.c b/plat/rockchip/common/params_setup.c
index 3d1b40b..dda98d9 100644
--- a/plat/rockchip/common/params_setup.c
+++ b/plat/rockchip/common/params_setup.c
@@ -11,6 +11,7 @@
 #include <common/debug.h>
 #include <drivers/console.h>
 #include <drivers/gpio.h>
+#include <libfdt.h>
 #include <lib/coreboot.h>
 #include <lib/mmio.h>
 #include <plat/common/platform.h>
@@ -27,6 +28,13 @@
 uint32_t suspend_gpio_cnt;
 static struct apio_info *suspend_apio;
 
+static uint8_t fdt_buffer[0x10000];
+
+void *plat_get_fdt(void)
+{
+	return &fdt_buffer[0];
+}
+
 struct gpio_info *plat_get_rockchip_gpio_reset(void)
 {
 	return rst_gpio;
@@ -49,11 +57,30 @@
 	return suspend_apio;
 }
 
+static int dt_process_fdt(void *blob)
+{
+	void *fdt = plat_get_fdt();
+	int ret;
+
+	ret = fdt_open_into(blob, fdt, 0x10000);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
 void params_early_setup(void *plat_param_from_bl2)
 {
 	struct bl31_plat_param *bl2_param;
 	struct bl31_gpio_param *gpio_param;
 
+	/*
+	 * Test if this is an FDT passed as a platform-specific parameter
+	 * block.
+	 */
+	if (!dt_process_fdt(plat_param_from_bl2))
+		return;
+
 	/* keep plat parameters for later processing if needed */
 	bl2_param = (struct bl31_plat_param *)plat_param_from_bl2;
 	while (bl2_param) {
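fdt_open_into() returns a negative libfdt error when the blob is not a valid FDT, so params_early_setup() falls through to the legacy bl31_plat_param list in that case; when it succeeds, the device tree is copied into the static 64 KiB buffer and exposed through plat_get_fdt(). A hypothetical later consumer, using standard libfdt calls (the node path is only an example):

#include <libfdt.h>

void *plat_get_fdt(void);

static int example_query_fdt(void)
{
	void *fdt = plat_get_fdt();

	/* nothing useful was passed by BL2 */
	if (fdt_check_header(fdt) != 0)
		return -1;

	/* look up an example node; returns a negative value if absent */
	return fdt_path_offset(fdt, "/chosen");
}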
diff --git a/plat/rockchip/rk3328/platform.mk b/plat/rockchip/rk3328/platform.mk
index ca6345b..18b1b94 100644
--- a/plat/rockchip/rk3328/platform.mk
+++ b/plat/rockchip/rk3328/platform.mk
@@ -8,6 +8,8 @@
 RK_PLAT_SOC		:=	${RK_PLAT}/${PLAT}
 RK_PLAT_COMMON		:=	${RK_PLAT}/common
 
+include lib/libfdt/libfdt.mk
+
 PLAT_INCLUDES		:=	-Idrivers/arm/gic/common/			\
 				-Idrivers/arm/gic/v2/			\
 				-I${RK_PLAT_COMMON}/                            \
@@ -38,6 +40,7 @@
 				drivers/delay_timer/generic_delay_timer.c	\
 				lib/cpus/aarch64/aem_generic.S			\
 				lib/cpus/aarch64/cortex_a53.S			\
+				$(LIBFDT_SRCS)					\
 				${RK_PLAT_COMMON}/drivers/parameter/ddr_parameter.c	\
 				${RK_PLAT_COMMON}/aarch64/plat_helpers.S	\
 				${RK_PLAT_COMMON}/bl31_plat_setup.c		\
diff --git a/plat/rockchip/rk3368/platform.mk b/plat/rockchip/rk3368/platform.mk
index c0164c1..d1315fc 100644
--- a/plat/rockchip/rk3368/platform.mk
+++ b/plat/rockchip/rk3368/platform.mk
@@ -8,6 +8,8 @@
 RK_PLAT_SOC		:=	${RK_PLAT}/${PLAT}
 RK_PLAT_COMMON		:=	${RK_PLAT}/common
 
+include lib/libfdt/libfdt.mk
+
 PLAT_INCLUDES		:=	-I${RK_PLAT_COMMON}/				\
 				-I${RK_PLAT_COMMON}/include/			\
 				-I${RK_PLAT_COMMON}/pmusram			\
@@ -35,6 +37,7 @@
 				drivers/delay_timer/delay_timer.c		\
 				drivers/delay_timer/generic_delay_timer.c	\
 				lib/cpus/aarch64/cortex_a53.S			\
+				$(LIBFDT_SRCS)					\
 				${RK_PLAT_COMMON}/aarch64/plat_helpers.S	\
 				${RK_PLAT_COMMON}/bl31_plat_setup.c		\
 				${RK_PLAT_COMMON}/params_setup.c                \
diff --git a/plat/rockchip/rk3399/platform.mk b/plat/rockchip/rk3399/platform.mk
index b624717..1013598 100644
--- a/plat/rockchip/rk3399/platform.mk
+++ b/plat/rockchip/rk3399/platform.mk
@@ -8,6 +8,8 @@
 RK_PLAT_SOC	:=	${RK_PLAT}/${PLAT}
 RK_PLAT_COMMON	:=	${RK_PLAT}/common
 
+include lib/libfdt/libfdt.mk
+
 PLAT_INCLUDES		:=	-I${RK_PLAT_COMMON}/			\
 				-I${RK_PLAT_COMMON}/include/		\
 				-I${RK_PLAT_COMMON}/pmusram		\
@@ -43,6 +45,7 @@
 			drivers/gpio/gpio.c				\
 			lib/cpus/aarch64/cortex_a53.S			\
 			lib/cpus/aarch64/cortex_a72.S			\
+			$(LIBFDT_SRCS)					\
 			${RK_PLAT_COMMON}/aarch64/plat_helpers.S	\
 			${RK_PLAT_COMMON}/bl31_plat_setup.c		\
 			${RK_PLAT_COMMON}/params_setup.c		\