Merge pull request #1437 from jeenu-arm/ras-remaining

SDEI dispatch changes to enable RAS use cases
diff --git a/docs/plat/synquacer.rst b/docs/plat/synquacer.rst
new file mode 100644
index 0000000..ca53deb
--- /dev/null
+++ b/docs/plat/synquacer.rst
@@ -0,0 +1,117 @@
+Trusted Firmware-A for Socionext Synquacer SoCs
+===============================================
+
+Socionext's Synquacer SC2A11 is a multi-core processor with 24 cores of Arm
+Cortex-A53. The Developerbox, a 96boards platform, is built around this
+processor and is currently the only platform supported by this port of
+Trusted Firmware-A.
+
+More information is available at `link`_.
+
+How to build
+============
+
+Code Locations
+--------------
+
+-  Trusted Firmware-A:
+   `link <https://github.com/ARM-software/arm-trusted-firmware>`__
+
+-  edk2:
+   `link <https://github.com/tianocore/edk2>`__
+
+-  edk2-platforms:
+   `link <https://github.com/tianocore/edk2-platforms>`__
+
+-  edk2-non-osi:
+   `link <https://github.com/tianocore/edk2-non-osi>`__
+
+Boot Flow
+---------
+
+SCP firmware --> TF-A BL31 --> UEFI (edk2)
+
+Build Procedure
+---------------
+
+-  Firstly, in addition to the “normal” build tools you will also need a
+   few specialist tools. On a Debian or Ubuntu operating system try:
+
+   .. code:: shell
+
+       sudo apt install acpica-tools device-tree-compiler uuid-dev
+
+-  Secondly, create a new working directory and store the absolute path to this
+   directory in an environment variable, WORKSPACE. It does not matter where
+   this directory is created but as an example:
+
+   .. code:: shell
+
+       export WORKSPACE=$HOME/build/developerbox-firmware
+       mkdir -p $WORKSPACE
+
+-  Run the following commands to clone the source code:
+
+   .. code:: shell
+
+       cd $WORKSPACE
+       git clone https://github.com/ARM-software/arm-trusted-firmware -b master
+       git clone https://github.com/tianocore/edk2.git -b master
+       git clone https://github.com/tianocore/edk2-platforms.git -b master
+       git clone https://github.com/tianocore/edk2-non-osi.git -b master
+
+-  Build ATF:
+
+   .. code:: shell
+
+       cd $WORKSPACE/arm-trusted-firmware
+       make -j`nproc` PLAT=synquacer PRELOADED_BL33_BASE=0x8200000 bl31 fiptool
+       tools/fiptool/fiptool create \
+             --tb-fw ./build/synquacer/release/bl31.bin \
+             --soc-fw ./build/synquacer/release/bl31.bin \
+             --scp-fw ./build/synquacer/release/bl31.bin \
+             ../edk2-non-osi/Platform/Socionext/DeveloperBox/fip_all_arm_tf.bin
+
+-  Build EDK2:
+
+   .. code:: shell
+
+       cd $WORKSPACE
+       export PACKAGES_PATH=$WORKSPACE/edk2:$WORKSPACE/edk2-platforms:$WORKSPACE/edk2-non-osi
+       export ACTIVE_PLATFORM="Platform/Socionext/DeveloperBox/DeveloperBox.dsc"
+       export GCC5_AARCH64_PREFIX=aarch64-linux-gnu-
+       unset ARCH
+
+       . edk2/edksetup.sh
+       make -C edk2/BaseTools
+
+       build -p $ACTIVE_PLATFORM -b RELEASE -a AARCH64 -t GCC5 -n `nproc` -D DO_X86EMU=TRUE
+
+-  The firmware image, which comprises the option ROM, Arm Trusted Firmware and
+   EDK2 itself, can be found in $WORKSPACE/../Build/DeveloperBox/RELEASE_GCC5/FV/.
+   Use SYNQUACERFIRMWAREUPDATECAPSULEFMPPKCS7.Cap for UEFI capsule update and
+   SPI_NOR_IMAGE.fd for the serial flasher.
+
+   Note #1: -t GCC5 can be loosely translated as “enable link-time-optimization”;
+   any version of gcc >= 5 will support this feature and may be used to build EDK2.
+
+   Note #2: Replace -b RELEASE with -b DEBUG to create a debug build.
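+
+   For example, the debug variant of the build command above would be:
+
+   .. code:: shell
+
+       build -p $ACTIVE_PLATFORM -b DEBUG -a AARCH64 -t GCC5 -n `nproc` -D DO_X86EMU=TRUE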
+
+Install the System Firmware
+---------------------------
+
+-  Provided your Developerbox is fully working and has an operating system
+   installed, you can adopt the newly compiled system firmware using the
+   capsule update method:
+
+   .. code:: shell
+
+       sudo apt install fwupdate
+       sudo fwupdate --apply {50b94ce5-8b63-4849-8af4-ea479356f0e3} \
+                     SYNQUACERFIRMWAREUPDATECAPSULEFMPPKCS7.Cap
+       sudo reboot
+
+-  Alternatively you can install SPI_NOR_IMAGE.fd using the `board recovery method`_.
+
+.. _link: https://www.96boards.org/product/developerbox/
+.. _board recovery method: https://www.96boards.org/documentation/enterprise/developerbox/installation/board-recovery.md.html
diff --git a/docs/xlat-tables-lib-v2-design.rst b/docs/xlat-tables-lib-v2-design.rst
index d256b6b..d207f30 100644
--- a/docs/xlat-tables-lib-v2-design.rst
+++ b/docs/xlat-tables-lib-v2-design.rst
@@ -79,9 +79,9 @@
 normal memory) as well as the memory access permissions (read-only or
 read-write, executable or not, secure or non-secure, and so on). In the case of
 the EL1&0 translation regime, the attributes also specify whether the region is
-a User region (EL0) or Privileged region (EL1). See the ``mmap_attr_t``
-enumeration type in `xlat\_tables\_v2.h`_. Note that for the EL1&0 translation
-regime the Execute Never attribute is set simultaneously for both EL1 and EL0.
+a User region (EL0) or Privileged region (EL1). See the ``MT_xxx`` definitions
+in `xlat\_tables\_v2.h`_. Note that for the EL1&0 translation regime the Execute
+Never attribute is set simultaneously for both EL1 and EL0.
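+
+As an illustrative sketch only (the region name, addresses and size below are
+placeholders rather than part of the library), a platform could describe a
+non-secure, read-write region of normal memory like this:
+
+.. code:: c
+
+    static const mmap_region_t plat_example_mmap[] = {
+        /* Normal, non-secure, read-write memory, mapped at page granularity */
+        MAP_REGION2(0x80000000ULL,              /* base PA (placeholder) */
+                    0x80000000,                 /* base VA (placeholder) */
+                    0x200000,                   /* size: 2 MiB */
+                    MT_MEMORY | MT_RW | MT_NS,  /* attributes */
+                    PAGE_SIZE),                 /* granularity */
+        {0}                                     /* list terminator */
+    };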
 
 The granularity controls the translation table level to go down to when mapping
 the region. For example, assuming the MMU has been configured to use a 4KB
diff --git a/include/lib/xlat_tables/xlat_tables.h b/include/lib/xlat_tables/xlat_tables.h
index 91f2f05..c017e19 100644
--- a/include/lib/xlat_tables/xlat_tables.h
+++ b/include/lib/xlat_tables/xlat_tables.h
@@ -25,7 +25,7 @@
 #define MAP_REGION(pa, va, sz, attr) {(pa), (va), (sz), (attr)}
 
 /*
- * Shifts and masks to access fields of an mmap_attr_t
+ * Shifts and masks to access fields of an mmap attribute
  */
 #define MT_TYPE_MASK	U(0x7)
 #define MT_TYPE(_attr)	((_attr) & MT_TYPE_MASK)
@@ -39,37 +39,41 @@
 /*
  * Memory mapping attributes
  */
-typedef enum  {
-	/*
-	 * Memory types supported.
-	 * These are organised so that, going down the list, the memory types
-	 * are getting weaker; conversely going up the list the memory types are
-	 * getting stronger.
-	 */
-	MT_DEVICE,
-	MT_NON_CACHEABLE,
-	MT_MEMORY,
-	/* Values up to 7 are reserved to add new memory types in the future */
 
-	MT_RO		= U(0) << MT_PERM_SHIFT,
-	MT_RW		= U(1) << MT_PERM_SHIFT,
+/*
+ * Memory types supported.
+ * These are organised so that, going down the list, the memory types are
+ * getting weaker; conversely going up the list the memory types are getting
+ * stronger.
+ */
+#define MT_DEVICE		U(0)
+#define MT_NON_CACHEABLE	U(1)
+#define MT_MEMORY		U(2)
+/* Values up to 7 are reserved to add new memory types in the future */
 
-	MT_SECURE	= U(0) << MT_SEC_SHIFT,
-	MT_NS		= U(1) << MT_SEC_SHIFT,
+#define MT_RO			(U(0) << MT_PERM_SHIFT)
+#define MT_RW			(U(1) << MT_PERM_SHIFT)
 
-	/*
-	 * Access permissions for instruction execution are only relevant for
-	 * normal read-only memory, i.e. MT_MEMORY | MT_RO. They are ignored
-	 * (and potentially overridden) otherwise:
-	 *  - Device memory is always marked as execute-never.
-	 *  - Read-write normal memory is always marked as execute-never.
-	 */
-	MT_EXECUTE		= U(0) << MT_EXECUTE_SHIFT,
-	MT_EXECUTE_NEVER	= U(1) << MT_EXECUTE_SHIFT,
-} mmap_attr_t;
+#define MT_SECURE		(U(0) << MT_SEC_SHIFT)
+#define MT_NS			(U(1) << MT_SEC_SHIFT)
 
-#define MT_CODE		(MT_MEMORY | MT_RO | MT_EXECUTE)
-#define MT_RO_DATA	(MT_MEMORY | MT_RO | MT_EXECUTE_NEVER)
+/*
+ * Access permissions for instruction execution are only relevant for normal
+ * read-only memory, i.e. MT_MEMORY | MT_RO. They are ignored (and potentially
+ * overridden) otherwise:
+ *  - Device memory is always marked as execute-never.
+ *  - Read-write normal memory is always marked as execute-never.
+ */
+#define MT_EXECUTE		(U(0) << MT_EXECUTE_SHIFT)
+#define MT_EXECUTE_NEVER	(U(1) << MT_EXECUTE_SHIFT)
+
+/* Compound attributes for most common usages */
+#define MT_CODE			(MT_MEMORY | MT_RO | MT_EXECUTE)
+#define MT_RO_DATA		(MT_MEMORY | MT_RO | MT_EXECUTE_NEVER)
+
+#if !ERROR_DEPRECATED
+typedef unsigned int mmap_attr_t;
+#endif
 
 /*
  * Structure for specifying a single region of memory.
@@ -78,13 +82,13 @@
 	unsigned long long	base_pa;
 	uintptr_t		base_va;
 	size_t			size;
-	mmap_attr_t		attr;
+	unsigned int		attr;
 } mmap_region_t;
 
 /* Generic translation table APIs */
 void init_xlat_tables(void);
 void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
-				size_t size, mmap_attr_t attr);
+		     size_t size, unsigned int attr);
 void mmap_add(const mmap_region_t *mm);
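+
+/*
+ * Illustrative usage sketch of the API above (the device address and size
+ * are hypothetical):
+ *
+ *	mmap_add_region(0x2a400000ULL, 0x2a400000, 0x1000,
+ *			MT_DEVICE | MT_RW | MT_SECURE);
+ *	init_xlat_tables();
+ *
+ * Attribute arguments are now plain "unsigned int" values built by ORing
+ * MT_xxx flags; code that still uses the old mmap_attr_t name keeps building
+ * while ERROR_DEPRECATED is 0.
+ */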
 
 #endif /*__ASSEMBLY__*/
diff --git a/include/lib/xlat_tables/xlat_tables_v2.h b/include/lib/xlat_tables/xlat_tables_v2.h
index b237945..98f00d7 100644
--- a/include/lib/xlat_tables/xlat_tables_v2.h
+++ b/include/lib/xlat_tables/xlat_tables_v2.h
@@ -47,7 +47,7 @@
 	_MAP_REGION_FULL_SPEC(_pa, _va, _sz, _attr, _gr)
 
 /*
- * Shifts and masks to access fields of an mmap_attr_t
+ * Shifts and masks to access fields of an mmap attribute
  */
 #define MT_TYPE_MASK		U(0x7)
 #define MT_TYPE(_attr)		((_attr) & MT_TYPE_MASK)
@@ -57,57 +57,56 @@
 #define MT_SEC_SHIFT		U(4)
 /* Access permissions for instruction execution (EXECUTE/EXECUTE_NEVER) */
 #define MT_EXECUTE_SHIFT	U(5)
-/*
- * In the EL1&0 translation regime, mark the region as User (EL0) or
- * Privileged (EL1). In the EL3 translation regime this has no effect.
- */
+/* In the EL1&0 translation regime, User (EL0) or Privileged (EL1). */
 #define MT_USER_SHIFT		U(6)
 /* All other bits are reserved */
 
 /*
  * Memory mapping attributes
  */
-typedef enum  {
-	/*
-	 * Memory types supported.
-	 * These are organised so that, going down the list, the memory types
-	 * are getting weaker; conversely going up the list the memory types are
-	 * getting stronger.
-	 */
-	MT_DEVICE,
-	MT_NON_CACHEABLE,
-	MT_MEMORY,
-	/* Values up to 7 are reserved to add new memory types in the future */
 
-	MT_RO		= U(0) << MT_PERM_SHIFT,
-	MT_RW		= U(1) << MT_PERM_SHIFT,
+/*
+ * Memory types supported.
+ * These are organised so that, going down the list, the memory types are
+ * getting weaker; conversely going up the list the memory types are getting
+ * stronger.
+ */
+#define MT_DEVICE		U(0)
+#define MT_NON_CACHEABLE	U(1)
+#define MT_MEMORY		U(2)
+/* Values up to 7 are reserved to add new memory types in the future */
 
-	MT_SECURE	= U(0) << MT_SEC_SHIFT,
-	MT_NS		= U(1) << MT_SEC_SHIFT,
+#define MT_RO			(U(0) << MT_PERM_SHIFT)
+#define MT_RW			(U(1) << MT_PERM_SHIFT)
 
-	/*
-	 * Access permissions for instruction execution are only relevant for
-	 * normal read-only memory, i.e. MT_MEMORY | MT_RO. They are ignored
-	 * (and potentially overridden) otherwise:
-	 *  - Device memory is always marked as execute-never.
-	 *  - Read-write normal memory is always marked as execute-never.
-	 */
-	MT_EXECUTE		= U(0) << MT_EXECUTE_SHIFT,
-	MT_EXECUTE_NEVER	= U(1) << MT_EXECUTE_SHIFT,
+#define MT_SECURE		(U(0) << MT_SEC_SHIFT)
+#define MT_NS			(U(1) << MT_SEC_SHIFT)
 
-	/*
-	 * When mapping a region at EL0 or EL1, this attribute will be used to
-	 * determine if a User mapping (EL0) will be created or a Privileged
-	 * mapping (EL1).
-	 */
-	MT_USER				= U(1) << MT_USER_SHIFT,
-	MT_PRIVILEGED			= U(0) << MT_USER_SHIFT,
-} mmap_attr_t;
+/*
+ * Access permissions for instruction execution are only relevant for normal
+ * read-only memory, i.e. MT_MEMORY | MT_RO. They are ignored (and potentially
+ * overridden) otherwise:
+ *  - Device memory is always marked as execute-never.
+ *  - Read-write normal memory is always marked as execute-never.
+ */
+#define MT_EXECUTE		(U(0) << MT_EXECUTE_SHIFT)
+#define MT_EXECUTE_NEVER	(U(1) << MT_EXECUTE_SHIFT)
+
+/*
+ * When mapping a region at EL0 or EL1, this attribute will be used to determine
+ * if a User mapping (EL0) will be created or a Privileged mapping (EL1).
+ */
+#define MT_USER			(U(1) << MT_USER_SHIFT)
+#define MT_PRIVILEGED		(U(0) << MT_USER_SHIFT)
 
 /* Compound attributes for most common usages */
-#define MT_CODE		(MT_MEMORY | MT_RO | MT_EXECUTE)
-#define MT_RO_DATA	(MT_MEMORY | MT_RO | MT_EXECUTE_NEVER)
-#define MT_RW_DATA	(MT_MEMORY | MT_RW | MT_EXECUTE_NEVER)
+#define MT_CODE			(MT_MEMORY | MT_RO | MT_EXECUTE)
+#define MT_RO_DATA		(MT_MEMORY | MT_RO | MT_EXECUTE_NEVER)
+#define MT_RW_DATA		(MT_MEMORY | MT_RW | MT_EXECUTE_NEVER)
+
+#if !ERROR_DEPRECATED
+typedef unsigned int mmap_attr_t;
+#endif
 
 /*
  * Structure for specifying a single region of memory.
@@ -116,7 +115,7 @@
 	unsigned long long	base_pa;
 	uintptr_t		base_va;
 	size_t			size;
-	mmap_attr_t		attr;
+	unsigned int		attr;
 	/* Desired granularity. See the MAP_REGION2() macro for more details. */
 	size_t			granularity;
 } mmap_region_t;
@@ -213,7 +212,7 @@
  * removed afterwards.
  */
 void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
-				size_t size, mmap_attr_t attr);
+		     size_t size, unsigned int attr);
 void mmap_add_region_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm);
 
 /*
@@ -238,7 +237,7 @@
  *    EPERM: It overlaps another region in an invalid way.
  */
 int mmap_add_dynamic_region(unsigned long long base_pa, uintptr_t base_va,
-				size_t size, mmap_attr_t attr);
+			    size_t size, unsigned int attr);
 int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm);
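+
+/*
+ * Illustrative usage sketch (hypothetical address, size and logging policy):
+ *
+ *	int rc = mmap_add_dynamic_region(0xc0000000ULL, 0xc0000000, PAGE_SIZE,
+ *					 MT_MEMORY | MT_RW | MT_NS);
+ *	if (rc != 0)
+ *		ERROR("Failed to map the dynamic region (%d)\n", rc);
+ */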
 
 /*
diff --git a/include/services/spm_svc.h b/include/services/spm_svc.h
index 8f872c3..0200992 100644
--- a/include/services/spm_svc.h
+++ b/include/services/spm_svc.h
@@ -74,6 +74,9 @@
 			 void *handle,
 			 uint64_t flags);
 
+/* Helper to enter a Secure Partition */
+uint64_t spm_sp_call(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3);
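+
+/*
+ * Illustrative usage, mirroring the MM_COMMUNICATE handler in
+ * services/std_svc/spm/spm_main.c (the argument names are placeholders):
+ *
+ *	uint64_t rc = spm_sp_call(smc_fid, comm_buffer_address,
+ *				  comm_size_address, plat_my_core_pos());
+ */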
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* __SPM_SVC_H__ */
diff --git a/lib/utils/mem_region.c b/lib/utils/mem_region.c
index 24c2c1d..e9541ba 100644
--- a/lib/utils/mem_region.c
+++ b/lib/utils/mem_region.c
@@ -58,7 +58,7 @@
 	uintptr_t begin;
 	int r;
 	size_t size;
-	const mmap_attr_t attr = MT_MEMORY|MT_RW|MT_NS;
+	const unsigned int attr = MT_MEMORY | MT_RW | MT_NS;
 
 	assert(regions != NULL);
 	assert(nregions > 0 && chunk > 0);
diff --git a/lib/xlat_tables/xlat_tables_common.c b/lib/xlat_tables/xlat_tables_common.c
index 21bf489..b42cd68 100644
--- a/lib/xlat_tables/xlat_tables_common.c
+++ b/lib/xlat_tables/xlat_tables_common.c
@@ -66,7 +66,7 @@
 }
 
 void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
-			size_t size, mmap_attr_t attr)
+		     size_t size, unsigned int attr)
 {
 	mmap_region_t *mm = mmap;
 	mmap_region_t *mm_last = mm + ARRAY_SIZE(mmap) - 1;
@@ -178,8 +178,8 @@
 	}
 }
 
-static uint64_t mmap_desc(mmap_attr_t attr, unsigned long long addr_pa,
-							unsigned int level)
+static uint64_t mmap_desc(unsigned int attr, unsigned long long addr_pa,
+			  unsigned int level)
 {
 	uint64_t desc;
 	int mem_type;
@@ -264,7 +264,7 @@
  * value pointed by attr should be ignored by the caller.
  */
 static int mmap_region_attr(mmap_region_t *mm, uintptr_t base_va,
-					size_t size, mmap_attr_t *attr)
+			    size_t size, unsigned int *attr)
 {
 	/* Don't assume that the area is contained in the first region */
 	int ret = -1;
@@ -348,7 +348,7 @@
 			 * there are partially overlapping regions. On success,
 			 * it will return the innermost region's attributes.
 			 */
-			mmap_attr_t attr;
+			unsigned int attr;
 			int r = mmap_region_attr(mm, base_va, level_size, &attr);
 
 			if (!r) {
diff --git a/lib/xlat_tables_v2/xlat_tables_internal.c b/lib/xlat_tables_v2/xlat_tables_internal.c
index 3b586b2..5beb51e 100644
--- a/lib/xlat_tables_v2/xlat_tables_internal.c
+++ b/lib/xlat_tables_v2/xlat_tables_internal.c
@@ -823,10 +823,8 @@
 		ctx->max_va = end_va;
 }
 
-void mmap_add_region(unsigned long long base_pa,
-				uintptr_t base_va,
-				size_t size,
-				mmap_attr_t attr)
+void mmap_add_region(unsigned long long base_pa, uintptr_t base_va, size_t size,
+		     unsigned int attr)
 {
 	mmap_region_t mm = MAP_REGION(base_pa, base_va, size, attr);
 	mmap_add_region_ctx(&tf_xlat_ctx, &mm);
@@ -947,8 +945,8 @@
 	return 0;
 }
 
-int mmap_add_dynamic_region(unsigned long long base_pa,
-			    uintptr_t base_va, size_t size, mmap_attr_t attr)
+int mmap_add_dynamic_region(unsigned long long base_pa, uintptr_t base_va,
+			    size_t size, unsigned int attr)
 {
 	mmap_region_t mm = MAP_REGION(base_pa, base_va, size, attr);
 	return mmap_add_dynamic_region_ctx(&tf_xlat_ctx, &mm);
diff --git a/lib/xlat_tables_v2/xlat_tables_private.h b/lib/xlat_tables_v2/xlat_tables_private.h
index 07963b1..157dd03 100644
--- a/lib/xlat_tables_v2/xlat_tables_private.h
+++ b/lib/xlat_tables_v2/xlat_tables_private.h
@@ -12,27 +12,26 @@
 
 #if PLAT_XLAT_TABLES_DYNAMIC
 /*
- * Shifts and masks to access fields of an mmap_attr_t
+ * Private shifts and masks to access fields of an mmap attribute
  */
 /* Dynamic or static */
-#define MT_DYN_SHIFT		30 /* 31 would cause undefined behaviours */
+#define MT_DYN_SHIFT		U(31)
 
 /*
  * Memory mapping private attributes
  *
- * Private attributes not exposed in the mmap_attr_t enum.
+ * Private attributes not exposed in the public header.
  */
-typedef enum  {
-	/*
-	 * Regions mapped before the MMU can't be unmapped dynamically (they are
-	 * static) and regions mapped with MMU enabled can be unmapped. This
-	 * behaviour can't be overridden.
-	 *
-	 * Static regions can overlap each other, dynamic regions can't.
-	 */
-	MT_STATIC	= 0 << MT_DYN_SHIFT,
-	MT_DYNAMIC	= 1 << MT_DYN_SHIFT
-} mmap_priv_attr_t;
+
+/*
+ * Regions mapped before the MMU can't be unmapped dynamically (they are
+ * static) and regions mapped with MMU enabled can be unmapped. This
+ * behaviour can't be overridden.
+ *
+ * Static regions can overlap each other, dynamic regions can't.
+ */
+#define MT_STATIC	(U(0) << MT_DYN_SHIFT)
+#define MT_DYNAMIC	(U(1) << MT_DYN_SHIFT)
 
 #endif /* PLAT_XLAT_TABLES_DYNAMIC */
 
diff --git a/maintainers.rst b/maintainers.rst
index 96ff0e0..4466a56 100644
--- a/maintainers.rst
+++ b/maintainers.rst
@@ -132,6 +132,16 @@
 
 -  plat/rockchip/\*
 
+Synquacer platform sub-maintainer
+---------------------------------
+
+Sumit Garg (sumit.garg@linaro.org, `b49020`_)
+
+Files:
+
+-  docs/plat/synquacer.rst
+-  plat/socionext/synquacer/\*
+
 Texas Instruments platform sub-maintainer
 -----------------------------------------
 
@@ -183,3 +193,4 @@
 .. _etienne-lms: https://github.com/etienne-lms
 .. _qoriq-open-source: https://github.com/qoriq-open-source
 .. _Andre-ARM: https://github.com/Andre-ARM
+.. _b49020: https://github.com/b49020
diff --git a/plat/socionext/synquacer/drivers/mhu/sq_mhu.c b/plat/socionext/synquacer/drivers/mhu/sq_mhu.c
new file mode 100644
index 0000000..5c2a635
--- /dev/null
+++ b/plat/socionext/synquacer/drivers/mhu/sq_mhu.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bakery_lock.h>
+#include <mmio.h>
+#include <platform_def.h>
+#include <sq_common.h>
+#include "sq_mhu.h"
+
+/* SCP MHU secure channel registers */
+#define SCP_INTR_S_STAT		0x200
+#define SCP_INTR_S_SET		0x208
+#define SCP_INTR_S_CLEAR	0x210
+
+/* CPU MHU secure channel registers */
+#define CPU_INTR_S_STAT		0x300
+#define CPU_INTR_S_SET		0x308
+#define CPU_INTR_S_CLEAR	0x310
+
+DEFINE_BAKERY_LOCK(sq_lock);
+
+/*
+ * Slot 31 is reserved because the MHU hardware uses this register bit to
+ * indicate a non-secure access attempt. The total number of available slots is
+ * therefore 31 [30:0].
+ */
+#define MHU_MAX_SLOT_ID		30
+
+void mhu_secure_message_start(unsigned int slot_id)
+{
+	assert(slot_id <= MHU_MAX_SLOT_ID);
+
+	bakery_lock_get(&sq_lock);
+
+	/* Make sure any previous command has finished */
+	while (mmio_read_32(PLAT_SQ_MHU_BASE + CPU_INTR_S_STAT) &
+							(1 << slot_id))
+		;
+}
+
+void mhu_secure_message_send(unsigned int slot_id)
+{
+	assert(slot_id <= MHU_MAX_SLOT_ID);
+	assert(!(mmio_read_32(PLAT_SQ_MHU_BASE + CPU_INTR_S_STAT) &
+							(1 << slot_id)));
+
+	/* Send command to SCP */
+	mmio_write_32(PLAT_SQ_MHU_BASE + CPU_INTR_S_SET, 1 << slot_id);
+}
+
+uint32_t mhu_secure_message_wait(void)
+{
+	uint32_t response;
+
+	/* Wait for response from SCP */
+	while (!(response = mmio_read_32(PLAT_SQ_MHU_BASE + SCP_INTR_S_STAT)))
+		;
+
+	return response;
+}
+
+void mhu_secure_message_end(unsigned int slot_id)
+{
+	assert(slot_id <= MHU_MAX_SLOT_ID);
+
+	/*
+	 * Clear any response we got by writing one in the relevant slot bit to
+	 * the CLEAR register
+	 */
+	mmio_write_32(PLAT_SQ_MHU_BASE + SCP_INTR_S_CLEAR, 1 << slot_id);
+
+	bakery_lock_release(&sq_lock);
+}
+
+void mhu_secure_init(void)
+{
+	bakery_lock_init(&sq_lock);
+
+	/*
+	 * The STAT register resets to zero. Ensure it is in the expected state,
+	 * as a stale or garbage value would make us think it's a message we've
+	 * already sent.
+	 */
+	assert(mmio_read_32(PLAT_SQ_MHU_BASE + CPU_INTR_S_STAT) == 0);
+}
+
+void plat_sq_pwrc_setup(void)
+{
+	mhu_secure_init();
+}
diff --git a/plat/socionext/synquacer/drivers/mhu/sq_mhu.h b/plat/socionext/synquacer/drivers/mhu/sq_mhu.h
new file mode 100644
index 0000000..bef117d
--- /dev/null
+++ b/plat/socionext/synquacer/drivers/mhu/sq_mhu.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __SQ_MHU_H__
+#define __SQ_MHU_H__
+
+#include <stdint.h>
+
+void mhu_secure_message_start(unsigned int slot_id);
+void mhu_secure_message_send(unsigned int slot_id);
+uint32_t mhu_secure_message_wait(void);
+void mhu_secure_message_end(unsigned int slot_id);
+
+void mhu_secure_init(void);
+
+#endif	/* __SQ_MHU_H__ */
diff --git a/plat/socionext/synquacer/drivers/scpi/sq_scpi.c b/plat/socionext/synquacer/drivers/scpi/sq_scpi.c
new file mode 100644
index 0000000..170b7e1
--- /dev/null
+++ b/plat/socionext/synquacer/drivers/scpi/sq_scpi.c
@@ -0,0 +1,216 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <platform_def.h>
+#include <sq_common.h>
+#include <debug.h>
+#include <string.h>
+#include "sq_mhu.h"
+#include "sq_scpi.h"
+
+#define SCPI_SHARED_MEM_SCP_TO_AP	PLAT_SQ_SCP_COM_SHARED_MEM_BASE
+#define SCPI_SHARED_MEM_AP_TO_SCP	(PLAT_SQ_SCP_COM_SHARED_MEM_BASE \
+								 + 0x100)
+
+#define SCPI_CMD_HEADER_AP_TO_SCP		\
+	((scpi_cmd_t *) SCPI_SHARED_MEM_AP_TO_SCP)
+#define SCPI_CMD_PAYLOAD_AP_TO_SCP		\
+	((void *) (SCPI_SHARED_MEM_AP_TO_SCP + sizeof(scpi_cmd_t)))
+
+/* ID of the MHU slot used for the SCPI protocol */
+#define SCPI_MHU_SLOT_ID		0
+
+static void scpi_secure_message_start(void)
+{
+	mhu_secure_message_start(SCPI_MHU_SLOT_ID);
+}
+
+static void scpi_secure_message_send(size_t payload_size)
+{
+	/*
+	 * Ensure that any write to the SCPI payload area is seen by SCP before
+	 * we write to the MHU register. If these 2 writes were reordered by
+	 * the CPU then SCP would read stale payload data
+	 */
+	dmbst();
+
+	mhu_secure_message_send(SCPI_MHU_SLOT_ID);
+}
+
+static void scpi_secure_message_receive(scpi_cmd_t *cmd)
+{
+	uint32_t mhu_status;
+
+	assert(cmd != NULL);
+
+	mhu_status = mhu_secure_message_wait();
+
+	/* Expect an SCPI message, reject any other protocol */
+	if (mhu_status != (1 << SCPI_MHU_SLOT_ID)) {
+		ERROR("MHU: Unexpected protocol (MHU status: 0x%x)\n",
+			mhu_status);
+		panic();
+	}
+
+	/*
+	 * Ensure that any read to the SCPI payload area is done after reading
+	 * the MHU register. If these 2 reads were reordered then the CPU would
+	 * read invalid payload data
+	 */
+	dmbld();
+
+	memcpy(cmd, (void *) SCPI_SHARED_MEM_SCP_TO_AP, sizeof(*cmd));
+}
+
+static void scpi_secure_message_end(void)
+{
+	mhu_secure_message_end(SCPI_MHU_SLOT_ID);
+}
+
+int scpi_wait_ready(void)
+{
+	scpi_cmd_t scpi_cmd;
+	scpi_status_t status = SCP_OK;
+
+	VERBOSE("Waiting for SCP_READY command...\n");
+
+	/* Get a message from the SCP */
+	scpi_secure_message_start();
+	scpi_secure_message_receive(&scpi_cmd);
+	scpi_secure_message_end();
+
+	/* We are expecting 'SCP Ready', produce correct error if it's not */
+	if (scpi_cmd.id != SCPI_CMD_SCP_READY) {
+		ERROR("Unexpected SCP command: expected command #%u, "
+		      "got command #%u\n", SCPI_CMD_SCP_READY, scpi_cmd.id);
+		status = SCP_E_SUPPORT;
+	} else if (scpi_cmd.size != 0) {
+		ERROR("SCP_READY command has incorrect size: expected 0, "
+		      "got %u\n", scpi_cmd.size);
+		status = SCP_E_SIZE;
+	}
+
+	VERBOSE("Sending response for SCP_READY command\n");
+
+	/*
+	 * Send our response back to SCP.
+	 * We are using the same SCPI header, just update the status field.
+	 */
+	scpi_cmd.status = status;
+	scpi_secure_message_start();
+	memcpy((void *) SCPI_SHARED_MEM_AP_TO_SCP, &scpi_cmd, sizeof(scpi_cmd));
+	scpi_secure_message_send(0);
+	scpi_secure_message_end();
+
+	return status == SCP_OK ? 0 : -1;
+}
+
+void scpi_set_sq_power_state(unsigned int mpidr, scpi_power_state_t cpu_state,
+		scpi_power_state_t cluster_state, scpi_power_state_t sq_state)
+{
+	scpi_cmd_t *cmd;
+	uint32_t state = 0;
+	uint32_t *payload_addr;
+
+	state |= mpidr & 0x0f;	/* CPU ID */
+	state |= (mpidr & 0xf00) >> 4;	/* Cluster ID */
+	state |= cpu_state << 8;
+	state |= cluster_state << 12;
+	state |= sq_state << 16;
+
+	scpi_secure_message_start();
+
+	/* Populate the command header */
+	cmd = SCPI_CMD_HEADER_AP_TO_SCP;
+	cmd->id = SCPI_CMD_SET_POWER_STATE;
+	cmd->set = SCPI_SET_NORMAL;
+	cmd->sender = 0;
+	cmd->size = sizeof(state);
+	/* Populate the command payload */
+	payload_addr = SCPI_CMD_PAYLOAD_AP_TO_SCP;
+	*payload_addr = state;
+	scpi_secure_message_send(sizeof(state));
+
+	/*
+	 * SCP does not reply to this command in order to avoid MHU interrupts
+	 * from the sender, which could interfere with its power state request.
+	 */
+	scpi_secure_message_end();
+}
+
+uint32_t scpi_sys_power_state(scpi_system_state_t system_state)
+{
+	scpi_cmd_t *cmd;
+	uint8_t *payload_addr;
+	scpi_cmd_t response;
+
+	scpi_secure_message_start();
+
+	/* Populate the command header */
+	cmd = SCPI_CMD_HEADER_AP_TO_SCP;
+	cmd->id = SCPI_CMD_SYS_POWER_STATE;
+	cmd->set = 0;
+	cmd->sender = 0;
+	cmd->size = sizeof(*payload_addr);
+	/* Populate the command payload */
+	payload_addr = SCPI_CMD_PAYLOAD_AP_TO_SCP;
+	*payload_addr = system_state & 0xff;
+	scpi_secure_message_send(sizeof(*payload_addr));
+
+	scpi_secure_message_receive(&response);
+
+	scpi_secure_message_end();
+
+	return response.status;
+}
+
+uint32_t scpi_get_draminfo(struct draminfo *info)
+{
+	scpi_cmd_t *cmd;
+	struct {
+		scpi_cmd_t	cmd;
+		struct draminfo	info;
+	} response;
+	uint32_t mhu_status;
+
+	scpi_secure_message_start();
+
+	/* Populate the command header */
+	cmd		= SCPI_CMD_HEADER_AP_TO_SCP;
+	cmd->id		= SCPI_CMD_GET_DRAMINFO;
+	cmd->set	= SCPI_SET_EXTENDED;
+	cmd->sender	= 0;
+	cmd->size	= 0;
+
+	scpi_secure_message_send(0);
+
+	mhu_status = mhu_secure_message_wait();
+
+	/* Expect an SCPI message, reject any other protocol */
+	if (mhu_status != (1 << SCPI_MHU_SLOT_ID)) {
+		ERROR("MHU: Unexpected protocol (MHU status: 0x%x)\n",
+			mhu_status);
+		panic();
+	}
+
+	/*
+	 * Ensure that any read to the SCPI payload area is done after reading
+	 * the MHU register. If these 2 reads were reordered then the CPU would
+	 * read invalid payload data
+	 */
+	dmbld();
+
+	memcpy(&response, (void *)SCPI_SHARED_MEM_SCP_TO_AP, sizeof(response));
+
+	scpi_secure_message_end();
+
+	if (response.cmd.status == SCP_OK)
+		*info = response.info;
+
+	return response.cmd.status;
+}
diff --git a/plat/socionext/synquacer/drivers/scpi/sq_scpi.h b/plat/socionext/synquacer/drivers/scpi/sq_scpi.h
new file mode 100644
index 0000000..30fa5c7
--- /dev/null
+++ b/plat/socionext/synquacer/drivers/scpi/sq_scpi.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __SQ_SCPI_H__
+#define __SQ_SCPI_H__
+
+#include <stddef.h>
+#include <stdint.h>
+
+/*
+ * An SCPI command consists of a header and a payload.
+ * The following structure describes the header. It is 64-bit long.
+ */
+typedef struct {
+	/* Command ID */
+	uint32_t id		: 7;
+	/* Set ID. Identifies whether this is a standard or extended command. */
+	uint32_t set		: 1;
+	/* Sender ID to match a reply. The value is sender specific. */
+	uint32_t sender		: 8;
+	/* Size of the payload in bytes (0 - 511) */
+	uint32_t size		: 9;
+	uint32_t reserved	: 7;
+	/*
+	 * Status indicating the success of a command.
+	 * See the enum below.
+	 */
+	uint32_t status;
+} scpi_cmd_t;
+
+typedef enum {
+	SCPI_SET_NORMAL = 0,	/* Normal SCPI commands */
+	SCPI_SET_EXTENDED	/* Extended SCPI commands */
+} scpi_set_t;
+
+enum {
+	SCP_OK = 0,	/* Success */
+	SCP_E_PARAM,	/* Invalid parameter(s) */
+	SCP_E_ALIGN,	/* Invalid alignment */
+	SCP_E_SIZE,	/* Invalid size */
+	SCP_E_HANDLER,	/* Invalid handler or callback */
+	SCP_E_ACCESS,	/* Invalid access or permission denied */
+	SCP_E_RANGE,	/* Value out of range */
+	SCP_E_TIMEOUT,	/* Timeout has occurred */
+	SCP_E_NOMEM,	/* Invalid memory area or pointer */
+	SCP_E_PWRSTATE,	/* Invalid power state */
+	SCP_E_SUPPORT,	/* Feature not supported or disabled */
+	SCPI_E_DEVICE,	/* Device error */
+	SCPI_E_BUSY,	/* Device is busy */
+};
+
+typedef uint32_t scpi_status_t;
+
+typedef enum {
+	SCPI_CMD_SCP_READY = 0x01,
+	SCPI_CMD_SET_POWER_STATE = 0x03,
+	SCPI_CMD_SYS_POWER_STATE = 0x05
+} scpi_command_t;
+
+typedef enum {
+	scpi_power_on = 0,
+	scpi_power_retention = 1,
+	scpi_power_off = 3,
+} scpi_power_state_t;
+
+typedef enum {
+	scpi_system_shutdown = 0,
+	scpi_system_reboot = 1,
+	scpi_system_reset = 2
+} scpi_system_state_t;
+
+extern int scpi_wait_ready(void);
+extern void scpi_set_sq_power_state(unsigned int mpidr,
+					scpi_power_state_t cpu_state,
+					scpi_power_state_t cluster_state,
+					scpi_power_state_t css_state);
+uint32_t scpi_sys_power_state(scpi_system_state_t system_state);
+
+#endif	/* __SQ_SCPI_H__ */
diff --git a/plat/socionext/synquacer/include/plat_macros.S b/plat/socionext/synquacer/include/plat_macros.S
new file mode 100644
index 0000000..3dc06aa
--- /dev/null
+++ b/plat/socionext/synquacer/include/plat_macros.S
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PLAT_MACROS_S__
+#define __PLAT_MACROS_S__
+
+/*
+ * Print CCN registers
+ */
+	.macro plat_crash_print_regs
+	.endm
+#endif /* __PLAT_MACROS_S__ */
diff --git a/plat/socionext/synquacer/include/platform_def.h b/plat/socionext/synquacer/include/platform_def.h
new file mode 100644
index 0000000..3e16642
--- /dev/null
+++ b/plat/socionext/synquacer/include/platform_def.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __PLATFORM_DEF_H__
+#define __PLATFORM_DEF_H__
+
+#include <common_def.h>
+
+/* CPU topology */
+#define PLAT_MAX_CORES_PER_CLUSTER	2
+#define PLAT_CLUSTER_COUNT		12
+#define PLATFORM_CORE_COUNT		(PLAT_CLUSTER_COUNT *	\
+					 PLAT_MAX_CORES_PER_CLUSTER)
+
+#define PLAT_MAX_PWR_LVL		1
+#define PLAT_MAX_RET_STATE		1
+#define PLAT_MAX_OFF_STATE		2
+
+#define SQ_LOCAL_STATE_RUN		0
+#define SQ_LOCAL_STATE_RET		1
+#define SQ_LOCAL_STATE_OFF		2
+
+#define CACHE_WRITEBACK_SHIFT		6
+#define CACHE_WRITEBACK_GRANULE		(1 << CACHE_WRITEBACK_SHIFT)
+
+#define PLAT_PHY_ADDR_SPACE_SIZE	(1ULL << 32)
+#define PLAT_VIRT_ADDR_SPACE_SIZE	(1ULL << 32)
+#define MAX_XLAT_TABLES			4
+#define MAX_MMAP_REGIONS		6
+
+#define PLATFORM_STACK_SIZE		0x400
+
+#define BL31_BASE			0x04000000
+#define BL31_SIZE			0x00080000
+#define BL31_LIMIT			(BL31_BASE + BL31_SIZE)
+
+#define PLAT_SQ_CCN_BASE		0x32000000
+#define PLAT_SQ_CLUSTER_TO_CCN_ID_MAP					\
+					0,	/* Cluster 0 */		\
+					18,	/* Cluster 1 */		\
+					11,	/* Cluster 2 */		\
+					29,	/* Cluster 3 */		\
+					35,	/* Cluster 4 */		\
+					17,	/* Cluster 5 */		\
+					12,	/* Cluster 6 */		\
+					30,	/* Cluster 7 */		\
+					14,	/* Cluster 8 */		\
+					32,	/* Cluster 9 */		\
+					15,	/* Cluster 10 */	\
+					33	/* Cluster 11 */
+
+/* UART related constants */
+#define PLAT_SQ_BOOT_UART_BASE		0x2A400000
+#define PLAT_SQ_BOOT_UART_CLK_IN_HZ	62500000
+#define SQ_CONSOLE_BAUDRATE		115200
+
+#define SQ_SYS_CNTCTL_BASE		0x2a430000
+
+#define SQ_SYS_TIMCTL_BASE		0x2a810000
+#define PLAT_SQ_NSTIMER_FRAME_ID	0
+
+#define DRAMINFO_BASE			0x2E00FFC0
+
+#define PLAT_SQ_MHU_BASE		0x45000000
+
+#define PLAT_SQ_SCP_COM_SHARED_MEM_BASE		0x45400000
+#define SCPI_CMD_GET_DRAMINFO			0x1
+
+#define SQ_BOOT_CFG_ADDR			0x45410000
+#define PLAT_SQ_PRIMARY_CPU_SHIFT		8
+#define PLAT_SQ_PRIMARY_CPU_BIT_WIDTH		6
+
+#define PLAT_SQ_GICD_BASE		0x30000000
+#define PLAT_SQ_GICR_BASE		0x30400000
+
+#define PLAT_SQ_GPIO_BASE		0x51000000
+
+#endif /* __PLATFORM_DEF_H__ */
diff --git a/plat/socionext/synquacer/include/sq_common.h b/plat/socionext/synquacer/include/sq_common.h
new file mode 100644
index 0000000..58b1e24
--- /dev/null
+++ b/plat/socionext/synquacer/include/sq_common.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __SQ_COMMON_H__
+#define __SQ_COMMON_H__
+
+#include <sys/types.h>
+#include <xlat_tables_v2.h>
+
+struct draminfo {
+	uint32_t	num_regions;
+	uint32_t	reserved;
+	uint64_t	base1;
+	uint64_t	size1;
+	uint64_t	base2;
+	uint64_t	size2;
+	uint64_t	base3;
+	uint64_t	size3;
+};
+
+uint32_t scpi_get_draminfo(struct draminfo *info);
+
+void plat_sq_pwrc_setup(void);
+
+void plat_sq_interconnect_init(void);
+void plat_sq_interconnect_enter_coherency(void);
+void plat_sq_interconnect_exit_coherency(void);
+
+unsigned int sq_calc_core_pos(u_register_t mpidr);
+
+void sq_gic_driver_init(void);
+void sq_gic_init(void);
+void sq_gic_cpuif_enable(void);
+void sq_gic_cpuif_disable(void);
+void sq_gic_pcpu_init(void);
+
+void sq_mmap_setup(uintptr_t total_base, size_t total_size,
+		   const struct mmap_region *mmap);
+
+#endif /* __SQ_COMMON_H__ */
diff --git a/plat/socionext/synquacer/platform.mk b/plat/socionext/synquacer/platform.mk
new file mode 100644
index 0000000..546f84a
--- /dev/null
+++ b/plat/socionext/synquacer/platform.mk
@@ -0,0 +1,46 @@
+#
+# Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+override RESET_TO_BL31			:= 1
+override ENABLE_PLAT_COMPAT		:= 0
+override MULTI_CONSOLE_API		:= 1
+override PROGRAMMABLE_RESET_ADDRESS	:= 1
+override USE_COHERENT_MEM		:= 1
+override SEPARATE_CODE_AND_RODATA	:= 1
+override ENABLE_SVE_FOR_NS		:= 0
+
+# Enable workarounds for selected Cortex-A53 erratas.
+ERRATA_A53_855873		:= 1
+
+# Libraries
+include lib/xlat_tables_v2/xlat_tables.mk
+
+PLAT_PATH		:=	plat/socionext/synquacer
+PLAT_INCLUDES		:=	-I$(PLAT_PATH)/include		\
+				-I$(PLAT_PATH)/drivers/scpi	\
+				-I$(PLAT_PATH)/drivers/mhu
+
+PLAT_BL_COMMON_SOURCES	+=	$(PLAT_PATH)/sq_helpers.S		\
+				drivers/arm/pl011/pl011_console.S	\
+				drivers/delay_timer/delay_timer.c	\
+				drivers/delay_timer/generic_delay_timer.c \
+				${XLAT_TABLES_LIB_SRCS}
+
+BL31_SOURCES		+=	drivers/arm/ccn/ccn.c			\
+				drivers/arm/gic/common/gic_common.c	\
+				drivers/arm/gic/v3/gicv3_helpers.c	\
+				drivers/arm/gic/v3/gicv3_main.c		\
+				lib/cpus/aarch64/cortex_a53.S		\
+				plat/common/plat_gicv3.c		\
+				plat/common/plat_psci_common.c		\
+				$(PLAT_PATH)/sq_bl31_setup.c		\
+				$(PLAT_PATH)/sq_ccn.c			\
+				$(PLAT_PATH)/sq_topology.c		\
+				$(PLAT_PATH)/sq_psci.c			\
+				$(PLAT_PATH)/sq_gicv3.c			\
+				$(PLAT_PATH)/sq_xlat_setup.c		\
+				$(PLAT_PATH)/drivers/scpi/sq_scpi.c	\
+				$(PLAT_PATH)/drivers/mhu/sq_mhu.c
diff --git a/plat/socionext/synquacer/sq_bl31_setup.c b/plat/socionext/synquacer/sq_bl31_setup.c
new file mode 100644
index 0000000..461c8de
--- /dev/null
+++ b/plat/socionext/synquacer/sq_bl31_setup.c
@@ -0,0 +1,163 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <platform_def.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <pl011.h>
+#include <debug.h>
+#include <mmio.h>
+#include <sq_common.h>
+
+static console_pl011_t console;
+static entry_point_info_t bl32_image_ep_info;
+static entry_point_info_t bl33_image_ep_info;
+
+entry_point_info_t *bl31_plat_get_next_image_ep_info(uint32_t type)
+{
+	assert(sec_state_is_valid(type));
+	return type == NON_SECURE ? &bl33_image_ep_info : &bl32_image_ep_info;
+}
+
+/*******************************************************************************
+ * Gets SPSR for BL32 entry
+ ******************************************************************************/
+uint32_t sq_get_spsr_for_bl32_entry(void)
+{
+	/*
+	 * The Secure Payload Dispatcher service is responsible for
+	 * setting the SPSR prior to entry into the BL32 image.
+	 */
+	return 0;
+}
+
+/*******************************************************************************
+ * Gets SPSR for BL33 entry
+ ******************************************************************************/
+uint32_t sq_get_spsr_for_bl33_entry(void)
+{
+	unsigned long el_status;
+	unsigned int mode;
+	uint32_t spsr;
+
+	/* Figure out what mode we enter the non-secure world in */
+	el_status = read_id_aa64pfr0_el1() >> ID_AA64PFR0_EL2_SHIFT;
+	el_status &= ID_AA64PFR0_ELX_MASK;
+
+	mode = (el_status) ? MODE_EL2 : MODE_EL1;
+
+	spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
+	return spsr;
+}
+
+void bl31_early_platform_setup(bl31_params_t *from_bl2,
+				void *plat_params_from_bl2)
+{
+	/* Initialize the console to provide early debug support */
+	(void)console_pl011_register(PLAT_SQ_BOOT_UART_BASE,
+			       PLAT_SQ_BOOT_UART_CLK_IN_HZ,
+			       SQ_CONSOLE_BAUDRATE, &console);
+
+	console_set_scope(&console.console, CONSOLE_FLAG_BOOT |
+			  CONSOLE_FLAG_RUNTIME);
+
+	/* There are no parameters from BL2 if BL31 is a reset vector */
+	assert(from_bl2 == NULL);
+	assert(plat_params_from_bl2 == NULL);
+
+#ifdef BL32_BASE
+	/* Populate entry point information for BL32 */
+	SET_PARAM_HEAD(&bl32_image_ep_info,
+				PARAM_EP,
+				VERSION_1,
+				0);
+	SET_SECURITY_STATE(bl32_image_ep_info.h.attr, SECURE);
+	bl32_image_ep_info.pc = BL32_BASE;
+	bl32_image_ep_info.spsr = sq_get_spsr_for_bl32_entry();
+#endif /* BL32_BASE */
+
+	/* Populate entry point information for BL33 */
+	SET_PARAM_HEAD(&bl33_image_ep_info,
+				PARAM_EP,
+				VERSION_1,
+				0);
+	/*
+	 * Tell BL31 where the non-trusted software image
+	 * is located and the entry state information
+	 */
+	bl33_image_ep_info.pc = PRELOADED_BL33_BASE;
+	bl33_image_ep_info.spsr = sq_get_spsr_for_bl33_entry();
+	SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE);
+}
+
+static void sq_configure_sys_timer(void)
+{
+	unsigned int reg_val;
+
+	reg_val = (1 << CNTACR_RPCT_SHIFT) | (1 << CNTACR_RVCT_SHIFT);
+	reg_val |= (1 << CNTACR_RFRQ_SHIFT) | (1 << CNTACR_RVOFF_SHIFT);
+	reg_val |= (1 << CNTACR_RWVT_SHIFT) | (1 << CNTACR_RWPT_SHIFT);
+	mmio_write_32(SQ_SYS_TIMCTL_BASE +
+		      CNTACR_BASE(PLAT_SQ_NSTIMER_FRAME_ID), reg_val);
+
+	reg_val = (1 << CNTNSAR_NS_SHIFT(PLAT_SQ_NSTIMER_FRAME_ID));
+	mmio_write_32(SQ_SYS_TIMCTL_BASE + CNTNSAR, reg_val);
+}
+
+void bl31_platform_setup(void)
+{
+	/* Initialize the CCN interconnect */
+	plat_sq_interconnect_init();
+	plat_sq_interconnect_enter_coherency();
+
+	/* Initialize the GIC driver, cpu and distributor interfaces */
+	sq_gic_driver_init();
+	sq_gic_init();
+
+	/* Enable and initialize the System level generic timer */
+	mmio_write_32(SQ_SYS_CNTCTL_BASE + CNTCR_OFF,
+			CNTCR_FCREQ(0) | CNTCR_EN);
+
+	/* Allow access to the System counter timer module */
+	sq_configure_sys_timer();
+
+	/* Initialize power controller before setting up topology */
+	plat_sq_pwrc_setup();
+}
+
+void bl31_plat_runtime_setup(void)
+{
+	struct draminfo *di = (struct draminfo *)(unsigned long)DRAMINFO_BASE;
+
+	scpi_get_draminfo(di);
+}
+
+void bl31_plat_arch_setup(void)
+{
+	sq_mmap_setup(BL31_BASE, BL31_SIZE, NULL);
+	enable_mmu_el3(XLAT_TABLE_NC);
+}
+
+void bl31_plat_enable_mmu(uint32_t flags)
+{
+	enable_mmu_el3(flags | XLAT_TABLE_NC);
+}
+
+unsigned int plat_get_syscnt_freq2(void)
+{
+	unsigned int counter_base_frequency;
+
+	/* Read the frequency from Frequency modes table */
+	counter_base_frequency = mmio_read_32(SQ_SYS_CNTCTL_BASE + CNTFID_OFF);
+
+	/* The first entry of the frequency modes table must not be 0 */
+	if (counter_base_frequency == 0)
+		panic();
+
+	return counter_base_frequency;
+}
diff --git a/plat/socionext/synquacer/sq_ccn.c b/plat/socionext/synquacer/sq_ccn.c
new file mode 100644
index 0000000..bb70d5d
--- /dev/null
+++ b/plat/socionext/synquacer/sq_ccn.c
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <ccn.h>
+#include <platform_def.h>
+
+static const unsigned char master_to_rn_id_map[] = {
+	PLAT_SQ_CLUSTER_TO_CCN_ID_MAP
+};
+
+static const ccn_desc_t sq_ccn_desc = {
+	.periphbase = PLAT_SQ_CCN_BASE,
+	.num_masters = ARRAY_SIZE(master_to_rn_id_map),
+	.master_to_rn_id_map = master_to_rn_id_map
+};
+
+/******************************************************************************
+ * Helper function to initialize SQ CCN driver.
+ *****************************************************************************/
+void plat_sq_interconnect_init(void)
+{
+	ccn_init(&sq_ccn_desc);
+}
+
+/******************************************************************************
+ * Helper function to place current master into coherency
+ *****************************************************************************/
+void plat_sq_interconnect_enter_coherency(void)
+{
+	ccn_enter_snoop_dvm_domain(1 << MPIDR_AFFLVL1_VAL(read_mpidr_el1()));
+}
+
+/******************************************************************************
+ * Helper function to remove current master from coherency
+ *****************************************************************************/
+void plat_sq_interconnect_exit_coherency(void)
+{
+	ccn_exit_snoop_dvm_domain(1 << MPIDR_AFFLVL1_VAL(read_mpidr_el1()));
+}
diff --git a/plat/socionext/synquacer/sq_gicv3.c b/plat/socionext/synquacer/sq_gicv3.c
new file mode 100644
index 0000000..94e5a66
--- /dev/null
+++ b/plat/socionext/synquacer/sq_gicv3.c
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <gicv3.h>
+#include <interrupt_props.h>
+#include <platform.h>
+#include <platform_def.h>
+
+#include "sq_common.h"
+
+static uintptr_t sq_rdistif_base_addrs[PLATFORM_CORE_COUNT];
+
+static const interrupt_prop_t sq_interrupt_props[] = {
+	/* G0 interrupts */
+
+	/* SGI0 */
+	INTR_PROP_DESC(8, GIC_HIGHEST_SEC_PRIORITY, INTR_GROUP0,
+			GIC_INTR_CFG_EDGE),
+	/* SGI6 */
+	INTR_PROP_DESC(14, GIC_HIGHEST_SEC_PRIORITY, INTR_GROUP0,
+			GIC_INTR_CFG_EDGE),
+
+	/* G1S interrupts */
+
+	/* Timer */
+	INTR_PROP_DESC(29, GIC_HIGHEST_SEC_PRIORITY, INTR_GROUP1S,
+			GIC_INTR_CFG_LEVEL),
+	/* SGI1 */
+	INTR_PROP_DESC(9, GIC_HIGHEST_SEC_PRIORITY, INTR_GROUP1S,
+			GIC_INTR_CFG_EDGE),
+	/* SGI2 */
+	INTR_PROP_DESC(10, GIC_HIGHEST_SEC_PRIORITY, INTR_GROUP1S,
+			GIC_INTR_CFG_EDGE),
+	/* SGI3 */
+	INTR_PROP_DESC(11, GIC_HIGHEST_SEC_PRIORITY, INTR_GROUP1S,
+			GIC_INTR_CFG_EDGE),
+	/* SGI4 */
+	INTR_PROP_DESC(12, GIC_HIGHEST_SEC_PRIORITY, INTR_GROUP1S,
+			GIC_INTR_CFG_EDGE),
+	/* SGI5 */
+	INTR_PROP_DESC(13, GIC_HIGHEST_SEC_PRIORITY, INTR_GROUP1S,
+			GIC_INTR_CFG_EDGE),
+	/* SGI7 */
+	INTR_PROP_DESC(15, GIC_HIGHEST_SEC_PRIORITY, INTR_GROUP1S,
+			GIC_INTR_CFG_EDGE)
+};
+
+static unsigned int sq_mpidr_to_core_pos(u_register_t mpidr)
+{
+	return plat_core_pos_by_mpidr(mpidr);
+}
+
+static const struct gicv3_driver_data sq_gic_driver_data = {
+		.gicd_base = PLAT_SQ_GICD_BASE,
+		.gicr_base = PLAT_SQ_GICR_BASE,
+		.interrupt_props = sq_interrupt_props,
+		.interrupt_props_num = ARRAY_SIZE(sq_interrupt_props),
+		.rdistif_num = PLATFORM_CORE_COUNT,
+		.rdistif_base_addrs = sq_rdistif_base_addrs,
+		.mpidr_to_core_pos = sq_mpidr_to_core_pos,
+};
+
+void sq_gic_driver_init(void)
+{
+	gicv3_driver_init(&sq_gic_driver_data);
+}
+
+void sq_gic_init(void)
+{
+	gicv3_distif_init();
+	gicv3_rdistif_init(plat_my_core_pos());
+	gicv3_cpuif_enable(plat_my_core_pos());
+}
+
+void sq_gic_cpuif_enable(void)
+{
+	gicv3_cpuif_enable(plat_my_core_pos());
+}
+
+void sq_gic_cpuif_disable(void)
+{
+	gicv3_cpuif_disable(plat_my_core_pos());
+}
+
+void sq_gic_pcpu_init(void)
+{
+	gicv3_rdistif_init(plat_my_core_pos());
+}
diff --git a/plat/socionext/synquacer/sq_helpers.S b/plat/socionext/synquacer/sq_helpers.S
new file mode 100644
index 0000000..558aa15
--- /dev/null
+++ b/plat/socionext/synquacer/sq_helpers.S
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <platform_def.h>
+
+	.global	sq_calc_core_pos
+	.global	plat_my_core_pos
+	.global	platform_mem_init
+	.global	plat_is_my_cpu_primary
+	.global plat_secondary_cold_boot_setup
+	.global	plat_crash_console_init
+	.global	plat_crash_console_putc
+	.global	plat_crash_console_flush
+
+/*
+ * unsigned int sq_calc_core_pos(u_register_t mpidr)
+ * core_pos = (cluster_id * max_cpus_per_cluster) + core_id
+ */
+func sq_calc_core_pos
+	and	x1, x0, #MPIDR_CPU_MASK
+	and	x0, x0, #MPIDR_CLUSTER_MASK
+	add	x0, x1, x0, lsr #7
+	ret
+endfunc sq_calc_core_pos
+
+func plat_my_core_pos
+	mrs	x0, mpidr_el1
+	b	sq_calc_core_pos
+endfunc plat_my_core_pos
+
+func platform_mem_init
+	ret
+endfunc platform_mem_init
+
+/*
+ * Secondary CPUs are placed in a holding pen, waiting for their mailbox
+ * to be populated. Note that all CPUs share the same mailbox; therefore,
+ * populating it will release all CPUs from their holding pen. If
+ * finer-grained control is needed then this should be handled in the
+ * code that secondary CPUs jump to.
+ */
+func plat_secondary_cold_boot_setup
+	/* Wait until the mailbox gets populated */
+poll_mailbox:
+	ldr	x0, sq_sec_entrypoint
+	cbz	x0, 1f
+	br	x0
+1:
+	wfe
+	b	poll_mailbox
+endfunc plat_secondary_cold_boot_setup
+
+/*
+ * Find out whether the current cpu is the primary
+ * cpu (applicable only after a cold boot)
+ */
+func plat_is_my_cpu_primary
+	mov	x9, x30
+	bl	plat_my_core_pos
+	ldr	x1, =SQ_BOOT_CFG_ADDR
+	ldr	x1, [x1]
+	ubfx	x1, x1, #PLAT_SQ_PRIMARY_CPU_SHIFT, \
+			#PLAT_SQ_PRIMARY_CPU_BIT_WIDTH
+	cmp	x0, x1
+	cset	w0, eq
+	ret	x9
+endfunc plat_is_my_cpu_primary
+
+/*
+ * int plat_crash_console_init(void)
+ * Function to initialize the crash console
+ * without a C Runtime to print crash report.
+ * Clobber list : x0, x1, x2
+ */
+func plat_crash_console_init
+	mov_imm x0, PLAT_SQ_BOOT_UART_BASE
+	mov_imm x1, PLAT_SQ_BOOT_UART_CLK_IN_HZ
+	mov_imm x2, SQ_CONSOLE_BAUDRATE
+	b	console_pl011_core_init
+endfunc plat_crash_console_init
+
+/*
+ * int plat_crash_console_putc(int c)
+ * Function to print a character on the crash
+ * console without a C Runtime.
+ * Clobber list : x1, x2
+ */
+func plat_crash_console_putc
+	mov_imm	x1, PLAT_SQ_BOOT_UART_BASE
+	b	console_pl011_core_putc
+endfunc plat_crash_console_putc
+
+/*
+ * int plat_crash_console_flush(void)
+ * Function to force a write of all buffered
+ * data that hasn't been output.
+ * Out : return -1 on error else return 0.
+ * Clobber list : x0, x1
+ */
+func plat_crash_console_flush
+	mov_imm	x0, PLAT_SQ_BOOT_UART_BASE
+	b	console_pl011_core_flush
+endfunc plat_crash_console_flush
diff --git a/plat/socionext/synquacer/sq_psci.c b/plat/socionext/synquacer/sq_psci.c
new file mode 100644
index 0000000..c327f1d
--- /dev/null
+++ b/plat/socionext/synquacer/sq_psci.c
@@ -0,0 +1,188 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <cassert.h>
+#include <debug.h>
+#include <delay_timer.h>
+#include <errno.h>
+#include <generic_delay_timer.h>
+#include <platform_def.h>
+#include <sq_common.h>
+#include "sq_scpi.h"
+#include <psci.h>
+
+/* Macros to read the SQ power domain state */
+#define SQ_PWR_LVL0	MPIDR_AFFLVL0
+#define SQ_PWR_LVL1	MPIDR_AFFLVL1
+#define SQ_PWR_LVL2	MPIDR_AFFLVL2
+
+#define SQ_CORE_PWR_STATE(state)	(state)->pwr_domain_state[SQ_PWR_LVL0]
+#define SQ_CLUSTER_PWR_STATE(state)	(state)->pwr_domain_state[SQ_PWR_LVL1]
+#define SQ_SYSTEM_PWR_STATE(state)	((PLAT_MAX_PWR_LVL > SQ_PWR_LVL1) ?\
+				(state)->pwr_domain_state[SQ_PWR_LVL2] : 0)
+
+uintptr_t sq_sec_entrypoint;
+
+int sq_pwr_domain_on(u_register_t mpidr)
+{
+	/*
+	 * SCP takes care of powering up parent power domains so we
+	 * only need to care about level 0
+	 */
+	scpi_set_sq_power_state(mpidr, scpi_power_on, scpi_power_on,
+				 scpi_power_on);
+
+	return PSCI_E_SUCCESS;
+}
+
+static void sq_pwr_domain_on_finisher_common(
+		const psci_power_state_t *target_state)
+{
+	assert(SQ_CORE_PWR_STATE(target_state) == SQ_LOCAL_STATE_OFF);
+
+	/*
+	 * Perform the common cluster specific operations i.e enable coherency
+	 * if this cluster was off.
+	 */
+	if (SQ_CLUSTER_PWR_STATE(target_state) == SQ_LOCAL_STATE_OFF)
+		plat_sq_interconnect_enter_coherency();
+}
+
+void sq_pwr_domain_on_finish(const psci_power_state_t *target_state)
+{
+	/* Assert that the system power domain need not be initialized */
+	assert(SQ_SYSTEM_PWR_STATE(target_state) == SQ_LOCAL_STATE_RUN);
+
+	sq_pwr_domain_on_finisher_common(target_state);
+
+	/* Program the gic per-cpu distributor or re-distributor interface */
+	sq_gic_pcpu_init();
+
+	/* Enable the gic cpu interface */
+	sq_gic_cpuif_enable();
+}
+
+static void sq_power_down_common(const psci_power_state_t *target_state)
+{
+	uint32_t cluster_state = scpi_power_on;
+	uint32_t system_state = scpi_power_on;
+
+	/* Prevent interrupts from spuriously waking up this cpu */
+	sq_gic_cpuif_disable();
+
+	/* Check if power down at system power domain level is requested */
+	if (SQ_SYSTEM_PWR_STATE(target_state) == SQ_LOCAL_STATE_OFF)
+		system_state = scpi_power_retention;
+
+	/* Cluster is to be turned off, so disable coherency */
+	if (SQ_CLUSTER_PWR_STATE(target_state) == SQ_LOCAL_STATE_OFF) {
+		plat_sq_interconnect_exit_coherency();
+		cluster_state = scpi_power_off;
+	}
+
+	/*
+	 * Ask the SCP to power down the appropriate components depending upon
+	 * their state.
+	 */
+	scpi_set_sq_power_state(read_mpidr_el1(),
+				 scpi_power_off,
+				 cluster_state,
+				 system_state);
+}
+
+void sq_pwr_domain_off(const psci_power_state_t *target_state)
+{
+	sq_power_down_common(target_state);
+}
+
+void __dead2 sq_system_off(void)
+{
+	volatile uint32_t *gpio = (uint32_t *)PLAT_SQ_GPIO_BASE;
+
+	/* set PD[9] high to power off the system */
+	gpio[5] |= 0x2;		/* set output */
+	gpio[1] |= 0x2;		/* set high */
+	dmbst();
+
+	generic_delay_timer_init();
+
+	mdelay(1);
+
+	while (1) {
+		gpio[1] &= ~0x2;	/* set low */
+		dmbst();
+
+		mdelay(1);
+
+		gpio[1] |= 0x2;		/* set high */
+		dmbst();
+
+		mdelay(100);
+	}
+
+	wfi();
+	ERROR("SQ System Off: operation not handled.\n");
+	panic();
+}
+
+void __dead2 sq_system_reset(void)
+{
+	uint32_t response;
+
+	/* Send the system reset request to the SCP */
+	response = scpi_sys_power_state(scpi_system_reboot);
+
+	if (response != SCP_OK) {
+		ERROR("SQ System Reset: SCP error %u.\n", response);
+		panic();
+	}
+	wfi();
+	ERROR("SQ System Reset: operation not handled.\n");
+	panic();
+}
+
+void sq_cpu_standby(plat_local_state_t cpu_state)
+{
+	unsigned int scr;
+
+	assert(cpu_state == SQ_LOCAL_STATE_RET);
+
+	scr = read_scr_el3();
+	/* Enable PhysicalIRQ bit for NS world to wake the CPU */
+	write_scr_el3(scr | SCR_IRQ_BIT);
+	isb();
+	dsb();
+	wfi();
+
+	/*
+	 * Restore SCR to the original value, synchronisation of scr_el3 is
+	 * done by eret while el3_exit to save some execution cycles.
+	 */
+	write_scr_el3(scr);
+}
+
+const plat_psci_ops_t sq_psci_ops = {
+	.pwr_domain_on		= sq_pwr_domain_on,
+	.pwr_domain_off		= sq_pwr_domain_off,
+	.pwr_domain_on_finish	= sq_pwr_domain_on_finish,
+	.cpu_standby		= sq_cpu_standby,
+	.system_off		= sq_system_off,
+	.system_reset		= sq_system_reset,
+};
+
+int plat_setup_psci_ops(uintptr_t sec_entrypoint,
+			const struct plat_psci_ops **psci_ops)
+{
+	sq_sec_entrypoint = sec_entrypoint;
+	flush_dcache_range((uint64_t)&sq_sec_entrypoint,
+			   sizeof(sq_sec_entrypoint));
+
+	*psci_ops = &sq_psci_ops;
+
+	return 0;
+}
diff --git a/plat/socionext/synquacer/sq_topology.c b/plat/socionext/synquacer/sq_topology.c
new file mode 100644
index 0000000..aa20355
--- /dev/null
+++ b/plat/socionext/synquacer/sq_topology.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <sq_common.h>
+#include <platform_def.h>
+
+unsigned char sq_pd_tree_desc[PLAT_CLUSTER_COUNT + 1];
+
+int plat_core_pos_by_mpidr(u_register_t mpidr)
+{
+	unsigned int cluster_id, cpu_id;
+
+	cluster_id = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
+	if (cluster_id >= PLAT_CLUSTER_COUNT)
+		return -1;
+
+	cpu_id = (mpidr >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK;
+	if (cpu_id >= PLAT_MAX_CORES_PER_CLUSTER)
+		return -1;
+
+	return sq_calc_core_pos(mpidr);
+}
+
+const unsigned char *plat_get_power_domain_tree_desc(void)
+{
+	int i;
+
+	sq_pd_tree_desc[0] = PLAT_CLUSTER_COUNT;
+
+	for (i = 0; i < PLAT_CLUSTER_COUNT; i++)
+		sq_pd_tree_desc[i + 1] = PLAT_MAX_CORES_PER_CLUSTER;
+
+	return sq_pd_tree_desc;
+}
diff --git a/plat/socionext/synquacer/sq_xlat_setup.c b/plat/socionext/synquacer/sq_xlat_setup.c
new file mode 100644
index 0000000..ae14700
--- /dev/null
+++ b/plat/socionext/synquacer/sq_xlat_setup.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <debug.h>
+#include <platform_def.h>
+#include <xlat_tables_v2.h>
+
+#define SQ_REG_REGION_BASE	0x20000000ULL
+#define SQ_REG_REGION_SIZE	0x60000000ULL
+
+void sq_mmap_setup(uintptr_t total_base, size_t total_size,
+			 const struct mmap_region *mmap)
+{
+	VERBOSE("Trusted RAM seen by this BL image: %p - %p\n",
+		(void *)total_base, (void *)(total_base + total_size));
+	mmap_add_region(total_base, total_base,
+			total_size,
+			MT_NON_CACHEABLE | MT_RW | MT_SECURE);
+
+	/* remap the code section */
+	VERBOSE("Code region: %p - %p\n",
+		(void *)BL_CODE_BASE, (void *)BL_CODE_END);
+	mmap_add_region(BL_CODE_BASE, BL_CODE_BASE,
+			round_up(BL_CODE_END, PAGE_SIZE) - BL_CODE_BASE,
+			MT_NON_CACHEABLE | MT_RO | MT_SECURE);
+
+	/* Re-map the read-only data section */
+	VERBOSE("Read-only data region: %p - %p\n",
+		(void *)BL_RO_DATA_BASE, (void *)BL_RO_DATA_END);
+	mmap_add_region(BL_RO_DATA_BASE, BL_RO_DATA_BASE,
+			round_up(BL_RO_DATA_END, PAGE_SIZE) - BL_RO_DATA_BASE,
+			(MT_NON_CACHEABLE | MT_RO | MT_EXECUTE_NEVER |
+			 MT_SECURE));
+
+	/* remap the coherent memory region */
+	VERBOSE("Coherent region: %p - %p\n",
+		(void *)BL_COHERENT_RAM_BASE, (void *)BL_COHERENT_RAM_END);
+	mmap_add_region(BL_COHERENT_RAM_BASE, BL_COHERENT_RAM_BASE,
+			BL_COHERENT_RAM_END - BL_COHERENT_RAM_BASE,
+			MT_DEVICE | MT_RW | MT_SECURE);
+
+	/* register region */
+	mmap_add_region(SQ_REG_REGION_BASE, SQ_REG_REGION_BASE,
+			SQ_REG_REGION_SIZE,
+			MT_DEVICE | MT_RW | MT_SECURE);
+
+	/* additional regions if needed */
+	if (mmap)
+		mmap_add(mmap);
+
+	init_xlat_tables();
+}
diff --git a/readme.rst b/readme.rst
index 3738c97..c87936c 100644
--- a/readme.rst
+++ b/readme.rst
@@ -182,7 +182,7 @@
 -  QEMU emulator
 -  Raspberry Pi 3 board
 -  RockChip RK3328, RK3368 and RK3399 SoCs
--  Socionext UniPhier SoC family
+-  Socionext UniPhier SoC family and SynQuacer SC2A11 SoCs
 -  Texas Instruments K3 SoCs
 -  Xilinx Zynq UltraScale + MPSoC
 
diff --git a/services/std_svc/spm/spm_main.c b/services/std_svc/spm/spm_main.c
index f63f9c4..585707d 100644
--- a/services/std_svc/spm/spm_main.c
+++ b/services/std_svc/spm/spm_main.c
@@ -181,6 +181,35 @@
 }
 
 /*******************************************************************************
+ * Function to perform a call to a Secure Partition.
+ ******************************************************************************/
+uint64_t spm_sp_call(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3)
+{
+	uint64_t rc;
+	sp_context_t *sp_ptr = &sp_ctx;
+
+	/* Wait until the Secure Partition is idle and set it to busy. */
+	sp_state_wait_switch(sp_ptr, SP_STATE_IDLE, SP_STATE_BUSY);
+
+	/* Set values for registers on SP entry */
+	cpu_context_t *cpu_ctx = &(sp_ptr->cpu_ctx);
+
+	write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X0, smc_fid);
+	write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X1, x1);
+	write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X2, x2);
+	write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X3, x3);
+
+	/* Jump to the Secure Partition. */
+	rc = spm_sp_synchronous_entry(sp_ptr);
+
+	/* Flag Secure Partition as idle. */
+	assert(sp_ptr->state == SP_STATE_BUSY);
+	sp_state_set(sp_ptr, SP_STATE_IDLE);
+
+	return rc;
+}
+
+/*******************************************************************************
  * MM_COMMUNICATE handler
  ******************************************************************************/
 static uint64_t mm_communicate(uint32_t smc_fid, uint64_t mm_cookie,
@@ -188,7 +217,6 @@
 			       uint64_t comm_size_address, void *handle)
 {
 	uint64_t rc;
-	sp_context_t *ctx = &sp_ctx;
 
 	/* Cookie. Reserved for future use. It must be zero. */
 	if (mm_cookie != 0U) {
@@ -208,23 +236,8 @@
 	/* Save the Normal world context */
 	cm_el1_sysregs_context_save(NON_SECURE);
 
-	/* Wait until the Secure Partition is idle and set it to busy. */
-	sp_state_wait_switch(ctx, SP_STATE_IDLE, SP_STATE_BUSY);
-
-	/* Set values for registers on SP entry */
-	cpu_context_t *cpu_ctx = &(ctx->cpu_ctx);
-
-	write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X0, smc_fid);
-	write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X1, comm_buffer_address);
-	write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X2, comm_size_address);
-	write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X3, plat_my_core_pos());
-
-	/* Jump to the Secure Partition. */
-	rc = spm_sp_synchronous_entry(ctx);
-
-	/* Flag Secure Partition as idle. */
-	assert(ctx->state == SP_STATE_BUSY);
-	sp_state_set(ctx, SP_STATE_IDLE);
+	rc = spm_sp_call(smc_fid, comm_buffer_address, comm_size_address,
+			 plat_my_core_pos());
 
 	/* Restore non-secure state */
 	cm_el1_sysregs_context_restore(NON_SECURE);