Merge pull request #1708 from Yann-lms/warnings

Add the ability to enable additional compilation warnings
diff --git a/Makefile b/Makefile
index 6f14825..9525d41 100644
--- a/Makefile
+++ b/Makefile
@@ -543,6 +543,10 @@
 FIPTOOLPATH		?=	tools/fiptool
 FIPTOOL			?=	${FIPTOOLPATH}/fiptool${BIN_EXT}
 
+# Variables for use with sptool
+SPTOOLPATH		?=	tools/sptool
+SPTOOL			?=	${SPTOOLPATH}/sptool${BIN_EXT}
+
 # Variables for use with ROMLIB
 ROMLIBPATH		?=	lib/romlib
 
@@ -612,6 +616,7 @@
 $(eval $(call assert_boolean,SAVE_KEYS))
 $(eval $(call assert_boolean,SEPARATE_CODE_AND_RODATA))
 $(eval $(call assert_boolean,SPIN_ON_BL1_EXIT))
+$(eval $(call assert_boolean,SPM_DEPRECATED))
 $(eval $(call assert_boolean,TRUSTED_BOARD_BOOT))
 $(eval $(call assert_boolean,USE_COHERENT_MEM))
 $(eval $(call assert_boolean,USE_ROMLIB))
@@ -665,6 +670,7 @@
 $(eval $(call add_define,SMCCC_MAJOR_VERSION))
 $(eval $(call add_define,SPD_${SPD}))
 $(eval $(call add_define,SPIN_ON_BL1_EXIT))
+$(eval $(call add_define,SPM_DEPRECATED))
 $(eval $(call add_define,TRUSTED_BOARD_BOOT))
 $(eval $(call add_define,USE_COHERENT_MEM))
 $(eval $(call add_define,USE_ROMLIB))
@@ -699,7 +705,7 @@
 # Build targets
 ################################################################################
 
-.PHONY:	all msg_start clean realclean distclean cscope locate-checkpatch checkcodebase checkpatch fiptool fip fwu_fip certtool dtbs
+.PHONY:	all msg_start clean realclean distclean cscope locate-checkpatch checkcodebase checkpatch fiptool sptool fip fwu_fip certtool dtbs
 .SUFFIXES:
 
 all: msg_start
@@ -786,6 +792,7 @@
 	$(call SHELL_REMOVE_DIR,${BUILD_BASE})
 	$(call SHELL_DELETE_ALL, ${CURDIR}/cscope.*)
 	${Q}${MAKE} --no-print-directory -C ${FIPTOOLPATH} clean
+	${Q}${MAKE} --no-print-directory -C ${SPTOOLPATH} clean
 	${Q}${MAKE} PLAT=${PLAT} --no-print-directory -C ${CRTTOOLPATH} clean
 	${Q}${MAKE} --no-print-directory -C ${ROMLIBPATH} clean
 
@@ -866,6 +873,11 @@
 ${FIPTOOL}:
 	${Q}${MAKE} CPPFLAGS="-DVERSION='\"${VERSION_STRING}\"'" --no-print-directory -C ${FIPTOOLPATH}
 
+sptool: ${SPTOOL}
+.PHONY: ${SPTOOL}
+${SPTOOL}:
+	${Q}${MAKE} CPPFLAGS="-DVERSION='\"${VERSION_STRING}\"'" --no-print-directory -C ${SPTOOLPATH}
+
 .PHONY: libraries
 romlib.bin: libraries
 	${Q}${MAKE} BUILD_PLAT=${BUILD_PLAT} INCLUDES='${INCLUDES}' DEFINES='${DEFINES}' --no-print-directory -C ${ROMLIBPATH} all
@@ -905,6 +917,7 @@
 	@echo "  distclean      Remove all build artifacts for all platforms"
 	@echo "  certtool       Build the Certificate generation tool"
 	@echo "  fiptool        Build the Firmware Image Package (FIP) creation tool"
+	@echo "  sptool         Build the Secure Partition Package creation tool"
 	@echo "  dtbs           Build the Device Tree Blobs (if required for the platform)"
 	@echo ""
 	@echo "Note: most build targets require PLAT to be set to a specific platform."
diff --git a/bl31/aarch64/runtime_exceptions.S b/bl31/aarch64/runtime_exceptions.S
index 77bd63e..ab61e8c 100644
--- a/bl31/aarch64/runtime_exceptions.S
+++ b/bl31/aarch64/runtime_exceptions.S
@@ -382,8 +382,16 @@
 	 */
 	tbz	x0, #(FUNCID_NAMESPACE_SHIFT + 1), compat_or_vendor
 
-	/* Namespaces SPRT and SPCI currently unimplemented */
+	/* Namespace is b'10 (SPRT) or b'11 (SPCI) */
+#if ENABLE_SPM
+	tst	x0, #(1 << FUNCID_NAMESPACE_SHIFT)
+	adr	x15, spci_smc_handler
+	adr	x16, sprt_smc_handler
+	csel	x15, x15, x16, ne
+	b	prepare_enter_handler
+#else
 	b	smc_unknown
+#endif
 
 compat_or_vendor:
 
@@ -401,6 +409,8 @@
 
 	load_rt_svc_desc_pointer
 
+prepare_enter_handler:
+
 #endif /* SMCCC_MAJOR_VERSION */
 
 	/*
diff --git a/bl31/bl31.mk b/bl31/bl31.mk
index 019a19e..eddd164 100644
--- a/bl31/bl31.mk
+++ b/bl31/bl31.mk
@@ -8,11 +8,16 @@
 # Include SPM Makefile
 ################################################################################
 ifeq (${ENABLE_SPM},1)
-ifeq (${EL3_EXCEPTION_HANDLING},0)
-  $(error EL3_EXCEPTION_HANDLING must be 1 for SPM support)
-endif
-$(info Including SPM makefile)
-include services/std_svc/spm/spm.mk
+  ifeq (${SPM_DEPRECATED},1)
+    ifeq (${EL3_EXCEPTION_HANDLING},0)
+      $(error EL3_EXCEPTION_HANDLING must be 1 for SPM support)
+    endif
+    $(info Including deprecated SPM makefile)
+    include services/std_svc/spm_deprecated/spm.mk
+  else
+    $(info Including SPM makefile)
+    include services/std_svc/spm/spm.mk
+  endif
 endif
 
 
diff --git a/drivers/arm/ccn/ccn.c b/drivers/arm/ccn/ccn.c
index 59a7576..d46e020 100644
--- a/drivers/arm/ccn/ccn.c
+++ b/drivers/arm/ccn/ccn.c
@@ -553,7 +553,14 @@
 		return REGION_ID_LIMIT;
 	}
 
-	region_id += node_pos_in_map;
+	/*
+	 * According to section 3.1.1 of the CCN specification, the region
+	 * offset for RN-I components is calculated as (128 + NodeID of RN-I).
+	 */
+	if (node_type == NODE_TYPE_RNI)
+		region_id += node_id;
+	else
+		region_id += node_pos_in_map;
 
 	return region_id;
 }
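As a worked example of the new RN-I path: using the base offset of 128 quoted from the CCN specification above, an RN-I component with NodeID 9 is assigned region offset 128 + 9 = 137 regardless of its position in the node-ID map, while all other node types keep using node_pos_in_map as before.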
diff --git a/include/common/aarch32/el3_common_macros.S b/include/common/aarch32/el3_common_macros.S
index 9b18ba3..2438423 100644
--- a/include/common/aarch32/el3_common_macros.S
+++ b/include/common/aarch32/el3_common_macros.S
@@ -177,9 +177,13 @@
 		 *
 		 * SCTLR.V: Set to zero to select the normal exception vectors
 		 *  with base address held in VBAR.
+		 *
+		 * SCTLR.DSSBS: Set to zero to default PSTATE.SSBS to zero (no
+		 *  speculative store bypass) upon exception entry to EL3.
 		 * -------------------------------------------------------------
 		 */
-		ldr     r0, =(SCTLR_RESET_VAL & ~(SCTLR_TE_BIT | SCTLR_EE_BIT | SCTLR_V_BIT))
+		ldr     r0, =(SCTLR_RESET_VAL & ~(SCTLR_TE_BIT | SCTLR_EE_BIT | \
+				SCTLR_V_BIT | SCTLR_DSSBS_BIT))
 		stcopr	r0, SCTLR
 		isb
 	.endif /* _init_sctlr */
diff --git a/include/common/aarch64/el3_common_macros.S b/include/common/aarch64/el3_common_macros.S
index adfb54e..008daca 100644
--- a/include/common/aarch64/el3_common_macros.S
+++ b/include/common/aarch64/el3_common_macros.S
@@ -194,10 +194,13 @@
 		 * SCTLR_EL3.SA: Set to zero to disable Stack Alignment check.
 		 *
 		 * SCTLR_EL3.A: Set to zero to disable Alignment fault checking.
+		 *
+		 * SCTLR_EL3.DSSBS: Set to zero to default PSTATE.SSBS to zero
+		 *  (no speculative store bypass) upon exception entry to EL3.
 		 * -------------------------------------------------------------
 		 */
 		mov_imm	x0, (SCTLR_RESET_VAL & ~(SCTLR_EE_BIT | SCTLR_WXN_BIT \
-				| SCTLR_SA_BIT | SCTLR_A_BIT))
+				| SCTLR_SA_BIT | SCTLR_A_BIT | SCTLR_DSSBS_BIT))
 		msr	sctlr_el3, x0
 		isb
 	.endif /* _init_sctlr */
diff --git a/include/lib/aarch32/arch.h b/include/lib/aarch32/arch.h
index 3e5e3fb..fa6e5db 100644
--- a/include/lib/aarch32/arch.h
+++ b/include/lib/aarch32/arch.h
@@ -132,6 +132,7 @@
 #define SCTLR_TRE_BIT		(U(1) << 28)
 #define SCTLR_AFE_BIT		(U(1) << 29)
 #define SCTLR_TE_BIT		(U(1) << 30)
+#define SCTLR_DSSBS_BIT		(U(1) << 31)
 #define SCTLR_RESET_VAL         (SCTLR_RES1 | SCTLR_NTWE_BIT |		\
 				SCTLR_NTWI_BIT | SCTLR_CP15BEN_BIT)
 
diff --git a/include/lib/aarch64/arch.h b/include/lib/aarch64/arch.h
index d7867bc..97595e9 100644
--- a/include/lib/aarch64/arch.h
+++ b/include/lib/aarch64/arch.h
@@ -198,6 +198,12 @@
 #define ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED	ULL(0x1)
 #define ID_AA64MMFR0_EL1_TGRAN16_NOT_SUPPORTED	ULL(0x0)
 
+/* ID_AA64PFR1_EL1 definitions */
+#define ID_AA64PFR1_EL1_SSBS_SHIFT	U(4)
+#define ID_AA64PFR1_EL1_SSBS_MASK	ULL(0xf)
+
+#define SSBS_UNAVAILABLE	ULL(0)	/* No architectural SSBS support */
+
 /* ID_PFR1_EL1 definitions */
 #define ID_PFR1_VIRTEXT_SHIFT	U(12)
 #define ID_PFR1_VIRTEXT_MASK	U(0xf)
@@ -219,29 +225,30 @@
 			(U(1) << 22) | (U(1) << 18) | (U(1) << 16) | \
 			(U(1) << 11) | (U(1) << 5) | (U(1) << 4))
 
-#define SCTLR_M_BIT		(U(1) << 0)
-#define SCTLR_A_BIT		(U(1) << 1)
-#define SCTLR_C_BIT		(U(1) << 2)
-#define SCTLR_SA_BIT		(U(1) << 3)
-#define SCTLR_SA0_BIT		(U(1) << 4)
-#define SCTLR_CP15BEN_BIT	(U(1) << 5)
-#define SCTLR_ITD_BIT		(U(1) << 7)
-#define SCTLR_SED_BIT		(U(1) << 8)
-#define SCTLR_UMA_BIT		(U(1) << 9)
-#define SCTLR_I_BIT		(U(1) << 12)
-#define SCTLR_V_BIT		(U(1) << 13)
-#define SCTLR_DZE_BIT		(U(1) << 14)
-#define SCTLR_UCT_BIT		(U(1) << 15)
-#define SCTLR_NTWI_BIT		(U(1) << 16)
-#define SCTLR_NTWE_BIT		(U(1) << 18)
-#define SCTLR_WXN_BIT		(U(1) << 19)
-#define SCTLR_UWXN_BIT		(U(1) << 20)
-#define SCTLR_E0E_BIT		(U(1) << 24)
-#define SCTLR_EE_BIT		(U(1) << 25)
-#define SCTLR_UCI_BIT		(U(1) << 26)
-#define SCTLR_TRE_BIT		(U(1) << 28)
-#define SCTLR_AFE_BIT		(U(1) << 29)
-#define SCTLR_TE_BIT		(U(1) << 30)
+#define SCTLR_M_BIT		(ULL(1) << 0)
+#define SCTLR_A_BIT		(ULL(1) << 1)
+#define SCTLR_C_BIT		(ULL(1) << 2)
+#define SCTLR_SA_BIT		(ULL(1) << 3)
+#define SCTLR_SA0_BIT		(ULL(1) << 4)
+#define SCTLR_CP15BEN_BIT	(ULL(1) << 5)
+#define SCTLR_ITD_BIT		(ULL(1) << 7)
+#define SCTLR_SED_BIT		(ULL(1) << 8)
+#define SCTLR_UMA_BIT		(ULL(1) << 9)
+#define SCTLR_I_BIT		(ULL(1) << 12)
+#define SCTLR_V_BIT		(ULL(1) << 13)
+#define SCTLR_DZE_BIT		(ULL(1) << 14)
+#define SCTLR_UCT_BIT		(ULL(1) << 15)
+#define SCTLR_NTWI_BIT		(ULL(1) << 16)
+#define SCTLR_NTWE_BIT		(ULL(1) << 18)
+#define SCTLR_WXN_BIT		(ULL(1) << 19)
+#define SCTLR_UWXN_BIT		(ULL(1) << 20)
+#define SCTLR_E0E_BIT		(ULL(1) << 24)
+#define SCTLR_EE_BIT		(ULL(1) << 25)
+#define SCTLR_UCI_BIT		(ULL(1) << 26)
+#define SCTLR_TRE_BIT		(ULL(1) << 28)
+#define SCTLR_AFE_BIT		(ULL(1) << 29)
+#define SCTLR_TE_BIT		(ULL(1) << 30)
+#define SCTLR_DSSBS_BIT		(ULL(1) << 44)
 #define SCTLR_RESET_VAL		SCTLR_EL3_RES1
 
 /* CPACR_El1 definitions */
diff --git a/include/lib/sprt/sprt_common.h b/include/lib/sprt/sprt_common.h
new file mode 100644
index 0000000..27d5027
--- /dev/null
+++ b/include/lib/sprt/sprt_common.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SPRT_COMMON_H
+#define SPRT_COMMON_H
+
+#define SPRT_MAX_MSG_ARGS	6
+
+/*
+ * Message types supported.
+ */
+#define SPRT_MSG_TYPE_SERVICE_HANDLE_OPEN		1
+#define SPRT_MSG_TYPE_SERVICE_HANDLE_CLOSE		2
+/* TODO: Add other types of SPRT messages. */
+#define SPRT_MSG_TYPE_SERVICE_TUN_REQUEST		10
+
+/*
+ * Struct that defines the layout of the fields corresponding to a request in
+ * shared memory.
+ */
+struct __attribute__((__packed__)) sprt_queue_entry_message {
+	uint32_t type;		/* Type of message (result of an SPCI call). */
+	uint16_t client_id;	/* SPCI client ID */
+	uint16_t service_handle;/* SPCI service handle */
+	uint32_t session_id;	/* Optional SPCI session ID */
+	uint32_t token;		/* SPCI request token */
+	uint64_t args[SPRT_MAX_MSG_ARGS];
+};
+
+#define SPRT_QUEUE_ENTRY_MSG_SIZE	(sizeof(struct sprt_queue_entry_message))
+
+#define SPRT_QUEUE_NUM_BLOCKING		0
+#define SPRT_QUEUE_NUM_NON_BLOCKING	1
+
+#endif /* SPRT_COMMON_H */
diff --git a/include/lib/sprt/sprt_host.h b/include/lib/sprt/sprt_host.h
new file mode 100644
index 0000000..f888141
--- /dev/null
+++ b/include/lib/sprt/sprt_host.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef SPRT_HOST_H
+#define SPRT_HOST_H
+
+#include <stddef.h>
+
+#include "sprt_common.h"
+
+/*
+ * Initialize the specified buffer to be used by SPM.
+ */
+void sprt_initialize_queues(void *buffer_base, size_t buffer_size);
+
+/*
+ * Push a message to the queue number `queue_num` in a buffer that has been
+ * initialized by `sprt_initialize_queues`.
+ */
+int sprt_push_message(void *buffer_base,
+		      const struct sprt_queue_entry_message *message,
+		      int queue_num);
+
+#endif /* SPRT_HOST_H */
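For illustration, the host-side API above could be driven roughly as follows. This is a minimal sketch rather than code from this patch: the buffer size, the message field values and the example_send_request() name are made up, and the flat include paths assume SPRT_LIB_INCLUDES (-Iinclude/lib/sprt/) from lib/sprt/sprt_host.mk.

#include <stdint.h>
#include <string.h>

#include <sprt_common.h>
#include <sprt_host.h>

static uint8_t sprt_buf[1024];	/* Shared buffer; size is illustrative */

void example_send_request(void)
{
	struct sprt_queue_entry_message msg;

	/* Carve the blocking and non-blocking queues out of the buffer */
	sprt_initialize_queues(sprt_buf, sizeof(sprt_buf));

	memset(&msg, 0, sizeof(msg));
	msg.type = SPRT_MSG_TYPE_SERVICE_TUN_REQUEST;
	msg.client_id = 0U;
	msg.service_handle = 0U;
	msg.token = 0U;
	msg.args[0] = 42U;	/* Service-specific payload, illustrative */

	/* Queue 0 is blocking, queue 1 is non-blocking */
	(void)sprt_push_message(sprt_buf, &msg, SPRT_QUEUE_NUM_BLOCKING);
}

As implemented later in this patch (lib/sprt/sprt_host.c), the blocking queue sits at the start of the buffer and the non-blocking queue immediately after it, which is why sprt_push_message() takes the same buffer_base plus a queue index.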
diff --git a/include/lib/xlat_tables/xlat_tables_v2.h b/include/lib/xlat_tables/xlat_tables_v2.h
index 8c0a567..5c4edc3 100644
--- a/include/lib/xlat_tables/xlat_tables_v2.h
+++ b/include/lib/xlat_tables/xlat_tables_v2.h
@@ -209,6 +209,17 @@
 void init_xlat_tables_ctx(xlat_ctx_t *ctx);
 
 /*
+ * Fill all fields of a dynamic translation tables context. A context must be
+ * set up either statically with REGISTER_XLAT_CONTEXT() or at runtime with
+ * this function.
+ */
+void xlat_setup_dynamic_ctx(xlat_ctx_t *ctx, unsigned long long pa_max,
+			    uintptr_t va_max, struct mmap_region *mmap,
+			    unsigned int mmap_num, uint64_t **tables,
+			    unsigned int tables_num, uint64_t *base_table,
+			    int xlat_regime, int *mapped_regions);
+
+/*
  * Add a static region with defined base PA and base VA. This function can only
  * be used before initializing the translation tables. The region cannot be
  * removed afterwards.
diff --git a/include/plat/arm/common/arm_spm_def.h b/include/plat/arm/common/arm_spm_def.h
index 69aae4a..bf3cb8f 100644
--- a/include/plat/arm/common/arm_spm_def.h
+++ b/include/plat/arm/common/arm_spm_def.h
@@ -11,6 +11,31 @@
 #include <xlat_tables_defs.h>
 
 /*
+ * Reserve 4 MiB for binaries of Secure Partitions and Resource Description
+ * blobs.
+ */
+#define PLAT_SP_PACKAGE_BASE	BL32_BASE
+#define PLAT_SP_PACKAGE_SIZE	ULL(0x400000)
+
+#define PLAT_MAP_SP_PACKAGE_MEM_RO	MAP_REGION_FLAT(		\
+						PLAT_SP_PACKAGE_BASE,	\
+						PLAT_SP_PACKAGE_SIZE,	\
+						MT_MEMORY | MT_RO | MT_SECURE)
+#define PLAT_MAP_SP_PACKAGE_MEM_RW	MAP_REGION_FLAT(		\
+						PLAT_SP_PACKAGE_BASE,	\
+						PLAT_SP_PACKAGE_SIZE,	\
+						MT_MEMORY | MT_RW | MT_SECURE)
+
+/*
+ * The rest of the memory reserved for BL32 is available to SPM as a memory
+ * pool to allocate the memory regions requested in the resource description.
+ */
+#define PLAT_SPM_HEAP_BASE	(PLAT_SP_PACKAGE_BASE + PLAT_SP_PACKAGE_SIZE)
+#define PLAT_SPM_HEAP_SIZE	(BL32_LIMIT - BL32_BASE - PLAT_SP_PACKAGE_SIZE)
+
+#if SPM_DEPRECATED
+
+/*
  * If BL31 is placed in DRAM, place the Secure Partition in DRAM right after the
 * region used by BL31. If BL31 is placed in SRAM, put the Secure Partition
  * at the base of DRAM.
@@ -27,6 +52,7 @@
 						ARM_SP_IMAGE_SIZE,		\
 						MT_MEMORY | MT_RW | MT_SECURE)
 #endif
+
 #ifdef IMAGE_BL31
 /* SPM Payload memory. Mapped as code in S-EL1 */
 #define ARM_SP_IMAGE_MMAP		MAP_REGION2(				\
@@ -96,8 +122,23 @@
 /* Total number of memory regions with distinct properties */
 #define ARM_SP_IMAGE_NUM_MEM_REGIONS	6
 
+#endif /* SPM_DEPRECATED */
+
 /* Cookies passed to the Secure Partition at boot. Not used by ARM platforms. */
 #define PLAT_SPM_COOKIE_0		ULL(0)
 #define PLAT_SPM_COOKIE_1		ULL(0)
 
+/*
+ * Max number of elements supported by SPM in this platform. The defines below
+ * are used to allocate memory at compile time for different arrays in SPM.
+ */
+#define PLAT_SPM_MAX_PARTITIONS		U(2)
+
+#define PLAT_SPM_MEM_REGIONS_MAX	U(80)
+#define PLAT_SPM_NOTIFICATIONS_MAX	U(30)
+#define PLAT_SPM_SERVICES_MAX		U(30)
+
+#define PLAT_SPCI_HANDLES_MAX_NUM	U(20)
+#define PLAT_SPM_RESPONSES_MAX		U(30)
+
 #endif /* ARM_SPM_DEF_H */
diff --git a/include/plat/arm/common/plat_arm.h b/include/plat/arm/common/plat_arm.h
index e7082d0..9b45984 100644
--- a/include/plat/arm/common/plat_arm.h
+++ b/include/plat/arm/common/plat_arm.h
@@ -37,7 +37,7 @@
  *   - Region 1 with secure access only;
  *   - the remaining DRAM regions access from the given Non-Secure masters.
  ******************************************************************************/
-#if ENABLE_SPM
+#if ENABLE_SPM && SPM_DEPRECATED
 #define ARM_TZC_REGIONS_DEF						\
 	{ARM_AP_TZC_DRAM1_BASE, ARM_EL3_TZC_DRAM1_END,			\
 		TZC_REGION_S_RDWR, 0},					\
diff --git a/include/plat/common/platform.h b/include/plat/common/platform.h
index a30b579..04272b1 100644
--- a/include/plat/common/platform.h
+++ b/include/plat/common/platform.h
@@ -22,6 +22,7 @@
 struct bl_params;
 struct mmap_region;
 struct secure_partition_boot_info;
+struct sp_res_desc;
 
 /*******************************************************************************
  * plat_get_rotpk_info() flags
@@ -266,6 +267,9 @@
 const struct mmap_region *plat_get_secure_partition_mmap(void *cookie);
 const struct secure_partition_boot_info *plat_get_secure_partition_boot_info(
 		void *cookie);
+int plat_spm_sp_rd_load(struct sp_res_desc *rd, const void *ptr, size_t size);
+int plat_spm_sp_get_next_address(void **sp_base, size_t *sp_size,
+				 void **rd_base, size_t *rd_size);
 
 /*******************************************************************************
  * Mandatory BL image load functions(may be overridden).
diff --git a/include/services/mm_svc.h b/include/services/mm_svc.h
index ed2b324..e2ce90f 100644
--- a/include/services/mm_svc.h
+++ b/include/services/mm_svc.h
@@ -7,6 +7,8 @@
 #ifndef MM_SVC_H
 #define MM_SVC_H
 
+#if SPM_DEPRECATED
+
 #include <utils_def.h>
 
 #define MM_VERSION_MAJOR	U(1)
@@ -28,4 +30,6 @@
 #define MM_COMMUNICATE_AARCH64		U(0xC4000041)
 #define MM_COMMUNICATE_AARCH32		U(0x84000041)
 
+#endif /* SPM_DEPRECATED */
+
 #endif /* MM_SVC_H */
diff --git a/include/services/secure_partition.h b/include/services/secure_partition.h
index d565e0c..0ae6cf9 100644
--- a/include/services/secure_partition.h
+++ b/include/services/secure_partition.h
@@ -7,6 +7,8 @@
 #ifndef SECURE_PARTITION_H
 #define SECURE_PARTITION_H
 
+#if SPM_DEPRECATED
+
 #include <stdint.h>
 #include <utils_def.h>
 
@@ -46,4 +48,6 @@
 	secure_partition_mp_info_t	*mp_info;
 } secure_partition_boot_info_t;
 
+#endif /* SPM_DEPRECATED */
+
 #endif /* SECURE_PARTITION_H */
diff --git a/include/services/sp_res_desc.h b/include/services/sp_res_desc.h
new file mode 100644
index 0000000..dc00221
--- /dev/null
+++ b/include/services/sp_res_desc.h
@@ -0,0 +1,252 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SPM_RES_DESC_H
+#define SPM_RES_DESC_H
+
+#include <stdint.h>
+#include <sp_res_desc_def.h>
+
+/*******************************************************************************
+ * Attribute Section
+ ******************************************************************************/
+
+struct sp_rd_sect_attribute {
+	/*
+	 * Version of the resource description.
+	 */
+	uint16_t version;
+
+	/*
+	 * Type of the Secure Partition:
+	 * - bit[0]: SP Type
+	 *   - b'0: UP SP
+	 *   - b'1: MP SP
+	 * If UP SP:
+	 * - bit[1]: Type of UP SP
+	 *   - b'0: Migratable UP SP
+	 *   - b'1: Pinned UP SP
+	 */
+	uint16_t sp_type;
+
+	/*
+	 * If this is a Pinned UP SP, PE on which the Pinned UP SP will run.
+	 */
+	uint32_t pe_mpidr;
+
+	/*
+	 * Run-Time Exception Level:
+	 * - 0: SEL0 SP
+	 * - 1: SEL1 SP
+	 */
+	uint8_t runtime_el;
+
+	/*
+	 * Type of Execution:
+	 * - 0: Init-time only
+	 * - 1: Run-time Execution
+	 */
+	uint8_t exec_type;
+
+	/*
+	 * Expected behavior upon failure:
+	 * - 0: Restartable
+	 * - 1: One-Shot
+	 */
+	uint8_t panic_policy;
+
+	/*
+	 * Translation Granule to use in the SP translation regime:
+	 * - 0: 4KB
+	 * - 1: 16KB
+	 * - 2: 64KB
+	 */
+	uint8_t xlat_granule;
+
+	/*
+	 * Size of the SP binary in bytes.
+	 */
+	uint32_t binary_size;
+
+	/*
+	 * - If SP is NOT PIE:
+	 *   - VA Address where the SP expects to be loaded.
+	 * - If SP is PIE:
+	 *   - Ignored.
+	 */
+	uint64_t load_address;
+
+	/*
+	 * Initial execution address. This is a VA as the SP sees it.
+	 */
+	uint64_t entrypoint;
+};
+
+/*******************************************************************************
+ * Memory Region Section
+ ******************************************************************************/
+
+struct sp_rd_sect_mem_region {
+	/*
+	 * Name of a Memory region, including null terminator. Reserved names:
+	 * - "Client Shared Memory Region":
+	 *   Memory region where memory shared by clients shall be mapped.
+	 * - "Queue Memory Region":
+	 *   Memory region shared with SPM for SP queue management.
+	 */
+	char name[RD_MEM_REGION_NAME_LEN];
+
+	/*
+	 * Memory Attributes:
+	 * - bits[3:0]: Type of memory
+	 *   - 0: Device
+	 *   - 1: Code
+	 *   - 2: Data
+	 *   - 3: BSS
+	 *   - 4: Read-only Data
+	 *   - 5: SPM-to-SP Shared Memory Region
+	 *   - 6: Client Shared Memory Region
+	 *   - 7: Miscellaneous
+	 * - If memory is { SPM-to-SP shared Memory, Client Shared Memory,
+	 *   Miscellaneous }
+	 *   - bits[4]: Position Independent
+	 *     - b'0: Position Dependent
+	 *     - b'1: Position Independent
+	 */
+	uint32_t attr;
+
+	/*
+	 * Base address of the memory region.
+	 */
+	uint64_t base;
+
+	/*
+	 * Size of the memory region.
+	 */
+	uint64_t size;
+
+	/*
+	 * Pointer to next memory region (or NULL if this is the last one).
+	 */
+	struct sp_rd_sect_mem_region *next;
+};
+
+/*******************************************************************************
+ * Notification Section
+ ******************************************************************************/
+
+struct sp_rd_sect_notification {
+	/*
+	 * Notification attributes:
+	 * - bit[31]: Notification Type
+	 *   - b'0: Platform Notification
+	 *   - b'1: Interrupt
+	 * If Notification Type == Platform Notification
+	 * - bits[15:0]: Implementation-defined Notification ID
+	 * If Notification Type == Interrupt
+	 * - bits[15:0]: IRQ number
+	 * - bits[23:16]: Interrupt Priority
+	 * - bit[24]: Trigger Type
+	 *   - b'0: Edge Triggered
+	 *   - b'1: Level Triggered
+	 * - bit[25]: Trigger Level
+	 *   - b'0: Falling or Low
+	 *   - b'1: Rising or High
+	 */
+	uint32_t attr;
+
+	/*
+	 * Processing Element.
+	 * If Notification Type == Interrupt && IRQ number is { SGI, LPI }
+	 * - PE ID to which IRQ will be forwarded
+	 */
+	uint32_t pe;
+
+	/*
+	 * Pointer to next notification (or NULL if this is the last one).
+	 */
+	struct sp_rd_sect_notification *next;
+};
+
+/*******************************************************************************
+ * Service Description Section
+ ******************************************************************************/
+
+struct sp_rd_sect_service {
+	/*
+	 * Service identifier.
+	 */
+	uint32_t uuid[4];
+
+	/*
+	 * Accessibility Options:
+	 * - bit[0]: Accessibility by secure-world clients
+	 *   - b'0: Not Accessible
+	 *   - b'1: Accessible
+	 * - bit[1]: Accessible by EL3
+	 *   - b'0: Not Accessible
+	 *   - b'1: Accessible
+	 * - bit[2]: Accessible by normal-world clients
+	 *   - b'0: Not Accessible
+	 *   - b'1: Accessible
+	 */
+	uint8_t accessibility;
+
+	/*
+	 * Request type supported:
+	 * - bit[0]: Blocking request
+	 *   - b'0: Not Enabled
+	 *   - b'1: Enabled
+	 * - bit[1]: Non-blocking request
+	 *   - b'0: Not Enabled
+	 *   - b'1: Enabled
+	 */
+	uint8_t request_type;
+
+	/*
+	 * Maximum number of client connections that the service can support.
+	 */
+	uint16_t connection_quota;
+
+	/*
+	 * If the service requires secure world memory to be shared with its
+	 * clients:
+	 * - Maximum amount of secure world memory in bytes to reserve from the
+	 *   secure world memory pool for the service.
+	 */
+	uint32_t secure_mem_size;
+
+	/*
+	 * Interrupt number used to notify the SP for the service.
+	 * - Should also be enabled in the Notification Section.
+	 */
+	uint32_t interrupt_num;
+
+	/*
+	 * Pointer to next service (or NULL if this is the last one).
+	 */
+	struct sp_rd_sect_service *next;
+};
+
+/*******************************************************************************
+ * Complete resource description struct
+ ******************************************************************************/
+
+struct sp_res_desc {
+
+	/* Attribute Section */
+	struct sp_rd_sect_attribute attribute;
+
+	/* System Resource Section */
+	struct sp_rd_sect_mem_region *mem_region;
+
+	struct sp_rd_sect_notification *notification;
+
+	/* Service Section */
+	struct sp_rd_sect_service *service;
+};
+
+#endif /* SPM_RES_DESC_H */
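The memory region, notification and service sections above are singly linked lists that are all traversed the same way. A minimal sketch for the memory-region list follows; rd_count_mem_regions() is a hypothetical helper, not something defined by this patch.

#include <stddef.h>
#include <sp_res_desc.h>

static unsigned int rd_count_mem_regions(const struct sp_res_desc *rd)
{
	unsigned int count = 0U;

	for (struct sp_rd_sect_mem_region *r = rd->mem_region;
	     r != NULL; r = r->next) {
		/* r->name, r->attr, r->base and r->size describe the region */
		count++;
	}

	return count;
}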
diff --git a/include/services/sp_res_desc_def.h b/include/services/sp_res_desc_def.h
new file mode 100644
index 0000000..68df297
--- /dev/null
+++ b/include/services/sp_res_desc_def.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SPM_RES_DESC_DEFS_H
+#define SPM_RES_DESC_DEFS_H
+
+#include <utils_def.h>
+
+/*******************************************************************************
+ * Attribute Section
+ ******************************************************************************/
+
+#define RD_ATTR_TYPE_UP_MIGRATABLE	U(0)
+#define RD_ATTR_TYPE_UP_PINNED		U(2)
+#define RD_ATTR_TYPE_MP			U(1)
+
+#define RD_ATTR_RUNTIME_SEL0		U(0)
+#define RD_ATTR_RUNTIME_SEL1		U(1)
+
+#define RD_ATTR_INIT_ONLY		U(0)
+#define RD_ATTR_RUNTIME			U(1)
+
+#define RD_ATTR_PANIC_RESTART		U(0)
+#define RD_ATTR_PANIC_ONESHOT		U(1)
+
+#define RD_ATTR_XLAT_GRANULE_4KB	U(0)
+#define RD_ATTR_XLAT_GRANULE_16KB	U(1)
+#define RD_ATTR_XLAT_GRANULE_64KB	U(2)
+
+/*******************************************************************************
+ * Memory Region Section
+ ******************************************************************************/
+
+#define RD_MEM_REGION_NAME_LEN		U(32)
+
+#define RD_MEM_DEVICE			U(0)
+#define RD_MEM_NORMAL_CODE		U(1)
+#define RD_MEM_NORMAL_DATA		U(2)
+#define RD_MEM_NORMAL_BSS		U(3)
+#define RD_MEM_NORMAL_RODATA		U(4)
+#define RD_MEM_NORMAL_SPM_SP_SHARED_MEM	U(5)
+#define RD_MEM_NORMAL_CLIENT_SHARED_MEM	U(6)
+#define RD_MEM_NORMAL_MISCELLANEOUS	U(7)
+
+#define RD_MEM_MASK			U(15)
+
+#define RD_MEM_IS_PIE			(U(1) << 4)
+
+/*******************************************************************************
+ * Notification Section
+ ******************************************************************************/
+
+#define RD_NOTIF_TYPE_PLATFORM		(U(0) << 31)
+#define RD_NOTIF_TYPE_INTERRUPT		(U(1) << 31)
+
+#define RD_NOTIF_PLAT_ID_MASK		U(0xFFFF)
+#define RD_NOTIF_PLAT_ID_SHIFT		U(0)
+
+#define RD_NOTIF_PLATFORM(id)						\
+	(RD_NOTIF_TYPE_PLATFORM						\
+	| (((id) & RD_NOTIF_PLAT_ID_MASK) << RD_NOTIF_PLAT_ID_SHIFT))
+
+#define RD_NOTIF_IRQ_NUM_MASK		U(0xFFFF)
+#define RD_NOTIF_IRQ_NUM_SHIFT		U(0)
+#define RD_NOTIF_IRQ_PRIO_MASK		U(0xFF)
+#define RD_NOTIF_IRQ_PRIO_SHIFT		U(16)
+
+#define RD_NOTIF_IRQ_EDGE_FALLING	U(0)
+#define RD_NOTIF_IRQ_EDGE_RISING	U(2)
+#define RD_NOTIF_IRQ_LEVEL_LOW		U(1)
+#define RD_NOTIF_IRQ_LEVEL_HIGH		U(3)
+#define RD_NOTIF_IRQ_TRIGGER_SHIFT	U(24)
+
+#define RD_NOTIF_IRQ(num, prio, trig)					\
+	(RD_NOTIF_TYPE_INTERRUPT					\
+	| (((num) & RD_NOTIF_IRQ_NUM_MASK) << RD_NOTIF_IRQ_NUM_SHIFT)	\
+	| (((prio) & RD_NOTIF_IRQ_PRIO_MASK) << RD_NOTIF_IRQ_PRIO_SHIFT) \
+	| (((trig) << RD_NOTIF_IRQ_TRIGGER_SHIFT)))
+
+/*******************************************************************************
+ * Service Description Section
+ ******************************************************************************/
+
+#define RD_SERV_ACCESS_SECURE		(U(1) << 0)
+#define RD_SERV_ACCESS_EL3		(U(1) << 1)
+#define RD_SERV_ACCESS_NORMAL		(U(1) << 2)
+
+#define RD_SERV_SUPPORT_BLOCKING	(U(1) << 0)
+#define RD_SERV_SUPPORT_NON_BLOCKING	(U(1) << 1)
+
+#endif /* SPM_RES_DESC_DEFS_H */
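The notification encoding above can be exercised as in the example below. The EXAMPLE_* names, the notification ID, the IRQ number and the priority are illustrative values only; the sketch relies on the RD_NOTIF_TYPE_INTERRUPT name used by RD_NOTIF_IRQ() above.

#include <sp_res_desc_def.h>

/* Platform notification with implementation-defined ID 0x10 */
#define EXAMPLE_PLAT_NOTIF	RD_NOTIF_PLATFORM(U(0x10))

/* Level-triggered, active-high interrupt 33 with priority 0x80 */
#define EXAMPLE_IRQ_NOTIF	RD_NOTIF_IRQ(U(33), U(0x80),		\
					     RD_NOTIF_IRQ_LEVEL_HIGH)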
diff --git a/include/services/spci_svc.h b/include/services/spci_svc.h
new file mode 100644
index 0000000..eae4fb7
--- /dev/null
+++ b/include/services/spci_svc.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SPCI_SVC_H
+#define SPCI_SVC_H
+
+#include <smccc.h>
+#include <utils_def.h>
+
+/* SPCI_VERSION helpers */
+
+#define SPCI_VERSION_MAJOR		U(0)
+#define SPCI_VERSION_MAJOR_SHIFT	16
+#define SPCI_VERSION_MAJOR_MASK		U(0x7FFF)
+#define SPCI_VERSION_MINOR		U(1)
+#define SPCI_VERSION_MINOR_SHIFT	0
+#define SPCI_VERSION_MINOR_MASK		U(0xFFFF)
+#define SPCI_VERSION_FORM(major, minor)	((((major) & SPCI_VERSION_MAJOR_MASK)  \
+						<< SPCI_VERSION_MAJOR_SHIFT) | \
+					((minor) & SPCI_VERSION_MINOR_MASK))
+#define SPCI_VERSION_COMPILED		SPCI_VERSION_FORM(SPCI_VERSION_MAJOR, \
+							  SPCI_VERSION_MINOR)
+
+/* Definitions to build the complete SMC ID */
+
+#define SPCI_FID_MISC_FLAG		(U(0) << 27)
+#define SPCI_FID_MISC_SHIFT		U(20)
+#define SPCI_FID_MISC_MASK		U(0x7F)
+
+#define SPCI_FID_TUN_FLAG		(U(1) << 27)
+#define SPCI_FID_TUN_SHIFT		U(24)
+#define SPCI_FID_TUN_MASK		U(0x7)
+
+#define SPCI_SMC(spci_fid)	((FUNCID_NAMESPACE_SPCI << FUNCID_NAMESPACE_SHIFT) | \
+				 (U(1) << 31) | (spci_fid))
+#define SPCI_MISC_32(misc_fid)	((SMC_32 << FUNCID_CC_SHIFT) |	\
+				 SPCI_FID_MISC_FLAG |		\
+				 SPCI_SMC((misc_fid) << SPCI_FID_MISC_SHIFT))
+#define SPCI_MISC_64(misc_fid)	((SMC_64 << FUNCID_CC_SHIFT) |	\
+				 SPCI_FID_MISC_FLAG |		\
+				 SPCI_SMC((misc_fid) << SPCI_FID_MISC_SHIFT))
+#define SPCI_TUN_32(tun_fid)	((SMC_32 << FUNCID_CC_SHIFT) |	\
+				 SPCI_FID_TUN_FLAG |		\
+				 SPCI_SMC((tun_fid) << SPCI_FID_TUN_SHIFT))
+#define SPCI_TUN_64(tun_fid)	((SMC_64 << FUNCID_CC_SHIFT) |	\
+				 SPCI_FID_TUN_FLAG |		\
+				 SPCI_SMC((tun_fid) << SPCI_FID_TUN_SHIFT))
+
+/* SPCI miscellaneous functions */
+
+#define SPCI_FID_VERSION			U(0x0)
+#define SPCI_FID_SERVICE_HANDLE_OPEN		U(0x2)
+#define SPCI_FID_SERVICE_HANDLE_CLOSE		U(0x3)
+#define SPCI_FID_SERVICE_MEM_REGISTER		U(0x4)
+#define SPCI_FID_SERVICE_MEM_UNREGISTER		U(0x5)
+#define SPCI_FID_SERVICE_MEM_PUBLISH		U(0x6)
+#define SPCI_FID_SERVICE_REQUEST_BLOCKING	U(0x7)
+#define SPCI_FID_SERVICE_REQUEST_START		U(0x8)
+#define SPCI_FID_SERVICE_GET_RESPONSE		U(0x9)
+#define SPCI_FID_SERVICE_RESET_CLIENT_STATE	U(0xA)
+
+/* SPCI tunneling functions */
+
+#define SPCI_FID_SERVICE_TUN_REQUEST_START	U(0x0)
+#define SPCI_FID_SERVICE_REQUEST_RESUME		U(0x1)
+#define SPCI_FID_SERVICE_TUN_REQUEST_BLOCKING	U(0x2)
+
+/* Complete SMC IDs and associated values */
+
+#define SPCI_VERSION				SPCI_MISC_32(SPCI_FID_VERSION)
+
+#define SPCI_SERVICE_HANDLE_OPEN		SPCI_MISC_32(SPCI_FID_SERVICE_HANDLE_OPEN)
+#define SPCI_SERVICE_HANDLE_OPEN_NOTIFY_BIT	U(1)
+
+#define SPCI_SERVICE_HANDLE_CLOSE		SPCI_MISC_32(SPCI_FID_SERVICE_HANDLE_CLOSE)
+
+#define SPCI_SERVICE_MEM_REGISTER_AARCH32	SPCI_MISC_32(SPCI_FID_SERVICE_MEM_REGISTER)
+#define SPCI_SERVICE_MEM_REGISTER_AARCH64	SPCI_MISC_64(SPCI_FID_SERVICE_MEM_REGISTER)
+
+#define SPCI_SERVICE_MEM_UNREGISTER_AARCH32	SPCI_MISC_32(SPCI_FID_SERVICE_MEM_UNREGISTER)
+#define SPCI_SERVICE_MEM_UNREGISTER_AARCH64	SPCI_MISC_64(SPCI_FID_SERVICE_MEM_UNREGISTER)
+
+#define SPCI_SERVICE_MEM_PUBLISH_AARCH32	SPCI_MISC_32(SPCI_FID_SERVICE_MEM_PUBLISH)
+#define SPCI_SERVICE_MEM_PUBLISH_AARCH64	SPCI_MISC_64(SPCI_FID_SERVICE_MEM_PUBLISH)
+
+#define SPCI_SERVICE_REQUEST_BLOCKING_AARCH32	SPCI_MISC_32(SPCI_FID_SERVICE_REQUEST_BLOCKING)
+#define SPCI_SERVICE_REQUEST_BLOCKING_AARCH64	SPCI_MISC_64(SPCI_FID_SERVICE_REQUEST_BLOCKING)
+
+#define SPCI_SERVICE_REQUEST_START_AARCH32	SPCI_MISC_32(SPCI_FID_SERVICE_REQUEST_START)
+#define SPCI_SERVICE_REQUEST_START_AARCH64	SPCI_MISC_64(SPCI_FID_SERVICE_REQUEST_START)
+
+#define SPCI_SERVICE_GET_RESPONSE_AARCH32	SPCI_MISC_32(SPCI_FID_SERVICE_GET_RESPONSE)
+#define SPCI_SERVICE_GET_RESPONSE_AARCH64	SPCI_MISC_64(SPCI_FID_SERVICE_GET_RESPONSE)
+
+#define SPCI_SERVICE_RESET_CLIENT_STATE_AARCH32	SPCI_MISC_32(SPCI_FID_SERVICE_RESET_CLIENT_STATE)
+#define SPCI_SERVICE_RESET_CLIENT_STATE_AARCH64	SPCI_MISC_64(SPCI_FID_SERVICE_RESET_CLIENT_STATE)
+
+#define SPCI_SERVICE_TUN_REQUEST_START_AARCH32	SPCI_TUN_32(SPCI_FID_SERVICE_TUN_REQUEST_START)
+#define SPCI_SERVICE_TUN_REQUEST_START_AARCH64	SPCI_TUN_64(SPCI_FID_SERVICE_TUN_REQUEST_START)
+
+#define SPCI_SERVICE_REQUEST_RESUME_AARCH32	SPCI_TUN_32(SPCI_FID_SERVICE_REQUEST_RESUME)
+#define SPCI_SERVICE_REQUEST_RESUME_AARCH64	SPCI_TUN_64(SPCI_FID_SERVICE_REQUEST_RESUME)
+
+#define SPCI_SERVICE_TUN_REQUEST_BLOCKING_AARCH32 SPCI_TUN_32(SPCI_FID_SERVICE_TUN_REQUEST_BLOCKING)
+#define SPCI_SERVICE_TUN_REQUEST_BLOCKING_AARCH64 SPCI_TUN_64(SPCI_FID_SERVICE_TUN_REQUEST_BLOCKING)
+
+/* SPCI error codes. */
+
+#define SPCI_SUCCESS		 0
+#define SPCI_NOT_SUPPORTED	-1
+#define SPCI_INVALID_PARAMETER	-2
+#define SPCI_NO_MEMORY		-3
+#define SPCI_BUSY		-4
+#define SPCI_QUEUED		-5
+#define SPCI_DENIED		-6
+#define SPCI_NOT_PRESENT	-7
+
+#endif /* SPCI_SVC_H */
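As a small usage sketch (not part of this patch), the packed value built by SPCI_VERSION_FORM() can be decomposed with the same shift/mask pairs; the helper names below are made up.

#include <stdint.h>
#include <spci_svc.h>

static inline uint32_t spci_version_major(uint32_t ver)
{
	return (ver >> SPCI_VERSION_MAJOR_SHIFT) & SPCI_VERSION_MAJOR_MASK;
}

static inline uint32_t spci_version_minor(uint32_t ver)
{
	return (ver >> SPCI_VERSION_MINOR_SHIFT) & SPCI_VERSION_MINOR_MASK;
}

/* With the values above, SPCI_VERSION_COMPILED encodes major 0, minor 1. */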
diff --git a/include/services/spm_svc.h b/include/services/spm_svc.h
index 7a69b28..763b24e 100644
--- a/include/services/spm_svc.h
+++ b/include/services/spm_svc.h
@@ -7,6 +7,8 @@
 #ifndef SPM_SVC_H
 #define SPM_SVC_H
 
+#if SPM_DEPRECATED
+
 #include <utils_def.h>
 
 #define SPM_VERSION_MAJOR	U(0)
@@ -59,12 +61,16 @@
 #define SPM_DENIED		-3
 #define SPM_NO_MEMORY		-5
 
+#endif /* SPM_DEPRECATED */
+
 #ifndef __ASSEMBLY__
 
 #include <stdint.h>
 
 int32_t spm_setup(void);
 
+#if SPM_DEPRECATED
+
 uint64_t spm_smc_handler(uint32_t smc_fid,
 			 uint64_t x1,
 			 uint64_t x2,
@@ -77,6 +83,8 @@
 /* Helper to enter a Secure Partition */
 uint64_t spm_sp_call(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3);
 
+#endif /* SPM_DEPRECATED */
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* SPM_SVC_H */
diff --git a/include/services/sprt_svc.h b/include/services/sprt_svc.h
new file mode 100644
index 0000000..b6b51dd
--- /dev/null
+++ b/include/services/sprt_svc.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SPRT_SVC_H
+#define SPRT_SVC_H
+
+#include <smccc.h>
+#include <utils_def.h>
+
+/* SPRT_VERSION helpers */
+
+#define SPRT_VERSION_MAJOR		U(0)
+#define SPRT_VERSION_MAJOR_SHIFT	16
+#define SPRT_VERSION_MAJOR_MASK		U(0x7FFF)
+#define SPRT_VERSION_MINOR		U(1)
+#define SPRT_VERSION_MINOR_SHIFT	0
+#define SPRT_VERSION_MINOR_MASK		U(0xFFFF)
+#define SPRT_VERSION_FORM(major, minor)	((((major) & SPRT_VERSION_MAJOR_MASK)  \
+						<< SPRT_VERSION_MAJOR_SHIFT) | \
+					((minor) & SPRT_VERSION_MINOR_MASK))
+#define SPRT_VERSION_COMPILED		SPRT_VERSION_FORM(SPRT_VERSION_MAJOR, \
+							  SPRT_VERSION_MINOR)
+
+/* SPRT function IDs */
+
+#define SPRT_FID_VERSION		U(0x0)
+#define SPRT_FID_PUT_RESPONSE		U(0x1)
+#define SPRT_FID_YIELD			U(0x5)
+#define SPRT_FID_PANIC			U(0x7)
+#define SPRT_FID_MEMORY_PERM_ATTR_GET	U(0xB)
+#define SPRT_FID_MEMORY_PERM_ATTR_SET	U(0xC)
+
+#define SPRT_FID_MASK			U(0xFF)
+
+/* Definitions to build the complete SMC ID */
+
+#define SPRT_SMC_64(sprt_fid)	((FUNCID_NAMESPACE_SPRT << FUNCID_NAMESPACE_SHIFT) | \
+				 (U(1) << 31) | ((sprt_fid) & SPRT_FID_MASK) | \
+				 (SMC_64 << FUNCID_CC_SHIFT))
+#define SPRT_SMC_32(sprt_fid)	((FUNCID_NAMESPACE_SPRT << FUNCID_NAMESPACE_SHIFT) | \
+				 (U(1) << 31) | ((sprt_fid) & SPRT_FID_MASK) | \
+				 (SMC_32 << FUNCID_CC_SHIFT))
+
+/* Complete SMC IDs */
+
+#define SPRT_VERSION				SPRT_SMC_32(SPRT_FID_VERSION)
+#define SPRT_PUT_RESPONSE_AARCH64		SPRT_SMC_64(SPRT_FID_PUT_RESPONSE)
+#define SPRT_YIELD_AARCH64			SPRT_SMC_64(SPRT_FID_YIELD)
+#define SPRT_PANIC_AARCH64			SPRT_SMC_64(SPRT_FID_PANIC)
+#define SPRT_MEMORY_PERM_ATTR_GET_AARCH64	SPRT_SMC_64(SPRT_FID_MEMORY_PERM_ATTR_GET)
+#define SPRT_MEMORY_PERM_ATTR_SET_AARCH64	SPRT_SMC_64(SPRT_FID_MEMORY_PERM_ATTR_SET)
+
+/* Defines used by SPRT_MEMORY_PERM_ATTR_{GET,SET}_AARCH64 */
+
+#define SPRT_MEMORY_PERM_ATTR_RO	U(0)
+#define SPRT_MEMORY_PERM_ATTR_RW	U(1)
+#define SPRT_MEMORY_PERM_ATTR_RO_EXEC	U(2)
+/* U(3) is reserved */
+#define SPRT_MEMORY_PERM_ATTR_MASK	U(3)
+#define SPRT_MEMORY_PERM_ATTR_SHIFT	3
+
+/* SPRT error codes. */
+
+#define SPRT_SUCCESS		 0
+#define SPRT_NOT_SUPPORTED	-1
+#define SPRT_INVALID_PARAMETER	-2
+
+#endif /* SPRT_SVC_H */
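A short sketch of how an SP might build and parse the permission field used with SPRT_MEMORY_PERM_ATTR_{GET,SET}_AARCH64. Only the SHIFT/MASK placement comes from this header; the helper names are made up and the surrounding SMC argument layout is not assumed here.

#include <sprt_svc.h>

static inline unsigned int sprt_mem_perm_encode(unsigned int perm)
{
	return (perm & SPRT_MEMORY_PERM_ATTR_MASK) <<
		SPRT_MEMORY_PERM_ATTR_SHIFT;
}

static inline unsigned int sprt_mem_perm_decode(unsigned int field)
{
	return (field >> SPRT_MEMORY_PERM_ATTR_SHIFT) &
		SPRT_MEMORY_PERM_ATTR_MASK;
}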
diff --git a/include/tools_share/sptool.h b/include/tools_share/sptool.h
new file mode 100644
index 0000000..67a2cf0
--- /dev/null
+++ b/include/tools_share/sptool.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SPTOOL_H
+#define SPTOOL_H
+
+#include <stdint.h>
+
+/* Header for a secure partition package. There is one per package. */
+struct sp_pkg_header {
+	uint64_t version;
+	uint64_t number_of_sp;
+};
+
+/*
+ * Entry descriptor in a secure partition package. Each entry comprises a
+ * secure partition and its resource description.
+ */
+struct sp_pkg_entry {
+	uint64_t sp_offset;
+	uint64_t sp_size;
+	uint64_t rd_offset;
+	uint64_t rd_size;
+};
+
+#endif /* SPTOOL_H */
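For reference, a package produced by sptool could be walked as in the sketch below. The assumptions that a package starts with a single sp_pkg_header followed by number_of_sp entry descriptors, and that sp_offset/rd_offset are relative to the package base, are mine and should be checked against the sptool sources; sp_pkg_walk() is a hypothetical helper.

#include <stdint.h>
#include <sptool.h>

static void sp_pkg_walk(uintptr_t pkg_base)
{
	struct sp_pkg_header *hdr = (struct sp_pkg_header *)pkg_base;
	struct sp_pkg_entry *entry = (struct sp_pkg_entry *)(pkg_base +
					sizeof(struct sp_pkg_header));

	for (uint64_t i = 0U; i < hdr->number_of_sp; i++, entry++) {
		/* Assumed: offsets are relative to the package base */
		void *sp_image = (void *)(pkg_base + (uintptr_t)entry->sp_offset);
		void *rd_blob = (void *)(pkg_base + (uintptr_t)entry->rd_offset);

		/* entry->sp_size and entry->rd_size bound the two blobs */
		(void)sp_image;
		(void)rd_blob;
	}
}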
diff --git a/lib/cpus/aarch64/cortex_a76.S b/lib/cpus/aarch64/cortex_a76.S
index 1697c55..4def143 100644
--- a/lib/cpus/aarch64/cortex_a76.S
+++ b/lib/cpus/aarch64/cortex_a76.S
@@ -208,14 +208,20 @@
 
 func cortex_a76_reset_func
 	mov	x19, x30
+
 #if WORKAROUND_CVE_2018_3639
+	/* If the PE implements SSBS, we don't need the dynamic workaround */
+	mrs	x0, id_aa64pfr1_el1
+	lsr	x0, x0, #ID_AA64PFR1_EL1_SSBS_SHIFT
+	and     x0, x0, #ID_AA64PFR1_EL1_SSBS_MASK
+	cbnz	x0, 1f
+
 	mrs	x0, CORTEX_A76_CPUACTLR2_EL1
 	orr	x0, x0, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
 	msr	CORTEX_A76_CPUACTLR2_EL1, x0
 	isb
-#endif
 
-#if IMAGE_BL31 && WORKAROUND_CVE_2018_3639
+#ifdef IMAGE_BL31
 	/*
 	 * The Cortex-A76 generic vectors are overwritten to use the vectors
 	 * defined above.  This is required in order to apply mitigation
@@ -226,6 +232,9 @@
 	isb
 #endif
 
+1:
+#endif
+
 #if ERRATA_DSU_936184
 	bl	errata_dsu_936184_wa
 #endif
diff --git a/lib/sprt/sprt_host.c b/lib/sprt/sprt_host.c
new file mode 100644
index 0000000..c4d436e
--- /dev/null
+++ b/lib/sprt/sprt_host.c
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <errno.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include "sprt_common.h"
+#include "sprt_queue.h"
+
+void sprt_initialize_queues(void *buffer_base, size_t buffer_size)
+{
+	/* Initialize queue for blocking messages */
+
+	void *blocking_base = buffer_base;
+	uint32_t blocking_num = 4U;
+	size_t blocking_size = SPRT_QUEUE_HEADER_SIZE +
+			       SPRT_QUEUE_ENTRY_MSG_SIZE * blocking_num;
+
+	sprt_queue_init(blocking_base, blocking_num, SPRT_QUEUE_ENTRY_MSG_SIZE);
+
+	/* Initialize queue for non-blocking messages */
+
+	void *non_blocking_base = (void *)((uintptr_t)blocking_base + blocking_size);
+	size_t non_blocking_size = buffer_size - blocking_size;
+	uint32_t non_blocking_num = (non_blocking_size - SPRT_QUEUE_HEADER_SIZE) /
+		SPRT_QUEUE_ENTRY_MSG_SIZE;
+
+	sprt_queue_init(non_blocking_base, non_blocking_num, SPRT_QUEUE_ENTRY_MSG_SIZE);
+}
+
+int sprt_push_message(void *buffer_base,
+		      const struct sprt_queue_entry_message *message,
+		      int queue_num)
+{
+	struct sprt_queue *q = buffer_base;
+
+	while (queue_num-- > 0) {
+		uintptr_t next_addr = (uintptr_t)q + sizeof(struct sprt_queue) +
+				      q->entry_num * q->entry_size;
+		q = (struct sprt_queue *) next_addr;
+	}
+
+	return sprt_queue_push(q, message);
+}
diff --git a/lib/sprt/sprt_host.mk b/lib/sprt/sprt_host.mk
new file mode 100644
index 0000000..abcfe5e
--- /dev/null
+++ b/lib/sprt/sprt_host.mk
@@ -0,0 +1,11 @@
+#
+# Copyright (c) 2018, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+SPRT_LIB_SOURCES	:=	$(addprefix lib/sprt/,			\
+					sprt_host.c			\
+					sprt_queue.c)
+
+SPRT_LIB_INCLUDES	:=	-Iinclude/lib/sprt/
diff --git a/lib/sprt/sprt_queue.c b/lib/sprt/sprt_queue.c
new file mode 100644
index 0000000..2bd4139
--- /dev/null
+++ b/lib/sprt/sprt_queue.c
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+
+#include "sprt_queue.h"
+
+void sprt_queue_init(void *queue_base, uint32_t entry_num, uint32_t entry_size)
+{
+	assert(queue_base != NULL);
+	assert(entry_size > 0U);
+	assert(entry_num > 0U);
+
+	struct sprt_queue *queue = (struct sprt_queue *)queue_base;
+
+	queue->entry_num = entry_num;
+	queue->entry_size = entry_size;
+	queue->idx_write = 0U;
+	queue->idx_read = 0U;
+
+	memset(queue->data, 0, entry_num * entry_size);
+}
+
+int sprt_queue_is_empty(void *queue_base)
+{
+	assert(queue_base != NULL);
+
+	struct sprt_queue *queue = (struct sprt_queue *)queue_base;
+
+	return (queue->idx_write == queue->idx_read);
+}
+
+int sprt_queue_is_full(void *queue_base)
+{
+	assert(queue_base != NULL);
+
+	struct sprt_queue *queue = (struct sprt_queue *)queue_base;
+
+	uint32_t idx_next_write = (queue->idx_write + 1) % queue->entry_num;
+
+	return (idx_next_write == queue->idx_read);
+}
+
+int sprt_queue_push(void *queue_base, const void *entry)
+{
+	assert(entry != NULL);
+	assert(queue_base != NULL);
+
+	if (sprt_queue_is_full(queue_base) != 0) {
+		return -ENOMEM;
+	}
+
+	struct sprt_queue *queue = (struct sprt_queue *)queue_base;
+
+	uint8_t *dst_entry = &queue->data[queue->entry_size * queue->idx_write];
+
+	memcpy(dst_entry, entry, queue->entry_size);
+
+	/*
+	 * Make sure that the message data is visible before increasing the
+	 * counter of available messages.
+	 */
+	__asm__ volatile("dmb st" ::: "memory");
+
+	queue->idx_write = (queue->idx_write + 1) % queue->entry_num;
+
+	__asm__ volatile("dmb st" ::: "memory");
+
+	return 0;
+}
+
+int sprt_queue_pop(void *queue_base, void *entry)
+{
+	assert(entry != NULL);
+	assert(queue_base != NULL);
+
+	if (sprt_queue_is_empty(queue_base) != 0) {
+		return -ENOENT;
+	}
+
+	struct sprt_queue *queue = (struct sprt_queue *)queue_base;
+
+	uint8_t *src_entry = &queue->data[queue->entry_size * queue->idx_read];
+
+	memcpy(entry, src_entry, queue->entry_size);
+
+	/*
+	 * Make sure that the message data is visible before increasing the
+	 * counter of read messages.
+	 */
+	__asm__ volatile("dmb st" ::: "memory");
+
+	queue->idx_read = (queue->idx_read + 1) % queue->entry_num;
+
+	__asm__ volatile("dmb st" ::: "memory");
+
+	return 0;
+}
diff --git a/lib/sprt/sprt_queue.h b/lib/sprt/sprt_queue.h
new file mode 100644
index 0000000..4ea1bc2
--- /dev/null
+++ b/lib/sprt/sprt_queue.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SPRT_QUEUE_H
+#define SPRT_QUEUE_H
+
+#include <stdint.h>
+
+/* Struct that defines a queue. Not to be used directly. */
+struct __attribute__((__packed__)) sprt_queue {
+	uint32_t entry_num;	/* Number of entries */
+	uint32_t entry_size;	/* Size of an entry */
+	uint32_t idx_write;	/* Index of first empty entry */
+	uint32_t idx_read;	/* Index of first entry to read */
+	uint8_t  data[0];	/* Start of data */
+};
+
+#define SPRT_QUEUE_HEADER_SIZE	(sizeof(struct sprt_queue))
+
+/*
+ * Initializes a memory region to be used as a queue of the given number of
+ * entries with the specified size.
+ */
+void sprt_queue_init(void *queue_base, uint32_t entry_num, uint32_t entry_size);
+
+/* Returns 1 if the queue is empty, 0 otherwise */
+int sprt_queue_is_empty(void *queue_base);
+
+/* Returns 1 if the queue is full, 0 otherwise */
+int sprt_queue_is_full(void *queue_base);
+
+/*
+ * Pushes a new entry into the queue. Returns 0 on success, -ENOMEM if the
+ * queue is full.
+ */
+int sprt_queue_push(void *queue_base, const void *entry);
+
+/*
+ * Pops an entry from the queue. Returns 0 on success, -ENOENT if the queue is
+ * empty.
+ */
+int sprt_queue_pop(void *queue_base, void *entry);
+
+#endif /* SPRT_QUEUE_H */
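A minimal single-threaded sketch of this queue API; the entry count, entry size and the queue_example() name are illustrative, and the backing store is a uint64_t array simply to keep it naturally aligned.

#include <errno.h>
#include <stdint.h>

#include "sprt_queue.h"

/* Room for the queue header plus 8 entries of 8 bytes each */
static uint64_t queue_mem[(SPRT_QUEUE_HEADER_SIZE / sizeof(uint64_t)) + 1U + 8U];

static void queue_example(void)
{
	uint64_t in = 0xCAFEULL;
	uint64_t out;

	sprt_queue_init(queue_mem, 8U, sizeof(uint64_t));

	if (sprt_queue_push(queue_mem, &in) == -ENOMEM) {
		/* Queue full: caller decides whether to retry or drop */
	}

	while (sprt_queue_pop(queue_mem, &out) != -ENOENT) {
		/* Process `out` */
	}
}

Note that sprt_queue_is_full() reports full when the next write index would catch up with the read index, so a queue initialised with N entries holds at most N - 1 messages at any time.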
diff --git a/lib/xlat_tables_v2/xlat_tables_core.c b/lib/xlat_tables_v2/xlat_tables_core.c
index 185473a..53fc874 100644
--- a/lib/xlat_tables_v2/xlat_tables_core.c
+++ b/lib/xlat_tables_v2/xlat_tables_core.c
@@ -1101,6 +1101,36 @@
 	return 0;
 }
 
+void xlat_setup_dynamic_ctx(xlat_ctx_t *ctx, unsigned long long pa_max,
+			    uintptr_t va_max, struct mmap_region *mmap,
+			    unsigned int mmap_num, uint64_t **tables,
+			    unsigned int tables_num, uint64_t *base_table,
+			    int xlat_regime, int *mapped_regions)
+{
+	ctx->xlat_regime = xlat_regime;
+
+	ctx->pa_max_address = pa_max;
+	ctx->va_max_address = va_max;
+
+	ctx->mmap = mmap;
+	ctx->mmap_num = mmap_num;
+	memset(ctx->mmap, 0, sizeof(struct mmap_region) * mmap_num);
+
+	ctx->tables = (void *) tables;
+	ctx->tables_num = tables_num;
+
+	uintptr_t va_space_size = va_max + 1;
+	ctx->base_level = GET_XLAT_TABLE_LEVEL_BASE(va_space_size);
+	ctx->base_table = base_table;
+	ctx->base_table_entries = GET_NUM_BASE_LEVEL_ENTRIES(va_space_size);
+
+	ctx->tables_mapped_regions = mapped_regions;
+
+	ctx->max_pa = 0;
+	ctx->max_va = 0;
+	ctx->initialized = 0;
+}
+
 #endif /* PLAT_XLAT_TABLES_DYNAMIC */
 
 void __init init_xlat_tables_ctx(xlat_ctx_t *ctx)
diff --git a/make_helpers/defaults.mk b/make_helpers/defaults.mk
index 4a3f541..a55e729 100644
--- a/make_helpers/defaults.mk
+++ b/make_helpers/defaults.mk
@@ -162,6 +162,9 @@
 # For including the Secure Partition Manager
 ENABLE_SPM			:= 0
 
+# Use the deprecated SPM based on MM
+SPM_DEPRECATED			:= 1
+
 # Flag to introduce an infinite loop in BL1 just before it exits into the next
 # image. This is meant to help debugging the post-BL2 phase.
 SPIN_ON_BL1_EXIT		:= 0
diff --git a/plat/arm/board/fvp/fvp_common.c b/plat/arm/board/fvp/fvp_common.c
index 0edf6ba..f36b637 100644
--- a/plat/arm/board/fvp/fvp_common.c
+++ b/plat/arm/board/fvp/fvp_common.c
@@ -96,9 +96,12 @@
 	ARM_MAP_BL1_RW,
 #endif
 #endif /* TRUSTED_BOARD_BOOT */
-#if ENABLE_SPM
+#if ENABLE_SPM && SPM_DEPRECATED
 	ARM_SP_IMAGE_MMAP,
 #endif
+#if ENABLE_SPM && !SPM_DEPRECATED
+	PLAT_MAP_SP_PACKAGE_MEM_RW,
+#endif
 #if ARM_BL31_IN_DRAM
 	ARM_MAP_BL31_SEC_DRAM,
 #endif
@@ -124,13 +127,16 @@
 	MAP_DEVICE0,
 	MAP_DEVICE1,
 	ARM_V2M_MAP_MEM_PROTECT,
-#if ENABLE_SPM
+#if ENABLE_SPM && SPM_DEPRECATED
 	ARM_SPM_BUF_EL3_MMAP,
 #endif
+#if ENABLE_SPM && !SPM_DEPRECATED
+	PLAT_MAP_SP_PACKAGE_MEM_RO,
+#endif
 	{0}
 };
 
-#if ENABLE_SPM && defined(IMAGE_BL31)
+#if ENABLE_SPM && defined(IMAGE_BL31) && SPM_DEPRECATED
 const mmap_region_t plat_arm_secure_partition_mmap[] = {
 	V2M_MAP_IOFPGA_EL0, /* for the UART */
 	MAP_REGION_FLAT(DEVICE0_BASE,				\
@@ -184,7 +190,7 @@
 }
 #endif
 
-#if ENABLE_SPM && defined(IMAGE_BL31)
+#if ENABLE_SPM && defined(IMAGE_BL31) && SPM_DEPRECATED
 /*
  * Boot information passed to a secure partition during initialisation. Linear
  * indices in MP information will be filled at runtime.
@@ -232,7 +238,6 @@
 {
 	return &plat_arm_secure_partition_boot_info;
 }
-
 #endif
 
 /*******************************************************************************
diff --git a/plat/arm/board/fvp/include/platform_def.h b/plat/arm/board/fvp/include/platform_def.h
index 58b68ab..3097f90 100644
--- a/plat/arm/board/fvp/include/platform_def.h
+++ b/plat/arm/board/fvp/include/platform_def.h
@@ -13,7 +13,7 @@
 #  define PLAT_XLAT_TABLES_DYNAMIC     1
 # endif
 #else
-# if defined(IMAGE_BL31) && RESET_TO_BL31
+# if defined(IMAGE_BL31) && (RESET_TO_BL31 || (ENABLE_SPM && !SPM_DEPRECATED))
 #  define PLAT_XLAT_TABLES_DYNAMIC     1
 # endif
 #endif /* AARCH32 */
@@ -72,8 +72,8 @@
 #if defined(IMAGE_BL31)
 # if ENABLE_SPM
 #  define PLAT_ARM_MMAP_ENTRIES		9
-#  define MAX_XLAT_TABLES		7
-#  define PLAT_SP_IMAGE_MMAP_REGIONS	7
+#  define MAX_XLAT_TABLES		9
+#  define PLAT_SP_IMAGE_MMAP_REGIONS	30
 #  define PLAT_SP_IMAGE_MAX_XLAT_TABLES	10
 # else
 #  define PLAT_ARM_MMAP_ENTRIES		8
@@ -123,7 +123,11 @@
  * calculated using the current BL31 PROGBITS debug size plus the sizes of
  * BL2 and BL1-RW
  */
+#if ENABLE_SPM && !SPM_DEPRECATED
+#define PLAT_ARM_MAX_BL31_SIZE		UL(0x60000)
+#else
 #define PLAT_ARM_MAX_BL31_SIZE		UL(0x3B000)
+#endif
 
 #ifdef AARCH32
 /*
@@ -153,7 +157,7 @@
 # define PLATFORM_STACK_SIZE		UL(0x400)
 #elif defined(IMAGE_BL31)
 # if ENABLE_SPM
-#  define PLATFORM_STACK_SIZE		UL(0x500)
+#  define PLATFORM_STACK_SIZE		UL(0x600)
 # elif PLAT_XLAT_TABLES_DYNAMIC
 #  define PLATFORM_STACK_SIZE		UL(0x800)
 # else
diff --git a/plat/arm/board/sgiclarkh/fdts/sgiclarkh_nt_fw_config.dts b/plat/arm/board/sgiclarkh/fdts/sgiclarkh_nt_fw_config.dts
new file mode 100644
index 0000000..3dedf1d
--- /dev/null
+++ b/plat/arm/board/sgiclarkh/fdts/sgiclarkh_nt_fw_config.dts
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/dts-v1/;
+/ {
+	/* compatible string */
+	compatible = "arm,sgi-clark";
+
+	/*
+	 * Placeholder for the system-id node with default values. The
+	 * value of platform-id and config-id will be set to the
+	 * correct values during the BL2 stage of boot.
+	 */
+	system-id {
+		platform-id = <0x0>;
+		config-id = <0x0>;
+	};
+
+};
diff --git a/plat/arm/board/sgiclarkh/fdts/sgiclarkh_tb_fw_config.dts b/plat/arm/board/sgiclarkh/fdts/sgiclarkh_tb_fw_config.dts
new file mode 100644
index 0000000..766dc00
--- /dev/null
+++ b/plat/arm/board/sgiclarkh/fdts/sgiclarkh_tb_fw_config.dts
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/dts-v1/;
+
+/ {
+	/* Platform Config */
+	compatible = "arm,tb_fw";
+	nt_fw_config_addr = <0x0 0xFEF00000>;
+	nt_fw_config_max_size = <0x0100000>;
+	/*
+	 * The following two entries are placeholders for Mbed TLS
+	 * heap information. The default values don't matter since
+	 * they will be overwritten by BL1.
+	 * In case of having shared Mbed TLS heap between BL1 and BL2,
+	 * BL1 will populate these two properties with the respective
+	 * info about the shared heap. This info will be available for
+	 * BL2 in order to locate and re-use the heap.
+	 */
+	mbedtls_heap_addr = <0x0 0x0>;
+	mbedtls_heap_size = <0x0>;
+};
diff --git a/plat/arm/board/sgiclarkh/include/platform_def.h b/plat/arm/board/sgiclarkh/include/platform_def.h
new file mode 100644
index 0000000..544ad90
--- /dev/null
+++ b/plat/arm/board/sgiclarkh/include/platform_def.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef PLATFORM_DEF_H
+#define PLATFORM_DEF_H
+
+#include <sgi_base_platform_def.h>
+#include <utils_def.h>
+
+#define PLAT_ARM_CLUSTER_COUNT		2
+#define CSS_SGI_MAX_CPUS_PER_CLUSTER	8
+#define CSS_SGI_MAX_PE_PER_CPU		2
+
+#define PLAT_CSS_MHU_BASE		UL(0x45400000)
+
+/* Base address of DMC-620 instances */
+#define SGICLARKH_DMC620_BASE0		UL(0x4e000000)
+#define SGICLARKH_DMC620_BASE1		UL(0x4e100000)
+
+#define PLAT_MAX_PWR_LVL		ARM_PWR_LVL2
+
+#define CSS_SYSTEM_PWR_DMN_LVL		ARM_PWR_LVL3
+
+#endif /* PLATFORM_DEF_H */
diff --git a/plat/arm/board/sgiclarkh/platform.mk b/plat/arm/board/sgiclarkh/platform.mk
new file mode 100644
index 0000000..222ca60
--- /dev/null
+++ b/plat/arm/board/sgiclarkh/platform.mk
@@ -0,0 +1,42 @@
+#
+# Copyright (c) 2018, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+include plat/arm/css/sgi/sgi-common.mk
+
+SGICLARKH_BASE		=	plat/arm/board/sgiclarkh
+
+PLAT_INCLUDES		+=	-I${SGICLARKH_BASE}/include/
+
+SGI_CPU_SOURCES		:=	lib/cpus/aarch64/cortex_helios.S
+
+BL1_SOURCES		+=	${SGI_CPU_SOURCES}
+
+BL2_SOURCES		+=	${SGICLARKH_BASE}/sgiclarkh_plat.c	\
+				${SGICLARKH_BASE}/sgiclarkh_security.c	\
+				drivers/arm/tzc/tzc_dmc620.c		\
+				lib/utils/mem_region.c			\
+				plat/arm/common/arm_nor_psci_mem_protect.c
+
+BL31_SOURCES		+=	${SGI_CPU_SOURCES}			\
+				${SGICLARKH_BASE}/sgiclarkh_plat.c	\
+				drivers/cfi/v2m/v2m_flash.c		\
+				lib/utils/mem_region.c			\
+				plat/arm/common/arm_nor_psci_mem_protect.c
+
+# Add the FDT_SOURCES and options for Dynamic Config
+FDT_SOURCES		+=	${SGICLARKH_BASE}/fdts/${PLAT}_tb_fw_config.dts
+TB_FW_CONFIG		:=	${BUILD_PLAT}/fdts/${PLAT}_tb_fw_config.dtb
+
+# Add the TB_FW_CONFIG to FIP and specify the same to certtool
+$(eval $(call TOOL_ADD_PAYLOAD,${TB_FW_CONFIG},--tb-fw-config))
+
+FDT_SOURCES		+=	${SGICLARKH_BASE}/fdts/${PLAT}_nt_fw_config.dts
+NT_FW_CONFIG		:=	${BUILD_PLAT}/fdts/${PLAT}_nt_fw_config.dtb
+
+# Add the NT_FW_CONFIG to FIP and specify the same to certtool
+$(eval $(call TOOL_ADD_PAYLOAD,${NT_FW_CONFIG},--nt-fw-config))
+
+override CTX_INCLUDE_AARCH32_REGS	:= 0
diff --git a/plat/arm/board/sgiclarkh/sgiclarkh_plat.c b/plat/arm/board/sgiclarkh/sgiclarkh_plat.c
new file mode 100644
index 0000000..437592d
--- /dev/null
+++ b/plat/arm/board/sgiclarkh/sgiclarkh_plat.c
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <platform.h>
+
+unsigned int plat_arm_sgi_get_platform_id(void)
+{
+	return mmio_read_32(SID_REG_BASE + SID_SYSTEM_ID_OFFSET)
+				& SID_SYSTEM_ID_PART_NUM_MASK;
+}
+
+unsigned int plat_arm_sgi_get_config_id(void)
+{
+	return mmio_read_32(SID_REG_BASE + SID_SYSTEM_CFG_OFFSET);
+}
diff --git a/plat/arm/board/sgiclarkh/sgiclarkh_security.c b/plat/arm/board/sgiclarkh/sgiclarkh_security.c
new file mode 100644
index 0000000..5ae0b82
--- /dev/null
+++ b/plat/arm/board/sgiclarkh/sgiclarkh_security.c
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <debug.h>
+#include <platform_def.h>
+#include <tzc_dmc620.h>
+
+uintptr_t sgiclarkh_dmc_base[] = {
+	SGICLARKH_DMC620_BASE0,
+	SGICLARKH_DMC620_BASE1
+};
+
+static const tzc_dmc620_driver_data_t sgiclarkh_plat_driver_data = {
+	.dmc_base = sgiclarkh_dmc_base,
+	.dmc_count = ARRAY_SIZE(sgiclarkh_dmc_base)
+};
+
+static const tzc_dmc620_acc_addr_data_t sgiclarkh_acc_addr_data[] = {
+	{
+		.region_base = ARM_AP_TZC_DRAM1_BASE,
+		.region_top = ARM_AP_TZC_DRAM1_BASE + ARM_TZC_DRAM1_SIZE - 1,
+		.sec_attr = TZC_DMC620_REGION_S_RDWR
+	}
+};
+
+static const tzc_dmc620_config_data_t sgiclarkh_plat_config_data = {
+	.plat_drv_data = &sgiclarkh_plat_driver_data,
+	.plat_acc_addr_data = sgiclarkh_acc_addr_data,
+	.acc_addr_count = ARRAY_SIZE(sgiclarkh_acc_addr_data)
+};
+
+/* Initialize the secure environment */
+void plat_arm_security_setup(void)
+{
+	arm_tzc_dmc620_setup(&sgiclarkh_plat_config_data);
+}
diff --git a/plat/arm/common/arm_common.mk b/plat/arm/common/arm_common.mk
index cb969b2..24b61e8 100644
--- a/plat/arm/common/arm_common.mk
+++ b/plat/arm/common/arm_common.mk
@@ -244,6 +244,16 @@
 				lib/extensions/ras/ras_common.c
 endif
 
+# SPM uses libfdt in Arm platforms
+ifeq (${SPM_DEPRECATED},0)
+ifeq (${ENABLE_SPM},1)
+BL31_SOURCES		+=	common/fdt_wrappers.c			\
+				plat/common/plat_spm_rd.c		\
+				plat/common/plat_spm_sp.c		\
+				${LIBFDT_SRCS}
+endif
+endif
+
 ifneq (${TRUSTED_BOARD_BOOT},0)
 
     # Include common TBB sources
diff --git a/plat/arm/css/sgi/include/sgi_variant.h b/plat/arm/css/sgi/include/sgi_variant.h
index 56dc334..e9b96dd 100644
--- a/plat/arm/css/sgi/include/sgi_variant.h
+++ b/plat/arm/css/sgi/include/sgi_variant.h
@@ -12,6 +12,7 @@
 
 /* SID Version values for SGI-Clark */
 #define SGI_CLARK_SID_VER_PART_NUM		0x0786
+#define SGI_CLARK_HELIOS_CONFIG_ID		0x2
 
 /* Structure containing SGI platform variant information */
 typedef struct sgi_platform_info {
diff --git a/plat/arm/css/sgi/sgi_bl31_setup.c b/plat/arm/css/sgi/sgi_bl31_setup.c
index a254388..b1aa089 100644
--- a/plat/arm/css/sgi/sgi_bl31_setup.c
+++ b/plat/arm/css/sgi/sgi_bl31_setup.c
@@ -62,5 +62,16 @@
 
 const plat_psci_ops_t *plat_arm_psci_override_pm_ops(plat_psci_ops_t *ops)
 {
+	/* On the SGI-Clark.Helios platform, only CPU ON/OFF is supported */
+	if ((sgi_plat_info.platform_id == SGI_CLARK_SID_VER_PART_NUM) &&
+	    (sgi_plat_info.config_id == SGI_CLARK_HELIOS_CONFIG_ID)) {
+		ops->cpu_standby = NULL;
+		ops->system_off = NULL;
+		ops->system_reset = NULL;
+		ops->get_sys_suspend_power_state = NULL;
+		ops->pwr_domain_suspend = NULL;
+		ops->pwr_domain_suspend_finish = NULL;
+	}
+
 	return css_scmi_override_pm_ops(ops);
 }
diff --git a/plat/arm/css/sgi/sgi_topology.c b/plat/arm/css/sgi/sgi_topology.c
index e524f11..2921c0c 100644
--- a/plat/arm/css/sgi/sgi_topology.c
+++ b/plat/arm/css/sgi/sgi_topology.c
@@ -5,6 +5,7 @@
  */
 
 #include <plat_arm.h>
+#include <sgi_variant.h>
 
 /* Topology */
 /*
@@ -19,12 +20,39 @@
 	CSS_SGI_MAX_CPUS_PER_CLUSTER
 };
 
+/* SGI-Clark.Helios platform consists of 16 physical CPUs and 32 threads */
+const unsigned char sgi_clark_helios_pd_tree_desc[] = {
+	PLAT_ARM_CLUSTER_COUNT,
+	CSS_SGI_MAX_CPUS_PER_CLUSTER,
+	CSS_SGI_MAX_CPUS_PER_CLUSTER,
+	CSS_SGI_MAX_PE_PER_CPU,
+	CSS_SGI_MAX_PE_PER_CPU,
+	CSS_SGI_MAX_PE_PER_CPU,
+	CSS_SGI_MAX_PE_PER_CPU,
+	CSS_SGI_MAX_PE_PER_CPU,
+	CSS_SGI_MAX_PE_PER_CPU,
+	CSS_SGI_MAX_PE_PER_CPU,
+	CSS_SGI_MAX_PE_PER_CPU,
+	CSS_SGI_MAX_PE_PER_CPU,
+	CSS_SGI_MAX_PE_PER_CPU,
+	CSS_SGI_MAX_PE_PER_CPU,
+	CSS_SGI_MAX_PE_PER_CPU,
+	CSS_SGI_MAX_PE_PER_CPU,
+	CSS_SGI_MAX_PE_PER_CPU,
+	CSS_SGI_MAX_PE_PER_CPU,
+	CSS_SGI_MAX_PE_PER_CPU
+};
+
 /*******************************************************************************
  * This function returns the topology tree information.
  ******************************************************************************/
 const unsigned char *plat_get_power_domain_tree_desc(void)
 {
-	return sgi_pd_tree_desc;
+	if (sgi_plat_info.platform_id == SGI_CLARK_SID_VER_PART_NUM &&
+	    sgi_plat_info.config_id == SGI_CLARK_HELIOS_CONFIG_ID)
+		return sgi_clark_helios_pd_tree_desc;
+	else
+		return sgi_pd_tree_desc;
 }
 
 /*******************************************************************************
diff --git a/plat/common/plat_spm_rd.c b/plat/common/plat_spm_rd.c
new file mode 100644
index 0000000..69b9a23
--- /dev/null
+++ b/plat/common/plat_spm_rd.c
@@ -0,0 +1,312 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <debug.h>
+#include <fdt_wrappers.h>
+#include <libfdt.h>
+#include <platform_def.h>
+#include <sp_res_desc.h>
+#include <string.h>
+#include <object_pool.h>
+
+/*******************************************************************************
+ * Resource pool
+ ******************************************************************************/
+static struct sp_rd_sect_mem_region rd_mem_regions[PLAT_SPM_MEM_REGIONS_MAX];
+static OBJECT_POOL_ARRAY(rd_mem_regions_pool, rd_mem_regions);
+
+static struct sp_rd_sect_notification rd_notifs[PLAT_SPM_NOTIFICATIONS_MAX];
+static OBJECT_POOL_ARRAY(rd_notifs_pool, rd_notifs);
+
+static struct sp_rd_sect_service rd_services[PLAT_SPM_SERVICES_MAX];
+static OBJECT_POOL_ARRAY(rd_services_pool, rd_services);
+
+/*******************************************************************************
+ * Attribute section handler
+ ******************************************************************************/
+static void rd_parse_attribute(struct sp_rd_sect_attribute *attr,
+			       const void *fdt, int node)
+{
+	int rc = 0;
+
+	/* The minimum size that can be read from the DTB is 32-bit. */
+	uint32_t version, sp_type, runtime_el, exec_type;
+	uint32_t panic_policy, xlat_granule;
+
+	rc |= fdtw_read_cells(fdt, node, "version", 1, &version);
+
+	if (version != 1) {
+		ERROR("Unsupported resource description version: 0x%x\n",
+		      version);
+		panic();
+	}
+
+	rc |= fdtw_read_cells(fdt, node, "sp_type", 1, &sp_type);
+	rc |= fdtw_read_cells(fdt, node, "pe_mpidr", 1, &attr->pe_mpidr);
+	rc |= fdtw_read_cells(fdt, node, "runtime_el", 1, &runtime_el);
+	rc |= fdtw_read_cells(fdt, node, "exec_type", 1, &exec_type);
+	rc |= fdtw_read_cells(fdt, node, "panic_policy", 1, &panic_policy);
+	rc |= fdtw_read_cells(fdt, node, "xlat_granule", 1, &xlat_granule);
+	rc |= fdtw_read_cells(fdt, node, "binary_size", 1, &attr->binary_size);
+	rc |= fdtw_read_cells(fdt, node, "load_address", 2, &attr->load_address);
+	rc |= fdtw_read_cells(fdt, node, "entrypoint", 2, &attr->entrypoint);
+
+	attr->version = version;
+	attr->sp_type = sp_type;
+	attr->runtime_el = runtime_el;
+	attr->exec_type = exec_type;
+	attr->panic_policy = panic_policy;
+	attr->xlat_granule = xlat_granule;
+
+	VERBOSE(" Attribute Section:\n");
+	VERBOSE("  version: 0x%x\n", version);
+	VERBOSE("  sp_type: 0x%x\n", sp_type);
+	VERBOSE("  pe_mpidr: 0x%x\n", attr->pe_mpidr);
+	VERBOSE("  runtime_el: 0x%x\n", runtime_el);
+	VERBOSE("  exec_type: 0x%x\n", exec_type);
+	VERBOSE("  panic_policy: 0x%x\n", panic_policy);
+	VERBOSE("  xlat_granule: 0x%x\n", xlat_granule);
+	VERBOSE("  binary_size: 0x%x\n", attr->binary_size);
+	VERBOSE("  load_address: 0x%llx\n", attr->load_address);
+	VERBOSE("  entrypoint: 0x%llx\n", attr->entrypoint);
+
+	if (rc) {
+		ERROR("Failed to read attribute node elements.\n");
+		panic();
+	}
+}
+
+/*******************************************************************************
+ * Memory regions section handlers
+ ******************************************************************************/
+static void rd_parse_memory_region(struct sp_rd_sect_mem_region *rdmem,
+				   const void *fdt, int node)
+{
+	int rc = 0;
+	char name[RD_MEM_REGION_NAME_LEN];
+
+	rc |= fdtw_read_string(fdt, node, "str", (char *)&name, sizeof(name));
+	rc |= fdtw_read_cells(fdt, node, "attr", 1, &rdmem->attr);
+	rc |= fdtw_read_cells(fdt, node, "base", 2, &rdmem->base);
+	rc |= fdtw_read_cells(fdt, node, "size", 2, &rdmem->size);
+
+	size_t len = strlcpy(rdmem->name, name, RD_MEM_REGION_NAME_LEN);
+
+	if (len >= RD_MEM_REGION_NAME_LEN) {
+		WARN("Memory region name truncated: '%s'\n", name);
+	}
+
+	VERBOSE(" Memory Region:\n");
+	VERBOSE("  name: '%s'\n", rdmem->name);
+	VERBOSE("  attr: 0x%x\n", rdmem->attr);
+	VERBOSE("  base: 0x%llx\n", rdmem->base);
+	VERBOSE("  size: 0x%llx\n", rdmem->size);
+
+	if (rc) {
+		ERROR("Failed to read mem_region node elements.\n");
+		panic();
+	}
+}
+
+static void rd_parse_memory_regions(struct sp_res_desc *rd, const void *fdt,
+				    int node)
+{
+	int child;
+	struct sp_rd_sect_mem_region *rdmem, *old_rdmem;
+
+	fdt_for_each_subnode(child, fdt, node) {
+		rdmem = pool_alloc(&rd_mem_regions_pool);
+
+		/* Add element to the start of the list */
+		old_rdmem = rd->mem_region;
+		rd->mem_region = rdmem;
+		rdmem->next = old_rdmem;
+
+		rd_parse_memory_region(rdmem, fdt, child);
+	}
+
+	if ((child < 0) && (child != -FDT_ERR_NOTFOUND)) {
+		ERROR("%d: fdt_for_each_subnode(): %d\n", __LINE__, child);
+		panic();
+	}
+}
+
+/*******************************************************************************
+ * Notifications section handlers
+ ******************************************************************************/
+static void rd_parse_notification(struct sp_rd_sect_notification *rdnot,
+				   const void *fdt, int node)
+{
+	int rc = 0;
+
+	rc |= fdtw_read_cells(fdt, node, "attr", 1, &rdnot->attr);
+	rc |= fdtw_read_cells(fdt, node, "pe", 1, &rdnot->pe);
+
+	VERBOSE(" Notification:\n");
+	VERBOSE("  attr: 0x%x\n", rdnot->attr);
+	VERBOSE("  pe: 0x%x\n", rdnot->pe);
+
+	if (rc) {
+		ERROR("Failed to read notification node elements.\n");
+		panic();
+	}
+}
+
+static void rd_parse_notifications(struct sp_res_desc *rd, const void *fdt, int node)
+{
+	int child;
+	struct sp_rd_sect_notification *rdnot, *old_rdnot;
+
+	fdt_for_each_subnode(child, fdt, node) {
+		rdnot = pool_alloc(&rd_notifs_pool);
+
+		/* Add element to the start of the list */
+		old_rdnot = rd->notification;
+		rd->notification = rdnot;
+		rdnot->next = old_rdnot;
+
+		rd_parse_notification(rdnot, fdt, child);
+	}
+
+	if ((child < 0) && (child != -FDT_ERR_NOTFOUND)) {
+		ERROR("%d: fdt_for_each_subnode(): %d\n", __LINE__, child);
+		panic();
+	}
+}
+
+/*******************************************************************************
+ * Services section handlers
+ ******************************************************************************/
+static void rd_parse_service(struct sp_rd_sect_service *rdsvc, const void *fdt,
+			     int node)
+{
+	int rc = 0;
+
+	/* The minimum size that can be read from the DTB is 32-bit. */
+	uint32_t accessibility, request_type, connection_quota;
+
+	rc |= fdtw_read_array(fdt, node, "uuid", 4, &rdsvc->uuid);
+	rc |= fdtw_read_cells(fdt, node, "accessibility", 1, &accessibility);
+	rc |= fdtw_read_cells(fdt, node, "request_type", 1, &request_type);
+	rc |= fdtw_read_cells(fdt, node, "connection_quota", 1, &connection_quota);
+	rc |= fdtw_read_cells(fdt, node, "sec_mem_size", 1, &rdsvc->secure_mem_size);
+	rc |= fdtw_read_cells(fdt, node, "interrupt_num", 1, &rdsvc->interrupt_num);
+
+	rdsvc->accessibility = accessibility;
+	rdsvc->request_type = request_type;
+	rdsvc->connection_quota = connection_quota;
+
+	VERBOSE(" Service:\n");
+	VERBOSE("  uuid: 0x%08x 0x%08x 0x%08x 0x%08x\n", rdsvc->uuid[0],
+		rdsvc->uuid[1], rdsvc->uuid[2], rdsvc->uuid[3]);
+	VERBOSE("  accessibility: 0x%x\n", accessibility);
+	VERBOSE("  request_type: 0x%x\n", request_type);
+	VERBOSE("  connection_quota: 0x%x\n", connection_quota);
+	VERBOSE("  secure_memory_size: 0x%x\n", rdsvc->secure_mem_size);
+	VERBOSE("  interrupt_num: 0x%x\n", rdsvc->interrupt_num);
+
+	if (rc) {
+		ERROR("Failed to read service node elements.\n");
+		panic();
+	}
+}
+
+static void rd_parse_services(struct sp_res_desc *rd, const void *fdt, int node)
+{
+	int child;
+	struct sp_rd_sect_service *rdsvc, *old_rdsvc;
+
+	fdt_for_each_subnode(child, fdt, node) {
+		rdsvc = pool_alloc(&rd_services_pool);
+
+		/* Add element to the start of the list */
+		old_rdsvc = rd->service;
+		rd->service = rdsvc;
+		rdsvc->next = old_rdsvc;
+
+		rd_parse_service(rdsvc, fdt, child);
+	}
+
+	if ((child < 0) && (child != -FDT_ERR_NOTFOUND)) {
+		ERROR("%d: fdt_for_each_subnode(): %d\n", __LINE__, child);
+		panic();
+	}
+}
+
+/*******************************************************************************
+ * Root node handler
+ ******************************************************************************/
+static void rd_parse_root(struct sp_res_desc *rd, const void *fdt, int root)
+{
+	int node;
+	char *str;
+
+	str = "attribute";
+	node = fdt_subnode_offset_namelen(fdt, root, str, strlen(str));
+	if (node < 0) {
+		ERROR("Root node doesn't contain subnode '%s'\n", str);
+		panic();
+	} else {
+		rd_parse_attribute(&rd->attribute, fdt, node);
+	}
+
+	str = "memory_regions";
+	node = fdt_subnode_offset_namelen(fdt, root, str, strlen(str));
+	if (node < 0) {
+		ERROR("Root node doesn't contain subnode '%s'\n", str);
+		panic();
+	} else {
+		rd_parse_memory_regions(rd, fdt, node);
+	}
+
+	str = "notifications";
+	node = fdt_subnode_offset_namelen(fdt, root, str, strlen(str));
+	if (node < 0) {
+		WARN("Root node doesn't contain subnode '%s'\n", str);
+	} else {
+		rd_parse_notifications(rd, fdt, node);
+	}
+
+	str = "services";
+	node = fdt_subnode_offset_namelen(fdt, root, str, strlen(str));
+	if (node < 0) {
+		WARN("Root node doesn't contain subnode '%s'\n", str);
+	} else {
+		rd_parse_services(rd, fdt, node);
+	}
+}
+
+/*******************************************************************************
+ * Platform handler to load resource descriptor blobs into the active Secure
+ * Partition context.
+ ******************************************************************************/
+int plat_spm_sp_rd_load(struct sp_res_desc *rd, const void *ptr, size_t size)
+{
+	int rc;
+	int root_node;
+
+	assert(rd != NULL);
+	assert(ptr != NULL);
+
+	INFO("Reading RD blob at address %p\n", ptr);
+
+	rc = fdt_check_header(ptr);
+	if (rc != 0) {
+		ERROR("Wrong format for resource descriptor blob (%d).\n", rc);
+		return -1;
+	}
+
+	root_node = fdt_node_offset_by_compatible(ptr, -1, "arm,sp_rd");
+	if (root_node < 0) {
+		ERROR("Unrecognized resource descriptor blob (%d)\n", root_node);
+		return -1;
+	}
+
+	rd_parse_root(rd, ptr, root_node);
+
+	return 0;
+}
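
rd_parse_root() leaves the parsed sections hanging off the sp_res_desc as
singly linked lists (mem_region, notification, service), with each new node
pushed at the head. A minimal consumer sketch, using only the field names that
appear in the parser above (not part of this patch):

    static void rd_dump_mem_regions(const struct sp_res_desc *rd)
    {
    	const struct sp_rd_sect_mem_region *r;

    	/* Walk the list built by rd_parse_memory_regions() */
    	for (r = rd->mem_region; r != NULL; r = r->next) {
    		INFO("RD region '%s': base 0x%llx size 0x%llx attr 0x%x\n",
    		     r->name, r->base, r->size, r->attr);
    	}
    }
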
diff --git a/plat/common/plat_spm_sp.c b/plat/common/plat_spm_sp.c
new file mode 100644
index 0000000..7b7fbd9
--- /dev/null
+++ b/plat/common/plat_spm_sp.c
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <debug.h>
+#include <platform_def.h>
+#include <sptool.h>
+
+static unsigned int sp_next;
+
+/*******************************************************************************
+ * Platform handler to get the address of a Secure Partition and its resource
+ * description blob. It iterates through all SPs detected by the platform. If
+ * there is information for another SP, it returns 0. If there are no more SPs,
+ * it returns -1.
+ ******************************************************************************/
+int plat_spm_sp_get_next_address(void **sp_base, size_t *sp_size,
+				 void **rd_base, size_t *rd_size)
+{
+	assert((sp_base != NULL) && (sp_size != NULL));
+	assert((rd_base != NULL) && (rd_size != NULL));
+
+	const uint64_t *pkg_base = (uint64_t *)PLAT_SP_PACKAGE_BASE;
+
+	struct sp_pkg_header *pkg_header = (struct sp_pkg_header *)pkg_base;
+
+	if (sp_next == 0) {
+		if (pkg_header->version != 0x1) {
+			ERROR("SP package has an unsupported version 0x%llx\n",
+			      pkg_header->version);
+			panic();
+		}
+	}
+
+	if (sp_next >= pkg_header->number_of_sp) {
+		/* No more partitions in the package */
+		return -1;
+	}
+
+	const struct sp_pkg_entry *entry_list =
+		(const struct sp_pkg_entry *)((uintptr_t)pkg_base
+					       + sizeof(struct sp_pkg_header));
+
+	const struct sp_pkg_entry *entry = &(entry_list[sp_next]);
+
+	uint64_t sp_offset = entry->sp_offset;
+	uint64_t rd_offset = entry->rd_offset;
+
+	uintptr_t pkg_sp_base = ((uintptr_t)PLAT_SP_PACKAGE_BASE + sp_offset);
+	uintptr_t pkg_rd_base = ((uintptr_t)PLAT_SP_PACKAGE_BASE + rd_offset);
+
+	uint64_t pkg_sp_size = entry->sp_size;
+	uint64_t pkg_rd_size = entry->rd_size;
+
+	uintptr_t pkg_end = (uintptr_t)PLAT_SP_PACKAGE_BASE
+			  + (uintptr_t)PLAT_SP_PACKAGE_SIZE - 1U;
+
+	/*
+	 * Check for overflows. The package header isn't trusted, so assert()
+	 * can't be used here.
+	 */
+
+	uintptr_t pkg_sp_end = pkg_sp_base + pkg_sp_size - 1U;
+	uintptr_t pkg_rd_end = pkg_rd_base + pkg_rd_size - 1U;
+
+	if ((pkg_sp_end > pkg_end) || (pkg_sp_end < pkg_sp_base)) {
+		ERROR("Invalid Secure Partition size (0x%llx)\n", pkg_sp_size);
+		panic();
+	}
+
+	if ((pkg_rd_end > pkg_end) || (pkg_rd_end < pkg_rd_base)) {
+		ERROR("Invalid Resource Description blob size (0x%llx)\n",
+		      pkg_rd_size);
+		panic();
+	}
+
+	/* Return location of the binaries. */
+
+	*sp_base = (void *)pkg_sp_base;
+	*sp_size = pkg_sp_size;
+	*rd_base = (void *)pkg_rd_base;
+	*rd_size = pkg_rd_size;
+
+	sp_next++;
+
+	return 0;
+}
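
The intended calling pattern is one call per partition until -1 signals the
end of the package; spm_setup() later in this patch does exactly that. A
minimal sketch of the loop for reference:

    void *sp_base, *rd_base;
    size_t sp_size, rd_size;

    while (plat_spm_sp_get_next_address(&sp_base, &sp_size,
    				    &rd_base, &rd_size) == 0) {
    	/* sp_base/sp_size: partition image; rd_base/rd_size: its RD blob */
    }
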
diff --git a/services/arm_arch_svc/arm_arch_svc_setup.c b/services/arm_arch_svc/arm_arch_svc_setup.c
index 45c4704..3a5299f 100644
--- a/services/arm_arch_svc/arm_arch_svc_setup.c
+++ b/services/arm_arch_svc/arm_arch_svc_setup.c
@@ -30,9 +30,27 @@
 			return 1;
 		return 0; /* ERRATA_APPLIES || ERRATA_MISSING */
 #endif
+
 #if WORKAROUND_CVE_2018_3639
-	case SMCCC_ARCH_WORKAROUND_2:
+	case SMCCC_ARCH_WORKAROUND_2: {
 #if DYNAMIC_WORKAROUND_CVE_2018_3639
+		unsigned long long ssbs;
+
+		/*
+		 * Firmware doesn't have to carry out dynamic workaround if the
+		 * PE implements architectural Speculation Store Bypass Safe
+		 * (SSBS) feature.
+		 */
+		ssbs = (read_id_aa64pfr1_el1() >> ID_AA64PFR1_EL1_SSBS_SHIFT) &
+			ID_AA64PFR1_EL1_SSBS_MASK;
+
+		/*
+		 * If architectural SSBS is available on this PE, no firmware
+		 * mitigation via SMCCC_ARCH_WORKAROUND_2 is required.
+		 */
+		if (ssbs != SSBS_UNAVAILABLE)
+			return 1;
+
 		/*
 		 * On a platform where at least one CPU requires
 		 * dynamic mitigation but others are either unaffected
@@ -50,7 +68,11 @@
 		/* Either the CPUs are unaffected or permanently mitigated */
 		return SMCCC_ARCH_NOT_REQUIRED;
 #endif
+	}
 #endif
+
+	/* Fallthrough */
+
 	default:
 		return SMC_UNK;
 	}
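
The early return above hinges on the SSBS field of the PE's feature registers:
if the PE implements Speculation Store Bypass Safe, callers are told that
SMCCC_ARCH_WORKAROUND_2 is not needed on this PE (return value 1) and the
dynamic firmware mitigation is skipped. The definitions the check relies on
are expected to look like this (field position per the Arm ARM; the actual
macros live in arch.h and are not part of this diff):

    /* SSBS is reported in ID_AA64PFR1_EL1[7:4]; 0 means not implemented. */
    #define ID_AA64PFR1_EL1_SSBS_SHIFT	U(4)
    #define ID_AA64PFR1_EL1_SSBS_MASK	ULL(0xf)
    #define SSBS_UNAVAILABLE		ULL(0)
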
diff --git a/services/std_svc/spm/README.rst b/services/std_svc/spm/README.rst
new file mode 100644
index 0000000..63406a3
--- /dev/null
+++ b/services/std_svc/spm/README.rst
@@ -0,0 +1,3 @@
+This is a prototype loosely based on the SPCI Alpha and SPRT pre-alpha
+specifications. Any interface or platform API introduced here is subject to
+change as those specifications evolve.
diff --git a/services/std_svc/spm/spci.c b/services/std_svc/spm/spci.c
new file mode 100644
index 0000000..5e4ff91
--- /dev/null
+++ b/services/std_svc/spm/spci.c
@@ -0,0 +1,774 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <context_mgmt.h>
+#include <debug.h>
+#include <errno.h>
+#include <smccc.h>
+#include <smccc_helpers.h>
+#include <spci_svc.h>
+#include <spinlock.h>
+#include <sprt_host.h>
+#include <sprt_svc.h>
+#include <string.h>
+#include <utils.h>
+
+#include "spm_private.h"
+
+/*******************************************************************************
+ * Macros to print UUIDs.
+ ******************************************************************************/
+#define PRINT_UUID_FORMAT	"%08x-%08x-%08x-%08x"
+#define PRINT_UUID_ARGS(x)	x[0], x[1], x[2], x[3]
+
+/*******************************************************************************
+ * Array of structs that contains information about all handles of Secure
+ * Services that are currently open.
+ ******************************************************************************/
+typedef enum spci_handle_status {
+	HANDLE_STATUS_CLOSED = 0,
+	HANDLE_STATUS_OPEN,
+} spci_handle_status_t;
+
+typedef struct spci_handle {
+	/* 16-bit value used as reference in all SPCI calls */
+	uint16_t handle;
+
+	/* Client ID of the client that requested the handle */
+	uint16_t client_id;
+
+	/* Current status of the handle */
+	spci_handle_status_t status;
+
+	/*
+	 * Context of the Secure Partition that provides the Secure Service
+	 * referenced by this handle.
+	 */
+	sp_context_t *sp_ctx;
+
+	/*
+	 * The same handle might be used for multiple requests, so keep a
+	 * reference count of them.
+	 */
+	unsigned int num_active_requests;
+} spci_handle_t;
+
+static spci_handle_t spci_handles[PLAT_SPCI_HANDLES_MAX_NUM];
+static spinlock_t spci_handles_lock;
+
+/*
+ * Given a handle and a client ID, return the element of the spci_handles
+ * array that contains the information of the handle. It can only return open
+ * handles. It returns NULL if it couldn't find the element in the array.
+ */
+static spci_handle_t *spci_handle_info_get(uint16_t handle, uint16_t client_id)
+{
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(spci_handles); i++) {
+		spci_handle_t *h = &(spci_handles[i]);
+
+		/* Only check for open handles */
+		if (h->status == HANDLE_STATUS_CLOSED) {
+			continue;
+		}
+
+		/* Check if either the handle or the client ID are different */
+		if ((h->handle != handle) || (h->client_id != client_id)) {
+			continue;
+		}
+
+		return h;
+	}
+
+	return NULL;
+}
+
+/*
+ * Returns a unique value for a handle. This function must be called while
+ * spci_handles_lock is locked. It returns 0 on success, -1 on error.
+ */
+static int spci_create_handle_value(uint16_t *handle)
+{
+	/*
+	 * Trivial implementation that relies on the fact that any handle will
+	 * be closed before 2^16 more handles have been opened.
+	 */
+	static uint16_t handle_count;
+
+	*handle = handle_count;
+
+	handle_count++;
+
+	return 0;
+}
+
+/*******************************************************************************
+ * Returns a unique token for a Secure Service request.
+ ******************************************************************************/
+static uint32_t spci_create_token_value(void)
+{
+	/*
+	 * Trivial implementation that relies on the fact that any response will
+	 * be read before 2^32 more service requests have been done.
+	 */
+	static uint32_t token_count;
+
+	return token_count++;
+}
+
+/*******************************************************************************
+ * This function looks for a Secure Partition that has a Secure Service
+ * identified by the given UUID. It returns a handle that the client can use to
+ * access the service, and an SPCI_*** error code.
+ ******************************************************************************/
+static uint64_t spci_service_handle_open_poll(void *handle, u_register_t x1,
+			u_register_t x2, u_register_t x3, u_register_t x4,
+			u_register_t x5, u_register_t x6, u_register_t x7)
+{
+	unsigned int i;
+	sp_context_t *sp_ptr;
+	uint16_t service_handle;
+
+	/* Bits 31:16 of w7 are reserved (MBZ). */
+	assert((x7 & 0xFFFF0000U) == 0);
+
+	uint16_t client_id = x7 & 0x0000FFFFU;
+	uint32_t uuid[4] = { x1, x2, x3, x4 };
+
+	/* Get pointer to the Secure Partition that handles this service */
+	sp_ptr = spm_sp_get_by_uuid(&uuid);
+	if (sp_ptr == NULL) {
+		WARN("SPCI: Service requested by client 0x%04x not found\n",
+		     client_id);
+		WARN("SPCI:   UUID: " PRINT_UUID_FORMAT "\n",
+		     PRINT_UUID_ARGS(uuid));
+
+		SMC_RET2(handle, SPCI_NOT_PRESENT, 0);
+	}
+
+	/* Get lock of the array of handles */
+	spin_lock(&spci_handles_lock);
+
+	/*
+	 * We need to record the client ID and Secure Partition that correspond
+	 * to this handle. Look for the first free entry in the array.
+	 */
+	for (i = 0; i < PLAT_SPCI_HANDLES_MAX_NUM; i++) {
+		if (spci_handles[i].status == HANDLE_STATUS_CLOSED) {
+			break;
+		}
+	}
+
+	if (i == PLAT_SPCI_HANDLES_MAX_NUM) {
+		spin_unlock(&spci_handles_lock);
+
+		WARN("SPCI: Can't open more handles. Client 0x%04x\n",
+		     client_id);
+		WARN("SPCI:   UUID: " PRINT_UUID_FORMAT "\n",
+		     PRINT_UUID_ARGS(uuid));
+
+		SMC_RET2(handle, SPCI_NO_MEMORY, 0);
+	}
+
+	/* Create new handle value */
+	if (spci_create_handle_value(&service_handle) != 0) {
+		spin_unlock(&spci_handles_lock);
+
+		WARN("SPCI: Can't create a new handle value. Client 0x%04x\n",
+		     client_id);
+		WARN("SPCI:   UUID: " PRINT_UUID_FORMAT "\n",
+		     PRINT_UUID_ARGS(uuid));
+
+		SMC_RET2(handle, SPCI_NO_MEMORY, 0);
+	}
+
+	/* Save all information about this handle */
+	spci_handles[i].status = HANDLE_STATUS_OPEN;
+	spci_handles[i].client_id = client_id;
+	spci_handles[i].handle = service_handle;
+	spci_handles[i].num_active_requests = 0U;
+	spci_handles[i].sp_ctx = sp_ptr;
+
+	/* Release lock of the array of handles */
+	spin_unlock(&spci_handles_lock);
+
+	VERBOSE("SPCI: Service handle request by client 0x%04x: 0x%04x\n",
+		client_id, service_handle);
+	VERBOSE("SPCI:   UUID: " PRINT_UUID_FORMAT "\n", PRINT_UUID_ARGS(uuid));
+
+	/* The handle is returned in the top 16 bits of x1 */
+	SMC_RET2(handle, SPCI_SUCCESS, ((uint32_t)service_handle) << 16);
+}
+
+/*******************************************************************************
+ * This function closes a handle that a specific client uses to access a Secure
+ * Service. It returns a SPCI_*** error code.
+ ******************************************************************************/
+static uint64_t spci_service_handle_close(void *handle, u_register_t x1)
+{
+	spci_handle_t *handle_info;
+	uint16_t client_id = x1 & 0x0000FFFFU;
+	uint16_t service_handle = (x1 >> 16) & 0x0000FFFFU;
+
+	spin_lock(&spci_handles_lock);
+
+	handle_info = spci_handle_info_get(service_handle, client_id);
+
+	if (handle_info == NULL) {
+		spin_unlock(&spci_handles_lock);
+
+		WARN("SPCI: Tried to close invalid handle 0x%04x by client 0x%04x\n",
+		     service_handle, client_id);
+
+		SMC_RET1(handle, SPCI_INVALID_PARAMETER);
+	}
+
+	if (handle_info->status != HANDLE_STATUS_OPEN) {
+		spin_unlock(&spci_handles_lock);
+
+		WARN("SPCI: Tried to close handle 0x%04x by client 0x%04x in status %d\n",
+			service_handle, client_id, handle_info->status);
+
+		SMC_RET1(handle, SPCI_INVALID_PARAMETER);
+	}
+
+	if (handle_info->num_active_requests != 0U) {
+		spin_unlock(&spci_handles_lock);
+
+		/* A handle can't be closed if there are requests left */
+		WARN("SPCI: Tried to close handle 0x%04x by client 0x%04x with %d requests left\n",
+			service_handle, client_id,
+			handle_info->num_active_requests);
+
+		SMC_RET1(handle, SPCI_BUSY);
+	}
+
+	memset(handle_info, 0, sizeof(spci_handle_t));
+
+	handle_info->status = HANDLE_STATUS_CLOSED;
+
+	spin_unlock(&spci_handles_lock);
+
+	VERBOSE("SPCI: Closed handle 0x%04x by client 0x%04x.\n",
+		service_handle, client_id);
+
+	SMC_RET1(handle, SPCI_SUCCESS);
+}
+
+/*******************************************************************************
+ * This function makes a blocking Secure Service request for a given handle.
+ ******************************************************************************/
+static uint64_t spci_service_request_blocking(void *handle,
+			uint32_t smc_fid, u_register_t x1, u_register_t x2,
+			u_register_t x3, u_register_t x4, u_register_t x5,
+			u_register_t x6, u_register_t x7)
+{
+	spci_handle_t *handle_info;
+	sp_context_t *sp_ctx;
+	cpu_context_t *cpu_ctx;
+	uint32_t rx0;
+	u_register_t rx1, rx2, rx3;
+	uint16_t request_handle, client_id;
+
+	/* Get handle array lock */
+	spin_lock(&spci_handles_lock);
+
+	/* Get pointer to struct of this open handle and client ID. */
+	request_handle = (x7 >> 16U) & 0x0000FFFFU;
+	client_id = x7 & 0x0000FFFFU;
+
+	handle_info = spci_handle_info_get(request_handle, client_id);
+	if (handle_info == NULL) {
+		spin_unlock(&spci_handles_lock);
+
+		WARN("SPCI_SERVICE_TUN_REQUEST_BLOCKING: Not found.\n");
+		WARN("  Handle 0x%04x. Client ID 0x%04x\n", request_handle,
+		     client_id);
+
+		SMC_RET1(handle, SPCI_INVALID_PARAMETER);
+	}
+
+	/* Get pointer to the Secure Partition that handles the service */
+	sp_ctx = handle_info->sp_ctx;
+	assert(sp_ctx != NULL);
+	cpu_ctx = &(sp_ctx->cpu_ctx);
+
+	/* Blocking requests are only allowed if the queue is empty */
+	if (handle_info->num_active_requests > 0) {
+		spin_unlock(&spci_handles_lock);
+
+		SMC_RET1(handle, SPCI_BUSY);
+	}
+
+	if (spm_sp_request_increase_if_zero(sp_ctx) == -1) {
+		spin_unlock(&spci_handles_lock);
+
+		SMC_RET1(handle, SPCI_BUSY);
+	}
+
+	/* Prevent this handle from being closed */
+	handle_info->num_active_requests += 1;
+
+	/* Release handle lock */
+	spin_unlock(&spci_handles_lock);
+
+	/* Save the Normal world context */
+	cm_el1_sysregs_context_save(NON_SECURE);
+
+	/* Wait until the Secure Partition is idle and set it to busy. */
+	sp_state_wait_switch(sp_ctx, SP_STATE_IDLE, SP_STATE_BUSY);
+
+	/* Pass arguments to the Secure Partition */
+	struct sprt_queue_entry_message message = {
+		.type = SPRT_MSG_TYPE_SERVICE_TUN_REQUEST,
+		.client_id = client_id,
+		.service_handle = request_handle,
+		.session_id = x6,
+		.token = 0, /* No token needed for blocking requests */
+		.args = {smc_fid, x1, x2, x3, x4, x5}
+	};
+
+	spin_lock(&(sp_ctx->spm_sp_buffer_lock));
+	int rc = sprt_push_message((void *)sp_ctx->spm_sp_buffer_base, &message,
+				   SPRT_QUEUE_NUM_BLOCKING);
+	spin_unlock(&(sp_ctx->spm_sp_buffer_lock));
+	if (rc != 0) {
+		/*
+		 * This shouldn't happen, blocking requests can only be made if
+		 * the request queue is empty.
+		 */
+		assert(rc == -ENOMEM);
+		ERROR("SPCI_SERVICE_TUN_REQUEST_BLOCKING: Queue is full.\n");
+		panic();
+	}
+
+	/* Jump to the Secure Partition. */
+	rx0 = spm_sp_synchronous_entry(sp_ctx, 0);
+
+	/* Verify returned value */
+	if (rx0 != SPRT_PUT_RESPONSE_AARCH64) {
+		ERROR("SPM: %s: Unexpected x0 value 0x%x\n", __func__, rx0);
+		panic();
+	}
+
+	rx1 = read_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X3);
+	rx2 = read_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X4);
+	rx3 = read_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X5);
+
+	/* Flag Secure Partition as idle. */
+	assert(sp_ctx->state == SP_STATE_BUSY);
+	sp_state_set(sp_ctx, SP_STATE_IDLE);
+
+	/* Decrease count of requests. */
+	spin_lock(&spci_handles_lock);
+	handle_info->num_active_requests -= 1;
+	spin_unlock(&spci_handles_lock);
+	spm_sp_request_decrease(sp_ctx);
+
+	/* Restore non-secure state */
+	cm_el1_sysregs_context_restore(NON_SECURE);
+	cm_set_next_eret_context(NON_SECURE);
+
+	SMC_RET4(handle, SPCI_SUCCESS, rx1, rx2, rx3);
+}
+
+/*******************************************************************************
+ * This function starts a non-blocking Secure Service request for a handle.
+ ******************************************************************************/
+static uint64_t spci_service_request_start(void *handle,
+			uint32_t smc_fid, u_register_t x1, u_register_t x2,
+			u_register_t x3, u_register_t x4, u_register_t x5,
+			u_register_t x6, u_register_t x7)
+{
+	spci_handle_t *handle_info;
+	sp_context_t *sp_ctx;
+	cpu_context_t *cpu_ctx;
+	uint16_t request_handle, client_id;
+	uint32_t token;
+
+	/* Get handle array lock */
+	spin_lock(&spci_handles_lock);
+
+	/* Get pointer to struct of this open handle and client ID. */
+	request_handle = (x7 >> 16U) & 0x0000FFFFU;
+	client_id = x7 & 0x0000FFFFU;
+
+	handle_info = spci_handle_info_get(request_handle, client_id);
+	if (handle_info == NULL) {
+		spin_unlock(&spci_handles_lock);
+
+		WARN("SPCI_SERVICE_TUN_REQUEST_START: Not found.\n"
+		     "  Handle 0x%04x. Client ID 0x%04x\n", request_handle,
+		     client_id);
+
+		SMC_RET1(handle, SPCI_INVALID_PARAMETER);
+	}
+
+	/* Get pointer to the Secure Partition that handles the service */
+	sp_ctx = handle_info->sp_ctx;
+	assert(sp_ctx != NULL);
+	cpu_ctx = &(sp_ctx->cpu_ctx);
+
+	/* Prevent this handle from being closed */
+	handle_info->num_active_requests += 1;
+
+	spm_sp_request_increase(sp_ctx);
+
+	/* Create new token for this request */
+	token = spci_create_token_value();
+
+	/* Release handle lock */
+	spin_unlock(&spci_handles_lock);
+
+	/* Pass arguments to the Secure Partition */
+	struct sprt_queue_entry_message message = {
+		.type = SPRT_MSG_TYPE_SERVICE_TUN_REQUEST,
+		.client_id = client_id,
+		.service_handle = request_handle,
+		.session_id = x6,
+		.token = token,
+		.args = {smc_fid, x1, x2, x3, x4, x5}
+	};
+
+	spin_lock(&(sp_ctx->spm_sp_buffer_lock));
+	int rc = sprt_push_message((void *)sp_ctx->spm_sp_buffer_base, &message,
+				   SPRT_QUEUE_NUM_NON_BLOCKING);
+	spin_unlock(&(sp_ctx->spm_sp_buffer_lock));
+	if (rc != 0) {
+		WARN("SPCI_SERVICE_TUN_REQUEST_START: SPRT queue full.\n"
+		     "  Handle 0x%04x. Client ID 0x%04x\n", request_handle,
+		     client_id);
+		SMC_RET1(handle, SPCI_NO_MEMORY);
+	}
+
+	/* Try to enter the partition. If it's not possible, simply return. */
+	if (sp_state_try_switch(sp_ctx, SP_STATE_IDLE, SP_STATE_BUSY) != 0) {
+		SMC_RET2(handle, SPCI_SUCCESS, token);
+	}
+
+	/* Save the Normal world context */
+	cm_el1_sysregs_context_save(NON_SECURE);
+
+	/*
+	 * This request is non-blocking and needs to be interruptible by
+	 * non-secure interrupts. Enable their routing to EL3 during the
+	 * processing of the Secure Partition's service on this core.
+	 */
+
+	/* Jump to the Secure Partition. */
+	uint64_t ret = spm_sp_synchronous_entry(sp_ctx, 1);
+
+	/* Verify returned values */
+	if (ret == SPRT_PUT_RESPONSE_AARCH64) {
+		uint32_t token;
+		uint64_t rx1, rx2, rx3, x6;
+
+		token = read_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X1);
+		rx1 = read_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X3);
+		rx2 = read_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X4);
+		rx3 = read_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X5);
+		x6 = read_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X6);
+
+		uint16_t client_id = x6 & 0xFFFFU;
+		uint16_t service_handle = x6 >> 16;
+
+		int rc = spm_response_add(client_id, service_handle, token,
+					  rx1, rx2, rx3);
+		if (rc != 0) {
+			/*
+			 * This error is fatal because we can't return to the SP
+			 * from this SMC. The SP has crashed.
+			 */
+			panic();
+		}
+	} else if ((ret != SPRT_YIELD_AARCH64) &&
+		   (ret != SPM_SECURE_PARTITION_PREEMPTED)) {
+		ERROR("SPM: %s: Unexpected x0 value 0x%llx\n", __func__, ret);
+		panic();
+	}
+
+	/* Flag Secure Partition as idle. */
+	assert(sp_ctx->state == SP_STATE_BUSY);
+	sp_state_set(sp_ctx, SP_STATE_IDLE);
+
+	/* Restore non-secure state */
+	cm_el1_sysregs_context_restore(NON_SECURE);
+	cm_set_next_eret_context(NON_SECURE);
+
+	SMC_RET2(handle, SPCI_SUCCESS, token);
+}
+
+/*******************************************************************************
+ * This function returns the response of a Secure Service given a handle, a
+ * client ID and a token. If not available, it will schedule a Secure Partition
+ * and give it CPU time.
+ ******************************************************************************/
+static uint64_t spci_service_request_resume(void *handle, u_register_t x1,
+					    u_register_t x7)
+{
+	int rc;
+	u_register_t rx1 = 0, rx2 = 0, rx3 = 0;
+	spci_handle_t *handle_info;
+	sp_context_t *sp_ctx;
+	cpu_context_t *cpu_ctx;
+	uint32_t token = (uint32_t) x1;
+	uint16_t client_id = x7 & 0x0000FFFF;
+	uint16_t service_handle = (x7 >> 16) & 0x0000FFFF;
+
+	/* Get pointer to struct of this open handle and client ID. */
+	spin_lock(&spci_handles_lock);
+
+	handle_info = spci_handle_info_get(service_handle, client_id);
+	if (handle_info == NULL) {
+		spin_unlock(&spci_handles_lock);
+		WARN("SPCI_SERVICE_REQUEST_RESUME: Not found.\n"
+		     "Handle 0x%04x. Client ID 0x%04x, Token 0x%08x.\n",
+		     service_handle, client_id, token);
+
+		SMC_RET1(handle, SPCI_INVALID_PARAMETER);
+	}
+
+	/* Get pointer to the Secure Partition that handles the service */
+	sp_ctx = handle_info->sp_ctx;
+	assert(sp_ctx != NULL);
+	cpu_ctx = &(sp_ctx->cpu_ctx);
+
+	spin_unlock(&spci_handles_lock);
+
+	/* Look for a valid response in the global queue */
+	rc = spm_response_get(client_id, service_handle, token,
+			      &rx1, &rx2, &rx3);
+	if (rc == 0) {
+		/* Decrease request count */
+		spin_lock(&spci_handles_lock);
+		handle_info->num_active_requests -= 1;
+		spin_unlock(&spci_handles_lock);
+		spm_sp_request_decrease(sp_ctx);
+
+		SMC_RET4(handle, SPCI_SUCCESS, rx1, rx2, rx3);
+	}
+
+	/* Try to enter the partition. If it's not possible, simply return. */
+	if (sp_state_try_switch(sp_ctx, SP_STATE_IDLE, SP_STATE_BUSY) != 0) {
+		SMC_RET1(handle, SPCI_QUEUED);
+	}
+
+	/* Save the Normal world context */
+	cm_el1_sysregs_context_save(NON_SECURE);
+
+	/*
+	 * This request is non-blocking and needs to be interruptible by
+	 * non-secure interrupts. Enable their routing to EL3 during the
+	 * processing of the Secure Partition's service on this core.
+	 */
+
+	/* Jump to the Secure Partition. */
+	uint64_t ret = spm_sp_synchronous_entry(sp_ctx, 1);
+
+	/* Verify returned values */
+	if (ret == SPRT_PUT_RESPONSE_AARCH64) {
+		uint32_t token;
+		uint64_t rx1, rx2, rx3, x6;
+
+		token = read_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X1);
+		rx1 = read_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X3);
+		rx2 = read_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X4);
+		rx3 = read_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X5);
+		x6 = read_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X6);
+
+		uint16_t client_id = x6 & 0xFFFFU;
+		uint16_t service_handle = x6 >> 16;
+
+		int rc = spm_response_add(client_id, service_handle, token,
+					  rx1, rx2, rx3);
+		if (rc != 0) {
+			/*
+			 * This error is fatal because we can't return to the SP
+			 * from this SMC. The SP has crashed.
+			 */
+			panic();
+		}
+	} else if ((ret != SPRT_YIELD_AARCH64) &&
+		   (ret != SPM_SECURE_PARTITION_PREEMPTED)) {
+		ERROR("SPM: %s: Unexpected x0 value 0x%llx\n", __func__, ret);
+		panic();
+	}
+
+	/* Flag Secure Partition as idle. */
+	assert(sp_ctx->state == SP_STATE_BUSY);
+	sp_state_set(sp_ctx, SP_STATE_IDLE);
+
+	/* Restore non-secure state */
+	cm_el1_sysregs_context_restore(NON_SECURE);
+	cm_set_next_eret_context(NON_SECURE);
+
+	/* Look for a valid response in the global queue */
+	rc = spm_response_get(client_id, service_handle, token,
+			      &rx1, &rx2, &rx3);
+	if (rc != 0) {
+		SMC_RET1(handle, SPCI_QUEUED);
+	}
+
+	/* Decrease request count */
+	spin_lock(&spci_handles_lock);
+	handle_info->num_active_requests -= 1;
+	spin_unlock(&spci_handles_lock);
+	spm_sp_request_decrease(sp_ctx);
+
+	/* Return response */
+	SMC_RET4(handle, SPCI_SUCCESS, rx1, rx2, rx3);
+}
+
+/*******************************************************************************
+ * This function returns the response of a Secure Service given a handle, a
+ * client ID and a token.
+ ******************************************************************************/
+static uint64_t spci_service_get_response(void *handle, u_register_t x1,
+					    u_register_t x7)
+
+{
+	int rc;
+	u_register_t rx1 = 0, rx2 = 0, rx3 = 0;
+	spci_handle_t *handle_info;
+	uint32_t token = (uint32_t) x1;
+	uint16_t client_id = x7 & 0x0000FFFF;
+	uint16_t service_handle = (x7 >> 16) & 0x0000FFFF;
+
+	/* Get pointer to struct of this open handle and client ID. */
+
+	spin_lock(&spci_handles_lock);
+
+	handle_info = spci_handle_info_get(service_handle, client_id);
+	if (handle_info == NULL) {
+		spin_unlock(&spci_handles_lock);
+		WARN("SPCI_SERVICE_GET_RESPONSE: Not found.\n"
+		     "Handle 0x%04x. Client ID 0x%04x, Token 0x%08x.\n",
+		     service_handle, client_id, token);
+
+		SMC_RET1(handle, SPCI_INVALID_PARAMETER);
+	}
+
+	spin_unlock(&spci_handles_lock);
+
+	/* Look for a valid response in the global queue */
+	rc = spm_response_get(client_id, service_handle, token,
+			      &rx1, &rx2, &rx3);
+
+	if (rc != 0) {
+		SMC_RET1(handle, SPCI_QUEUED);
+	}
+
+	/* Decrease request count */
+	spin_lock(&spci_handles_lock);
+	handle_info->num_active_requests -= 1;
+	sp_context_t *sp_ctx;
+	sp_ctx = handle_info->sp_ctx;
+	spin_unlock(&spci_handles_lock);
+	spm_sp_request_decrease(sp_ctx);
+
+	/* Return response */
+	SMC_RET4(handle, SPCI_SUCCESS, rx1, rx2, rx3);
+}
+
+/*******************************************************************************
+ * This function handles all SMCs in the range reserved for SPCI.
+ ******************************************************************************/
+uint64_t spci_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
+			  uint64_t x3, uint64_t x4, void *cookie, void *handle,
+			  uint64_t flags)
+{
+	uint32_t spci_fid;
+
+	/* SPCI only supported from the Non-secure world for now */
+	if (is_caller_non_secure(flags) == SMC_FROM_SECURE) {
+		SMC_RET1(handle, SMC_UNK);
+	}
+
+	if ((smc_fid & SPCI_FID_TUN_FLAG) == 0) {
+
+		/* Miscellaneous calls */
+
+		spci_fid = (smc_fid >> SPCI_FID_MISC_SHIFT) & SPCI_FID_MISC_MASK;
+
+		switch (spci_fid) {
+
+		case SPCI_FID_VERSION:
+			SMC_RET1(handle, SPCI_VERSION_COMPILED);
+
+		case SPCI_FID_SERVICE_HANDLE_OPEN:
+		{
+			if ((smc_fid & SPCI_SERVICE_HANDLE_OPEN_NOTIFY_BIT) != 0) {
+				/* Not supported for now */
+				WARN("SPCI_SERVICE_HANDLE_OPEN_NOTIFY not supported.\n");
+				SMC_RET1(handle, SPCI_INVALID_PARAMETER);
+			}
+
+			uint64_t x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+			uint64_t x6 = SMC_GET_GP(handle, CTX_GPREG_X6);
+			uint64_t x7 = SMC_GET_GP(handle, CTX_GPREG_X7);
+
+			return spci_service_handle_open_poll(handle, x1, x2, x3,
+							     x4, x5, x6, x7);
+		}
+		case SPCI_FID_SERVICE_HANDLE_CLOSE:
+			return spci_service_handle_close(handle, x1);
+
+		case SPCI_FID_SERVICE_REQUEST_BLOCKING:
+		{
+			uint64_t x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+			uint64_t x6 = SMC_GET_GP(handle, CTX_GPREG_X6);
+			uint64_t x7 = SMC_GET_GP(handle, CTX_GPREG_X7);
+
+			return spci_service_request_blocking(handle,
+					smc_fid, x1, x2, x3, x4, x5, x6, x7);
+		}
+
+		case SPCI_FID_SERVICE_REQUEST_START:
+		{
+			uint64_t x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
+			uint64_t x6 = SMC_GET_GP(handle, CTX_GPREG_X6);
+			uint64_t x7 = SMC_GET_GP(handle, CTX_GPREG_X7);
+
+			return spci_service_request_start(handle,
+					smc_fid, x1, x2, x3, x4, x5, x6, x7);
+		}
+
+		case SPCI_FID_SERVICE_GET_RESPONSE:
+		{
+			uint64_t x7 = SMC_GET_GP(handle, CTX_GPREG_X7);
+
+			return spci_service_get_response(handle, x1, x7);
+		}
+
+		default:
+			break;
+		}
+
+	} else {
+
+		/* Tunneled calls */
+
+		spci_fid = (smc_fid >> SPCI_FID_TUN_SHIFT) & SPCI_FID_TUN_MASK;
+
+		switch (spci_fid) {
+
+		case SPCI_FID_SERVICE_REQUEST_RESUME:
+		{
+			uint64_t x7 = SMC_GET_GP(handle, CTX_GPREG_X7);
+
+			return spci_service_request_resume(handle, x1, x7);
+		}
+
+		default:
+			break;
+		}
+	}
+
+	WARN("SPCI: Unsupported call 0x%08x\n", smc_fid);
+	SMC_RET1(handle, SPCI_NOT_SUPPORTED);
+}
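
Taken together, the handlers above define the register protocol a non-secure
client follows: the UUID goes in x1-x4 and the client ID in w7[15:0] when
opening a handle, the handle comes back in x1[31:16], and later calls pack
(handle << 16) | client_id into w7 (or w1 for the close call). A hedged
client-side sketch; smc64() and the SPCI_SERVICE_* function-ID names are
placeholders, not definitions from this patch:

    static void spci_client_example(const uint32_t uuid[4], uint16_t client_id)
    {
    	uint64_t ret[4];
    	uint16_t handle;

    	/* SERVICE_HANDLE_OPEN: UUID in x1-x4, client ID in w7[15:0]. */
    	smc64(SPCI_SERVICE_HANDLE_OPEN, uuid[0], uuid[1], uuid[2], uuid[3],
    	      0, 0, client_id, ret);
    	handle = (ret[1] >> 16) & 0xFFFFU;	/* Returned in x1[31:16]. */

    	/* Blocking request: args in x1-x5, session ID in x6,
    	 * (handle << 16) | client_id in w7. */
    	smc64(SPCI_SERVICE_REQUEST_BLOCKING, 0, 0, 0, 0, 0, 0,
    	      ((uint32_t)handle << 16) | client_id, ret);

    	/* SERVICE_HANDLE_CLOSE: (handle << 16) | client_id in w1. */
    	smc64(SPCI_SERVICE_HANDLE_CLOSE,
    	      ((uint32_t)handle << 16) | client_id, 0, 0, 0, 0, 0, 0, ret);
    }
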
diff --git a/services/std_svc/spm/spm.mk b/services/std_svc/spm/spm.mk
index 0e77086..4ba9feb 100644
--- a/services/std_svc/spm/spm.mk
+++ b/services/std_svc/spm/spm.mk
@@ -11,13 +11,23 @@
         $(error "Error: SPM is only supported on aarch64.")
 endif
 
+include lib/sprt/sprt_host.mk
+
 SPM_SOURCES	:=	$(addprefix services/std_svc/spm/,	\
 			${ARCH}/spm_helpers.S			\
 			${ARCH}/spm_shim_exceptions.S		\
+			spci.c					\
+			spm_buffers.c				\
 			spm_main.c				\
-			sp_setup.c				\
-			sp_xlat.c)
+			spm_setup.c				\
+			spm_xlat.c				\
+			sprt.c)					\
+			${SPRT_LIB_SOURCES}
 
+INCLUDES	+=	${SPRT_LIB_INCLUDES}
+
+# Force SMC Calling Convention 2 when using SPM
+SMCCC_MAJOR_VERSION	:=	2
 
 # Let the top-level Makefile know that we intend to include a BL32 image
 NEED_BL32		:=	yes
diff --git a/services/std_svc/spm/spm_buffers.c b/services/std_svc/spm/spm_buffers.c
new file mode 100644
index 0000000..747337a
--- /dev/null
+++ b/services/std_svc/spm/spm_buffers.c
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <platform_def.h>
+#include <spinlock.h>
+#include <utils_def.h>
+
+/*******************************************************************************
+ * Secure Service response global array. All the responses to the requests done
+ * to the Secure Partition are stored here. They are removed from the array as
+ * soon as their value is read.
+ ******************************************************************************/
+struct sprt_response {
+	int is_valid;
+	uint32_t token;
+	uint16_t client_id, handle;
+	u_register_t x1, x2, x3;
+};
+
+static struct sprt_response responses[PLAT_SPM_RESPONSES_MAX];
+
+static spinlock_t responses_lock;
+
+/* Add response to the global response buffer. Returns 0 on success else -1. */
+int spm_response_add(uint16_t client_id, uint16_t handle, uint32_t token,
+		     u_register_t x1, u_register_t x2, u_register_t x3)
+{
+	spin_lock(&responses_lock);
+
+	/* Make sure that there isn't any other response with the same token. */
+	for (unsigned int i = 0U; i < ARRAY_SIZE(responses); i++) {
+		struct sprt_response *resp = &(responses[i]);
+
+		if ((resp->is_valid == 1) && (resp->token == token)) {
+			spin_unlock(&responses_lock);
+			return -1;
+		}
+	}
+
+	for (unsigned int i = 0U; i < ARRAY_SIZE(responses); i++) {
+		struct sprt_response *resp = &(responses[i]);
+
+		if (resp->is_valid == 0) {
+			resp->token = token;
+			resp->client_id = client_id;
+			resp->handle = handle;
+			resp->x1 = x1;
+			resp->x2 = x2;
+			resp->x3 = x3;
+
+			dmbish();
+
+			resp->is_valid = 1;
+
+			spin_unlock(&responses_lock);
+
+			return 0;
+		}
+	}
+
+	spin_unlock(&responses_lock);
+
+	return -1;
+}
+
+/*
+ * Returns a response from the responses array and removes it from the array.
+ * Returns 0 on success, -1 if it wasn't found.
+ */
+int spm_response_get(uint16_t client_id, uint16_t handle, uint32_t token,
+		     u_register_t *x1, u_register_t *x2, u_register_t *x3)
+{
+	spin_lock(&responses_lock);
+
+	for (unsigned int i = 0U; i < ARRAY_SIZE(responses); i++) {
+		struct sprt_response *resp = &(responses[i]);
+
+		/* Ignore invalid entries */
+		if (resp->is_valid == 0) {
+			continue;
+		}
+
+		/* Make sure that all the information matches the stored one */
+		if ((resp->token != token) || (resp->client_id != client_id) ||
+		    (resp->handle != handle)) {
+			continue;
+		}
+
+		*x1 = resp->x1;
+		*x2 = resp->x2;
+		*x3 = resp->x3;
+
+		dmbish();
+
+		resp->is_valid = 0;
+
+		spin_unlock(&responses_lock);
+
+		return 0;
+	}
+
+	spin_unlock(&responses_lock);
+
+	return -1;
+}
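
This buffer is the hand-off point between the two SPCI paths above: when a
partition completes a non-blocking request with SPRT_PUT_RESPONSE_AARCH64, the
request_start/resume handlers store the result with spm_response_add(); the
client later drains it through SPCI_SERVICE_GET_RESPONSE or REQUEST_RESUME,
which call spm_response_get() with the same {client_id, handle, token} triple.
The dmbish() barriers order the x1-x3 payload against the is_valid flag, and
the spinlock serialises concurrent producers and consumers. Consumer side, as
used by the handlers above:

    u_register_t x1, x2, x3;

    if (spm_response_get(client_id, handle, token, &x1, &x2, &x3) == 0) {
    	/* Response ready: return SPCI_SUCCESS and x1-x3 to the client. */
    } else {
    	/* Still pending: return SPCI_QUEUED. */
    }
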
diff --git a/services/std_svc/spm/spm_main.c b/services/std_svc/spm/spm_main.c
index 880e86e..460d1fb 100644
--- a/services/std_svc/spm/spm_main.c
+++ b/services/std_svc/spm/spm_main.c
@@ -11,14 +11,14 @@
 #include <debug.h>
 #include <ehf.h>
 #include <errno.h>
-#include <mm_svc.h>
+#include <interrupt_mgmt.h>
 #include <platform.h>
 #include <runtime_svc.h>
-#include <secure_partition.h>
 #include <smccc.h>
 #include <smccc_helpers.h>
 #include <spinlock.h>
-#include <spm_svc.h>
+#include <string.h>
+#include <sprt_svc.h>
 #include <utils.h>
 #include <xlat_tables_v2.h>
 
@@ -27,7 +27,89 @@
 /*******************************************************************************
  * Secure Partition context information.
  ******************************************************************************/
-static sp_context_t sp_ctx;
+sp_context_t sp_ctx_array[PLAT_SPM_MAX_PARTITIONS];
+
+/* Last Secure Partition used by each CPU */
+sp_context_t *cpu_sp_ctx[PLATFORM_CORE_COUNT];
+
+void spm_cpu_set_sp_ctx(unsigned int linear_id, sp_context_t *sp_ctx)
+{
+	assert(linear_id < PLATFORM_CORE_COUNT);
+
+	cpu_sp_ctx[linear_id] = sp_ctx;
+}
+
+sp_context_t *spm_cpu_get_sp_ctx(unsigned int linear_id)
+{
+	assert(linear_id < PLATFORM_CORE_COUNT);
+
+	return cpu_sp_ctx[linear_id];
+}
+
+/*******************************************************************************
+ * Functions to keep track of how many requests a Secure Partition has received
+ * and hasn't finished.
+ ******************************************************************************/
+void spm_sp_request_increase(sp_context_t *sp_ctx)
+{
+	spin_lock(&(sp_ctx->request_count_lock));
+	sp_ctx->request_count++;
+	spin_unlock(&(sp_ctx->request_count_lock));
+}
+
+void spm_sp_request_decrease(sp_context_t *sp_ctx)
+{
+	spin_lock(&(sp_ctx->request_count_lock));
+	sp_ctx->request_count--;
+	spin_unlock(&(sp_ctx->request_count_lock));
+}
+
+/* Returns 0 if it was originally 0, -1 otherwise. */
+int spm_sp_request_increase_if_zero(sp_context_t *sp_ctx)
+{
+	int ret = -1;
+
+	spin_lock(&(sp_ctx->request_count_lock));
+	if (sp_ctx->request_count == 0U) {
+		sp_ctx->request_count++;
+		ret = 0U;
+	}
+	spin_unlock(&(sp_ctx->request_count_lock));
+
+	return ret;
+}
+
+/*******************************************************************************
+ * This function returns a pointer to the context of the Secure Partition that
+ * handles the service specified by a UUID. It returns NULL if the UUID wasn't
+ * found.
+ ******************************************************************************/
+sp_context_t *spm_sp_get_by_uuid(const uint32_t (*svc_uuid)[4])
+{
+	unsigned int i;
+
+	for (i = 0U; i < PLAT_SPM_MAX_PARTITIONS; i++) {
+
+		sp_context_t *sp_ctx = &sp_ctx_array[i];
+
+		if (sp_ctx->is_present == 0) {
+			continue;
+		}
+
+		struct sp_rd_sect_service *rdsvc;
+
+		for (rdsvc = sp_ctx->rd.service; rdsvc != NULL;
+		     rdsvc = rdsvc->next) {
+			uint32_t *rd_uuid = (uint32_t *)(rdsvc->uuid);
+
+			if (memcmp(rd_uuid, svc_uuid, sizeof(rd_uuid)) == 0) {
+				return sp_ctx;
+			}
+		}
+	}
+
+	return NULL;
+}
 
 /*******************************************************************************
  * Set state of a Secure Partition context.
@@ -85,13 +167,15 @@
  * This function takes an SP context pointer and performs a synchronous entry
  * into it.
  ******************************************************************************/
-static uint64_t spm_sp_synchronous_entry(sp_context_t *sp_ctx)
+uint64_t spm_sp_synchronous_entry(sp_context_t *sp_ctx, int can_preempt)
 {
 	uint64_t rc;
+	unsigned int linear_id = plat_my_core_pos();
 
 	assert(sp_ctx != NULL);
 
 	/* Assign the context of the SP to this CPU */
+	spm_cpu_set_sp_ctx(linear_id, sp_ctx);
 	cm_set_context(&(sp_ctx->cpu_ctx), SECURE);
 
 	/* Restore the context assigned above */
@@ -102,6 +186,12 @@
 	tlbivmalle1();
 	dsbish();
 
+	if (can_preempt == 1) {
+		enable_intr_rm_local(INTR_TYPE_NS, SECURE);
+	} else {
+		disable_intr_rm_local(INTR_TYPE_NS, SECURE);
+	}
+
 	/* Enter Secure Partition */
 	rc = spm_secure_partition_enter(&sp_ctx->c_rt_ctx);
 
@@ -115,9 +205,11 @@
  * This function returns to the place where spm_sp_synchronous_entry() was
  * called originally.
  ******************************************************************************/
-__dead2 static void spm_sp_synchronous_exit(uint64_t rc)
+__dead2 void spm_sp_synchronous_exit(uint64_t rc)
 {
-	sp_context_t *ctx = &sp_ctx;
+	/* Get context of the SP in use by this CPU. */
+	unsigned int linear_id = plat_my_core_pos();
+	sp_context_t *ctx = spm_cpu_get_sp_ctx(linear_id);
 
 	/*
 	 * The SPM must have initiated the original request through a
@@ -130,25 +222,49 @@
 }
 
 /*******************************************************************************
+ * This function is the handler the SPM registers for Non-secure interrupts.
+ * It validates the interrupt and upon success arranges entry into the normal
+ * world for handling the interrupt.
+ ******************************************************************************/
+static uint64_t spm_ns_interrupt_handler(uint32_t id, uint32_t flags,
+					  void *handle, void *cookie)
+{
+	/* Check the security state when the exception was generated */
+	assert(get_interrupt_src_ss(flags) == SECURE);
+
+	spm_sp_synchronous_exit(SPM_SECURE_PARTITION_PREEMPTED);
+}
+
+/*******************************************************************************
  * Jump to each Secure Partition for the first time.
  ******************************************************************************/
 static int32_t spm_init(void)
 {
-	uint64_t rc;
+	uint64_t rc = 0;
 	sp_context_t *ctx;
 
-	INFO("Secure Partition init...\n");
+	for (unsigned int i = 0U; i < PLAT_SPM_MAX_PARTITIONS; i++) {
 
-	ctx = &sp_ctx;
+		ctx = &sp_ctx_array[i];
 
-	ctx->state = SP_STATE_RESET;
+		if (ctx->is_present == 0) {
+			continue;
+		}
 
-	rc = spm_sp_synchronous_entry(ctx);
-	assert(rc == 0);
+		INFO("Secure Partition %u init...\n", i);
 
-	ctx->state = SP_STATE_IDLE;
+		ctx->state = SP_STATE_RESET;
 
-	INFO("Secure Partition initialized.\n");
+		rc = spm_sp_synchronous_entry(ctx, 0);
+		if (rc != SPRT_YIELD_AARCH64) {
+			ERROR("Unexpected return value 0x%llx\n", rc);
+			panic();
+		}
+
+		ctx->state = SP_STATE_IDLE;
+
+		INFO("Secure Partition %u initialized.\n", i);
+	}
 
 	return rc;
 }
@@ -158,196 +274,84 @@
  ******************************************************************************/
 int32_t spm_setup(void)
 {
+	int rc;
 	sp_context_t *ctx;
+	void *sp_base, *rd_base;
+	size_t sp_size, rd_size;
+	uint64_t flags = 0U;
 
 	/* Disable MMU at EL1 (initialized by BL2) */
 	disable_mmu_icache_el1();
 
-	/* Initialize context of the SP */
-	INFO("Secure Partition context setup start...\n");
+	/*
+	 * Non-blocking services can be interrupted by Non-secure interrupts.
+	 * Register an interrupt handler for NS interrupts when generated while
+	 * the CPU is in secure state. They are routed to EL3.
+	 */
+	set_interrupt_rm_flag(flags, SECURE);
 
-	ctx = &sp_ctx;
+	uint64_t rc_int = register_interrupt_type_handler(INTR_TYPE_NS,
+				spm_ns_interrupt_handler, flags);
+	if (rc_int) {
+		ERROR("SPM: Failed to register NS interrupt handler with rc = %llx\n",
+		      rc_int);
+		panic();
+	}
 
-	/* Assign translation tables context. */
-	ctx->xlat_ctx_handle = spm_get_sp_xlat_context();
+	/*
+	 * Setup all Secure Partitions.
+	 */
+	unsigned int i = 0U;
 
-	spm_sp_setup(ctx);
+	while (1) {
+		rc = plat_spm_sp_get_next_address(&sp_base, &sp_size,
+						&rd_base, &rd_size);
+		if (rc < 0) {
+			/* Reached the end of the package. */
+			break;
+		}
+
+		if (i >= PLAT_SPM_MAX_PARTITIONS) {
+			ERROR("Too many partitions in the package.\n");
+			panic();
+		}
+
+		ctx = &sp_ctx_array[i];
+
+		assert(ctx->is_present == 0);
+
+		/* Initialize context of the SP */
+		INFO("Secure Partition %u context setup start...\n", i);
+
+		/* Assign translation tables context. */
+		ctx->xlat_ctx_handle = spm_sp_xlat_context_alloc();
+
+		/* Save location of the image in physical memory */
+		ctx->image_base = (uintptr_t)sp_base;
+		ctx->image_size = sp_size;
+
+		rc = plat_spm_sp_rd_load(&ctx->rd, rd_base, rd_size);
+		if (rc < 0) {
+			ERROR("Error while loading RD blob.\n");
+			panic();
+		}
+
+		spm_sp_setup(ctx);
+
+		ctx->is_present = 1;
+
+		INFO("Secure Partition %u setup done.\n", i);
+
+		i++;
+	}
+
+	if (i == 0U) {
+		ERROR("No present partitions in the package.\n");
+		panic();
+	}
 
 	/* Register init function for deferred init.  */
 	bl31_register_bl32_init(&spm_init);
 
-	INFO("Secure Partition setup done.\n");
-
 	return 0;
 }
-
-/*******************************************************************************
- * Function to perform a call to a Secure Partition.
- ******************************************************************************/
-uint64_t spm_sp_call(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3)
-{
-	uint64_t rc;
-	sp_context_t *sp_ptr = &sp_ctx;
-
-	/* Wait until the Secure Partition is idle and set it to busy. */
-	sp_state_wait_switch(sp_ptr, SP_STATE_IDLE, SP_STATE_BUSY);
-
-	/* Set values for registers on SP entry */
-	cpu_context_t *cpu_ctx = &(sp_ptr->cpu_ctx);
-
-	write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X0, smc_fid);
-	write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X1, x1);
-	write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X2, x2);
-	write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X3, x3);
-
-	/* Jump to the Secure Partition. */
-	rc = spm_sp_synchronous_entry(sp_ptr);
-
-	/* Flag Secure Partition as idle. */
-	assert(sp_ptr->state == SP_STATE_BUSY);
-	sp_state_set(sp_ptr, SP_STATE_IDLE);
-
-	return rc;
-}
-
-/*******************************************************************************
- * MM_COMMUNICATE handler
- ******************************************************************************/
-static uint64_t mm_communicate(uint32_t smc_fid, uint64_t mm_cookie,
-			       uint64_t comm_buffer_address,
-			       uint64_t comm_size_address, void *handle)
-{
-	uint64_t rc;
-
-	/* Cookie. Reserved for future use. It must be zero. */
-	if (mm_cookie != 0U) {
-		ERROR("MM_COMMUNICATE: cookie is not zero\n");
-		SMC_RET1(handle, SPM_INVALID_PARAMETER);
-	}
-
-	if (comm_buffer_address == 0U) {
-		ERROR("MM_COMMUNICATE: comm_buffer_address is zero\n");
-		SMC_RET1(handle, SPM_INVALID_PARAMETER);
-	}
-
-	if (comm_size_address != 0U) {
-		VERBOSE("MM_COMMUNICATE: comm_size_address is not 0 as recommended.\n");
-	}
-
-	/*
-	 * The current secure partition design mandates
-	 * - at any point, only a single core can be
-	 *   executing in the secure partiton.
-	 * - a core cannot be preempted by an interrupt
-	 *   while executing in secure partition.
-	 * Raise the running priority of the core to the
-	 * interrupt level configured for secure partition
-	 * so as to block any interrupt from preempting this
-	 * core.
-	 */
-	ehf_activate_priority(PLAT_SP_PRI);
-
-	/* Save the Normal world context */
-	cm_el1_sysregs_context_save(NON_SECURE);
-
-	rc = spm_sp_call(smc_fid, comm_buffer_address, comm_size_address,
-			 plat_my_core_pos());
-
-	/* Restore non-secure state */
-	cm_el1_sysregs_context_restore(NON_SECURE);
-	cm_set_next_eret_context(NON_SECURE);
-
-	/*
-	 * Exited from secure partition. This core can take
-	 * interrupts now.
-	 */
-	ehf_deactivate_priority(PLAT_SP_PRI);
-
-	SMC_RET1(handle, rc);
-}
-
-/*******************************************************************************
- * Secure Partition Manager SMC handler.
- ******************************************************************************/
-uint64_t spm_smc_handler(uint32_t smc_fid,
-			 uint64_t x1,
-			 uint64_t x2,
-			 uint64_t x3,
-			 uint64_t x4,
-			 void *cookie,
-			 void *handle,
-			 uint64_t flags)
-{
-	unsigned int ns;
-
-	/* Determine which security state this SMC originated from */
-	ns = is_caller_non_secure(flags);
-
-	if (ns == SMC_FROM_SECURE) {
-
-		/* Handle SMCs from Secure world. */
-
-		assert(handle == cm_get_context(SECURE));
-
-		/* Make next ERET jump to S-EL0 instead of S-EL1. */
-		cm_set_elr_spsr_el3(SECURE, read_elr_el1(), read_spsr_el1());
-
-		switch (smc_fid) {
-
-		case SPM_VERSION_AARCH32:
-			SMC_RET1(handle, SPM_VERSION_COMPILED);
-
-		case SP_EVENT_COMPLETE_AARCH64:
-			spm_sp_synchronous_exit(x1);
-
-		case SP_MEMORY_ATTRIBUTES_GET_AARCH64:
-			INFO("Received SP_MEMORY_ATTRIBUTES_GET_AARCH64 SMC\n");
-
-			if (sp_ctx.state != SP_STATE_RESET) {
-				WARN("SP_MEMORY_ATTRIBUTES_GET_AARCH64 is available at boot time only\n");
-				SMC_RET1(handle, SPM_NOT_SUPPORTED);
-			}
-			SMC_RET1(handle,
-				 spm_memory_attributes_get_smc_handler(
-					 &sp_ctx, x1));
-
-		case SP_MEMORY_ATTRIBUTES_SET_AARCH64:
-			INFO("Received SP_MEMORY_ATTRIBUTES_SET_AARCH64 SMC\n");
-
-			if (sp_ctx.state != SP_STATE_RESET) {
-				WARN("SP_MEMORY_ATTRIBUTES_SET_AARCH64 is available at boot time only\n");
-				SMC_RET1(handle, SPM_NOT_SUPPORTED);
-			}
-			SMC_RET1(handle,
-				 spm_memory_attributes_set_smc_handler(
-					&sp_ctx, x1, x2, x3));
-		default:
-			break;
-		}
-	} else {
-
-		/* Handle SMCs from Non-secure world. */
-
-		assert(handle == cm_get_context(NON_SECURE));
-
-		switch (smc_fid) {
-
-		case MM_VERSION_AARCH32:
-			SMC_RET1(handle, MM_VERSION_COMPILED);
-
-		case MM_COMMUNICATE_AARCH32:
-		case MM_COMMUNICATE_AARCH64:
-			return mm_communicate(smc_fid, x1, x2, x3, handle);
-
-		case SP_MEMORY_ATTRIBUTES_GET_AARCH64:
-		case SP_MEMORY_ATTRIBUTES_SET_AARCH64:
-			/* SMC interfaces reserved for secure callers. */
-			SMC_RET1(handle, SPM_NOT_SUPPORTED);
-
-		default:
-			break;
-		}
-	}
-
-	SMC_RET1(handle, SMC_UNK);
-}
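
The synchronous entry/exit pair is the core of the new dispatch model: the
value returned by spm_sp_synchronous_entry() is whatever is later passed to
spm_sp_synchronous_exit(), i.e. the SPRT function ID the partition used
(SPRT_PUT_RESPONSE_AARCH64 or SPRT_YIELD_AARCH64) or, when entered with
can_preempt == 1, SPM_SECURE_PARTITION_PREEMPTED from the Non-secure interrupt
handler. A sketch of the contract as the SPCI handlers rely on it:

    uint64_t rc = spm_sp_synchronous_entry(sp_ctx, 1 /* can_preempt */);

    switch (rc) {
    case SPRT_PUT_RESPONSE_AARCH64:
    	/* SP produced a response: read x1/x3-x6 from its saved context. */
    	break;
    case SPRT_YIELD_AARCH64:		/* SP yielded voluntarily. */
    case SPM_SECURE_PARTITION_PREEMPTED:	/* NS interrupt preempted the SP. */
    	break;
    default:
    	panic();
    }
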
diff --git a/services/std_svc/spm/spm_private.h b/services/std_svc/spm/spm_private.h
index ec3f48e..c1aad93 100644
--- a/services/std_svc/spm/spm_private.h
+++ b/services/std_svc/spm/spm_private.h
@@ -29,9 +29,13 @@
 #define SP_C_RT_CTX_SIZE	0x60
 #define SP_C_RT_CTX_ENTRIES	(SP_C_RT_CTX_SIZE >> DWORD_SHIFT)
 
+/* Value returned by spm_sp_synchronous_entry() when a partition is preempted */
+#define SPM_SECURE_PARTITION_PREEMPTED	U(0x1234)
+
 #ifndef __ASSEMBLY__
 
 #include <spinlock.h>
+#include <sp_res_desc.h>
 #include <stdint.h>
 #include <xlat_tables_v2.h>
 
@@ -42,28 +46,68 @@
 } sp_state_t;
 
 typedef struct sp_context {
+	/* 1 if the partition is present, 0 otherwise */
+	int is_present;
+
+	/* Location of the image in physical memory */
+	unsigned long long image_base;
+	size_t image_size;
+
 	uint64_t c_rt_ctx;
 	cpu_context_t cpu_ctx;
+	struct sp_res_desc rd;
+
+	/* Translation tables context */
 	xlat_ctx_t *xlat_ctx_handle;
+	spinlock_t xlat_ctx_lock;
 
 	sp_state_t state;
 	spinlock_t state_lock;
+
+	unsigned int request_count;
+	spinlock_t request_count_lock;
+
+	/* Base and size of the shared SPM<->SP buffer */
+	uintptr_t spm_sp_buffer_base;
+	size_t spm_sp_buffer_size;
+	spinlock_t spm_sp_buffer_lock;
 } sp_context_t;
 
+/* Functions used to enter/exit a Secure Partition synchronously */
+uint64_t spm_sp_synchronous_entry(sp_context_t *sp_ctx, int can_preempt);
+__dead2 void spm_sp_synchronous_exit(uint64_t rc);
+
 /* Assembly helpers */
 uint64_t spm_secure_partition_enter(uint64_t *c_rt_ctx);
 void __dead2 spm_secure_partition_exit(uint64_t c_rt_ctx, uint64_t ret);
 
+/* Secure Partition setup */
 void spm_sp_setup(sp_context_t *sp_ctx);
 
-xlat_ctx_t *spm_get_sp_xlat_context(void);
+/* Secure Partition state management helpers */
+void sp_state_set(sp_context_t *sp_ptr, sp_state_t state);
+void sp_state_wait_switch(sp_context_t *sp_ptr, sp_state_t from, sp_state_t to);
+int sp_state_try_switch(sp_context_t *sp_ptr, sp_state_t from, sp_state_t to);
 
-int32_t spm_memory_attributes_get_smc_handler(sp_context_t *sp_ctx,
-					      uintptr_t base_va);
-int spm_memory_attributes_set_smc_handler(sp_context_t *sp_ctx,
-					  u_register_t page_address,
-					  u_register_t pages_count,
-					  u_register_t smc_attributes);
+/* Functions to keep track of the number of active requests per SP */
+void spm_sp_request_increase(sp_context_t *sp_ctx);
+void spm_sp_request_decrease(sp_context_t *sp_ctx);
+int spm_sp_request_increase_if_zero(sp_context_t *sp_ctx);
+
+/* Functions related to the translation tables management */
+xlat_ctx_t *spm_sp_xlat_context_alloc(void);
+void sp_map_memory_regions(sp_context_t *sp_ctx);
+
+/* Functions to handle Secure Partition contexts */
+void spm_cpu_set_sp_ctx(unsigned int linear_id, sp_context_t *sp_ctx);
+sp_context_t *spm_cpu_get_sp_ctx(unsigned int linear_id);
+sp_context_t *spm_sp_get_by_uuid(const uint32_t (*svc_uuid)[4]);
+
+/* Functions to manipulate response and requests buffers */
+int spm_response_add(uint16_t client_id, uint16_t handle, uint32_t token,
+		     u_register_t x1, u_register_t x2, u_register_t x3);
+int spm_response_get(uint16_t client_id, uint16_t handle, uint32_t token,
+		     u_register_t *x1, u_register_t *x2, u_register_t *x3);
 
 #endif /* __ASSEMBLY__ */
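The synchronous entry/exit helpers declared above are the heart of the new dispatch model: SPM enters a partition and regains control with a return code, which is SPM_SECURE_PARTITION_PREEMPTED when the partition was interrupted before completing. A minimal sketch of a caller, using the per-CPU context lookup declared above and leaving the resume policy open (it is not defined in this header), might look like this:

static uint64_t run_sp_on_this_cpu(int can_preempt)
{
	/* Sketch only: enter the partition assigned to this CPU. */
	sp_context_t *ctx = spm_cpu_get_sp_ctx(plat_my_core_pos());
	uint64_t rc = spm_sp_synchronous_entry(ctx, can_preempt);

	if (rc == SPM_SECURE_PARTITION_PREEMPTED) {
		/*
		 * The partition was preempted by an interrupt; when and how
		 * it is re-entered is up to the caller.
		 */
	}

	return rc;
}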
 
diff --git a/services/std_svc/spm/spm_setup.c b/services/std_svc/spm/spm_setup.c
new file mode 100644
index 0000000..aca779f
--- /dev/null
+++ b/services/std_svc/spm/spm_setup.c
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <common_def.h>
+#include <context.h>
+#include <context_mgmt.h>
+#include <debug.h>
+#include <platform_def.h>
+#include <platform.h>
+#include <sp_res_desc.h>
+#include <sprt_host.h>
+#include <string.h>
+#include <xlat_tables_v2.h>
+
+#include "spm_private.h"
+#include "spm_shim_private.h"
+
+/* Setup context of the Secure Partition */
+void spm_sp_setup(sp_context_t *sp_ctx)
+{
+	cpu_context_t *ctx = &(sp_ctx->cpu_ctx);
+
+	/*
+	 * Initialize CPU context
+	 * ----------------------
+	 */
+
+	entry_point_info_t ep_info = {0};
+
+	SET_PARAM_HEAD(&ep_info, PARAM_EP, VERSION_1, SECURE | EP_ST_ENABLE);
+
+	/* Setup entrypoint and SPSR */
+	ep_info.pc = sp_ctx->rd.attribute.entrypoint;
+	ep_info.spsr = SPSR_64(MODE_EL0, MODE_SP_EL0, DISABLE_ALL_EXCEPTIONS);
+
+	/*
+	 * X0: Unused (MBZ).
+	 * X1: Unused (MBZ).
+	 * X2: cookie value (Implementation Defined)
+	 * X3: cookie value (Implementation Defined)
+	 * X4 to X7 = 0
+	 */
+	ep_info.args.arg0 = 0;
+	ep_info.args.arg1 = 0;
+	ep_info.args.arg2 = PLAT_SPM_COOKIE_0;
+	ep_info.args.arg3 = PLAT_SPM_COOKIE_1;
+
+	cm_setup_context(ctx, &ep_info);
+
+	/*
+	 * Setup translation tables
+	 * ------------------------
+	 */
+
+	sp_map_memory_regions(sp_ctx);
+
+	/*
+	 * MMU-related registers
+	 * ---------------------
+	 */
+	xlat_ctx_t *xlat_ctx = sp_ctx->xlat_ctx_handle;
+
+	uint64_t mmu_cfg_params[MMU_CFG_PARAM_MAX];
+
+	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, 0, xlat_ctx->base_table,
+		      xlat_ctx->pa_max_address, xlat_ctx->va_max_address,
+		      EL1_EL0_REGIME);
+
+	write_ctx_reg(get_sysregs_ctx(ctx), CTX_MAIR_EL1,
+		      mmu_cfg_params[MMU_CFG_MAIR]);
+
+	write_ctx_reg(get_sysregs_ctx(ctx), CTX_TCR_EL1,
+		      mmu_cfg_params[MMU_CFG_TCR]);
+
+	write_ctx_reg(get_sysregs_ctx(ctx), CTX_TTBR0_EL1,
+		      mmu_cfg_params[MMU_CFG_TTBR0]);
+
+	/* Setup SCTLR_EL1 */
+	u_register_t sctlr_el1 = read_ctx_reg(get_sysregs_ctx(ctx), CTX_SCTLR_EL1);
+
+	sctlr_el1 |=
+		/*SCTLR_EL1_RES1 |*/
+		/* Don't trap DC CVAU, DC CIVAC, DC CVAC, DC CVAP, or IC IVAU */
+		SCTLR_UCI_BIT							|
+		/* RW regions at xlat regime EL1&0 are forced to be XN. */
+		SCTLR_WXN_BIT							|
+		/* Don't trap to EL1 execution of WFI or WFE at EL0. */
+		SCTLR_NTWI_BIT | SCTLR_NTWE_BIT					|
+		/* Don't trap to EL1 accesses to CTR_EL0 from EL0. */
+		SCTLR_UCT_BIT							|
+		/* Don't trap to EL1 execution of DZ ZVA at EL0. */
+		SCTLR_DZE_BIT							|
+		/* Enable SP Alignment check for EL0 */
+		SCTLR_SA0_BIT							|
+		/* Allow cacheable data and instr. accesses to normal memory. */
+		SCTLR_C_BIT | SCTLR_I_BIT					|
+		/* Alignment fault checking enabled when at EL1 and EL0. */
+		SCTLR_A_BIT							|
+		/* Enable MMU. */
+		SCTLR_M_BIT
+	;
+
+	sctlr_el1 &= ~(
+		/* Explicit data accesses at EL0 are little-endian. */
+		SCTLR_E0E_BIT							|
+		/* Accesses to DAIF from EL0 are trapped to EL1. */
+		SCTLR_UMA_BIT
+	);
+
+	write_ctx_reg(get_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_el1);
+
+	/*
+	 * Setup other system registers
+	 * ----------------------------
+	 */
+
+	/* Shim Exception Vector Base Address */
+	write_ctx_reg(get_sysregs_ctx(ctx), CTX_VBAR_EL1,
+			SPM_SHIM_EXCEPTIONS_PTR);
+
+	/*
+	 * FPEN: Allow the Secure Partition to access FP/SIMD registers.
+	 * Note that SPM will not do any saving/restoring of these registers on
+	 * behalf of the SP. This falls under the SP's responsibility.
+	 * TTA: Enable access to trace registers.
+	 * ZEN (v8.2): Trap SVE instructions and access to SVE registers.
+	 */
+	write_ctx_reg(get_sysregs_ctx(ctx), CTX_CPACR_EL1,
+			CPACR_EL1_FPEN(CPACR_EL1_FP_TRAP_NONE));
+
+	/*
+	 * Prepare shared buffers
+	 * ----------------------
+	 */
+
+	/* Initialize SPRT queues */
+	sprt_initialize_queues((void *)sp_ctx->spm_sp_buffer_base,
+			       sp_ctx->spm_sp_buffer_size);
+}
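spm_sp_setup() above expects the platform port to provide two implementation-defined cookie values, which reach the partition in x2 and x3 on first entry. A hypothetical platform_def.h fragment (the macro names are the ones used above; the values are placeholders only):

/* Placeholder cookie values; a real platform defines whatever its SP expects. */
#define PLAT_SPM_COOKIE_0	ULL(0)
#define PLAT_SPM_COOKIE_1	ULL(0)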
diff --git a/services/std_svc/spm/spm_xlat.c b/services/std_svc/spm/spm_xlat.c
new file mode 100644
index 0000000..bbe392d
--- /dev/null
+++ b/services/std_svc/spm/spm_xlat.c
@@ -0,0 +1,312 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <errno.h>
+#include <object_pool.h>
+#include <platform_def.h>
+#include <platform.h>
+#include <sp_res_desc.h>
+#include <string.h>
+#include <utils.h>
+#include <utils_def.h>
+#include <xlat_tables_v2.h>
+
+#include "spm_private.h"
+#include "spm_shim_private.h"
+
+/*******************************************************************************
+ * Instantiation of translation table context
+ ******************************************************************************/
+
+/* Place translation tables by default along with the ones used by BL31. */
+#ifndef PLAT_SP_IMAGE_XLAT_SECTION_NAME
+#define PLAT_SP_IMAGE_XLAT_SECTION_NAME	"xlat_table"
+#endif
+
+/*
+ * Allocate elements of the translation contexts for the Secure Partitions.
+ */
+
+/* Allocate an array of mmap_region per partition. */
+static struct mmap_region sp_mmap_regions[PLAT_SP_IMAGE_MMAP_REGIONS + 1]
+	[PLAT_SPM_MAX_PARTITIONS];
+static OBJECT_POOL(sp_mmap_regions_pool, sp_mmap_regions,
+	sizeof(mmap_region_t) * (PLAT_SP_IMAGE_MMAP_REGIONS + 1),
+	PLAT_SPM_MAX_PARTITIONS);
+
+/* Allocate individual translation tables. */
+static uint64_t sp_xlat_tables[XLAT_TABLE_ENTRIES]
+	[(PLAT_SP_IMAGE_MAX_XLAT_TABLES + 1) * PLAT_SPM_MAX_PARTITIONS]
+	__aligned(XLAT_TABLE_SIZE) __section(PLAT_SP_IMAGE_XLAT_SECTION_NAME);
+static OBJECT_POOL(sp_xlat_tables_pool, sp_xlat_tables,
+	XLAT_TABLE_ENTRIES * sizeof(uint64_t),
+	(PLAT_SP_IMAGE_MAX_XLAT_TABLES + 1) * PLAT_SPM_MAX_PARTITIONS);
+
+/* Allocate base translation tables. */
+static uint64_t sp_xlat_base_tables
+	[GET_NUM_BASE_LEVEL_ENTRIES(PLAT_VIRT_ADDR_SPACE_SIZE)]
+	[PLAT_SPM_MAX_PARTITIONS]
+	__aligned(GET_NUM_BASE_LEVEL_ENTRIES(PLAT_VIRT_ADDR_SPACE_SIZE)
+		  * sizeof(uint64_t))
+	__section(PLAT_SP_IMAGE_XLAT_SECTION_NAME);
+static OBJECT_POOL(sp_xlat_base_tables_pool, sp_xlat_base_tables,
+	GET_NUM_BASE_LEVEL_ENTRIES(PLAT_VIRT_ADDR_SPACE_SIZE) * sizeof(uint64_t),
+	PLAT_SPM_MAX_PARTITIONS);
+
+/* Allocate arrays. */
+static int sp_xlat_mapped_regions[PLAT_SP_IMAGE_MAX_XLAT_TABLES]
+	[PLAT_SPM_MAX_PARTITIONS];
+static OBJECT_POOL(sp_xlat_mapped_regions_pool, sp_xlat_mapped_regions,
+	sizeof(int) * PLAT_SP_IMAGE_MAX_XLAT_TABLES, PLAT_SPM_MAX_PARTITIONS);
+
+/* Allocate individual contexts. */
+static xlat_ctx_t sp_xlat_ctx[PLAT_SPM_MAX_PARTITIONS];
+static OBJECT_POOL(sp_xlat_ctx_pool, sp_xlat_ctx, sizeof(xlat_ctx_t),
+	PLAT_SPM_MAX_PARTITIONS);
+
+/* Get handle of Secure Partition translation context */
+xlat_ctx_t *spm_sp_xlat_context_alloc(void)
+{
+	xlat_ctx_t *ctx = pool_alloc(&sp_xlat_ctx_pool);
+
+	struct mmap_region *mmap = pool_alloc(&sp_mmap_regions_pool);
+
+	uint64_t *base_table = pool_alloc(&sp_xlat_base_tables_pool);
+	uint64_t **tables = pool_alloc_n(&sp_xlat_tables_pool,
+					PLAT_SP_IMAGE_MAX_XLAT_TABLES);
+
+	int *mapped_regions = pool_alloc(&sp_xlat_mapped_regions_pool);
+
+	xlat_setup_dynamic_ctx(ctx, PLAT_PHY_ADDR_SPACE_SIZE - 1,
+			       PLAT_VIRT_ADDR_SPACE_SIZE - 1, mmap,
+			       PLAT_SP_IMAGE_MMAP_REGIONS, tables,
+			       PLAT_SP_IMAGE_MAX_XLAT_TABLES, base_table,
+			       EL1_EL0_REGIME, mapped_regions);
+
+	return ctx;
+}
+
+/*******************************************************************************
+ * Functions to allocate memory for regions.
+ ******************************************************************************/
+
+/*
+ * The region with base PLAT_SPM_HEAP_BASE and size PLAT_SPM_HEAP_SIZE is
+ * reserved for SPM to use as heap to allocate memory regions of Secure
+ * Partitions. This is only done at boot.
+ */
+static OBJECT_POOL(spm_heap_mem, (void *)PLAT_SPM_HEAP_BASE, 1U,
+		   PLAT_SPM_HEAP_SIZE);
+
+static uintptr_t spm_alloc_heap(size_t size)
+{
+	return (uintptr_t)pool_alloc_n(&spm_heap_mem, size);
+}
+
+/*******************************************************************************
+ * Functions to map memory regions described in the resource description.
+ ******************************************************************************/
+static unsigned int rdmem_attr_to_mmap_attr(uint32_t attr)
+{
+	unsigned int index = attr & RD_MEM_MASK;
+
+	const unsigned int mmap_attr_arr[8] = {
+		MT_DEVICE | MT_RW | MT_SECURE,	/* RD_MEM_DEVICE */
+		MT_CODE | MT_SECURE,		/* RD_MEM_NORMAL_CODE */
+		MT_MEMORY | MT_RW | MT_SECURE,	/* RD_MEM_NORMAL_DATA */
+		MT_MEMORY | MT_RW | MT_SECURE,	/* RD_MEM_NORMAL_BSS */
+		MT_RO_DATA | MT_SECURE,		/* RD_MEM_NORMAL_RODATA */
+		MT_MEMORY | MT_RW | MT_SECURE,	/* RD_MEM_NORMAL_SPM_SP_SHARED_MEM */
+		MT_MEMORY | MT_RW | MT_SECURE,	/* RD_MEM_NORMAL_CLIENT_SHARED_MEM */
+		MT_MEMORY | MT_RW | MT_SECURE	/* RD_MEM_NORMAL_MISCELLANEOUS */
+	};
+
+	if (index >= ARRAY_SIZE(mmap_attr_arr)) {
+		ERROR("Unsupported RD memory attributes 0x%x\n", attr);
+		panic();
+	}
+
+	return mmap_attr_arr[index];
+}
+
+/*
+ * The data provided in the resource description structure is not directly
+ * compatible with a mmap_region structure. This function converts the region
+ * description and maps the resulting region in the Secure Partition context.
+ */
+static void map_rdmem(sp_context_t *sp_ctx, struct sp_rd_sect_mem_region *rdmem)
+{
+	int rc;
+	mmap_region_t mmap;
+
+	/* Location of the SP image */
+	uintptr_t sp_size = sp_ctx->image_size;
+	uintptr_t sp_base_va = sp_ctx->rd.attribute.load_address;
+	unsigned long long sp_base_pa = sp_ctx->image_base;
+
+	/* Location of the memory region to map */
+	size_t rd_size = rdmem->size;
+	uintptr_t rd_base_va = rdmem->base;
+	unsigned long long rd_base_pa;
+
+	unsigned int memtype = rdmem->attr & RD_MEM_MASK;
+
+	VERBOSE("Adding memory region '%s'\n", rdmem->name);
+
+	mmap.granularity = REGION_DEFAULT_GRANULARITY;
+
+	/* Check if the RD region is inside of the SP image or not */
+	int is_outside = (rd_base_va + rd_size <= sp_base_va) ||
+			 (sp_base_va + sp_size <= rd_base_va);
+
+	/* Set to 1 if it is needed to zero this region */
+	int zero_region = 0;
+
+	switch (memtype) {
+	case RD_MEM_DEVICE:
+		/* Device regions are mapped 1:1 */
+		rd_base_pa = rd_base_va;
+		break;
+
+	case RD_MEM_NORMAL_CODE:
+	case RD_MEM_NORMAL_RODATA:
+	{
+		if (is_outside == 1) {
+			ERROR("Code and rodata sections must be fully contained in the image.\n");
+			panic();
+		}
+
+		/* Get offset into the image */
+		rd_base_pa = sp_base_pa + rd_base_va - sp_base_va;
+		break;
+	}
+	case RD_MEM_NORMAL_DATA:
+	{
+		if (is_outside == 1) {
+			ERROR("Data sections must be fully contained in the image.\n");
+			panic();
+		}
+
+		rd_base_pa = spm_alloc_heap(rd_size);
+
+		/* Get offset into the image */
+		void *img_pa = (void *)(sp_base_pa + rd_base_va - sp_base_va);
+
+		VERBOSE("  Copying data from %p to 0x%llx\n", img_pa, rd_base_pa);
+
+		/* Map destination */
+		rc = mmap_add_dynamic_region(rd_base_pa, rd_base_pa,
+				rd_size, MT_MEMORY | MT_RW | MT_SECURE);
+		if (rc != 0) {
+			ERROR("Unable to map data region at EL3: %d\n", rc);
+			panic();
+		}
+
+		/* Copy original data to destination */
+		memcpy((void *)rd_base_pa, img_pa, rd_size);
+
+		/* Unmap destination region */
+		rc = mmap_remove_dynamic_region(rd_base_pa, rd_size);
+		if (rc != 0) {
+			ERROR("Unable to remove data region at EL3: %d\n", rc);
+			panic();
+		}
+
+		break;
+	}
+	case RD_MEM_NORMAL_MISCELLANEOUS:
+		/* Allow SPM to change the attributes of the region. */
+		mmap.granularity = PAGE_SIZE;
+		rd_base_pa = spm_alloc_heap(rd_size);
+		zero_region = 1;
+		break;
+
+	case RD_MEM_NORMAL_SPM_SP_SHARED_MEM:
+		if ((sp_ctx->spm_sp_buffer_base != 0) ||
+		    (sp_ctx->spm_sp_buffer_size != 0)) {
+			ERROR("A partition must have only one SPM<->SP buffer.\n");
+			panic();
+		}
+		rd_base_pa = spm_alloc_heap(rd_size);
+		zero_region = 1;
+		/* Save location of this buffer, it is needed by SPM */
+		sp_ctx->spm_sp_buffer_base = rd_base_pa;
+		sp_ctx->spm_sp_buffer_size = rd_size;
+		break;
+
+	case RD_MEM_NORMAL_CLIENT_SHARED_MEM:
+		/* Fallthrough */
+	case RD_MEM_NORMAL_BSS:
+		rd_base_pa = spm_alloc_heap(rd_size);
+		zero_region = 1;
+		break;
+
+	default:
+		panic();
+	}
+
+	mmap.base_pa = rd_base_pa;
+	mmap.base_va = rd_base_va;
+	mmap.size = rd_size;
+
+	/* Only S-EL0 mappings supported for now */
+	mmap.attr = rdmem_attr_to_mmap_attr(rdmem->attr) | MT_USER;
+
+	VERBOSE("  VA: 0x%lx PA: 0x%llx (0x%lx, attr: 0x%x)\n",
+		mmap.base_va, mmap.base_pa, mmap.size, mmap.attr);
+
+	/* Map region in the context of the Secure Partition */
+	mmap_add_region_ctx(sp_ctx->xlat_ctx_handle, &mmap);
+
+	if (zero_region == 1) {
+		VERBOSE("  Zeroing region...\n");
+
+		rc = mmap_add_dynamic_region(mmap.base_pa, mmap.base_pa,
+				mmap.size, MT_MEMORY | MT_RW | MT_SECURE);
+		if (rc != 0) {
+			ERROR("Unable to map memory at EL3 to zero: %d\n",
+			      rc);
+			panic();
+		}
+
+		zeromem((void *)mmap.base_pa, mmap.size);
+
+		/*
+		 * Unmap the destination region unless it is the SPM<->SP
+		 * buffer, which SPM itself still needs mapped.
+		 */
+		if (memtype != RD_MEM_NORMAL_SPM_SP_SHARED_MEM) {
+			rc = mmap_remove_dynamic_region(rd_base_pa, rd_size);
+			if (rc != 0) {
+				ERROR("Unable to remove region at EL3: %d\n", rc);
+				panic();
+			}
+		}
+	}
+}
+
+void sp_map_memory_regions(sp_context_t *sp_ctx)
+{
+	/* This region contains the exception vectors used at S-EL1. */
+	const mmap_region_t sel1_exception_vectors =
+		MAP_REGION_FLAT(SPM_SHIM_EXCEPTIONS_START,
+				SPM_SHIM_EXCEPTIONS_SIZE,
+				MT_CODE | MT_SECURE | MT_PRIVILEGED);
+
+	mmap_add_region_ctx(sp_ctx->xlat_ctx_handle,
+			    &sel1_exception_vectors);
+
+	struct sp_rd_sect_mem_region *rdmem;
+
+	for (rdmem = sp_ctx->rd.mem_region; rdmem != NULL; rdmem = rdmem->next) {
+		map_rdmem(sp_ctx, rdmem);
+	}
+
+	init_xlat_tables_ctx(sp_ctx->xlat_ctx_handle);
+}
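The spm_heap_mem pool above is declared with an element size of one byte, so the pool_alloc_n() call in spm_alloc_heap() effectively acts as a bump allocator over [PLAT_SPM_HEAP_BASE, PLAT_SPM_HEAP_BASE + PLAT_SPM_HEAP_SIZE). A stand-alone sketch of that allocation pattern, illustrative only and not the object_pool implementation itself:

#include <stddef.h>
#include <stdint.h>

struct byte_pool {
	uintptr_t base;		/* Start of the reserved heap region. */
	size_t size;		/* Total bytes available in the region. */
	size_t used;		/* Bytes handed out so far, never freed. */
};

static uintptr_t byte_pool_alloc(struct byte_pool *p, size_t nbytes)
{
	if (nbytes > (p->size - p->used))
		return 0;	/* Out of space. */

	uintptr_t ret = p->base + p->used;

	p->used += nbytes;

	return ret;
}

Because nothing is ever freed, the regions handed out to Secure Partitions stay valid for the lifetime of the system, which matches the boot-time-only use noted in the comment above spm_heap_mem.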
diff --git a/services/std_svc/spm/sprt.c b/services/std_svc/spm/sprt.c
new file mode 100644
index 0000000..034dced
--- /dev/null
+++ b/services/std_svc/spm/sprt.c
@@ -0,0 +1,215 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <context_mgmt.h>
+#include <debug.h>
+#include <errno.h>
+#include <limits.h>
+#include <platform.h>
+#include <smccc.h>
+#include <smccc_helpers.h>
+#include <sprt_svc.h>
+#include <utils.h>
+
+#include "spm_private.h"
+
+/*******************************************************************************
+ * Functions to manipulate memory regions
+ ******************************************************************************/
+
+/*
+ * Attributes are encoded using a different format in the SMC interface than in
+ * the Trusted Firmware, where the mmap_attr_t enum type is used. This function
+ * converts an attributes value from the SMC format to the mmap_attr_t format by
+ * setting MT_RW/MT_RO, MT_USER/MT_PRIVILEGED and MT_EXECUTE/MT_EXECUTE_NEVER.
+ * The other fields are left as 0 because they are ignored by the function
+ * xlat_change_mem_attributes_ctx().
+ */
+static unsigned int smc_attr_to_mmap_attr(unsigned int attributes)
+{
+	unsigned int perm = attributes & SPRT_MEMORY_PERM_ATTR_MASK;
+
+	if (perm == SPRT_MEMORY_PERM_ATTR_RW) {
+		return MT_RW | MT_EXECUTE_NEVER | MT_USER;
+	} else if (perm ==  SPRT_MEMORY_PERM_ATTR_RO) {
+		return MT_RO | MT_EXECUTE_NEVER | MT_USER;
+	} else if (perm == SPRT_MEMORY_PERM_ATTR_RO_EXEC) {
+		return MT_RO | MT_USER;
+	} else {
+		return UINT_MAX;
+	}
+}
+
+/*
+ * This function converts attributes from the Trusted Firmware format into the
+ * SMC interface format.
+ */
+static unsigned int mmap_attr_to_smc_attr(unsigned int attr)
+{
+	unsigned int perm;
+
+	/* No access from EL0. */
+	if ((attr & MT_USER) == 0U)
+		return UINT_MAX;
+
+	if ((attr & MT_RW) != 0) {
+		assert(MT_TYPE(attr) != MT_DEVICE);
+		perm = SPRT_MEMORY_PERM_ATTR_RW;
+	} else {
+		if ((attr & MT_EXECUTE_NEVER) != 0U) {
+			perm = SPRT_MEMORY_PERM_ATTR_RO;
+		} else {
+			perm = SPRT_MEMORY_PERM_ATTR_RO_EXEC;
+		}
+	}
+
+	return perm << SPRT_MEMORY_PERM_ATTR_SHIFT;
+}
+
+static int32_t sprt_memory_perm_attr_get(sp_context_t *sp_ctx, uintptr_t base_va)
+{
+	uint32_t attributes;
+
+	spin_lock(&(sp_ctx->xlat_ctx_lock));
+
+	int ret = xlat_get_mem_attributes_ctx(sp_ctx->xlat_ctx_handle,
+				     base_va, &attributes);
+
+	spin_unlock(&(sp_ctx->xlat_ctx_lock));
+
+	/* Convert xlat_get_mem_attributes_ctx() error codes to SPRT codes. */
+	assert((ret == 0) || (ret == -EINVAL));
+
+	if (ret != 0)
+		return SPRT_INVALID_PARAMETER;
+
+	unsigned int perm = mmap_attr_to_smc_attr(attributes);
+
+	if (perm == UINT_MAX)
+		return SPRT_INVALID_PARAMETER;
+
+	return SPRT_SUCCESS | perm;
+}
+
+static int32_t sprt_memory_perm_attr_set(sp_context_t *sp_ctx,
+		u_register_t page_address, u_register_t pages_count,
+		u_register_t smc_attributes)
+{
+	int ret;
+	uintptr_t base_va = (uintptr_t) page_address;
+	size_t size = pages_count * PAGE_SIZE;
+
+	VERBOSE("  Start address  : 0x%lx\n", base_va);
+	VERBOSE("  Number of pages: %i (%zu bytes)\n", (int) pages_count, size);
+	VERBOSE("  Attributes     : 0x%lx\n", smc_attributes);
+
+	uint32_t mmap_attr = smc_attr_to_mmap_attr(smc_attributes);
+
+	if (mmap_attr == UINT_MAX) {
+		WARN("%s: Invalid memory attributes: 0x%lx\n", __func__,
+		     smc_attributes);
+		return SPRT_INVALID_PARAMETER;
+	}
+
+	/*
+	 * Perform some checks before actually trying to change the memory
+	 * attributes.
+	 */
+
+	spin_lock(&(sp_ctx->xlat_ctx_lock));
+
+	uint32_t attributes;
+
+	ret = xlat_get_mem_attributes_ctx(sp_ctx->xlat_ctx_handle,
+				     base_va, &attributes);
+
+	if (ret != 0) {
+		spin_unlock(&(sp_ctx->xlat_ctx_lock));
+		return SPRT_INVALID_PARAMETER;
+	}
+
+	if ((attributes & MT_USER) == 0U) {
+		/* Prohibit changing attributes of S-EL1 regions */
+		spin_unlock(&(sp_ctx->xlat_ctx_lock));
+		return SPRT_INVALID_PARAMETER;
+	}
+
+	ret = xlat_change_mem_attributes_ctx(sp_ctx->xlat_ctx_handle,
+					base_va, size, mmap_attr);
+
+	spin_unlock(&(sp_ctx->xlat_ctx_lock));
+
+	/* Convert xlat_change_mem_attributes_ctx() error codes to SPRT codes. */
+	assert((ret == 0) || (ret == -EINVAL));
+
+	return (ret == 0) ? SPRT_SUCCESS : SPRT_INVALID_PARAMETER;
+}
+
+/*******************************************************************************
+ * This function handles all SMCs in the range reserved for SPRT.
+ ******************************************************************************/
+uint64_t sprt_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
+			  uint64_t x3, uint64_t x4, void *cookie, void *handle,
+			  uint64_t flags)
+{
+	/* SPRT only supported from the Secure world */
+	if (is_caller_non_secure(flags) == SMC_FROM_NON_SECURE) {
+		SMC_RET1(handle, SMC_UNK);
+	}
+
+	assert(handle == cm_get_context(SECURE));
+
+	/*
+	 * Only S-EL0 partitions are supported for now. Make the next ERET into
+	 * the partition jump directly to S-EL0 instead of S-EL1.
+	 */
+	cm_set_elr_spsr_el3(SECURE, read_elr_el1(), read_spsr_el1());
+
+	switch (smc_fid) {
+	case SPRT_VERSION:
+		SMC_RET1(handle, SPRT_VERSION_COMPILED);
+
+	case SPRT_PUT_RESPONSE_AARCH64:
+		/*
+		 * Registers x1-x3 aren't saved by default to the context,
+		 * but they are needed after spm_sp_synchronous_exit() because
+		 * they hold return values.
+		 */
+		SMC_SET_GP(handle, CTX_GPREG_X1, x1);
+		SMC_SET_GP(handle, CTX_GPREG_X2, x2);
+		SMC_SET_GP(handle, CTX_GPREG_X3, x3);
+		spm_sp_synchronous_exit(SPRT_PUT_RESPONSE_AARCH64);
+
+	case SPRT_YIELD_AARCH64:
+		spm_sp_synchronous_exit(SPRT_YIELD_AARCH64);
+
+	case SPRT_MEMORY_PERM_ATTR_GET_AARCH64:
+	{
+		/* Get context of the SP in use by this CPU. */
+		unsigned int linear_id = plat_my_core_pos();
+		sp_context_t *sp_ctx = spm_cpu_get_sp_ctx(linear_id);
+
+		SMC_RET1(handle, sprt_memory_perm_attr_get(sp_ctx, x1));
+	}
+
+	case SPRT_MEMORY_PERM_ATTR_SET_AARCH64:
+	{
+		/* Get context of the SP in use by this CPU. */
+		unsigned int linear_id = plat_my_core_pos();
+		sp_context_t *sp_ctx = spm_cpu_get_sp_ctx(linear_id);
+
+		SMC_RET1(handle, sprt_memory_perm_attr_set(sp_ctx, x1, x2, x3));
+	}
+
+	default:
+		break;
+	}
+
+	WARN("SPRT: Unsupported call 0x%08x\n", smc_fid);
+	SMC_RET1(handle, SPRT_NOT_SUPPORTED);
+}
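sprt_memory_perm_attr_get() above returns either an error code or SPRT_SUCCESS OR-ed with the permission shifted by SPRT_MEMORY_PERM_ATTR_SHIFT. A hedged sketch of how a caller might unpack that value, assuming SPRT_SUCCESS is zero and error codes are negative (neither definition appears in this file):

/* Sketch only: unpack the SPRT_MEMORY_PERM_ATTR_GET_AARCH64 return value. */
static int decode_mem_perm(int64_t ret, unsigned int *perm)
{
	if (ret < 0)
		return -1;	/* e.g. SPRT_INVALID_PARAMETER */

	/*
	 * The handler composes
	 * SPRT_SUCCESS | (perm << SPRT_MEMORY_PERM_ATTR_SHIFT), where perm is
	 * one of SPRT_MEMORY_PERM_ATTR_{RW,RO,RO_EXEC}.
	 */
	*perm = (unsigned int)(ret >> SPRT_MEMORY_PERM_ATTR_SHIFT);

	return 0;
}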
diff --git a/services/std_svc/spm_deprecated/aarch64/spm_helpers.S b/services/std_svc/spm_deprecated/aarch64/spm_helpers.S
new file mode 100644
index 0000000..aa35811
--- /dev/null
+++ b/services/std_svc/spm_deprecated/aarch64/spm_helpers.S
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include "../spm_private.h"
+
+	.global spm_secure_partition_enter
+	.global spm_secure_partition_exit
+
+	/* ---------------------------------------------------------------------
+	 * This function is called with SP_EL0 as stack. Here we stash our EL3
+	 * callee-saved registers onto the stack as part of saving the C
+	 * runtime, then enter the Secure Partition.
+	 * 'x0' contains a pointer to the memory where the address of the C
+	 *  runtime context is to be saved.
+	 * ---------------------------------------------------------------------
+	 */
+func spm_secure_partition_enter
+	/* Make space for the registers that we're going to save */
+	mov	x3, sp
+	str	x3, [x0, #0]
+	sub	sp, sp, #SP_C_RT_CTX_SIZE
+
+	/* Save callee-saved registers on to the stack */
+	stp	x19, x20, [sp, #SP_C_RT_CTX_X19]
+	stp	x21, x22, [sp, #SP_C_RT_CTX_X21]
+	stp	x23, x24, [sp, #SP_C_RT_CTX_X23]
+	stp	x25, x26, [sp, #SP_C_RT_CTX_X25]
+	stp	x27, x28, [sp, #SP_C_RT_CTX_X27]
+	stp	x29, x30, [sp, #SP_C_RT_CTX_X29]
+
+	/* ---------------------------------------------------------------------
+	 * Everything is setup now. el3_exit() will use the secure context to
+	 * restore to the general purpose and EL3 system registers to ERET
+	 * into the secure payload.
+	 * ---------------------------------------------------------------------
+	 */
+	b	el3_exit
+endfunc spm_secure_partition_enter
+
+	/* ---------------------------------------------------------------------
+	 * This function is called with 'x0' pointing to a C runtime context
+	 * saved in spm_secure_partition_enter().
+	 * It restores the saved registers and jumps to that runtime with 'x0'
+	 * as the new SP register. This destroys the C runtime context that had
+	 * been built on the stack below the saved context by the caller. The
+	 * second parameter 'x1' is then passed back to the caller as the
+	 * return value.
+	 * ---------------------------------------------------------------------
+	 */
+func spm_secure_partition_exit
+	/* Restore the previous stack */
+	mov	sp, x0
+
+	/* Restore callee-saved registers from the stack */
+	ldp	x19, x20, [x0, #(SP_C_RT_CTX_X19 - SP_C_RT_CTX_SIZE)]
+	ldp	x21, x22, [x0, #(SP_C_RT_CTX_X21 - SP_C_RT_CTX_SIZE)]
+	ldp	x23, x24, [x0, #(SP_C_RT_CTX_X23 - SP_C_RT_CTX_SIZE)]
+	ldp	x25, x26, [x0, #(SP_C_RT_CTX_X25 - SP_C_RT_CTX_SIZE)]
+	ldp	x27, x28, [x0, #(SP_C_RT_CTX_X27 - SP_C_RT_CTX_SIZE)]
+	ldp	x29, x30, [x0, #(SP_C_RT_CTX_X29 - SP_C_RT_CTX_SIZE)]
+
+	/* ---------------------------------------------------------------------
+	 * This should take us back to the instruction after the call to the
+	 * last spm_secure_partition_enter(). Place the second parameter in x0
+	 * so that the caller sees it as the return value of the original
+	 * entry call.
+	 * ---------------------------------------------------------------------
+	 */
+	mov	x0, x1
+	ret
+endfunc spm_secure_partition_exit
diff --git a/services/std_svc/spm_deprecated/aarch64/spm_shim_exceptions.S b/services/std_svc/spm_deprecated/aarch64/spm_shim_exceptions.S
new file mode 100644
index 0000000..9c218df
--- /dev/null
+++ b/services/std_svc/spm_deprecated/aarch64/spm_shim_exceptions.S
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <bl_common.h>
+#include <context.h>
+
+/* -----------------------------------------------------------------------------
+ * Very simple stackless exception handlers used by the spm shim layer.
+ * -----------------------------------------------------------------------------
+ */
+	.globl	spm_shim_exceptions_ptr
+
+vector_base spm_shim_exceptions_ptr, .spm_shim_exceptions
+
+	/* -----------------------------------------------------
+	 * Current EL with SP0 : 0x0 - 0x200
+	 * -----------------------------------------------------
+	 */
+vector_entry SynchronousExceptionSP0, .spm_shim_exceptions
+	b	.
+end_vector_entry SynchronousExceptionSP0
+
+vector_entry IrqSP0, .spm_shim_exceptions
+	b	.
+end_vector_entry IrqSP0
+
+vector_entry FiqSP0, .spm_shim_exceptions
+	b	.
+end_vector_entry FiqSP0
+
+vector_entry SErrorSP0, .spm_shim_exceptions
+	b	.
+end_vector_entry SErrorSP0
+
+	/* -----------------------------------------------------
+	 * Current EL with SPx: 0x200 - 0x400
+	 * -----------------------------------------------------
+	 */
+vector_entry SynchronousExceptionSPx, .spm_shim_exceptions
+	b	.
+end_vector_entry SynchronousExceptionSPx
+
+vector_entry IrqSPx, .spm_shim_exceptions
+	b	.
+end_vector_entry IrqSPx
+
+vector_entry FiqSPx, .spm_shim_exceptions
+	b	.
+end_vector_entry FiqSPx
+
+vector_entry SErrorSPx, .spm_shim_exceptions
+	b	.
+end_vector_entry SErrorSPx
+
+	/* -----------------------------------------------------
+	 * Lower EL using AArch64 : 0x400 - 0x600. Only synchronous
+	 * exceptions taken from the Secure Partition at S-EL0 are
+	 * handled (SVCs and system register traps); other types spin.
+	 * -----------------------------------------------------
+	 */
+vector_entry SynchronousExceptionA64, .spm_shim_exceptions
+	msr	tpidr_el1, x30
+	mrs	x30, esr_el1
+	ubfx	x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH
+
+	cmp	x30, #EC_AARCH64_SVC
+	b.eq 	do_smc
+
+	cmp	x30, #EC_AARCH32_SVC
+	b.eq	do_smc
+
+	cmp	x30, #EC_AARCH64_SYS
+	b.eq	handle_sys_trap
+
+	/* Fail in all the other cases */
+	b	panic
+
+	/* ---------------------------------------------
+	 * Forward the SP's SVC to SPM as an SMC
+	 * ---------------------------------------------
+	 */
+do_smc:
+	mrs	x30, tpidr_el1
+	smc	#0
+	eret
+
+	/* AArch64 system instructions trap are handled as a panic for now */
+handle_sys_trap:
+panic:
+	b	panic
+end_vector_entry SynchronousExceptionA64
+
+vector_entry IrqA64, .spm_shim_exceptions
+	b	.
+end_vector_entry IrqA64
+
+vector_entry FiqA64, .spm_shim_exceptions
+	b	.
+end_vector_entry FiqA64
+
+vector_entry SErrorA64, .spm_shim_exceptions
+	b	.
+end_vector_entry SErrorA64
+
+	/* -----------------------------------------------------
+	 * Lower EL using AArch32 : 0x600 - 0x800
+	 * -----------------------------------------------------
+	 */
+vector_entry SynchronousExceptionA32, .spm_shim_exceptions
+	b	.
+end_vector_entry SynchronousExceptionA32
+
+vector_entry IrqA32, .spm_shim_exceptions
+	b	.
+end_vector_entry IrqA32
+
+vector_entry FiqA32, .spm_shim_exceptions
+	b	.
+end_vector_entry FiqA32
+
+vector_entry SErrorA32, .spm_shim_exceptions
+	b	.
+end_vector_entry SErrorA32
diff --git a/services/std_svc/spm_deprecated/spm.mk b/services/std_svc/spm_deprecated/spm.mk
new file mode 100644
index 0000000..ed36812
--- /dev/null
+++ b/services/std_svc/spm_deprecated/spm.mk
@@ -0,0 +1,23 @@
+#
+# Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+ifneq (${SPD},none)
+        $(error "Error: SPD and SPM are incompatible build options.")
+endif
+ifneq (${ARCH},aarch64)
+        $(error "Error: SPM is only supported on aarch64.")
+endif
+
+SPM_SOURCES	:=	$(addprefix services/std_svc/spm_deprecated/, \
+			${ARCH}/spm_helpers.S			\
+			${ARCH}/spm_shim_exceptions.S		\
+			spm_main.c				\
+			spm_setup.c				\
+			spm_xlat.c)
+
+
+# Let the top-level Makefile know that we intend to include a BL32 image
+NEED_BL32		:=	yes
diff --git a/services/std_svc/spm_deprecated/spm_main.c b/services/std_svc/spm_deprecated/spm_main.c
new file mode 100644
index 0000000..880e86e
--- /dev/null
+++ b/services/std_svc/spm_deprecated/spm_main.c
@@ -0,0 +1,353 @@
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl31.h>
+#include <context_mgmt.h>
+#include <debug.h>
+#include <ehf.h>
+#include <errno.h>
+#include <mm_svc.h>
+#include <platform.h>
+#include <runtime_svc.h>
+#include <secure_partition.h>
+#include <smccc.h>
+#include <smccc_helpers.h>
+#include <spinlock.h>
+#include <spm_svc.h>
+#include <utils.h>
+#include <xlat_tables_v2.h>
+
+#include "spm_private.h"
+
+/*******************************************************************************
+ * Secure Partition context information.
+ ******************************************************************************/
+static sp_context_t sp_ctx;
+
+/*******************************************************************************
+ * Set state of a Secure Partition context.
+ ******************************************************************************/
+void sp_state_set(sp_context_t *sp_ptr, sp_state_t state)
+{
+	spin_lock(&(sp_ptr->state_lock));
+	sp_ptr->state = state;
+	spin_unlock(&(sp_ptr->state_lock));
+}
+
+/*******************************************************************************
+ * Wait until the state of a Secure Partition is the specified one and change it
+ * to the desired state.
+ ******************************************************************************/
+void sp_state_wait_switch(sp_context_t *sp_ptr, sp_state_t from, sp_state_t to)
+{
+	int success = 0;
+
+	while (success == 0) {
+		spin_lock(&(sp_ptr->state_lock));
+
+		if (sp_ptr->state == from) {
+			sp_ptr->state = to;
+
+			success = 1;
+		}
+
+		spin_unlock(&(sp_ptr->state_lock));
+	}
+}
+
+/*******************************************************************************
+ * Check if the state of a Secure Partition is the specified one and, if so,
+ * change it to the desired state. Returns 0 on success, -1 on error.
+ ******************************************************************************/
+int sp_state_try_switch(sp_context_t *sp_ptr, sp_state_t from, sp_state_t to)
+{
+	int ret = -1;
+
+	spin_lock(&(sp_ptr->state_lock));
+
+	if (sp_ptr->state == from) {
+		sp_ptr->state = to;
+
+		ret = 0;
+	}
+
+	spin_unlock(&(sp_ptr->state_lock));
+
+	return ret;
+}
+
+/*******************************************************************************
+ * This function takes an SP context pointer and performs a synchronous entry
+ * into it.
+ ******************************************************************************/
+static uint64_t spm_sp_synchronous_entry(sp_context_t *sp_ctx)
+{
+	uint64_t rc;
+
+	assert(sp_ctx != NULL);
+
+	/* Assign the context of the SP to this CPU */
+	cm_set_context(&(sp_ctx->cpu_ctx), SECURE);
+
+	/* Restore the context assigned above */
+	cm_el1_sysregs_context_restore(SECURE);
+	cm_set_next_eret_context(SECURE);
+
+	/* Invalidate TLBs at EL1. */
+	tlbivmalle1();
+	dsbish();
+
+	/* Enter Secure Partition */
+	rc = spm_secure_partition_enter(&sp_ctx->c_rt_ctx);
+
+	/* Save secure state */
+	cm_el1_sysregs_context_save(SECURE);
+
+	return rc;
+}
+
+/*******************************************************************************
+ * This function returns to the place where spm_sp_synchronous_entry() was
+ * called originally.
+ ******************************************************************************/
+__dead2 static void spm_sp_synchronous_exit(uint64_t rc)
+{
+	sp_context_t *ctx = &sp_ctx;
+
+	/*
+	 * The SPM must have initiated the original request through a
+	 * synchronous entry into the secure partition. Jump back to the
+	 * original C runtime context with the value of rc in x0.
+	 */
+	spm_secure_partition_exit(ctx->c_rt_ctx, rc);
+
+	panic();
+}
+
+/*******************************************************************************
+ * Jump to each Secure Partition for the first time.
+ ******************************************************************************/
+static int32_t spm_init(void)
+{
+	uint64_t rc;
+	sp_context_t *ctx;
+
+	INFO("Secure Partition init...\n");
+
+	ctx = &sp_ctx;
+
+	ctx->state = SP_STATE_RESET;
+
+	rc = spm_sp_synchronous_entry(ctx);
+	assert(rc == 0);
+
+	ctx->state = SP_STATE_IDLE;
+
+	INFO("Secure Partition initialized.\n");
+
+	return rc;
+}
+
+/*******************************************************************************
+ * Initialize contexts of all Secure Partitions.
+ ******************************************************************************/
+int32_t spm_setup(void)
+{
+	sp_context_t *ctx;
+
+	/* Disable MMU at EL1 (initialized by BL2) */
+	disable_mmu_icache_el1();
+
+	/* Initialize context of the SP */
+	INFO("Secure Partition context setup start...\n");
+
+	ctx = &sp_ctx;
+
+	/* Assign translation tables context. */
+	ctx->xlat_ctx_handle = spm_get_sp_xlat_context();
+
+	spm_sp_setup(ctx);
+
+	/* Register init function for deferred init.  */
+	bl31_register_bl32_init(&spm_init);
+
+	INFO("Secure Partition setup done.\n");
+
+	return 0;
+}
+
+/*******************************************************************************
+ * Function to perform a call to a Secure Partition.
+ ******************************************************************************/
+uint64_t spm_sp_call(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3)
+{
+	uint64_t rc;
+	sp_context_t *sp_ptr = &sp_ctx;
+
+	/* Wait until the Secure Partition is idle and set it to busy. */
+	sp_state_wait_switch(sp_ptr, SP_STATE_IDLE, SP_STATE_BUSY);
+
+	/* Set values for registers on SP entry */
+	cpu_context_t *cpu_ctx = &(sp_ptr->cpu_ctx);
+
+	write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X0, smc_fid);
+	write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X1, x1);
+	write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X2, x2);
+	write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X3, x3);
+
+	/* Jump to the Secure Partition. */
+	rc = spm_sp_synchronous_entry(sp_ptr);
+
+	/* Flag Secure Partition as idle. */
+	assert(sp_ptr->state == SP_STATE_BUSY);
+	sp_state_set(sp_ptr, SP_STATE_IDLE);
+
+	return rc;
+}
+
+/*******************************************************************************
+ * MM_COMMUNICATE handler
+ ******************************************************************************/
+static uint64_t mm_communicate(uint32_t smc_fid, uint64_t mm_cookie,
+			       uint64_t comm_buffer_address,
+			       uint64_t comm_size_address, void *handle)
+{
+	uint64_t rc;
+
+	/* Cookie. Reserved for future use. It must be zero. */
+	if (mm_cookie != 0U) {
+		ERROR("MM_COMMUNICATE: cookie is not zero\n");
+		SMC_RET1(handle, SPM_INVALID_PARAMETER);
+	}
+
+	if (comm_buffer_address == 0U) {
+		ERROR("MM_COMMUNICATE: comm_buffer_address is zero\n");
+		SMC_RET1(handle, SPM_INVALID_PARAMETER);
+	}
+
+	if (comm_size_address != 0U) {
+		VERBOSE("MM_COMMUNICATE: comm_size_address is not 0 as recommended.\n");
+	}
+
+	/*
+	 * The current secure partition design mandates
+	 * - at any point, only a single core can be
+	 *   executing in the secure partition.
+	 * - a core cannot be preempted by an interrupt
+	 *   while executing in the secure partition.
+	 * Raise the running priority of the core to the
+	 * interrupt level configured for secure partition
+	 * so as to block any interrupt from preempting this
+	 * core.
+	 */
+	ehf_activate_priority(PLAT_SP_PRI);
+
+	/* Save the Normal world context */
+	cm_el1_sysregs_context_save(NON_SECURE);
+
+	rc = spm_sp_call(smc_fid, comm_buffer_address, comm_size_address,
+			 plat_my_core_pos());
+
+	/* Restore non-secure state */
+	cm_el1_sysregs_context_restore(NON_SECURE);
+	cm_set_next_eret_context(NON_SECURE);
+
+	/*
+	 * Exited from secure partition. This core can take
+	 * interrupts now.
+	 */
+	ehf_deactivate_priority(PLAT_SP_PRI);
+
+	SMC_RET1(handle, rc);
+}
+
+/*******************************************************************************
+ * Secure Partition Manager SMC handler.
+ ******************************************************************************/
+uint64_t spm_smc_handler(uint32_t smc_fid,
+			 uint64_t x1,
+			 uint64_t x2,
+			 uint64_t x3,
+			 uint64_t x4,
+			 void *cookie,
+			 void *handle,
+			 uint64_t flags)
+{
+	unsigned int ns;
+
+	/* Determine which security state this SMC originated from */
+	ns = is_caller_non_secure(flags);
+
+	if (ns == SMC_FROM_SECURE) {
+
+		/* Handle SMCs from Secure world. */
+
+		assert(handle == cm_get_context(SECURE));
+
+		/* Make next ERET jump to S-EL0 instead of S-EL1. */
+		cm_set_elr_spsr_el3(SECURE, read_elr_el1(), read_spsr_el1());
+
+		switch (smc_fid) {
+
+		case SPM_VERSION_AARCH32:
+			SMC_RET1(handle, SPM_VERSION_COMPILED);
+
+		case SP_EVENT_COMPLETE_AARCH64:
+			spm_sp_synchronous_exit(x1);
+
+		case SP_MEMORY_ATTRIBUTES_GET_AARCH64:
+			INFO("Received SP_MEMORY_ATTRIBUTES_GET_AARCH64 SMC\n");
+
+			if (sp_ctx.state != SP_STATE_RESET) {
+				WARN("SP_MEMORY_ATTRIBUTES_GET_AARCH64 is available at boot time only\n");
+				SMC_RET1(handle, SPM_NOT_SUPPORTED);
+			}
+			SMC_RET1(handle,
+				 spm_memory_attributes_get_smc_handler(
+					 &sp_ctx, x1));
+
+		case SP_MEMORY_ATTRIBUTES_SET_AARCH64:
+			INFO("Received SP_MEMORY_ATTRIBUTES_SET_AARCH64 SMC\n");
+
+			if (sp_ctx.state != SP_STATE_RESET) {
+				WARN("SP_MEMORY_ATTRIBUTES_SET_AARCH64 is available at boot time only\n");
+				SMC_RET1(handle, SPM_NOT_SUPPORTED);
+			}
+			SMC_RET1(handle,
+				 spm_memory_attributes_set_smc_handler(
+					&sp_ctx, x1, x2, x3));
+		default:
+			break;
+		}
+	} else {
+
+		/* Handle SMCs from Non-secure world. */
+
+		assert(handle == cm_get_context(NON_SECURE));
+
+		switch (smc_fid) {
+
+		case MM_VERSION_AARCH32:
+			SMC_RET1(handle, MM_VERSION_COMPILED);
+
+		case MM_COMMUNICATE_AARCH32:
+		case MM_COMMUNICATE_AARCH64:
+			return mm_communicate(smc_fid, x1, x2, x3, handle);
+
+		case SP_MEMORY_ATTRIBUTES_GET_AARCH64:
+		case SP_MEMORY_ATTRIBUTES_SET_AARCH64:
+			/* SMC interfaces reserved for secure callers. */
+			SMC_RET1(handle, SPM_NOT_SUPPORTED);
+
+		default:
+			break;
+		}
+	}
+
+	SMC_RET1(handle, SMC_UNK);
+}
diff --git a/services/std_svc/spm_deprecated/spm_private.h b/services/std_svc/spm_deprecated/spm_private.h
new file mode 100644
index 0000000..ec3f48e
--- /dev/null
+++ b/services/std_svc/spm_deprecated/spm_private.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SPM_PRIVATE_H
+#define SPM_PRIVATE_H
+
+#include <context.h>
+
+/*******************************************************************************
+ * Constants that allow assembler code to preserve callee-saved registers of the
+ * C runtime context while performing a security state switch.
+ ******************************************************************************/
+#define SP_C_RT_CTX_X19		0x0
+#define SP_C_RT_CTX_X20		0x8
+#define SP_C_RT_CTX_X21		0x10
+#define SP_C_RT_CTX_X22		0x18
+#define SP_C_RT_CTX_X23		0x20
+#define SP_C_RT_CTX_X24		0x28
+#define SP_C_RT_CTX_X25		0x30
+#define SP_C_RT_CTX_X26		0x38
+#define SP_C_RT_CTX_X27		0x40
+#define SP_C_RT_CTX_X28		0x48
+#define SP_C_RT_CTX_X29		0x50
+#define SP_C_RT_CTX_X30		0x58
+
+#define SP_C_RT_CTX_SIZE	0x60
+#define SP_C_RT_CTX_ENTRIES	(SP_C_RT_CTX_SIZE >> DWORD_SHIFT)
+
+#ifndef __ASSEMBLY__
+
+#include <spinlock.h>
+#include <stdint.h>
+#include <xlat_tables_v2.h>
+
+typedef enum sp_state {
+	SP_STATE_RESET = 0,
+	SP_STATE_IDLE,
+	SP_STATE_BUSY
+} sp_state_t;
+
+typedef struct sp_context {
+	uint64_t c_rt_ctx;
+	cpu_context_t cpu_ctx;
+	xlat_ctx_t *xlat_ctx_handle;
+
+	sp_state_t state;
+	spinlock_t state_lock;
+} sp_context_t;
+
+/* Assembly helpers */
+uint64_t spm_secure_partition_enter(uint64_t *c_rt_ctx);
+void __dead2 spm_secure_partition_exit(uint64_t c_rt_ctx, uint64_t ret);
+
+void spm_sp_setup(sp_context_t *sp_ctx);
+
+xlat_ctx_t *spm_get_sp_xlat_context(void);
+
+int32_t spm_memory_attributes_get_smc_handler(sp_context_t *sp_ctx,
+					      uintptr_t base_va);
+int spm_memory_attributes_set_smc_handler(sp_context_t *sp_ctx,
+					  u_register_t page_address,
+					  u_register_t pages_count,
+					  u_register_t smc_attributes);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* SPM_PRIVATE_H */
diff --git a/services/std_svc/spm/sp_setup.c b/services/std_svc/spm_deprecated/spm_setup.c
similarity index 100%
rename from services/std_svc/spm/sp_setup.c
rename to services/std_svc/spm_deprecated/spm_setup.c
diff --git a/services/std_svc/spm_deprecated/spm_shim_private.h b/services/std_svc/spm_deprecated/spm_shim_private.h
new file mode 100644
index 0000000..f2a7e05
--- /dev/null
+++ b/services/std_svc/spm_deprecated/spm_shim_private.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SPM_SHIM_PRIVATE_H
+#define SPM_SHIM_PRIVATE_H
+
+#include <stdint.h>
+#include <utils_def.h>
+
+/* Assembly source */
+IMPORT_SYM(uintptr_t, spm_shim_exceptions_ptr,		SPM_SHIM_EXCEPTIONS_PTR);
+
+/* Linker symbols */
+IMPORT_SYM(uintptr_t, __SPM_SHIM_EXCEPTIONS_START__,	SPM_SHIM_EXCEPTIONS_START);
+IMPORT_SYM(uintptr_t, __SPM_SHIM_EXCEPTIONS_END__,	SPM_SHIM_EXCEPTIONS_END);
+
+/* Definitions */
+
+#define SPM_SHIM_EXCEPTIONS_SIZE	\
+	(SPM_SHIM_EXCEPTIONS_END - SPM_SHIM_EXCEPTIONS_START)
+
+#endif /* SPM_SHIM_PRIVATE_H */
diff --git a/services/std_svc/spm/sp_xlat.c b/services/std_svc/spm_deprecated/spm_xlat.c
similarity index 100%
rename from services/std_svc/spm/sp_xlat.c
rename to services/std_svc/spm_deprecated/spm_xlat.c
diff --git a/services/std_svc/std_svc_setup.c b/services/std_svc/std_svc_setup.c
index 1a81a0a..86ecdf7 100644
--- a/services/std_svc/std_svc_setup.c
+++ b/services/std_svc/std_svc_setup.c
@@ -102,7 +102,7 @@
 		SMC_RET1(handle, ret);
 	}
 
-#if ENABLE_SPM
+#if ENABLE_SPM && SPM_DEPRECATED
 	/*
 	 * Dispatch SPM calls to SPM SMC handler and return its return
 	 * value
diff --git a/tools/sptool/Makefile b/tools/sptool/Makefile
new file mode 100644
index 0000000..9325207
--- /dev/null
+++ b/tools/sptool/Makefile
@@ -0,0 +1,49 @@
+#
+# Copyright (c) 2018, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+MAKE_HELPERS_DIRECTORY := ../../make_helpers/
+include ${MAKE_HELPERS_DIRECTORY}build_macros.mk
+include ${MAKE_HELPERS_DIRECTORY}build_env.mk
+
+PROJECT := sptool${BIN_EXT}
+OBJECTS := sptool.o
+V ?= 0
+
+override CPPFLAGS += -D_GNU_SOURCE -D_XOPEN_SOURCE=700
+HOSTCCFLAGS := -Wall -Werror -pedantic -std=c99
+ifeq (${DEBUG},1)
+  HOSTCCFLAGS += -g -O0 -DDEBUG
+else
+  HOSTCCFLAGS += -O2
+endif
+
+ifeq (${V},0)
+  Q := @
+else
+  Q :=
+endif
+
+INCLUDE_PATHS := -I../../include/tools_share
+
+HOSTCC ?= gcc
+
+.PHONY: all clean distclean
+
+all: ${PROJECT}
+
+${PROJECT}: ${OBJECTS} Makefile
+	@echo "  HOSTLD  $@"
+	${Q}${HOSTCC} ${OBJECTS} -o $@ ${LDLIBS}
+	@${ECHO_BLANK_LINE}
+	@echo "Built $@ successfully"
+	@${ECHO_BLANK_LINE}
+
+%.o: %.c Makefile
+	@echo "  HOSTCC  $<"
+	${Q}${HOSTCC} -c ${CPPFLAGS} ${HOSTCCFLAGS} ${INCLUDE_PATHS} $< -o $@
+
+clean:
+	$(call SHELL_DELETE_ALL, ${PROJECT} ${OBJECTS})
diff --git a/tools/sptool/sptool.c b/tools/sptool/sptool.c
new file mode 100644
index 0000000..a33b664
--- /dev/null
+++ b/tools/sptool/sptool.c
@@ -0,0 +1,334 @@
+/*
+ * Copyright (c) 2018, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdarg.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include "sptool.h"
+
+#define PAGE_SIZE		4096
+
+/*
+ * Linked list of entries, one per Secure Partition (image plus resource
+ * description) in the package.
+ */
+struct sp_entry_info {
+	/* Location of the files in the host's RAM. */
+	void *sp_data, *rd_data;
+
+	/* Size of the files. */
+	uint64_t sp_size, rd_size;
+
+	/* Location of the binary files inside the package output file */
+	uint64_t sp_offset, rd_offset;
+
+	struct sp_entry_info *next;
+};
+
+static struct sp_entry_info *sp_info_head;
+
+static uint64_t sp_count;
+
+/* Align an address to a power-of-two boundary. */
+static unsigned int align_to(unsigned int address, unsigned int boundary)
+{
+	unsigned int mask = boundary - 1U;
+
+	if ((address & mask) != 0U)
+		return (address + boundary) & ~mask;
+	else
+		return address;
+}
+
+/* Allocate a memory area of 'size' bytes and zero it. */
+static void *xzalloc(size_t size, const char *msg)
+{
+	void *d;
+
+	d = malloc(size);
+	if (d == NULL) {
+		fprintf(stderr, "error: malloc: %s\n", msg);
+		exit(1);
+	}
+
+	memset(d, 0, size);
+
+	return d;
+}
+
+/*
+ * Write 'size' bytes from 'buf' into the specified file stream.
+ * Exit the program on error.
+ */
+static void xfwrite(void *buf, size_t size, FILE *fp)
+{
+	if (fwrite(buf, 1, size, fp) != size) {
+		fprintf(stderr, "error: Failed to write to output file.\n");
+		exit(1);
+	}
+}
+
+/*
+ * Set the file position indicator for the specified file stream.
+ * Exit the program on error.
+ */
+static void xfseek(FILE *fp, long offset, int whence)
+{
+	if (fseek(fp, offset, whence) != 0) {
+		fprintf(stderr, "error: Failed to set file to offset 0x%lx (%d).\n",
+		       offset, whence);
+		perror(NULL);
+		exit(1);
+	}
+}
+
+static void cleanup(void)
+{
+	struct sp_entry_info *sp = sp_info_head;
+
+	while (sp != NULL) {
+		struct sp_entry_info *next = sp->next;
+
+		if (sp->sp_data != NULL)
+			free(sp->sp_data);
+
+		if (sp->rd_data != NULL)
+			free(sp->rd_data);
+
+		free(sp);
+
+		sp = next;
+	}
+
+	sp_count = 0;
+	sp_info_head = NULL;
+}
+
+/*
+ * Allocate a buffer big enough to store the content of the specified file and
+ * load the file into it. Fill 'size' with the file size. Exit the program on
+ * error.
+ */
+static void load_file(const char *path, void **ptr, uint64_t *size)
+{
+	FILE *f = fopen(path, "rb");
+	if (f == NULL) {
+		fprintf(stderr, "error: %s couldn't be opened.\n", path);
+		exit(1);
+	}
+
+	xfseek(f, 0, SEEK_END);
+	*size = ftell(f);
+	if (*size == 0) {
+		fprintf(stderr, "error: Size of %s is 0\n", path);
+		exit(1);
+	}
+
+	rewind(f);
+
+	*ptr = malloc(*size);
+	if (*ptr == NULL) {
+		fprintf(stderr, "error: Not enough memory to load %s\n", path);
+		exit(1);
+	}
+
+	if (fread(*ptr, *size, 1, f) != 1) {
+		fprintf(stderr, "error: Couldn't read %s\n", path);
+		exit(1);
+	}
+
+	fclose(f);
+}
+
+static void load_sp_rd(char *path)
+{
+	char *split_mark = strstr(path, ":");
+
+	if (split_mark == NULL) {
+		fprintf(stderr, "error: -i expects <sp_path>:<rd_path>\n");
+		exit(1);
+	}
+
+	*split_mark = '\0';
+
+	char *sp_path = path;
+	char *rd_path = split_mark + 1;
+
+	struct sp_entry_info *sp;
+
+	if (sp_info_head == NULL) {
+		sp_info_head = xzalloc(sizeof(struct sp_entry_info),
+			"Failed to allocate sp_entry_info struct");
+
+		sp = sp_info_head;
+	} else {
+		sp = sp_info_head;
+
+		while (sp->next != NULL) {
+			sp = sp->next;
+		}
+
+		sp->next = xzalloc(sizeof(struct sp_entry_info),
+			"Failed to allocate sp_entry_info struct");
+
+		sp = sp->next;
+	}
+
+	load_file(sp_path, &sp->sp_data, &sp->sp_size);
+	printf("Loaded image file %s (%lu bytes)\n", sp_path, sp->sp_size);
+
+	load_file(rd_path, &sp->rd_data, &sp->rd_size);
+	printf("Loaded RD file %s (%lu bytes)\n", rd_path, sp->rd_size);
+
+	sp_count++;
+}
+
+static void output_write(const char *path)
+{
+	struct sp_entry_info *sp;
+
+	if (sp_count == 0) {
+		fprintf(stderr, "error: At least one SP must be provided.\n");
+		exit(1);
+	}
+
+	/* The layout of the structs is specified in the header file sptool.h */
+
+	printf("Writing %lu partitions to output file.\n", sp_count);
+
+	unsigned int header_size = (sizeof(struct sp_pkg_header) * 8)
+				 + (sizeof(struct sp_pkg_entry) * 8 * sp_count);
+
+	FILE *f = fopen(path, "wb");
+	if (f == NULL) {
+		fprintf(stderr, "error: Failed to open %s\n", path);
+		exit(1);
+	}
+
+	unsigned int file_ptr = align_to(header_size, PAGE_SIZE);
+
+	/* First, save all partition images aligned to page boundaries */
+
+	sp = sp_info_head;
+
+	for (uint64_t i = 0; i < sp_count; i++) {
+		xfseek(f, file_ptr, SEEK_SET);
+
+		printf("Writing image %lu to offset 0x%x (0x%lx bytes)\n",
+		       i, file_ptr, sp->sp_size);
+
+		sp->sp_offset = file_ptr;
+		xfwrite(sp->sp_data, sp->sp_size, f);
+		file_ptr = align_to(file_ptr + sp->sp_size, PAGE_SIZE);
+		sp = sp->next;
+	}
+
+	/* Now, save resource description blobs aligned to 8 bytes */
+
+	sp = sp_info_head;
+
+	for (uint64_t i = 0; i < sp_count; i++) {
+		xfseek(f, file_ptr, SEEK_SET);
+
+		printf("Writing RD blob %lu to offset 0x%x (0x%lx bytes)\n",
+		       i, file_ptr, sp->rd_size);
+
+		sp->rd_offset = file_ptr;
+		xfwrite(sp->rd_data, sp->rd_size, f);
+		file_ptr = align_to(file_ptr + sp->rd_size, 8);
+		sp = sp->next;
+	}
+
+	/* Finally, write header */
+
+	uint64_t version = 0x1;
+	uint64_t sp_num = sp_count;
+
+	xfseek(f, 0, SEEK_SET);
+
+	xfwrite(&version, sizeof(uint64_t), f);
+	xfwrite(&sp_num, sizeof(uint64_t), f);
+
+	sp = sp_info_head;
+
+	for (unsigned int i = 0; i < sp_count; i++) {
+
+		uint64_t sp_offset, sp_size, rd_offset, rd_size;
+
+		sp_offset = sp->sp_offset;
+		sp_size = align_to(sp->sp_size, PAGE_SIZE);
+		rd_offset = sp->rd_offset;
+		rd_size = sp->rd_size;
+
+		xfwrite(&sp_offset, sizeof(uint64_t), f);
+		xfwrite(&sp_size, sizeof(uint64_t), f);
+		xfwrite(&rd_offset, sizeof(uint64_t), f);
+		xfwrite(&rd_size, sizeof(uint64_t), f);
+
+		sp = sp->next;
+	}
+
+	/* All information has been written now */
+
+	fclose(f);
+}
+
+static void usage(void)
+{
+	printf("usage: sptool ");
+#ifdef VERSION
+	printf(VERSION);
+#else
+	/* If built from sptool directory, VERSION is not set. */
+	printf("version unknown");
+#endif
+	printf(" [<args>]\n\n");
+
+	printf("This tool takes several Secure Partition image files and their\n"
+	       "resource description blobs as input and generates a package\n"
+	       "file that contains them.\n\n");
+	printf("Commands supported:\n");
+	printf("  -o <path>            Set output file path.\n");
+	printf("  -i <sp_path:rd_path> Add Secure Partition image and Resource\n"
+	       "                       Description blob (specified in two paths\n"
+	       "                       separated by a colon).\n");
+	printf("  -h                   Show this message.\n");
+	exit(1);
+}
+
+int main(int argc, char *argv[])
+{
+	int ch;
+	const char *outname = NULL;
+
+	while ((ch = getopt(argc, argv, "hi:o:")) != -1) {
+		switch (ch) {
+		case 'i':
+			load_sp_rd(optarg);
+			break;
+		case 'o':
+			outname = optarg;
+			break;
+		case 'h':
+		default:
+			usage();
+		}
+	}
+
+	argc -= optind;
+	argv += optind;
+
+	if (outname == NULL) {
+		fprintf(stderr, "error: An output file path must be provided.\n\n");
+		usage();
+		return 1;
+	}
+
+	output_write(outname);
+
+	cleanup();
+
+	return 0;
+}
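The package layout that output_write() produces is a header of two 64-bit words (format version and partition count) followed by one four-word entry per partition: image offset, page-aligned image size, resource description offset and size, with the payloads stored at the recorded offsets. A hedged sketch of a reader for that layout, with the field order inferred from the writer above (sptool.h, not shown in this hunk, remains authoritative):

#include <stdint.h>
#include <stdio.h>

/* Sketch only: parse the header written by output_write(). */
static int read_sp_pkg_header(FILE *f)
{
	uint64_t version, sp_num;

	if ((fread(&version, sizeof(uint64_t), 1, f) != 1) ||
	    (fread(&sp_num, sizeof(uint64_t), 1, f) != 1))
		return -1;

	printf("Package version %llu, %llu partition(s)\n",
	       (unsigned long long)version, (unsigned long long)sp_num);

	for (uint64_t i = 0; i < sp_num; i++) {
		/* sp_offset, sp_size, rd_offset, rd_size */
		uint64_t entry[4];

		if (fread(entry, sizeof(uint64_t), 4, f) != 4)
			return -1;

		printf("  SP %llu: image 0x%llx (0x%llx bytes), RD 0x%llx (0x%llx bytes)\n",
		       (unsigned long long)i,
		       (unsigned long long)entry[0], (unsigned long long)entry[1],
		       (unsigned long long)entry[2], (unsigned long long)entry[3]);
	}

	return 0;
}

A typical invocation, per the usage text above, would be along the lines of 'sptool -i sp.bin:sp.rd -o sp_package.bin', where sp.bin and sp.rd are placeholder file names.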