Merge pull request #140 from athoelke/at/psci_smc_handler

PSCI SMC handler improvements
diff --git a/Makefile b/Makefile
index 22f1fd6..9131f1c 100644
--- a/Makefile
+++ b/Makefile
@@ -75,7 +75,6 @@
 				common/debug.c				\
 				lib/aarch64/cache_helpers.S		\
 				lib/aarch64/misc_helpers.S		\
-				lib/aarch64/tlb_helpers.S		\
 				lib/aarch64/xlat_helpers.c		\
 				lib/stdlib/std.c			\
 				lib/io_storage.c			\
diff --git a/bl31/aarch64/bl31_entrypoint.S b/bl31/aarch64/bl31_entrypoint.S
index 3c9042b..e4dfea4 100644
--- a/bl31/aarch64/bl31_entrypoint.S
+++ b/bl31/aarch64/bl31_entrypoint.S
@@ -72,11 +72,13 @@
 	isb
 
 	/* ---------------------------------------------
-	 * Set the exception vector to something sane.
+	 * Set the exception vector and zero tpidr_el3
+	 * until the crash reporting is set up
 	 * ---------------------------------------------
 	 */
-	adr	x1, early_exceptions
+	adr	x1, runtime_exceptions
 	msr	vbar_el3, x1
+	msr	tpidr_el3, xzr
 
 	/* ---------------------------------------------------------------------
 	 * The initial state of the Architectural feature trap register
@@ -131,6 +133,15 @@
 	bl	zeromem16
 
 	/* ---------------------------------------------
+	 * Initialise cpu_data and crash reporting
+	 * ---------------------------------------------
+	 */
+#if CRASH_REPORTING
+	bl	init_crash_reporting
+#endif
+	bl	init_cpu_data_ptr
+
+	/* ---------------------------------------------
 	 * Use SP_EL0 for the C runtime stack.
 	 * ---------------------------------------------
 	 */
diff --git a/bl31/aarch64/cpu_data.S b/bl31/aarch64/cpu_data.S
new file mode 100644
index 0000000..feb51d6
--- /dev/null
+++ b/bl31/aarch64/cpu_data.S
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm_macros.S>
+#include <cpu_data.h>
+
+.globl	init_cpu_data_ptr
+.globl	_cpu_data_by_mpidr
+.globl	_cpu_data_by_index
+
+/* -----------------------------------------------------------------
+ * void init_cpu_data_ptr(void)
+ *
+ * Initialise the TPIDR_EL3 register to refer to the cpu_data_t
+ * for the calling CPU. This must be called before get_cpu_data() is used.
+ *
+ * This can be called without a valid stack.
+ * clobbers: x0, x1, x9, x10
+ * -----------------------------------------------------------------
+ */
+func init_cpu_data_ptr
+	mov	x10, x30
+	mrs	x0, mpidr_el1
+	bl	_cpu_data_by_mpidr
+	msr	tpidr_el3, x0
+	ret	x10
+
+
+/* -----------------------------------------------------------------
+ * cpu_data_t *_cpu_data_by_mpidr(uint64_t mpidr)
+ *
+ * Return the cpu_data structure for the CPU with given MPIDR
+ *
+ * This can be called without a valid stack. It assumes that
+ * platform_get_core_pos() does not clobber register x9.
+ * clobbers: x0, x1, x9
+ * -----------------------------------------------------------------
+ */
+func _cpu_data_by_mpidr
+	mov	x9, x30
+	bl	platform_get_core_pos
+	mov	x30, x9
+	b	_cpu_data_by_index
+
+
+/* -----------------------------------------------------------------
+ * cpu_data_t *_cpu_data_by_index(uint32_t cpu_index)
+ *
+ * Return the cpu_data structure for the CPU with given linear index
+ *
+ * This can be called without a valid stack.
+ * clobbers: x0, x1
+ * -----------------------------------------------------------------
+ */
+func _cpu_data_by_index
+	adr	x1, percpu_data
+	add	x0, x1, x0, LSL #CPU_DATA_LOG2SIZE
+	ret
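
For reference, the lookup performed by _cpu_data_by_index above is a plain array index with a power-of-two element size (the CASSERTs added in bl31/cpu_data_array.c enforce this). A minimal C sketch of the same address arithmetic, using the names introduced by this patch:

#include <cpu_data.h>	/* cpu_data_t, CPU_DATA_LOG2SIZE */

/* Sketch only: the real routine stays in assembly so it can run without a
 * stack. Because sizeof(cpu_data_t) == (1 << CPU_DATA_LOG2SIZE), indexing
 * the array reduces to the single shift-and-add seen above. */
extern cpu_data_t percpu_data[];

static inline cpu_data_t *cpu_data_by_index_sketch(uint32_t cpu_index)
{
	return &percpu_data[cpu_index];
}
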
diff --git a/bl31/aarch64/crash_reporting.S b/bl31/aarch64/crash_reporting.S
index cb9110b..1118e96 100644
--- a/bl31/aarch64/crash_reporting.S
+++ b/bl31/aarch64/crash_reporting.S
@@ -30,12 +30,15 @@
 #include <arch.h>
 #include <asm_macros.S>
 #include <context.h>
+#include <cpu_data.h>
 #include <plat_macros.S>
+#include <platform_def.h>
 
-	.globl	get_crash_stack
 	.globl	dump_state_and_die
 	.globl	dump_intr_state_and_die
+	.globl  init_crash_reporting
 
+#if CRASH_REPORTING
 	/* ------------------------------------------------------
 	 * The below section deals with dumping the system state
 	 * when an unhandled exception is taken in EL3.
@@ -230,7 +233,7 @@
 	/* Check if tpidr is initialized */
 	cbz	x0, infinite_loop
 
-	ldr	x0, [x0, #PTR_CACHE_CRASH_STACK_OFFSET]
+	ldr	x0, [x0, #CPU_DATA_CRASH_STACK_OFFSET]
 	/* store the x30 and sp to stack */
 	str	x30, [x0, #-(REG_SIZE)]!
 	mov	x30, sp
@@ -264,28 +267,46 @@
 	print_el3_sys_regs
 	print_non_el3_sys_0_regs
 	print_non_el3_sys_1_regs
-	b	infinite_loop
 
-func infinite_loop
+#else	/* CRASH_REPORTING */
+
+func dump_state_and_die
+dump_intr_state_and_die:
+
+#endif	/* CRASH_REPORTING */
+
+infinite_loop:
 	b	infinite_loop
 
 
 #define PCPU_CRASH_STACK_SIZE	0x140
 
 	/* -----------------------------------------------------
-	 * void get_crash_stack (uint64_t mpidr) : This
-	 * function is used to allocate a small stack for
-	 * reporting unhandled exceptions
-	 * -----------------------------------------------------
-	 */
-func get_crash_stack
-	mov	x10, x30 // lr
-	get_mp_stack pcpu_crash_stack, PCPU_CRASH_STACK_SIZE
-	ret	x10
-
-	/* -----------------------------------------------------
 	 * Per-cpu crash stacks in normal memory.
 	 * -----------------------------------------------------
 	 */
 declare_stack pcpu_crash_stack, tzfw_normal_stacks, \
 		PCPU_CRASH_STACK_SIZE, PLATFORM_CORE_COUNT
+
+	/* -----------------------------------------------------
+	 * Provides each CPU with a small stack for reporting
+	 * unhandled exceptions, and stores the stack address
+	 * in cpu_data
+	 *
+	 * This can be called without a runtime stack
+	 * clobbers: x0 - x4
+	 * -----------------------------------------------------
+	 */
+func init_crash_reporting
+	mov	x4, x30
+	mov	x2, #0
+	adr	x3, pcpu_crash_stack
+init_crash_loop:
+	mov	x0, x2
+	bl	_cpu_data_by_index
+	add	x3, x3, #PCPU_CRASH_STACK_SIZE
+	str	x3, [x0, #CPU_DATA_CRASH_STACK_OFFSET]
+	add	x2, x2, #1
+	cmp	x2, #PLATFORM_CORE_COUNT
+	b.lo	init_crash_loop
+	ret	x4
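
The init_crash_reporting loop above is equivalent to the following C sketch (names are taken from this patch; the real code is assembly because it runs before a C runtime stack exists, and PCPU_CRASH_STACK_SIZE is local to crash_reporting.S):

#include <cpu_data.h>
#include <platform_def.h>	/* PLATFORM_CORE_COUNT */

extern uint64_t pcpu_crash_stack[];	/* assembly symbol; C declaration assumed */

/* Give each CPU the top of its slice of pcpu_crash_stack and record it in
 * that CPU's cpu_data, so the crash handler can reach it via tpidr_el3.
 * Stacks grow downwards, hence the upper end of each slice is stored. */
void init_crash_reporting_sketch(void)
{
	uint64_t stack_top = (uint64_t)pcpu_crash_stack;
	uint32_t ix;

	for (ix = 0; ix < PLATFORM_CORE_COUNT; ix++) {
		stack_top += PCPU_CRASH_STACK_SIZE;
		set_cpu_data_by_index(ix, crash_stack, stack_top);
	}
}
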
diff --git a/bl31/bl31.mk b/bl31/bl31.mk
index 8155f3d..5555c31 100644
--- a/bl31/bl31.mk
+++ b/bl31/bl31.mk
@@ -30,14 +30,15 @@
 
 BL31_SOURCES		+=	bl31/bl31_main.c				\
 				bl31/context_mgmt.c				\
+				bl31/cpu_data_array.c				\
 				bl31/runtime_svc.c				\
 				bl31/interrupt_mgmt.c				\
 				bl31/aarch64/bl31_arch_setup.c			\
 				bl31/aarch64/bl31_entrypoint.S			\
 				bl31/aarch64/context.S				\
+				bl31/aarch64/cpu_data.S				\
 				bl31/aarch64/runtime_exceptions.S		\
-				bl31/aarch64/crash_reporting.S	\
-				common/aarch64/early_exceptions.S		\
+				bl31/aarch64/crash_reporting.S			\
 				lib/aarch64/cpu_helpers.S			\
 				lib/locks/bakery/bakery_lock.c			\
 				lib/locks/exclusive/spinlock.S			\
@@ -59,3 +60,11 @@
 $(eval $(call assert_boolean,IMF_READ_INTERRUPT_ID))
 $(eval $(call add_define,IMF_READ_INTERRUPT_ID))
 
+# Flag used to indicate if crash reporting via console should be included
+# in BL3-1. This defaults to being present in DEBUG builds only
+ifndef CRASH_REPORTING
+CRASH_REPORTING		:=	$(DEBUG)
+endif
+
+$(eval $(call assert_boolean,CRASH_REPORTING))
+$(eval $(call add_define,CRASH_REPORTING))
diff --git a/bl31/bl31_main.c b/bl31/bl31_main.c
index f79a122..6f88e65 100644
--- a/bl31/bl31_main.c
+++ b/bl31/bl31_main.c
@@ -71,10 +71,6 @@
  ******************************************************************************/
 void bl31_main(void)
 {
-#if DEBUG
-	unsigned long mpidr = read_mpidr();
-#endif
-
 	/* Perform remaining generic architectural setup from EL3 */
 	bl31_arch_setup();
 
@@ -92,17 +88,7 @@
 	/* Clean caches before re-entering normal world */
 	dcsw_op_all(DCCSW);
 
-	/*
-	 * Use the more complex exception vectors now that context
-	 * management is setup. SP_EL3 should point to a 'cpu_context'
-	 * structure which has an exception stack allocated.  The PSCI
-	 * service should have set the context.
-	 */
-	assert(cm_get_context(mpidr, NON_SECURE));
-	cm_set_next_eret_context(NON_SECURE);
-	cm_init_pcpu_ptr_cache();
-	write_vbar_el3((uint64_t) runtime_exceptions);
-	isb();
+	/* By default run the non-secure BL3-3 image next */
 	next_image_type = NON_SECURE;
 
 	/*
@@ -195,7 +181,7 @@
 	 * Save the args generated in BL2 for the image in the right context
 	 * used on its entry
 	 */
-	ctx = cm_get_context(read_mpidr(), image_type);
+	ctx = cm_get_context(image_type);
 	gp_regs = get_gpregs_ctx(ctx);
 	memcpy(gp_regs, (void *)&next_image_info->args, sizeof(aapcs64_params_t));
 
diff --git a/bl31/context_mgmt.c b/bl31/context_mgmt.c
index b3dcf2d..67a6e03 100644
--- a/bl31/context_mgmt.c
+++ b/bl31/context_mgmt.c
@@ -35,24 +35,12 @@
 #include <bl31.h>
 #include <context.h>
 #include <context_mgmt.h>
+#include <cpu_data.h>
 #include <interrupt_mgmt.h>
 #include <platform.h>
 #include <platform_def.h>
 #include <runtime_svc.h>
 
-/*******************************************************************************
- * Data structure which holds the pointers to non-secure and secure security
- * state contexts for each cpu. It is aligned to the cache line boundary to
- * allow efficient concurrent manipulation of these pointers on different cpus
- ******************************************************************************/
-typedef struct {
-	void *ptr[2];
-} __aligned (CACHE_WRITEBACK_GRANULE) context_info_t;
-
-static context_info_t cm_context_info[PLATFORM_CORE_COUNT];
-
-/* The per_cpu_ptr_cache_t space allocation */
-static per_cpu_ptr_cache_t per_cpu_ptr_cache_space[PLATFORM_CORE_COUNT];
 
 /*******************************************************************************
  * Context management library initialisation routine. This library is used by
@@ -77,29 +65,25 @@
 
 /*******************************************************************************
  * This function returns a pointer to the most recent 'cpu_context' structure
- * that was set as the context for the specified security state. NULL is
- * returned if no such structure has been specified.
+ * for the CPU identified by MPIDR that was set as the context for the specified
+ * security state. NULL is returned if no such structure has been specified.
  ******************************************************************************/
-void *cm_get_context(uint64_t mpidr, uint32_t security_state)
+void *cm_get_context_by_mpidr(uint64_t mpidr, uint32_t security_state)
 {
-	uint32_t linear_id = platform_get_core_pos(mpidr);
-
 	assert(security_state <= NON_SECURE);
 
-	return cm_context_info[linear_id].ptr[security_state];
+	return get_cpu_data_by_mpidr(mpidr, cpu_context[security_state]);
 }
 
 /*******************************************************************************
  * This function sets the pointer to the current 'cpu_context' structure for the
- * specified security state.
+ * specified security state for the CPU identified by MPIDR
  ******************************************************************************/
-void cm_set_context(uint64_t mpidr, void *context, uint32_t security_state)
+void cm_set_context_by_mpidr(uint64_t mpidr, void *context, uint32_t security_state)
 {
-	uint32_t linear_id = platform_get_core_pos(mpidr);
-
 	assert(security_state <= NON_SECURE);
 
-	cm_context_info[linear_id].ptr[security_state] = context;
+	set_cpu_data_by_mpidr(mpidr, cpu_context[security_state], context);
 }
 
 /*******************************************************************************
@@ -111,7 +95,7 @@
 {
 	cpu_context_t *ctx;
 
-	ctx = cm_get_context(read_mpidr(), security_state);
+	ctx = cm_get_context(security_state);
 	assert(ctx);
 
 	el3_sysregs_context_save(get_el3state_ctx(ctx));
@@ -121,7 +105,7 @@
 {
 	cpu_context_t *ctx;
 
-	ctx = cm_get_context(read_mpidr(), security_state);
+	ctx = cm_get_context(security_state);
 	assert(ctx);
 
 	el3_sysregs_context_restore(get_el3state_ctx(ctx));
@@ -131,7 +115,7 @@
 {
 	cpu_context_t *ctx;
 
-	ctx = cm_get_context(read_mpidr(), security_state);
+	ctx = cm_get_context(security_state);
 	assert(ctx);
 
 	el1_sysregs_context_save(get_sysregs_ctx(ctx));
@@ -141,7 +125,7 @@
 {
 	cpu_context_t *ctx;
 
-	ctx = cm_get_context(read_mpidr(), security_state);
+	ctx = cm_get_context(security_state);
 	assert(ctx);
 
 	el1_sysregs_context_restore(get_sysregs_ctx(ctx));
@@ -159,7 +143,7 @@
 	cpu_context_t *ctx;
 	el3_state_t *state;
 
-	ctx = cm_get_context(read_mpidr(), security_state);
+	ctx = cm_get_context(security_state);
 	assert(ctx);
 
 	/* Program the interrupt routing model for this security state */
@@ -183,7 +167,7 @@
 	cpu_context_t *ctx;
 	el3_state_t *state;
 
-	ctx = cm_get_context(read_mpidr(), security_state);
+	ctx = cm_get_context(security_state);
 	assert(ctx);
 
 	/* Populate EL3 state so that ERET jumps to the correct entry */
@@ -204,7 +188,7 @@
 	el3_state_t *state;
 	uint32_t scr_el3;
 
-	ctx = cm_get_context(read_mpidr(), security_state);
+	ctx = cm_get_context(security_state);
 	assert(ctx);
 
 	/* Ensure that the bit position is a valid one */
@@ -233,7 +217,7 @@
 	cpu_context_t *ctx;
 	el3_state_t *state;
 
-	ctx = cm_get_context(read_mpidr(), security_state);
+	ctx = cm_get_context(security_state);
 	assert(ctx);
 
 	/* Populate EL3 state so that ERET jumps to the correct entry */
@@ -253,7 +237,7 @@
 	uint64_t sp_mode;
 #endif
 
-	ctx = cm_get_context(read_mpidr(), security_state);
+	ctx = cm_get_context(security_state);
 	assert(ctx);
 
 #if DEBUG
@@ -272,32 +256,3 @@
 			 "msr	spsel, #0\n"
 			 : : "r" (ctx));
 }
-
-/************************************************************************
- * The following function is used to populate the per cpu pointer cache.
- * The pointer will be stored in the tpidr_el3 register.
- *************************************************************************/
-void cm_init_pcpu_ptr_cache()
-{
-	unsigned long mpidr = read_mpidr();
-	uint32_t linear_id = platform_get_core_pos(mpidr);
-	per_cpu_ptr_cache_t *pcpu_ptr_cache;
-
-	pcpu_ptr_cache = &per_cpu_ptr_cache_space[linear_id];
-	assert(pcpu_ptr_cache);
-	pcpu_ptr_cache->crash_stack = get_crash_stack(mpidr);
-
-	cm_set_pcpu_ptr_cache(pcpu_ptr_cache);
-}
-
-
-void cm_set_pcpu_ptr_cache(const void *pcpu_ptr)
-{
-	write_tpidr_el3((unsigned long)pcpu_ptr);
-}
-
-void *cm_get_pcpu_ptr_cache(void)
-{
-	return (void *)read_tpidr_el3();
-}
-
diff --git a/lib/aarch64/tlb_helpers.S b/bl31/cpu_data_array.c
similarity index 73%
rename from lib/aarch64/tlb_helpers.S
rename to bl31/cpu_data_array.c
index 8dfae12..b0042a1 100644
--- a/lib/aarch64/tlb_helpers.S
+++ b/bl31/cpu_data_array.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -28,46 +28,17 @@
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
-#include <asm_macros.S>
+#include <cassert.h>
+#include <cpu_data.h>
+#include <platform_def.h>
 
-	.globl	tlbialle1
-	.globl	tlbialle1is
-	.globl	tlbialle2
-	.globl	tlbialle2is
-	.globl	tlbialle3
-	.globl	tlbialle3is
-	.globl	tlbivmalle1
+/* verify assembler offsets match data structures */
+CASSERT(CPU_DATA_CRASH_STACK_OFFSET == __builtin_offsetof
+	(cpu_data_t, crash_stack),
+	assert_cpu_data_crash_stack_offset_mismatch);
 
+CASSERT((1 << CPU_DATA_LOG2SIZE) == sizeof(cpu_data_t),
+	assert_cpu_data_log2size_mismatch);
 
-func tlbialle1
-	tlbi	alle1
-	ret
-
-
-func tlbialle1is
-	tlbi	alle1is
-	ret
-
-
-func tlbialle2
-	tlbi	alle2
-	ret
-
-
-func tlbialle2is
-	tlbi	alle2is
-	ret
-
-
-func tlbialle3
-	tlbi	alle3
-	ret
-
-
-func tlbialle3is
-	tlbi	alle3is
-	ret
-
-func tlbivmalle1
-	tlbi	vmalle1
-	ret
+/* The per-CPU cpu_data_t space allocation */
+cpu_data_t percpu_data[PLATFORM_CORE_COUNT];
diff --git a/common/bl_common.c b/common/bl_common.c
index 3bc314c..4affa76 100644
--- a/common/bl_common.c
+++ b/common/bl_common.c
@@ -175,7 +175,9 @@
  * given a name, extents of free memory & whether the image should be loaded at
  * the bottom or top of the free memory. It updates the memory layout if the
  * load is successful. It also updates the image information and the entry point
- * information in the params passed
+ * information in the params passed. The caller might pass a NULL pointer for
+ * the entry point if it is not interested in this information, e.g. because
+ * the image just needs to be loaded in memory but won't ever be executed.
  ******************************************************************************/
 int load_image(meminfo_t *mem_layout,
 			 const char *image_name,
@@ -399,7 +401,8 @@
 	image_data->image_base = image_base;
 	image_data->image_size = image_size;
 
-	entry_point_info->pc = image_base;
+	if (entry_point_info != NULL)
+		entry_point_info->pc = image_base;
 
 	/*
 	 * File has been successfully loaded. Update the free memory
diff --git a/docs/user-guide.md b/docs/user-guide.md
index 85103b3..0105531 100644
--- a/docs/user-guide.md
+++ b/docs/user-guide.md
@@ -172,6 +172,10 @@
     entrypoint) or 1 (CPU reset to BL3-1 entrypoint).
     The default value is 0.
 
+*   `CRASH_REPORTING`: A non-zero value enables a console dump of processor
+    register state when an unexpected exception occurs during execution of
+    BL3-1. This option defaults to the value of `DEBUG` - i.e. by default
+    this is only enabled for a debug build of the firmware.
 
 ### Creating a Firmware Image Package
 
diff --git a/drivers/arm/gic/aarch64/gic_v3_sysregs.S b/drivers/arm/gic/aarch64/gic_v3_sysregs.S
deleted file mode 100644
index ddf85a8..0000000
--- a/drivers/arm/gic/aarch64/gic_v3_sysregs.S
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <asm_macros.S>
-
-	.globl	read_icc_sre_el1
-	.globl	read_icc_sre_el2
-	.globl	read_icc_sre_el3
-	.globl	write_icc_sre_el1
-	.globl	write_icc_sre_el2
-	.globl	write_icc_sre_el3
-	.globl  write_icc_pmr_el1
-
-
-/*
- * Register definitions used by GCC for GICv3 access.
- * These are defined by ARMCC, so keep them in the GCC specific code for now.
- */
-#define ICC_SRE_EL1     S3_0_C12_C12_5
-#define ICC_SRE_EL2     S3_4_C12_C9_5
-#define ICC_SRE_EL3     S3_6_C12_C12_5
-#define ICC_CTLR_EL1    S3_0_C12_C12_4
-#define ICC_CTLR_EL3    S3_6_C12_C12_4
-#define ICC_PMR_EL1     S3_0_C4_C6_0
-
-func read_icc_sre_el1
-	mrs	x0, ICC_SRE_EL1
-	ret
-
-
-func read_icc_sre_el2
-	mrs	x0, ICC_SRE_EL2
-	ret
-
-
-func read_icc_sre_el3
-	mrs	x0, ICC_SRE_EL3
-	ret
-
-
-func write_icc_sre_el1
-	msr	ICC_SRE_EL1, x0
-	ret
-
-
-func write_icc_sre_el2
-	msr	ICC_SRE_EL2, x0
-	ret
-
-
-func write_icc_sre_el3
-	msr	ICC_SRE_EL3, x0
-	ret
-
-
-func write_icc_pmr_el1
-	msr	ICC_PMR_EL1, x0
-	ret
diff --git a/drivers/arm/pl011/pl011_console.c b/drivers/arm/pl011/pl011_console.c
index 0e82aa2..81897ca 100644
--- a/drivers/arm/pl011/pl011_console.c
+++ b/drivers/arm/pl011/pl011_console.c
@@ -65,11 +65,18 @@
 
 }
 
-#define WAIT_UNTIL_UART_FREE(base) while ((pl011_read_fr(base)\
-					& PL011_UARTFR_TXFF) == 1)
+#define WAIT_UNTIL_UART_FREE(base)				\
+	while ((pl011_read_fr(base) & PL011_UARTFR_TXFF))	\
+		continue
+
 int console_putc(int c)
 {
-	assert(uart_base);
+	/* If the console has not been initialized then return an error
+	 * code. Asserting here would result in recursion and stack
+	 * exhaustion
+	 */
+	if (!uart_base)
+		return -1;
 
 	if (c == '\n') {
 		WAIT_UNTIL_UART_FREE(uart_base);
diff --git a/include/bl31/context.h b/include/bl31/context.h
index 16cc744..c0230b8 100644
--- a/include/bl31/context.h
+++ b/include/bl31/context.h
@@ -185,14 +185,10 @@
 #define CTX_FP_FPCR		0x208
 #define CTX_FPREGS_END		0x210
 
-/******************************************************************************
- * Offsets for the per cpu cache implementation
- ******************************************************************************/
-#define PTR_CACHE_CRASH_STACK_OFFSET 0x0
-
 #ifndef __ASSEMBLY__
 
 #include <cassert.h>
+#include <platform_def.h>	/* for CACHE_WRITEBACK_GRANULE */
 #include <stdint.h>
 
 /*
@@ -331,17 +327,6 @@
 void fpregs_context_restore(fp_regs_t *regs);
 
 
-/* Per-CPU pointer cache of recently used pointers and also the crash stack
- * TODO: Add other commonly used variables to this (tf_issues#90)
- */
-typedef struct per_cpu_ptr_cache {
-	uint64_t crash_stack;
-} per_cpu_ptr_cache_t;
-
-CASSERT(PTR_CACHE_CRASH_STACK_OFFSET == __builtin_offsetof\
-	(per_cpu_ptr_cache_t, crash_stack), \
-	assert_per_cpu_ptr_cache_crash_stack_offset_mismatch);
-
 #undef CTX_SYSREG_ALL
 #undef CTX_FP_ALL
 #undef CTX_GPREG_ALL
diff --git a/include/bl31/context_mgmt.h b/include/bl31/context_mgmt.h
index ce4f7a8..ade2fa1 100644
--- a/include/bl31/context_mgmt.h
+++ b/include/bl31/context_mgmt.h
@@ -31,16 +31,19 @@
 #ifndef __CM_H__
 #define __CM_H__
 
+#include <cpu_data.h>
 #include <stdint.h>
 
 /*******************************************************************************
  * Function & variable prototypes
  ******************************************************************************/
 void cm_init(void);
-void *cm_get_context(uint64_t mpidr, uint32_t security_state);
-void cm_set_context(uint64_t mpidr,
-		    void *context,
-		    uint32_t security_state);
+void *cm_get_context_by_mpidr(uint64_t mpidr, uint32_t security_state);
+static inline void *cm_get_context(uint32_t security_state);
+void cm_set_context_by_mpidr(uint64_t mpidr,
+			     void *context,
+			     uint32_t security_state);
+static inline void cm_set_context(void *context, uint32_t security_state);
 void cm_el3_sysregs_context_save(uint32_t security_state);
 void cm_el3_sysregs_context_restore(uint32_t security_state);
 void cm_el1_sysregs_context_save(uint32_t security_state);
@@ -52,8 +55,32 @@
 			  uint32_t bit_pos,
 			  uint32_t value);
 void cm_set_next_eret_context(uint32_t security_state);
-void cm_init_pcpu_ptr_cache();
-void cm_set_pcpu_ptr_cache(const void *pcpu_ptr);
-void *cm_get_pcpu_ptr_cache(void);
 uint32_t cm_get_scr_el3(uint32_t security_state);
+
+/* Inline definitions */
+
+/*******************************************************************************
+ * This function returns a pointer to the most recent 'cpu_context' structure
+ * for the calling CPU that was set as the context for the specified security
+ * state. NULL is returned if no such structure has been specified.
+ ******************************************************************************/
+void *cm_get_context(uint32_t security_state)
+{
+	assert(security_state <= NON_SECURE);
+
+	return get_cpu_data(cpu_context[security_state]);
+}
+
+/*******************************************************************************
+ * This function sets the pointer to the current 'cpu_context' structure for the
+ * specified security state for the calling CPU
+ ******************************************************************************/
+void cm_set_context(void *context, uint32_t security_state)
+{
+	assert(security_state <= NON_SECURE);
+
+	set_cpu_data(cpu_context[security_state], context);
+}
+
+
 #endif /* __CM_H__ */
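
The context manager now has two lookup flavours: the inline cm_get_context()/cm_set_context() pair for the calling CPU, backed by the tpidr_el3 pointer, and the *_by_mpidr variants for addressing another CPU. A short usage sketch (target_mpidr is illustrative, not a value from this patch):

/* Calling CPU: fast path through the per-cpu data pointer in tpidr_el3. */
cpu_context_t *own_ctx = cm_get_context(NON_SECURE);

/* Another CPU: slower path that maps the MPIDR to a linear index first. */
cpu_context_t *other_ctx = cm_get_context_by_mpidr(target_mpidr, NON_SECURE);
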
diff --git a/include/bl31/cpu_data.h b/include/bl31/cpu_data.h
new file mode 100644
index 0000000..5f45f14
--- /dev/null
+++ b/include/bl31/cpu_data.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __CPU_DATA_H__
+#define __CPU_DATA_H__
+
+/* Offsets for the cpu_data structure */
+#define CPU_DATA_CRASH_STACK_OFFSET	0x10
+#define CPU_DATA_LOG2SIZE		6
+
+#ifndef __ASSEMBLY__
+
+#include <arch_helpers.h>
+#include <platform_def.h>
+#include <stdint.h>
+
+/*******************************************************************************
+ * Function & variable prototypes
+ ******************************************************************************/
+
+/*******************************************************************************
+ * Cache of frequently used per-cpu data:
+ *   Pointers to non-secure and secure security state contexts
+ *   Address of the crash stack
+ * It is aligned to the cache line boundary to allow efficient concurrent
+ * manipulation of these pointers on different cpus
+ *
+ * TODO: Add other commonly used variables to this (tf_issues#90)
+ *
+ * The data structure and the _cpu_data accessors should not be used directly
+ * by components that have per-cpu members. The member access macros should be
+ * used for this.
+ ******************************************************************************/
+
+typedef struct cpu_data {
+	void *cpu_context[2];
+	uint64_t crash_stack;
+} __aligned(CACHE_WRITEBACK_GRANULE) cpu_data_t;
+
+struct cpu_data *_cpu_data_by_index(uint32_t cpu_index);
+struct cpu_data *_cpu_data_by_mpidr(uint64_t mpidr);
+
+/* Return the cpu_data structure for the current CPU. */
+static inline struct cpu_data *_cpu_data(void)
+{
+	return (cpu_data_t *)read_tpidr_el3();
+}
+
+
+/**************************************************************************
+ * APIs for initialising and accessing per-cpu data
+ *************************************************************************/
+
+void init_cpu_data_ptr(void);
+
+#define get_cpu_data(_m)		   _cpu_data()->_m
+#define set_cpu_data(_m, _v)		   _cpu_data()->_m = _v
+#define get_cpu_data_by_index(_ix, _m)	   _cpu_data_by_index(_ix)->_m
+#define set_cpu_data_by_index(_ix, _m, _v) _cpu_data_by_index(_ix)->_m = _v
+#define get_cpu_data_by_mpidr(_id, _m)	   _cpu_data_by_mpidr(_id)->_m
+#define set_cpu_data_by_mpidr(_id, _m, _v) _cpu_data_by_mpidr(_id)->_m = _v
+
+
+#endif /* __ASSEMBLY__ */
+#endif /* __CPU_DATA_H__ */
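
The header above deliberately hides the cpu_data_t layout behind the member access macros; a minimal usage sketch (the function name is illustrative only):

#include <cpu_data.h>

/* For the calling CPU the macros expand to a lookup through tpidr_el3, so
 * no MPIDR read or linear-index computation is needed on the fast path. */
static void cpu_data_usage_sketch(uint64_t stack_top)
{
	uint64_t top, cpu0;

	set_cpu_data(crash_stack, stack_top);		/* this CPU */
	top  = get_cpu_data(crash_stack);		/* this CPU */
	cpu0 = get_cpu_data_by_index(0, crash_stack);	/* CPU with linear index 0 */

	(void)top;
	(void)cpu0;
}
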
diff --git a/include/bl31/runtime_svc.h b/include/bl31/runtime_svc.h
index d7d88d4..f3543d4 100644
--- a/include/bl31/runtime_svc.h
+++ b/include/bl31/runtime_svc.h
@@ -267,7 +267,7 @@
 void runtime_svc_init();
 extern uint64_t __RT_SVC_DESCS_START__;
 extern uint64_t __RT_SVC_DESCS_END__;
-uint64_t get_crash_stack(uint64_t mpidr);
-void runtime_exceptions(void);
+void init_crash_reporting(void);
+
 #endif /*__ASSEMBLY__*/
 #endif /* __RUNTIME_SVC_H__ */
diff --git a/include/drivers/arm/gic_v3.h b/include/drivers/arm/gic_v3.h
index 0f99994..c410626 100644
--- a/include/drivers/arm/gic_v3.h
+++ b/include/drivers/arm/gic_v3.h
@@ -68,14 +68,6 @@
  ******************************************************************************/
 uintptr_t gicv3_get_rdist(uintptr_t gicr_base, uint64_t mpidr);
 
-unsigned int read_icc_sre_el1(void);
-unsigned int read_icc_sre_el2(void);
-unsigned int read_icc_sre_el3(void);
-void write_icc_sre_el1(unsigned int);
-void write_icc_sre_el2(unsigned int);
-void write_icc_sre_el3(unsigned int);
-void write_icc_pmr_el1(unsigned int);
-
 /*******************************************************************************
  * GIC Redistributor interface accessors
  ******************************************************************************/
diff --git a/include/lib/aarch64/arch.h b/include/lib/aarch64/arch.h
index d89b4fe..0bfbd66 100644
--- a/include/lib/aarch64/arch.h
+++ b/include/lib/aarch64/arch.h
@@ -65,6 +65,16 @@
 #define FIRST_MPIDR		0
 
 /*******************************************************************************
+ * Definitions for CPU system register interface to GICv3
+ ******************************************************************************/
+#define ICC_SRE_EL1     S3_0_C12_C12_5
+#define ICC_SRE_EL2     S3_4_C12_C9_5
+#define ICC_SRE_EL3     S3_6_C12_C12_5
+#define ICC_CTLR_EL1    S3_0_C12_C12_4
+#define ICC_CTLR_EL3    S3_6_C12_C12_4
+#define ICC_PMR_EL1     S3_0_C4_C6_0
+
+/*******************************************************************************
  * Implementation defined sysreg encodings
  ******************************************************************************/
 #define CPUECTLR_EL1	S3_1_C15_C2_1
diff --git a/include/lib/aarch64/arch_helpers.h b/include/lib/aarch64/arch_helpers.h
index f16c4b5..1ca3350 100644
--- a/include/lib/aarch64/arch_helpers.h
+++ b/include/lib/aarch64/arch_helpers.h
@@ -31,52 +31,115 @@
 #ifndef __ARCH_HELPERS_H__
 #define __ARCH_HELPERS_H__
 
-#include <cdefs.h> /* For __dead2 */
+#include <arch.h>	/* for additional register definitions */
+#include <cdefs.h>	/* For __dead2 */
+#include <stdint.h>
 
+/**********************************************************************
+ * Macros which create inline functions to read or write CPU system
+ * registers
+ *********************************************************************/
+
+#define _DEFINE_SYSREG_READ_FUNC(_name, _reg_name)	\
+static inline uint64_t read_ ## _name(void)		\
+{							\
+	uint64_t v;					\
+	__asm__ ("mrs %0, " #_reg_name : "=r" (v));	\
+	return v;					\
+}
+
+#define _DEFINE_SYSREG_WRITE_FUNC(_name, _reg_name)		\
+static inline void write_ ## _name(uint64_t v)		\
+{							\
+	__asm__ ("msr " #_reg_name ", %0" : : "r" (v));	\
+}
+
+#define _DEFINE_SYSREG_WRITE_CONST_FUNC(_name, _reg_name)		\
+static inline void write_ ## _name(const uint64_t v)	\
+{							\
+	__asm__ ("msr " #_reg_name ", %0" : : "i" (v));	\
+}
+
+/* Define read function for system register */
+#define DEFINE_SYSREG_READ_FUNC(_name) 			\
+	_DEFINE_SYSREG_READ_FUNC(_name, _name)
+
+/* Define read & write function for system register */
+#define DEFINE_SYSREG_RW_FUNCS(_name)			\
+	_DEFINE_SYSREG_READ_FUNC(_name, _name)		\
+	_DEFINE_SYSREG_WRITE_FUNC(_name, _name)
+
+/* Define read & write function for renamed system register */
+#define DEFINE_RENAME_SYSREG_RW_FUNCS(_name, _reg_name)	\
+	_DEFINE_SYSREG_READ_FUNC(_name, _reg_name)	\
+	_DEFINE_SYSREG_WRITE_FUNC(_name, _reg_name)
+
+/* Define write function for special system registers */
+#define DEFINE_SYSREG_WRITE_CONST_FUNC(_name)		\
+	_DEFINE_SYSREG_WRITE_CONST_FUNC(_name, _name)
+
+
+/**********************************************************************
+ * Macros to create inline functions for system instructions
+ *********************************************************************/
+
+/* Define function for simple system instruction */
+#define DEFINE_SYSOP_FUNC(_op)				\
+static inline void _op()				\
+{							\
+	__asm__ (#_op);					\
+}
+
+/* Define function for system instruction with type specifier */
+#define DEFINE_SYSOP_TYPE_FUNC(_op, _type)		\
+static inline void _op ## _type()			\
+{							\
+	__asm__ (#_op " " #_type);			\
+}
+
+/* Define function for system instruction with register parameter */
+#define DEFINE_SYSOP_TYPE_PARAM_FUNC(_op, _type)	\
+static inline void _op ## _type(uint64_t v)		\
+{							\
+	 __asm__ (#_op " " #_type ", %0" : : "r" (v));	\
+}
 
 /*******************************************************************************
  * Aarch64 translation tables manipulation helper prototypes
- ******************************************************************************/
-unsigned long create_table_desc(unsigned long *next_table_ptr);
-unsigned long create_block_desc(unsigned long desc,
-				unsigned long addr,
-				unsigned int level);
-unsigned long create_device_block(unsigned long output_addr,
-				unsigned int level,
-				unsigned int ns);
-unsigned long create_romem_block(unsigned long output_addr,
-				unsigned int level,
-				unsigned int ns);
-unsigned long create_rwmem_block(unsigned long output_addr,
-				unsigned int level,
-				unsigned int ns);
+******************************************************************************/
+uint64_t create_table_desc(uint64_t *next_table_ptr);
+uint64_t create_block_desc(uint64_t desc, uint64_t addr, uint32_t level);
+uint64_t create_device_block(uint64_t output_addr, uint32_t level, uint32_t ns);
+uint64_t create_romem_block(uint64_t output_addr, uint32_t level, uint32_t ns);
+uint64_t create_rwmem_block(uint64_t output_addr, uint32_t level, uint32_t ns);
 
 /*******************************************************************************
  * TLB maintenance accessor prototypes
  ******************************************************************************/
-void tlbialle1(void);
-void tlbialle1is(void);
-void tlbialle2(void);
-void tlbialle2is(void);
-void tlbialle3(void);
-void tlbialle3is(void);
-void tlbivmalle1(void);
+DEFINE_SYSOP_TYPE_FUNC(tlbi, alle1)
+DEFINE_SYSOP_TYPE_FUNC(tlbi, alle1is)
+DEFINE_SYSOP_TYPE_FUNC(tlbi, alle2)
+DEFINE_SYSOP_TYPE_FUNC(tlbi, alle2is)
+DEFINE_SYSOP_TYPE_FUNC(tlbi, alle3)
+DEFINE_SYSOP_TYPE_FUNC(tlbi, alle3is)
+DEFINE_SYSOP_TYPE_FUNC(tlbi, vmalle1)
 
 /*******************************************************************************
  * Cache maintenance accessor prototypes
  ******************************************************************************/
-void dcisw(unsigned long);
-void dccisw(unsigned long);
-void dccsw(unsigned long);
-void dccvac(unsigned long);
-void dcivac(unsigned long);
-void dccivac(unsigned long);
-void dccvau(unsigned long);
-void dczva(unsigned long);
-void flush_dcache_range(unsigned long, unsigned long);
-void inv_dcache_range(unsigned long, unsigned long);
-void dcsw_op_louis(unsigned int);
-void dcsw_op_all(unsigned int);
+DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, isw)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, cisw)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, csw)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, cvac)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, ivac)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, civac)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, cvau)
+DEFINE_SYSOP_TYPE_PARAM_FUNC(dc, zva)
+
+void flush_dcache_range(uint64_t, uint64_t);
+void inv_dcache_range(uint64_t, uint64_t);
+void dcsw_op_louis(uint32_t);
+void dcsw_op_all(uint32_t);
 
 void disable_mmu_el3(void);
 void disable_mmu_icache_el3(void);
@@ -84,202 +147,159 @@
 /*******************************************************************************
  * Misc. accessor prototypes
  ******************************************************************************/
-void enable_irq(void);
-void enable_fiq(void);
-void enable_serror(void);
-void enable_debug_exceptions(void);
 
-void disable_irq(void);
-void disable_fiq(void);
-void disable_serror(void);
-void disable_debug_exceptions(void);
+DEFINE_SYSREG_WRITE_CONST_FUNC(daifset)
+DEFINE_SYSREG_WRITE_CONST_FUNC(daifclr)
 
-unsigned long read_id_pfr1_el1(void);
-unsigned long read_id_aa64pfr0_el1(void);
-unsigned long read_current_el(void);
-unsigned long read_daif(void);
-unsigned long read_spsr_el1(void);
-unsigned long read_spsr_el2(void);
-unsigned long read_spsr_el3(void);
-unsigned long read_elr_el1(void);
-unsigned long read_elr_el2(void);
-unsigned long read_elr_el3(void);
+#define enable_irq()			write_daifclr(DAIF_IRQ_BIT)
+#define enable_fiq()			write_daifclr(DAIF_FIQ_BIT)
+#define enable_serror()			write_daifclr(DAIF_ABT_BIT)
+#define enable_debug_exceptions()	write_daifclr(DAIF_DBG_BIT)
+#define disable_irq()			write_daifset(DAIF_IRQ_BIT)
+#define disable_fiq()			write_daifset(DAIF_FIQ_BIT)
+#define disable_serror()		write_daifset(DAIF_ABT_BIT)
+#define disable_debug_exceptions()	write_daifset(DAIF_DBG_BIT)
 
-void write_daif(unsigned long);
-void write_spsr_el1(unsigned long);
-void write_spsr_el2(unsigned long);
-void write_spsr_el3(unsigned long);
-void write_elr_el1(unsigned long);
-void write_elr_el2(unsigned long);
-void write_elr_el3(unsigned long);
+DEFINE_SYSREG_READ_FUNC(id_pfr1_el1)
+DEFINE_SYSREG_READ_FUNC(id_aa64pfr0_el1)
+DEFINE_SYSREG_READ_FUNC(CurrentEl)
+DEFINE_SYSREG_RW_FUNCS(daif)
+DEFINE_SYSREG_RW_FUNCS(spsr_el1)
+DEFINE_SYSREG_RW_FUNCS(spsr_el2)
+DEFINE_SYSREG_RW_FUNCS(spsr_el3)
+DEFINE_SYSREG_RW_FUNCS(elr_el1)
+DEFINE_SYSREG_RW_FUNCS(elr_el2)
+DEFINE_SYSREG_RW_FUNCS(elr_el3)
 
-void wfi(void);
-void wfe(void);
-void rfe(void);
-void sev(void);
-void dsb(void);
-void isb(void);
+DEFINE_SYSOP_FUNC(wfi)
+DEFINE_SYSOP_FUNC(wfe)
+DEFINE_SYSOP_FUNC(sev)
+DEFINE_SYSOP_TYPE_FUNC(dsb, sy)
+DEFINE_SYSOP_FUNC(isb)
 
-unsigned int get_afflvl_shift(unsigned int);
-unsigned int mpidr_mask_lower_afflvls(unsigned long, unsigned int);
+uint32_t get_afflvl_shift(uint32_t);
+uint32_t mpidr_mask_lower_afflvls(uint64_t, uint32_t);
 
-void __dead2 eret(unsigned long, unsigned long,
-		unsigned long, unsigned long,
-		unsigned long, unsigned long,
-		unsigned long, unsigned long);
 
-void __dead2 smc(unsigned long, unsigned long,
-		unsigned long, unsigned long,
-		unsigned long, unsigned long,
-		unsigned long, unsigned long);
+void __dead2 eret(uint64_t x0, uint64_t x1, uint64_t x2, uint64_t x3,
+		  uint64_t x4, uint64_t x5, uint64_t x6, uint64_t x7);
+void __dead2 smc(uint64_t x0, uint64_t x1, uint64_t x2, uint64_t x3,
+		 uint64_t x4, uint64_t x5, uint64_t x6, uint64_t x7);
 
 /*******************************************************************************
  * System register accessor prototypes
  ******************************************************************************/
-unsigned long read_midr(void);
-unsigned long read_mpidr(void);
+DEFINE_SYSREG_READ_FUNC(midr_el1)
+DEFINE_SYSREG_READ_FUNC(mpidr_el1)
 
-unsigned long read_scr(void);
-unsigned long read_hcr(void);
+DEFINE_SYSREG_RW_FUNCS(scr_el3)
+DEFINE_SYSREG_RW_FUNCS(hcr_el2)
 
-unsigned long read_vbar_el1(void);
-unsigned long read_vbar_el2(void);
-unsigned long read_vbar_el3(void);
+DEFINE_SYSREG_RW_FUNCS(vbar_el1)
+DEFINE_SYSREG_RW_FUNCS(vbar_el2)
+DEFINE_SYSREG_RW_FUNCS(vbar_el3)
 
-unsigned long read_sctlr_el1(void);
-unsigned long read_sctlr_el2(void);
-unsigned long read_sctlr_el3(void);
+DEFINE_SYSREG_RW_FUNCS(sctlr_el1)
+DEFINE_SYSREG_RW_FUNCS(sctlr_el2)
+DEFINE_SYSREG_RW_FUNCS(sctlr_el3)
 
-unsigned long read_actlr_el1(void);
-unsigned long read_actlr_el2(void);
-unsigned long read_actlr_el3(void);
+DEFINE_SYSREG_RW_FUNCS(actlr_el1)
+DEFINE_SYSREG_RW_FUNCS(actlr_el2)
+DEFINE_SYSREG_RW_FUNCS(actlr_el3)
 
-unsigned long read_esr_el1(void);
-unsigned long read_esr_el2(void);
-unsigned long read_esr_el3(void);
+DEFINE_SYSREG_RW_FUNCS(esr_el1)
+DEFINE_SYSREG_RW_FUNCS(esr_el2)
+DEFINE_SYSREG_RW_FUNCS(esr_el3)
 
-unsigned long read_afsr0_el1(void);
-unsigned long read_afsr0_el2(void);
-unsigned long read_afsr0_el3(void);
+DEFINE_SYSREG_RW_FUNCS(afsr0_el1)
+DEFINE_SYSREG_RW_FUNCS(afsr0_el2)
+DEFINE_SYSREG_RW_FUNCS(afsr0_el3)
 
-unsigned long read_afsr1_el1(void);
-unsigned long read_afsr1_el2(void);
-unsigned long read_afsr1_el3(void);
+DEFINE_SYSREG_RW_FUNCS(afsr1_el1)
+DEFINE_SYSREG_RW_FUNCS(afsr1_el2)
+DEFINE_SYSREG_RW_FUNCS(afsr1_el3)
 
-unsigned long read_far_el1(void);
-unsigned long read_far_el2(void);
-unsigned long read_far_el3(void);
+DEFINE_SYSREG_RW_FUNCS(far_el1)
+DEFINE_SYSREG_RW_FUNCS(far_el2)
+DEFINE_SYSREG_RW_FUNCS(far_el3)
 
-unsigned long read_mair_el1(void);
-unsigned long read_mair_el2(void);
-unsigned long read_mair_el3(void);
+DEFINE_SYSREG_RW_FUNCS(mair_el1)
+DEFINE_SYSREG_RW_FUNCS(mair_el2)
+DEFINE_SYSREG_RW_FUNCS(mair_el3)
 
-unsigned long read_amair_el1(void);
-unsigned long read_amair_el2(void);
-unsigned long read_amair_el3(void);
+DEFINE_SYSREG_RW_FUNCS(amair_el1)
+DEFINE_SYSREG_RW_FUNCS(amair_el2)
+DEFINE_SYSREG_RW_FUNCS(amair_el3)
 
-unsigned long read_rvbar_el1(void);
-unsigned long read_rvbar_el2(void);
-unsigned long read_rvbar_el3(void);
+DEFINE_SYSREG_READ_FUNC(rvbar_el1)
+DEFINE_SYSREG_READ_FUNC(rvbar_el2)
+DEFINE_SYSREG_READ_FUNC(rvbar_el3)
 
-unsigned long read_rmr_el1(void);
-unsigned long read_rmr_el2(void);
-unsigned long read_rmr_el3(void);
+DEFINE_SYSREG_RW_FUNCS(rmr_el1)
+DEFINE_SYSREG_RW_FUNCS(rmr_el2)
+DEFINE_SYSREG_RW_FUNCS(rmr_el3)
 
-unsigned long read_tcr_el1(void);
-unsigned long read_tcr_el2(void);
-unsigned long read_tcr_el3(void);
+DEFINE_SYSREG_RW_FUNCS(tcr_el1)
+DEFINE_SYSREG_RW_FUNCS(tcr_el2)
+DEFINE_SYSREG_RW_FUNCS(tcr_el3)
 
-unsigned long read_ttbr0_el1(void);
-unsigned long read_ttbr0_el2(void);
-unsigned long read_ttbr0_el3(void);
+DEFINE_SYSREG_RW_FUNCS(ttbr0_el1)
+DEFINE_SYSREG_RW_FUNCS(ttbr0_el2)
+DEFINE_SYSREG_RW_FUNCS(ttbr0_el3)
 
-unsigned long read_ttbr1_el1(void);
+DEFINE_SYSREG_RW_FUNCS(ttbr1_el1)
 
-unsigned long read_cptr_el2(void);
-unsigned long read_cptr_el3(void);
+DEFINE_SYSREG_RW_FUNCS(cptr_el2)
+DEFINE_SYSREG_RW_FUNCS(cptr_el3)
 
-unsigned long read_cpacr(void);
-unsigned long read_cpuectlr(void);
-unsigned int read_cntfrq_el0(void);
-unsigned int read_cntps_ctl_el1(void);
-unsigned int read_cntps_tval_el1(void);
-unsigned long read_cntps_cval_el1(void);
-unsigned long read_cntpct_el0(void);
-unsigned long read_cnthctl_el2(void);
+DEFINE_SYSREG_RW_FUNCS(cpacr_el1)
+DEFINE_SYSREG_RW_FUNCS(cntfrq_el0)
+DEFINE_SYSREG_RW_FUNCS(cntps_ctl_el1)
+DEFINE_SYSREG_RW_FUNCS(cntps_tval_el1)
+DEFINE_SYSREG_RW_FUNCS(cntps_cval_el1)
+DEFINE_SYSREG_READ_FUNC(cntpct_el0)
+DEFINE_SYSREG_RW_FUNCS(cnthctl_el2)
 
-unsigned long read_tpidr_el3(void);
+DEFINE_SYSREG_RW_FUNCS(tpidr_el3)
 
-void write_scr(unsigned long);
-void write_hcr(unsigned long);
-void write_cpacr(unsigned long);
-void write_cntfrq_el0(unsigned int);
-void write_cntps_ctl_el1(unsigned int);
-void write_cntps_tval_el1(unsigned int);
-void write_cntps_cval_el1(unsigned long);
-void write_cnthctl_el2(unsigned long);
+/* Implementation specific registers */
 
-void write_vbar_el1(unsigned long);
-void write_vbar_el2(unsigned long);
-void write_vbar_el3(unsigned long);
+DEFINE_RENAME_SYSREG_RW_FUNCS(cpuectlr_el1, CPUECTLR_EL1)
 
-void write_sctlr_el1(unsigned long);
-void write_sctlr_el2(unsigned long);
-void write_sctlr_el3(unsigned long);
+/* GICv3 System Registers */
 
-void write_actlr_el1(unsigned long);
-void write_actlr_el2(unsigned long);
-void write_actlr_el3(unsigned long);
+DEFINE_RENAME_SYSREG_RW_FUNCS(icc_sre_el1, ICC_SRE_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(icc_sre_el2, ICC_SRE_EL2)
+DEFINE_RENAME_SYSREG_RW_FUNCS(icc_sre_el3, ICC_SRE_EL3)
+DEFINE_RENAME_SYSREG_RW_FUNCS(icc_pmr_el1, ICC_PMR_EL1)
 
-void write_esr_el1(unsigned long);
-void write_esr_el2(unsigned long);
-void write_esr_el3(unsigned long);
-
-void write_afsr0_el1(unsigned long);
-void write_afsr0_el2(unsigned long);
-void write_afsr0_el3(unsigned long);
-
-void write_afsr1_el1(unsigned long);
-void write_afsr1_el2(unsigned long);
-void write_afsr1_el3(unsigned long);
-
-void write_far_el1(unsigned long);
-void write_far_el2(unsigned long);
-void write_far_el3(unsigned long);
-
-void write_mair_el1(unsigned long);
-void write_mair_el2(unsigned long);
-void write_mair_el3(unsigned long);
-
-void write_amair_el1(unsigned long);
-void write_amair_el2(unsigned long);
-void write_amair_el3(unsigned long);
-
-void write_rmr_el1(unsigned long);
-void write_rmr_el2(unsigned long);
-void write_rmr_el3(unsigned long);
-
-void write_tcr_el1(unsigned long);
-void write_tcr_el2(unsigned long);
-void write_tcr_el3(unsigned long);
-
-void write_ttbr0_el1(unsigned long);
-void write_ttbr0_el2(unsigned long);
-void write_ttbr0_el3(unsigned long);
-
-void write_ttbr1_el1(unsigned long);
-
-void write_cpuectlr(unsigned long);
-void write_cptr_el2(unsigned long);
-void write_cptr_el3(unsigned long);
-
-void write_tpidr_el3(unsigned long);
 
 #define IS_IN_EL(x) \
-	(GET_EL(read_current_el()) == MODE_EL##x)
+	(GET_EL(read_CurrentEl()) == MODE_EL##x)
 
 #define IS_IN_EL1() IS_IN_EL(1)
 #define IS_IN_EL3() IS_IN_EL(3)
 
+/* Previously defined accessor functions with incomplete register names */
+
+#define read_current_el()	read_CurrentEl()
+
+#define dsb()			dsbsy()
+
+#define read_midr()		read_midr_el1()
+
+#define read_mpidr()		read_mpidr_el1()
+
+#define read_scr()		read_scr_el3()
+#define write_scr(_v)		write_scr_el3(_v)
+
+#define read_hcr()		read_hcr_el2()
+#define write_hcr(_v)		write_hcr_el2(_v)
+
+#define read_cpuectlr()		read_cpuectlr_el1()
+#define write_cpuectlr(_v)	write_cpuectlr_el1(_v)
+
+#define read_cpacr()		read_cpacr_el1()
+#define write_cpacr(_v)		write_cpacr_el1(_v)
 
 #endif /* __ARCH_HELPERS_H__ */
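
To make the macro scheme above concrete: DEFINE_SYSREG_RW_FUNCS(scr_el3) expands to roughly the following pair of inline accessors (shown only to illustrate the expansion; this is not additional code in the patch):

static inline uint64_t read_scr_el3(void)
{
	uint64_t v;
	__asm__ ("mrs %0, scr_el3" : "=r" (v));
	return v;
}

static inline void write_scr_el3(uint64_t v)
{
	__asm__ ("msr scr_el3, %0" : : "r" (v));
}
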
diff --git a/include/lib/aarch64/xlat_tables.h b/include/lib/aarch64/xlat_tables.h
index 5df655b..8e0adc7 100644
--- a/include/lib/aarch64/xlat_tables.h
+++ b/include/lib/aarch64/xlat_tables.h
@@ -55,13 +55,14 @@
  * Structure for specifying a single region of memory.
  */
 typedef struct mmap_region {
-	unsigned long	base;
+	unsigned long	base_pa;
+	unsigned long	base_va;
 	unsigned long	size;
 	mmap_attr_t	attr;
 } mmap_region_t;
 
-void mmap_add_region(unsigned long base, unsigned long size,
-			unsigned attr);
+void mmap_add_region(unsigned long base_pa, unsigned long base_va,
+				unsigned long size, unsigned attr);
 void mmap_add(const mmap_region_t *mm);
 
 void init_xlat_tables(void);
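
With mmap_region_t split into base_pa/base_va, callers of mmap_add_region() now pass the physical and virtual base addresses separately. A hedged sketch of an identity mapping under the new prototype (DEVICE_BASE, DEVICE_SIZE and the MT_* attribute flags are assumed placeholders, not definitions from this patch):

/* Identity-map a device region: physical and virtual base are identical. */
mmap_add_region(DEVICE_BASE, DEVICE_BASE,	/* base_pa, base_va */
		DEVICE_SIZE, MT_DEVICE | MT_RW | MT_SECURE);
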
diff --git a/lib/aarch64/cache_helpers.S b/lib/aarch64/cache_helpers.S
index a5b918c..1c80550 100644
--- a/lib/aarch64/cache_helpers.S
+++ b/lib/aarch64/cache_helpers.S
@@ -31,59 +31,11 @@
 #include <arch.h>
 #include <asm_macros.S>
 
-	.globl	dcisw
-	.globl	dccisw
-	.globl	dccsw
-	.globl	dccvac
-	.globl	dcivac
-	.globl	dccivac
-	.globl	dccvau
-	.globl	dczva
 	.globl	flush_dcache_range
 	.globl	inv_dcache_range
 	.globl	dcsw_op_louis
 	.globl	dcsw_op_all
 
-func dcisw
-	dc	isw, x0
-	ret
-
-
-func dccisw
-	dc	cisw, x0
-	ret
-
-
-func dccsw
-	dc	csw, x0
-	ret
-
-
-func dccvac
-	dc	cvac, x0
-	ret
-
-
-func dcivac
-	dc	ivac, x0
-	ret
-
-
-func dccivac
-	dc	civac, x0
-	ret
-
-
-func dccvau
-	dc	cvau, x0
-	ret
-
-
-func dczva
-	dc	zva, x0
-	ret
-
-
 	/* ------------------------------------------
 	 * Clean+Invalidate from base address till
 	 * size. 'x0' = addr, 'x1' = size
diff --git a/lib/aarch64/misc_helpers.S b/lib/aarch64/misc_helpers.S
index e7ee015..439ca28 100644
--- a/lib/aarch64/misc_helpers.S
+++ b/lib/aarch64/misc_helpers.S
@@ -31,44 +31,8 @@
 #include <arch.h>
 #include <asm_macros.S>
 
-	.globl	enable_irq
-	.globl	disable_irq
-
-	.globl	enable_fiq
-	.globl	disable_fiq
-
-	.globl	enable_serror
-	.globl	disable_serror
-
-	.globl	enable_debug_exceptions
-	.globl	disable_debug_exceptions
-
-	.globl	read_daif
-	.globl	write_daif
-
-	.globl	read_spsr_el1
-	.globl	read_spsr_el2
-	.globl	read_spsr_el3
-
-	.globl	write_spsr_el1
-	.globl	write_spsr_el2
-	.globl	write_spsr_el3
-
-	.globl	read_elr_el1
-	.globl	read_elr_el2
-	.globl	read_elr_el3
-
-	.globl	write_elr_el1
-	.globl	write_elr_el2
-	.globl	write_elr_el3
-
 	.globl	get_afflvl_shift
 	.globl	mpidr_mask_lower_afflvls
-	.globl	dsb
-	.globl	isb
-	.globl	sev
-	.globl	wfe
-	.globl	wfi
 	.globl	eret
 	.globl	smc
 
@@ -78,6 +42,10 @@
 	.globl	disable_mmu_el3
 	.globl	disable_mmu_icache_el3
 
+#if SUPPORT_VFP
+	.globl	enable_vfp
+#endif
+
 
 func get_afflvl_shift
 	cmp	x0, #3
@@ -95,144 +63,6 @@
 	lsl	x0, x0, x2
 	ret
 
-	/* -----------------------------------------------------
-	 * Asynchronous exception manipulation accessors
-	 * -----------------------------------------------------
-	 */
-func enable_irq
-	msr	daifclr, #DAIF_IRQ_BIT
-	ret
-
-
-func enable_fiq
-	msr	daifclr, #DAIF_FIQ_BIT
-	ret
-
-
-func enable_serror
-	msr	daifclr, #DAIF_ABT_BIT
-	ret
-
-
-func enable_debug_exceptions
-	msr	daifclr, #DAIF_DBG_BIT
-	ret
-
-
-func disable_irq
-	msr	daifset, #DAIF_IRQ_BIT
-	ret
-
-
-func disable_fiq
-	msr	daifset, #DAIF_FIQ_BIT
-	ret
-
-
-func disable_serror
-	msr	daifset, #DAIF_ABT_BIT
-	ret
-
-
-func disable_debug_exceptions
-	msr	daifset, #DAIF_DBG_BIT
-	ret
-
-
-func read_daif
-	mrs	x0, daif
-	ret
-
-
-func write_daif
-	msr	daif, x0
-	ret
-
-
-func read_spsr_el1
-	mrs	x0, spsr_el1
-	ret
-
-
-func read_spsr_el2
-	mrs	x0, spsr_el2
-	ret
-
-
-func read_spsr_el3
-	mrs	x0, spsr_el3
-	ret
-
-
-func write_spsr_el1
-	msr	spsr_el1, x0
-	ret
-
-
-func write_spsr_el2
-	msr	spsr_el2, x0
-	ret
-
-
-func write_spsr_el3
-	msr	spsr_el3, x0
-	ret
-
-
-func read_elr_el1
-	mrs	x0, elr_el1
-	ret
-
-
-func read_elr_el2
-	mrs	x0, elr_el2
-	ret
-
-
-func read_elr_el3
-	mrs	x0, elr_el3
-	ret
-
-
-func write_elr_el1
-	msr	elr_el1, x0
-	ret
-
-
-func write_elr_el2
-	msr	elr_el2, x0
-	ret
-
-
-func write_elr_el3
-	msr	elr_el3, x0
-	ret
-
-
-func dsb
-	dsb	sy
-	ret
-
-
-func isb
-	isb
-	ret
-
-
-func sev
-	sev
-	ret
-
-
-func wfe
-	wfe
-	ret
-
-
-func wfi
-	wfi
-	ret
-
 
 func eret
 	eret
@@ -315,3 +145,20 @@
 	mov	x1, #(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
 	b	do_disable_mmu
 
+
+/* ---------------------------------------------------------------------------
+ * Enable the use of VFP at EL3
+ * ---------------------------------------------------------------------------
+ */
+#if SUPPORT_VFP
+func enable_vfp
+	mrs	x0, cpacr_el1
+	orr	x0, x0, #CPACR_VFP_BITS
+	msr	cpacr_el1, x0
+	mrs	x0, cptr_el3
+	mov	x1, #AARCH64_CPTR_TFP
+	bic	x0, x0, x1
+	msr	cptr_el3, x0
+	isb
+	ret
+#endif
diff --git a/lib/aarch64/sysreg_helpers.S b/lib/aarch64/sysreg_helpers.S
deleted file mode 100644
index 925e93e..0000000
--- a/lib/aarch64/sysreg_helpers.S
+++ /dev/null
@@ -1,782 +0,0 @@
-/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <arch.h>
-#include <asm_macros.S>
-
-	.globl	read_vbar_el1
-	.globl	read_vbar_el2
-	.globl	read_vbar_el3
-	.globl	write_vbar_el1
-	.globl	write_vbar_el2
-	.globl	write_vbar_el3
-
-	.globl	read_sctlr_el1
-	.globl	read_sctlr_el2
-	.globl	read_sctlr_el3
-	.globl	write_sctlr_el1
-	.globl	write_sctlr_el2
-	.globl	write_sctlr_el3
-
-	.globl	read_actlr_el1
-	.globl	read_actlr_el2
-	.globl	read_actlr_el3
-	.globl	write_actlr_el1
-	.globl	write_actlr_el2
-	.globl	write_actlr_el3
-
-	.globl	read_esr_el1
-	.globl	read_esr_el2
-	.globl	read_esr_el3
-	.globl	write_esr_el1
-	.globl	write_esr_el2
-	.globl	write_esr_el3
-
-	.globl	read_afsr0_el1
-	.globl	read_afsr0_el2
-	.globl	read_afsr0_el3
-	.globl	write_afsr0_el1
-	.globl	write_afsr0_el2
-	.globl	write_afsr0_el3
-
-	.globl	read_afsr1_el1
-	.globl	read_afsr1_el2
-	.globl	read_afsr1_el3
-	.globl	write_afsr1_el1
-	.globl	write_afsr1_el2
-	.globl	write_afsr1_el3
-
-	.globl	read_far_el1
-	.globl	read_far_el2
-	.globl	read_far_el3
-	.globl	write_far_el1
-	.globl	write_far_el2
-	.globl	write_far_el3
-
-	.globl	read_mair_el1
-	.globl	read_mair_el2
-	.globl	read_mair_el3
-	.globl	write_mair_el1
-	.globl	write_mair_el2
-	.globl	write_mair_el3
-
-	.globl	read_amair_el1
-	.globl	read_amair_el2
-	.globl	read_amair_el3
-	.globl	write_amair_el1
-	.globl	write_amair_el2
-	.globl	write_amair_el3
-
-	.globl	read_rvbar_el1
-	.globl	read_rvbar_el2
-	.globl	read_rvbar_el3
-
-	.globl	read_rmr_el1
-	.globl	read_rmr_el2
-	.globl	read_rmr_el3
-	.globl	write_rmr_el1
-	.globl	write_rmr_el2
-	.globl	write_rmr_el3
-
-	.globl	read_tcr_el1
-	.globl	read_tcr_el2
-	.globl	read_tcr_el3
-	.globl	write_tcr_el1
-	.globl	write_tcr_el2
-	.globl	write_tcr_el3
-
-	.globl	read_cptr_el2
-	.globl	read_cptr_el3
-	.globl	write_cptr_el2
-	.globl	write_cptr_el3
-
-	.globl	read_ttbr0_el1
-	.globl	read_ttbr0_el2
-	.globl	read_ttbr0_el3
-	.globl	write_ttbr0_el1
-	.globl	write_ttbr0_el2
-	.globl	write_ttbr0_el3
-
-	.globl	read_ttbr1_el1
-	.globl	write_ttbr1_el1
-
-	.globl	read_cpacr
-	.globl	write_cpacr
-
-	.globl	read_cntfrq
-	.globl	write_cntfrq
-
-	.globl	read_cpuectlr
-	.globl	write_cpuectlr
-
-	.globl	read_cnthctl_el2
-	.globl	write_cnthctl_el2
-
-	.globl	read_cntfrq_el0
-	.globl	write_cntfrq_el0
-
-	.globl	read_cntps_ctl_el1
-	.globl	write_cntps_ctl_el1
-
-	.globl	read_cntps_cval_el1
-	.globl	write_cntps_cval_el1
-
-	.globl	read_cntps_tval_el1
-	.globl	write_cntps_tval_el1
-
-	.globl	read_scr
-	.globl	write_scr
-
-	.globl	read_hcr
-	.globl	write_hcr
-
-	.globl	read_midr
-	.globl	read_mpidr
-
-	.globl	read_cntpct_el0
-	.globl	read_current_el
-	.globl	read_id_pfr1_el1
-	.globl	read_id_aa64pfr0_el1
-
-	.globl	write_tpidr_el3
-	.globl	read_tpidr_el3
-
-#if SUPPORT_VFP
-	.globl	enable_vfp
-#endif
-
-
-func read_current_el
-	mrs	x0, CurrentEl
-	ret
-
-
-func read_id_pfr1_el1
-	mrs	x0, id_pfr1_el1
-	ret
-
-
-func read_id_aa64pfr0_el1
-	mrs	x0, id_aa64pfr0_el1
-	ret
-
-
-	/* -----------------------------------------------------
-	 * VBAR accessors
-	 * -----------------------------------------------------
-	 */
-func read_vbar_el1
-	mrs	x0, vbar_el1
-	ret
-
-
-func read_vbar_el2
-	mrs	x0, vbar_el2
-	ret
-
-
-func read_vbar_el3
-	mrs	x0, vbar_el3
-	ret
-
-
-func write_vbar_el1
-	msr	vbar_el1, x0
-	ret
-
-
-func write_vbar_el2
-	msr	vbar_el2, x0
-	ret
-
-
-func write_vbar_el3
-	msr	vbar_el3, x0
-	ret
-
-
-	/* -----------------------------------------------------
-	 * AFSR0 accessors
-	 * -----------------------------------------------------
-	 */
-func read_afsr0_el1
-	mrs	x0, afsr0_el1
-	ret
-
-
-func read_afsr0_el2
-	mrs	x0, afsr0_el2
-	ret
-
-
-func read_afsr0_el3
-	mrs	x0, afsr0_el3
-	ret
-
-
-func write_afsr0_el1
-	msr	afsr0_el1, x0
-	ret
-
-
-func write_afsr0_el2
-	msr	afsr0_el2, x0
-	ret
-
-
-func write_afsr0_el3
-	msr	afsr0_el3, x0
-	ret
-
-
-	/* -----------------------------------------------------
-	 * FAR accessors
-	 * -----------------------------------------------------
-	 */
-func read_far_el1
-	mrs	x0, far_el1
-	ret
-
-
-func read_far_el2
-	mrs	x0, far_el2
-	ret
-
-
-func read_far_el3
-	mrs	x0, far_el3
-	ret
-
-
-func write_far_el1
-	msr	far_el1, x0
-	ret
-
-
-func write_far_el2
-	msr	far_el2, x0
-	ret
-
-
-func write_far_el3
-	msr	far_el3, x0
-	ret
-
-
-	/* -----------------------------------------------------
-	 * MAIR accessors
-	 * -----------------------------------------------------
-	 */
-func read_mair_el1
-	mrs	x0, mair_el1
-	ret
-
-
-func read_mair_el2
-	mrs	x0, mair_el2
-	ret
-
-
-func read_mair_el3
-	mrs	x0, mair_el3
-	ret
-
-
-func write_mair_el1
-	msr	mair_el1, x0
-	ret
-
-
-func write_mair_el2
-	msr	mair_el2, x0
-	ret
-
-
-func write_mair_el3
-	msr	mair_el3, x0
-	ret
-
-
-	/* -----------------------------------------------------
-	 * AMAIR accessors
-	 * -----------------------------------------------------
-	 */
-func read_amair_el1
-	mrs	x0, amair_el1
-	ret
-
-
-func read_amair_el2
-	mrs	x0, amair_el2
-	ret
-
-
-func read_amair_el3
-	mrs	x0, amair_el3
-	ret
-
-
-func write_amair_el1
-	msr	amair_el1, x0
-	ret
-
-
-func write_amair_el2
-	msr	amair_el2, x0
-	ret
-
-
-func write_amair_el3
-	msr	amair_el3, x0
-	ret
-
-
-	/* -----------------------------------------------------
-	 * RVBAR accessors
-	 * -----------------------------------------------------
-	 */
-func read_rvbar_el1
-	mrs	x0, rvbar_el1
-	ret
-
-
-func read_rvbar_el2
-	mrs	x0, rvbar_el2
-	ret
-
-
-func read_rvbar_el3
-	mrs	x0, rvbar_el3
-	ret
-
-
-	/* -----------------------------------------------------
-	 * RMR accessors
-	 * -----------------------------------------------------
-	 */
-func read_rmr_el1
-	mrs	x0, rmr_el1
-	ret
-
-
-func read_rmr_el2
-	mrs	x0, rmr_el2
-	ret
-
-
-func read_rmr_el3
-	mrs	x0, rmr_el3
-	ret
-
-
-func write_rmr_el1
-	msr	rmr_el1, x0
-	ret
-
-
-func write_rmr_el2
-	msr	rmr_el2, x0
-	ret
-
-
-func write_rmr_el3
-	msr	rmr_el3, x0
-	ret
-
-
-	/* -----------------------------------------------------
-	 * AFSR1 accessors
-	 * -----------------------------------------------------
-	 */
-func read_afsr1_el1
-	mrs	x0, afsr1_el1
-	ret
-
-
-func read_afsr1_el2
-	mrs	x0, afsr1_el2
-	ret
-
-
-func read_afsr1_el3
-	mrs	x0, afsr1_el3
-	ret
-
-
-func write_afsr1_el1
-	msr	afsr1_el1, x0
-	ret
-
-
-func write_afsr1_el2
-	msr	afsr1_el2, x0
-	ret
-
-
-func write_afsr1_el3
-	msr	afsr1_el3, x0
-	ret
-
-
-	/* -----------------------------------------------------
-	 * SCTLR accessors
-	 * -----------------------------------------------------
-	 */
-func read_sctlr_el1
-	mrs	x0, sctlr_el1
-	ret
-
-
-func read_sctlr_el2
-	mrs	x0, sctlr_el2
-	ret
-
-
-func read_sctlr_el3
-	mrs	x0, sctlr_el3
-	ret
-
-
-func write_sctlr_el1
-	msr	sctlr_el1, x0
-	ret
-
-
-func write_sctlr_el2
-	msr	sctlr_el2, x0
-	ret
-
-
-func write_sctlr_el3
-	msr	sctlr_el3, x0
-	ret
-
-
-	/* -----------------------------------------------------
-	 * ACTLR accessors
-	 * -----------------------------------------------------
-	 */
-func read_actlr_el1
-	mrs	x0, actlr_el1
-	ret
-
-
-func read_actlr_el2
-	mrs	x0, actlr_el2
-	ret
-
-
-func read_actlr_el3
-	mrs	x0, actlr_el3
-	ret
-
-
-func write_actlr_el1
-	msr	actlr_el1, x0
-	ret
-
-
-func write_actlr_el2
-	msr	actlr_el2, x0
-	ret
-
-
-func write_actlr_el3
-	msr	actlr_el3, x0
-	ret
-
-
-	/* -----------------------------------------------------
-	 * ESR accessors
-	 * -----------------------------------------------------
-	 */
-func read_esr_el1
-	mrs	x0, esr_el1
-	ret
-
-
-func read_esr_el2
-	mrs	x0, esr_el2
-	ret
-
-
-func read_esr_el3
-	mrs	x0, esr_el3
-	ret
-
-
-func write_esr_el1
-	msr	esr_el1, x0
-	ret
-
-
-func write_esr_el2
-	msr	esr_el2, x0
-	ret
-
-
-func write_esr_el3
-	msr	esr_el3, x0
-	ret
-
-
-	/* -----------------------------------------------------
-	 * TCR accessors
-	 * -----------------------------------------------------
-	 */
-func read_tcr_el1
-	mrs	x0, tcr_el1
-	ret
-
-
-func read_tcr_el2
-	mrs	x0, tcr_el2
-	ret
-
-
-func read_tcr_el3
-	mrs	x0, tcr_el3
-	ret
-
-
-func write_tcr_el1
-	msr	tcr_el1, x0
-	ret
-
-
-func write_tcr_el2
-	msr	tcr_el2, x0
-	ret
-
-
-func write_tcr_el3
-	msr	tcr_el3, x0
-	ret
-
-
-	/* -----------------------------------------------------
-	 * CPTR accessors
-	 * -----------------------------------------------------
-	 */
-func read_cptr_el2
-	mrs	x0, cptr_el2
-	ret
-
-
-func read_cptr_el3
-	mrs	x0, cptr_el3
-	ret
-
-
-func write_cptr_el2
-	msr	cptr_el2, x0
-	ret
-
-
-func write_cptr_el3
-	msr	cptr_el3, x0
-	ret
-
-
-	/* -----------------------------------------------------
-	 * TTBR0 accessors
-	 * -----------------------------------------------------
-	 */
-func read_ttbr0_el1
-	mrs	x0, ttbr0_el1
-	ret
-
-
-func read_ttbr0_el2
-	mrs	x0, ttbr0_el2
-	ret
-
-
-func read_ttbr0_el3
-	mrs	x0, ttbr0_el3
-	ret
-
-
-func write_ttbr0_el1
-	msr	ttbr0_el1, x0
-	ret
-
-
-func write_ttbr0_el2
-	msr	ttbr0_el2, x0
-	ret
-
-
-func write_ttbr0_el3
-	msr	ttbr0_el3, x0
-	ret
-
-
-	/* -----------------------------------------------------
-	 * TTBR1 accessors
-	 * -----------------------------------------------------
-	 */
-func read_ttbr1_el1
-	mrs	x0, ttbr1_el1
-	ret
-
-
-func write_ttbr1_el1
-	msr	ttbr1_el1, x0
-	ret
-
-
-func read_hcr
-	mrs	x0, hcr_el2
-	ret
-
-
-func write_hcr
-	msr	hcr_el2, x0
-	ret
-
-
-func read_cpacr
-	mrs	x0, cpacr_el1
-	ret
-
-
-func write_cpacr
-	msr	cpacr_el1, x0
-	ret
-
-
-func read_cntfrq_el0
-	mrs	x0, cntfrq_el0
-	ret
-
-
-func write_cntfrq_el0
-	msr	cntfrq_el0, x0
-	ret
-
-func read_cntps_ctl_el1
-	mrs	x0, cntps_ctl_el1
-	ret
-
-func write_cntps_ctl_el1
-	msr	cntps_ctl_el1, x0
-	ret
-
-func read_cntps_cval_el1
-	mrs	x0, cntps_cval_el1
-	ret
-
-func write_cntps_cval_el1
-	msr	cntps_cval_el1, x0
-	ret
-
-func read_cntps_tval_el1
-	mrs	x0, cntps_tval_el1
-	ret
-
-func write_cntps_tval_el1
-	msr	cntps_tval_el1, x0
-	ret
-
-func read_cntpct_el0
-	mrs	x0, cntpct_el0
-	ret
-
-func read_cpuectlr
-	mrs	x0, CPUECTLR_EL1
-	ret
-
-
-func write_cpuectlr
-	msr	CPUECTLR_EL1, x0
-	ret
-
-
-func read_cnthctl_el2
-	mrs	x0, cnthctl_el2
-	ret
-
-
-func write_cnthctl_el2
-	msr	cnthctl_el2, x0
-	ret
-
-
-func read_cntfrq
-	mrs	x0, cntfrq_el0
-	ret
-
-
-func write_cntfrq
-	msr	cntfrq_el0, x0
-	ret
-
-
-func write_scr
-	msr	scr_el3, x0
-	ret
-
-
-func read_scr
-	mrs	x0, scr_el3
-	ret
-
-
-func read_midr
-	mrs	x0, midr_el1
-	ret
-
-
-func read_mpidr
-	mrs	x0, mpidr_el1
-	ret
-
-func write_tpidr_el3
-	msr	tpidr_el3, x0
-	ret
-
-func read_tpidr_el3
-	mrs	x0, tpidr_el3
-	ret
-
-#if SUPPORT_VFP
-func enable_vfp
-	mrs	x0, cpacr_el1
-	orr	x0, x0, #CPACR_VFP_BITS
-	msr	cpacr_el1, x0
-	mrs	x0, cptr_el3
-	mov	x1, #AARCH64_CPTR_TFP
-	bic	x0, x0, x1
-	msr	cptr_el3, x0
-	isb
-	ret
-
-#endif
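
The file removed above (lib/aarch64/sysreg_helpers.S, also dropped from the
platform makefile later in this patch) consisted entirely of one-instruction
mrs/msr wrappers. The same read_*/write_* interface can be generated as C
inline accessors so that each access compiles down to a single instruction at
the call site. The sketch below shows that general pattern only; the macro
name and the register list are illustrative, not taken from this tree.

/*
 * Illustrative only: macro-generated inline accessors standing in for the
 * deleted per-register assembler functions (GCC extended asm assumed).
 */
#define DEFINE_SYSREG_RW_FUNCS(_name)					\
static inline unsigned long read_##_name(void)				\
{									\
	unsigned long v;						\
	__asm__ volatile ("mrs %0, " #_name : "=r" (v));		\
	return v;							\
}									\
static inline void write_##_name(unsigned long v)			\
{									\
	__asm__ volatile ("msr " #_name ", %0" : : "r" (v));		\
}

DEFINE_SYSREG_RW_FUNCS(vbar_el3)	/* read_vbar_el3() / write_vbar_el3() */
DEFINE_SYSREG_RW_FUNCS(sctlr_el3)	/* read_sctlr_el3() / write_sctlr_el3() */
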
diff --git a/lib/aarch64/xlat_tables.c b/lib/aarch64/xlat_tables.c
index 29b81db..1b99cc8 100644
--- a/lib/aarch64/xlat_tables.c
+++ b/lib/aarch64/xlat_tables.c
@@ -72,26 +72,29 @@
 	debug_print("mmap:\n");
 	mmap_region_t *mm = mmap;
 	while (mm->size) {
-		debug_print(" %010lx %10lx %x\n", mm->base, mm->size, mm->attr);
+		debug_print(" %010lx %010lx %10lx %x\n", mm->base_va,
+					mm->base_pa, mm->size, mm->attr);
 		++mm;
 	};
 	debug_print("\n");
 #endif
 }
 
-void mmap_add_region(unsigned long base, unsigned long size, unsigned attr)
+void mmap_add_region(unsigned long base_pa, unsigned long base_va,
+			unsigned long size, unsigned attr)
 {
 	mmap_region_t *mm = mmap;
 	mmap_region_t *mm_last = mm + sizeof(mmap) / sizeof(mmap[0]) - 1;
 
-	assert(IS_PAGE_ALIGNED(base));
+	assert(IS_PAGE_ALIGNED(base_pa));
+	assert(IS_PAGE_ALIGNED(base_va));
 	assert(IS_PAGE_ALIGNED(size));
 
 	if (!size)
 		return;
 
 	/* Find correct place in mmap to insert new region */
-	while (mm->base < base && mm->size)
+	while (mm->base_va < base_va && mm->size)
 		++mm;
 
 	/* Make room for new region by moving other regions up by one place */
@@ -100,7 +103,8 @@
 	/* Check we haven't lost the empty sentinel from the end of the array */
 	assert(mm_last->size == 0);
 
-	mm->base = base;
+	mm->base_pa = base_pa;
+	mm->base_va = base_va;
 	mm->size = size;
 	mm->attr = attr;
 }
@@ -108,15 +112,15 @@
 void mmap_add(const mmap_region_t *mm)
 {
 	while (mm->size) {
-		mmap_add_region(mm->base, mm->size, mm->attr);
+		mmap_add_region(mm->base_pa, mm->base_va, mm->size, mm->attr);
 		++mm;
 	}
 }
 
-static unsigned long mmap_desc(unsigned attr, unsigned long addr,
+static unsigned long mmap_desc(unsigned attr, unsigned long addr_pa,
 					unsigned level)
 {
-	unsigned long desc = addr;
+	unsigned long desc = addr_pa;
 
 	desc |= level == 3 ? TABLE_DESC : BLOCK_DESC;
 
@@ -142,7 +146,7 @@
 	return desc;
 }
 
-static int mmap_region_attr(mmap_region_t *mm, unsigned long base,
+static int mmap_region_attr(mmap_region_t *mm, unsigned long base_va,
 					unsigned long size)
 {
 	int attr = mm->attr;
@@ -153,10 +157,10 @@
 		if (!mm->size)
 			return attr; /* Reached end of list */
 
-		if (mm->base >= base + size)
+		if (mm->base_va >= base_va + size)
 			return attr; /* Next region is after area so end */
 
-		if (mm->base + mm->size <= base)
+		if (mm->base_va + mm->size <= base_va)
 			continue; /* Next region has already been overtaken */
 
 		if ((mm->attr & attr) == attr)
@@ -164,12 +168,14 @@
 
 		attr &= mm->attr;
 
-		if (mm->base > base || mm->base + mm->size < base + size)
+		if (mm->base_va > base_va ||
+			mm->base_va + mm->size < base_va + size)
 			return -1; /* Region doesn't fully cover our area */
 	}
 }
 
-static mmap_region_t *init_xlation_table(mmap_region_t *mm, unsigned long base,
+static mmap_region_t *init_xlation_table(mmap_region_t *mm,
+					unsigned long base_va,
 					unsigned long *table, unsigned level)
 {
 	unsigned level_size_shift = L1_XLAT_ADDRESS_SHIFT - (level - 1) *
@@ -184,23 +190,26 @@
 	do  {
 		unsigned long desc = UNSET_DESC;
 
-		if (mm->base + mm->size <= base) {
+		if (mm->base_va + mm->size <= base_va) {
 			/* Area now after the region so skip it */
 			++mm;
 			continue;
 		}
 
-		debug_print("      %010lx %8lx " + 6 - 2 * level, base, level_size);
+		debug_print("      %010lx %8lx " + 6 - 2 * level, base_va,
+				level_size);
 
-		if (mm->base >= base + level_size) {
+		if (mm->base_va >= base_va + level_size) {
 			/* Next region is after area so nothing to map yet */
 			desc = INVALID_DESC;
-		} else if (mm->base <= base &&
-				mm->base + mm->size >= base + level_size) {
+		} else if (mm->base_va <= base_va && mm->base_va + mm->size >=
+				base_va + level_size) {
 			/* Next region covers all of area */
-			int attr = mmap_region_attr(mm, base, level_size);
+			int attr = mmap_region_attr(mm, base_va, level_size);
 			if (attr >= 0)
-				desc = mmap_desc(attr, base, level);
+				desc = mmap_desc(attr,
+					base_va - mm->base_va + mm->base_pa,
+					level);
 		}
 		/* else Next region only partially covers area, so need */
 
@@ -211,14 +220,15 @@
 			desc = TABLE_DESC | (unsigned long)new_table;
 
 			/* Recurse to fill in new table */
-			mm = init_xlation_table(mm, base, new_table, level+1);
+			mm = init_xlation_table(mm, base_va,
+						new_table, level+1);
 		}
 
 		debug_print("\n");
 
 		*table++ = desc;
-		base += level_size;
-	} while (mm->size && (base & level_index_mask));
+		base_va += level_size;
+	} while (mm->size && (base_va & level_index_mask));
 
 	return mm;
 }
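
The xlat_tables.c changes above give each mmap region separate base_pa and
base_va fields, so a region's virtual address no longer has to equal its
physical address: init_xlation_table() walks virtual addresses and
mmap_desc() is handed base_va - mm->base_va + mm->base_pa as the output
address. A minimal usage sketch follows; the addresses in the first call are
made up for illustration, while DEVICE0_BASE/DEVICE0_SIZE and the MT_*
attributes are the ones used elsewhere in this patch.

/* Non-identity mapping: a hypothetical peripheral at PA 0x80000000 made
 * visible at VA 0x00400000 (all values page aligned, as the asserts require). */
mmap_add_region(0x80000000UL,		/* base_pa */
		0x00400000UL,		/* base_va */
		0x00200000UL,		/* size */
		MT_DEVICE | MT_RW | MT_SECURE);

/* Identity mapping: pass the same base twice, as the updated FVP platform
 * code below does. */
mmap_add_region(DEVICE0_BASE, DEVICE0_BASE, DEVICE0_SIZE,
		MT_DEVICE | MT_RW | MT_SECURE);
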
diff --git a/plat/fvp/aarch64/fvp_common.c b/plat/fvp/aarch64/fvp_common.c
index 3a07844..41234cb 100644
--- a/plat/fvp/aarch64/fvp_common.c
+++ b/plat/fvp/aarch64/fvp_common.c
@@ -54,17 +54,27 @@
  * configure_mmu_elx() will give the available subset of that,
  */
 const mmap_region_t fvp_mmap[] = {
-	{ TZROM_BASE,	TZROM_SIZE,	MT_MEMORY | MT_RO | MT_SECURE },
-	{ TZDRAM_BASE,	TZDRAM_SIZE,	MT_MEMORY | MT_RW | MT_SECURE },
-	{ FLASH0_BASE,	FLASH0_SIZE,	MT_MEMORY | MT_RO | MT_SECURE },
-	{ FLASH1_BASE,	FLASH1_SIZE,	MT_MEMORY | MT_RO | MT_SECURE },
-	{ VRAM_BASE,	VRAM_SIZE,	MT_MEMORY | MT_RW | MT_SECURE },
-	{ DEVICE0_BASE,	DEVICE0_SIZE,	MT_DEVICE | MT_RW | MT_SECURE },
-	{ NSRAM_BASE,	NSRAM_SIZE,	MT_MEMORY | MT_RW | MT_NS },
-	{ DEVICE1_BASE,	DEVICE1_SIZE,	MT_DEVICE | MT_RW | MT_SECURE },
+	{ TZROM_BASE,	TZROM_BASE,	TZROM_SIZE,
+						MT_MEMORY | MT_RO | MT_SECURE },
+	{ TZDRAM_BASE,	TZDRAM_BASE,	TZDRAM_SIZE,
+						MT_MEMORY | MT_RW | MT_SECURE },
+	{ FLASH0_BASE,	FLASH0_BASE,	FLASH0_SIZE,
+						MT_MEMORY | MT_RO | MT_SECURE },
+	{ FLASH1_BASE,	FLASH1_BASE,	FLASH1_SIZE,
+						MT_MEMORY | MT_RO | MT_SECURE },
+	{ VRAM_BASE,	VRAM_BASE,	VRAM_SIZE,
+						MT_MEMORY | MT_RW | MT_SECURE },
+	{ DEVICE0_BASE,	DEVICE0_BASE,	DEVICE0_SIZE,
+						MT_DEVICE | MT_RW | MT_SECURE },
+	{ NSRAM_BASE,	NSRAM_BASE,	NSRAM_SIZE,
+						MT_MEMORY | MT_RW | MT_NS },
+	{ DEVICE1_BASE,	DEVICE1_BASE,	DEVICE1_SIZE,
+						MT_DEVICE | MT_RW | MT_SECURE },
 	/* 2nd GB as device for now...*/
-	{ 0x40000000,	0x40000000,	MT_DEVICE | MT_RW | MT_SECURE },
-	{ DRAM1_BASE,	DRAM1_SIZE,	MT_MEMORY | MT_RW | MT_NS },
+	{ 0x40000000,	0x40000000,	0x40000000,
+						MT_DEVICE | MT_RW | MT_SECURE },
+	{ DRAM1_BASE,	DRAM1_BASE,	DRAM1_SIZE,
+						MT_MEMORY | MT_RW | MT_NS },
 	{0}
 };
 
@@ -73,19 +83,21 @@
  * the platform memory map & initialize the mmu, for the given exception level
  ******************************************************************************/
 #define DEFINE_CONFIGURE_MMU_EL(_el)					\
-	void fvp_configure_mmu_el##_el(unsigned long total_base,		\
+	void fvp_configure_mmu_el##_el(unsigned long total_base,	\
 				   unsigned long total_size,		\
 				   unsigned long ro_start,		\
 				   unsigned long ro_limit,		\
 				   unsigned long coh_start,		\
 				   unsigned long coh_limit)		\
 	{								\
-		mmap_add_region(total_base,				\
+		mmap_add_region(total_base, total_base,			\
 				total_size,				\
 				MT_MEMORY | MT_RW | MT_SECURE);		\
-		mmap_add_region(ro_start, ro_limit - ro_start,		\
+		mmap_add_region(ro_start, ro_start,			\
+				ro_limit - ro_start,			\
 				MT_MEMORY | MT_RO | MT_SECURE);		\
-		mmap_add_region(coh_start, coh_limit - coh_start,	\
+		mmap_add_region(coh_start, coh_start,			\
+				coh_limit - coh_start,			\
 				MT_DEVICE | MT_RW | MT_SECURE);		\
 		mmap_add(fvp_mmap);					\
 		init_xlat_tables();					\
diff --git a/plat/fvp/bl2_fvp_setup.c b/plat/fvp/bl2_fvp_setup.c
index 72580f9..de9c6a4 100644
--- a/plat/fvp/bl2_fvp_setup.c
+++ b/plat/fvp/bl2_fvp_setup.c
@@ -67,9 +67,6 @@
 #define BL2_COHERENT_RAM_BASE (unsigned long)(&__COHERENT_RAM_START__)
 #define BL2_COHERENT_RAM_LIMIT (unsigned long)(&__COHERENT_RAM_END__)
 
-/* Pointer to memory visible to both BL2 and BL31 for passing data */
-extern unsigned char **bl2_el_change_mem_ptr;
-
 /* Data structure which holds the extents of the trusted SRAM for BL2 */
 static meminfo_t bl2_tzram_layout
 __attribute__ ((aligned(PLATFORM_CACHE_LINE_SIZE),
diff --git a/plat/fvp/fvp_def.h b/plat/fvp/fvp_def.h
index 04ba611..59dcc90 100644
--- a/plat/fvp/fvp_def.h
+++ b/plat/fvp/fvp_def.h
@@ -137,7 +137,7 @@
 #define SYS_LED_EC_MASK		0x1f
 
 /* V2M sysid register bits */
-#define SYS_ID_REV_SHIFT	27
+#define SYS_ID_REV_SHIFT	28
 #define SYS_ID_HBI_SHIFT	16
 #define SYS_ID_BLD_SHIFT	12
 #define SYS_ID_ARCH_SHIFT	8
diff --git a/plat/fvp/fvp_pm.c b/plat/fvp/fvp_pm.c
index d702643..03f06e7 100644
--- a/plat/fvp/fvp_pm.c
+++ b/plat/fvp/fvp_pm.c
@@ -290,7 +290,7 @@
 	int rc = PSCI_E_SUCCESS;
 	unsigned long linear_id, cpu_setup;
 	mailbox_t *fvp_mboxes;
-	unsigned int gicd_base, gicc_base, reg_val, ectlr;
+	unsigned int gicd_base, gicc_base, ectlr;
 
 	switch (afflvl) {
 
@@ -354,17 +354,6 @@
 		/* TODO: This setup is needed only after a cold boot */
 		gic_pcpu_distif_setup(gicd_base);
 
-		/* Allow access to the System counter timer module */
-		reg_val = (1 << CNTACR_RPCT_SHIFT) | (1 << CNTACR_RVCT_SHIFT);
-		reg_val |= (1 << CNTACR_RFRQ_SHIFT) | (1 << CNTACR_RVOFF_SHIFT);
-		reg_val |= (1 << CNTACR_RWVT_SHIFT) | (1 << CNTACR_RWPT_SHIFT);
-		mmio_write_32(SYS_TIMCTL_BASE + CNTACR_BASE(0), reg_val);
-		mmio_write_32(SYS_TIMCTL_BASE + CNTACR_BASE(1), reg_val);
-
-		reg_val = (1 << CNTNSAR_NS_SHIFT(0)) |
-			(1 << CNTNSAR_NS_SHIFT(1));
-		mmio_write_32(SYS_TIMCTL_BASE + CNTNSAR, reg_val);
-
 		break;
 
 	default:
diff --git a/plat/fvp/platform.mk b/plat/fvp/platform.mk
index 4cc4d1e..450529b 100644
--- a/plat/fvp/platform.mk
+++ b/plat/fvp/platform.mk
@@ -51,7 +51,6 @@
 				drivers/io/io_memmap.c				\
 				drivers/io/io_semihosting.c			\
 				lib/mmio.c					\
-				lib/aarch64/sysreg_helpers.S			\
 				lib/aarch64/xlat_tables.c			\
 				lib/semihosting/semihosting.c			\
 				lib/semihosting/aarch64/semihosting_call.S	\
@@ -72,7 +71,6 @@
 
 BL31_SOURCES		+=	drivers/arm/gic/gic_v2.c			\
 				drivers/arm/gic/gic_v3.c			\
-				drivers/arm/gic/aarch64/gic_v3_sysregs.S	\
 				drivers/arm/cci400/cci400.c			\
 				plat/common/aarch64/platform_mp_stack.S		\
 				plat/fvp/bl31_fvp_setup.c			\
diff --git a/services/spd/tspd/tspd_common.c b/services/spd/tspd/tspd_common.c
index 6b3592e..9242702 100644
--- a/services/spd/tspd/tspd_common.c
+++ b/services/spd/tspd/tspd_common.c
@@ -100,7 +100,7 @@
 	/* Associate this context with the cpu specified */
 	tsp_ctx->mpidr = mpidr;
 
-	cm_set_context(mpidr, &tsp_ctx->cpu_ctx, SECURE);
+	cm_set_context(&tsp_ctx->cpu_ctx, SECURE);
 	spsr = SPSR_64(MODE_EL1, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
 	cm_set_el3_eret_context(SECURE, entrypoint, spsr, scr);
 
@@ -122,7 +122,7 @@
 	assert(tsp_ctx->c_rt_ctx == 0);
 
 	/* Apply the Secure EL1 system register context and switch to it */
-	assert(cm_get_context(read_mpidr(), SECURE) == &tsp_ctx->cpu_ctx);
+	assert(cm_get_context(SECURE) == &tsp_ctx->cpu_ctx);
 	cm_el1_sysregs_context_restore(SECURE);
 	cm_set_next_eret_context(SECURE);
 
@@ -146,7 +146,7 @@
 void tspd_synchronous_sp_exit(tsp_context_t *tsp_ctx, uint64_t ret)
 {
 	/* Save the Secure EL1 system register context */
-	assert(cm_get_context(read_mpidr(), SECURE) == &tsp_ctx->cpu_ctx);
+	assert(cm_get_context(SECURE) == &tsp_ctx->cpu_ctx);
 	cm_el1_sysregs_context_save(SECURE);
 
 	assert(tsp_ctx->c_rt_ctx != 0);
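
The tspd_common.c hunks above, and the TSPD and PSCI files that follow, switch
the context-management API from per-MPIDR lookups to current-CPU accessors:
cm_get_context(mpidr, state) and cm_set_context(mpidr, ctx, state) become
cm_get_context(state) and cm_set_context(ctx, state), presumably backed by the
per-cpu data that init_cpu_data_ptr sets up. The sketch below shows one way
such accessors could look; the cpu_data layout, the _cpu_data() helper and the
SECURE/NON_SECURE values are assumptions for illustration, not the definitions
used by this tree.

#include <assert.h>
#include <stdint.h>

#define SECURE		0U
#define NON_SECURE	1U

/* Hypothetical per-cpu structure holding one context pointer per security
 * state; the real layout lives in cpu_data.h. */
typedef struct cpu_data {
	void *cpu_context[2];
} cpu_data_t;

/* Assumed helper returning the calling core's cpu_data_t, e.g. from the
 * pointer kept in TPIDR_EL3. */
cpu_data_t *_cpu_data(void);

static inline void *cm_get_context(uint32_t security_state)
{
	assert(security_state <= NON_SECURE);
	return _cpu_data()->cpu_context[security_state];
}

static inline void cm_set_context(void *context, uint32_t security_state)
{
	assert(security_state <= NON_SECURE);
	_cpu_data()->cpu_context[security_state] = context;
}
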
diff --git a/services/spd/tspd/tspd_main.c b/services/spd/tspd/tspd_main.c
index 1a6913a..35bc6e2 100644
--- a/services/spd/tspd/tspd_main.c
+++ b/services/spd/tspd/tspd_main.c
@@ -95,7 +95,7 @@
 
 	/* Sanity check the pointer to this cpu's context */
 	mpidr = read_mpidr();
-	assert(handle == cm_get_context(mpidr, NON_SECURE));
+	assert(handle == cm_get_context(NON_SECURE));
 
 	/* Save the non-secure context before entering the TSP */
 	cm_el1_sysregs_context_save(NON_SECURE);
@@ -103,7 +103,7 @@
 	/* Get a reference to this cpu's TSP context */
 	linear_id = platform_get_core_pos(mpidr);
 	tsp_ctx = &tspd_sp_context[linear_id];
-	assert(&tsp_ctx->cpu_ctx == cm_get_context(mpidr, SECURE));
+	assert(&tsp_ctx->cpu_ctx == cm_get_context(SECURE));
 
 	/*
 	 * Determine if the TSP was previously preempted. Its last known
@@ -275,10 +275,10 @@
 		if (ns)
 			SMC_RET1(handle, SMC_UNK);
 
-		assert(handle == cm_get_context(mpidr, SECURE));
+		assert(handle == cm_get_context(SECURE));
 		cm_el1_sysregs_context_save(SECURE);
 		/* Get a reference to the non-secure context */
-		ns_cpu_context = cm_get_context(mpidr, NON_SECURE);
+		ns_cpu_context = cm_get_context(NON_SECURE);
 		assert(ns_cpu_context);
 
 		/*
@@ -300,7 +300,7 @@
 		if (ns)
 			SMC_RET1(handle, SMC_UNK);
 
-		assert(handle == cm_get_context(mpidr, SECURE));
+		assert(handle == cm_get_context(SECURE));
 
 		/*
 		 * Restore the relevant EL3 state which was saved to service
@@ -316,7 +316,7 @@
 		}
 
 		/* Get a reference to the non-secure context */
-		ns_cpu_context = cm_get_context(mpidr, NON_SECURE);
+		ns_cpu_context = cm_get_context(NON_SECURE);
 		assert(ns_cpu_context);
 
 		/*
@@ -339,7 +339,7 @@
 		if (ns)
 			SMC_RET1(handle, SMC_UNK);
 
-		assert(handle == cm_get_context(mpidr, SECURE));
+		assert(handle == cm_get_context(SECURE));
 
 		/* Assert that standard SMC execution has been preempted */
 		assert(get_std_smc_active_flag(tsp_ctx->state));
@@ -348,7 +348,7 @@
 		cm_el1_sysregs_context_save(SECURE);
 
 		/* Get a reference to the non-secure context */
-		ns_cpu_context = cm_get_context(mpidr, NON_SECURE);
+		ns_cpu_context = cm_get_context(NON_SECURE);
 		assert(ns_cpu_context);
 
 		/* Restore non-secure state */
@@ -434,7 +434,7 @@
 			 * registers need to be preserved, save the non-secure
 			 * state and send the request to the secure payload.
 			 */
-			assert(handle == cm_get_context(mpidr, NON_SECURE));
+			assert(handle == cm_get_context(NON_SECURE));
 
 			/* Check if we are already preempted */
 			if (get_std_smc_active_flag(tsp_ctx->state))
@@ -457,7 +457,7 @@
 			 * payload. Entry into S-EL1 will take place upon exit
 			 * from this function.
 			 */
-			assert(&tsp_ctx->cpu_ctx == cm_get_context(mpidr, SECURE));
+			assert(&tsp_ctx->cpu_ctx == cm_get_context(SECURE));
 
 			/* Set appropriate entry for SMC.
 			 * We expect the TSP to manage the PSTATE.I and PSTATE.F
@@ -482,11 +482,11 @@
 			 * into the non-secure context, save the secure state
 			 * and return to the non-secure state.
 			 */
-			assert(handle == cm_get_context(mpidr, SECURE));
+			assert(handle == cm_get_context(SECURE));
 			cm_el1_sysregs_context_save(SECURE);
 
 			/* Get a reference to the non-secure context */
-			ns_cpu_context = cm_get_context(mpidr, NON_SECURE);
+			ns_cpu_context = cm_get_context(NON_SECURE);
 			assert(ns_cpu_context);
 
 			/* Restore non-secure state */
@@ -515,7 +515,7 @@
 		 * save the non-secure state and send the request to
 		 * the secure payload.
 		 */
-		assert(handle == cm_get_context(mpidr, NON_SECURE));
+		assert(handle == cm_get_context(NON_SECURE));
 
 		/* Check if we are already preempted before resume */
 		if (!get_std_smc_active_flag(tsp_ctx->state))
diff --git a/services/std_svc/psci/psci_afflvl_on.c b/services/std_svc/psci/psci_afflvl_on.c
index e3a1831..e4d8f1f 100644
--- a/services/std_svc/psci/psci_afflvl_on.c
+++ b/services/std_svc/psci/psci_afflvl_on.c
@@ -373,17 +373,6 @@
 	bl31_arch_setup();
 
 	/*
-	 * Use the more complex exception vectors to enable SPD
-	 * initialisation. SP_EL3 should point to a 'cpu_context'
-	 * structure. The calling cpu should have set the
-	 * context already
-	 */
-	assert(cm_get_context(mpidr, NON_SECURE));
-	cm_set_next_eret_context(NON_SECURE);
-	cm_init_pcpu_ptr_cache();
-	write_vbar_el3((uint64_t) runtime_exceptions);
-
-	/*
 	 * Call the cpu on finish handler registered by the Secure Payload
 	 * Dispatcher to let it do any bookkeeping. If the handler encounters an
 	 * error, it's expected to assert within
diff --git a/services/std_svc/psci/psci_afflvl_suspend.c b/services/std_svc/psci/psci_afflvl_suspend.c
index 377afdf..9934310 100644
--- a/services/std_svc/psci/psci_afflvl_suspend.c
+++ b/services/std_svc/psci/psci_afflvl_suspend.c
@@ -180,7 +180,7 @@
 	 * The EL3 state to PoC since it will be accessed after a
 	 * reset with the caches turned off
 	 */
-	saved_el3_state = get_el3state_ctx(cm_get_context(mpidr, NON_SECURE));
+	saved_el3_state = get_el3state_ctx(cm_get_context(NON_SECURE));
 	flush_dcache_range((uint64_t) saved_el3_state, sizeof(*saved_el3_state));
 
 	/* Set the secure world (EL3) re-entry point after BL1 */
@@ -491,17 +491,6 @@
 	rc = PSCI_E_SUCCESS;
 
 	/*
-	 * Use the more complex exception vectors to enable SPD
-	 * initialisation. SP_EL3 should point to a 'cpu_context'
-	 * structure. The non-secure context should have been
-	 * set on this cpu prior to suspension.
-	 */
-	assert(cm_get_context(mpidr, NON_SECURE));
-	cm_set_next_eret_context(NON_SECURE);
-	cm_init_pcpu_ptr_cache();
-	write_vbar_el3((uint64_t) runtime_exceptions);
-
-	/*
 	 * Call the cpu suspend finish handler registered by the Secure Payload
 	 * Dispatcher to let it do any bookkeeping. If the handler encounters an
 	 * error, it's expected to assert within
diff --git a/services/std_svc/psci/psci_common.c b/services/std_svc/psci/psci_common.c
index b1ee10d..3cbacd7 100644
--- a/services/std_svc/psci/psci_common.c
+++ b/services/std_svc/psci/psci_common.c
@@ -219,7 +219,6 @@
 void psci_get_ns_entry_info(unsigned int index)
 {
 	unsigned long sctlr = 0, scr, el_status, id_aa64pfr0;
-	uint64_t mpidr = read_mpidr();
 	cpu_context_t *ns_entry_context;
 	gp_regs_t *ns_entry_gpregs;
 
@@ -253,7 +252,7 @@
 		write_sctlr_el1(sctlr);
 
 	/* Fulfill the cpu_on entry reqs. as per the psci spec */
-	ns_entry_context = (cpu_context_t *) cm_get_context(mpidr, NON_SECURE);
+	ns_entry_context = (cpu_context_t *) cm_get_context(NON_SECURE);
 	assert(ns_entry_context);
 
 	/*
diff --git a/services/std_svc/psci/psci_entry.S b/services/std_svc/psci/psci_entry.S
index bc8d900..5628d79 100644
--- a/services/std_svc/psci/psci_entry.S
+++ b/services/std_svc/psci/psci_entry.S
@@ -61,12 +61,16 @@
 	adr	x22, psci_afflvl_power_on_finish
 
 	/* ---------------------------------------------
-	 * Exceptions should not occur at this point.
-	 * Set VBAR in order to handle and report any
-	 * that do occur
+	 * Initialise the pcpu cache pointer for the CPU
 	 * ---------------------------------------------
 	 */
-	adr	x0, early_exceptions
+	bl	init_cpu_data_ptr
+
+	/* ---------------------------------------------
+	 * Set the exception vectors
+	 * ---------------------------------------------
+	 */
+	adr	x0, runtime_exceptions
 	msr	vbar_el3, x0
 	isb
 
diff --git a/services/std_svc/psci/psci_setup.c b/services/std_svc/psci/psci_setup.c
index a1587b7..015beab 100644
--- a/services/std_svc/psci/psci_setup.c
+++ b/services/std_svc/psci/psci_setup.c
@@ -210,9 +210,9 @@
 		linear_id = platform_get_core_pos(mpidr);
 		assert(linear_id < PLATFORM_CORE_COUNT);
 
-		cm_set_context(mpidr,
-				(void *) &psci_ns_context[linear_id],
-				NON_SECURE);
+		cm_set_context_by_mpidr(mpidr,
+					(void *) &psci_ns_context[linear_id],
+					NON_SECURE);
 
 	}
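
Finally, psci_setup.c registers a context for a CPU identified by its MPIDR
rather than for the calling core, so it moves to cm_set_context_by_mpidr()
instead of the shortened cm_set_context(). Continuing the illustrative
declarations from the earlier sketch (the _cpu_data_by_mpidr() prototype and
lookup shown here are illustrative):

/* Sketch only: store a context pointer in another core's per-cpu data,
 * resolved from its MPIDR by a hypothetical helper. */
cpu_data_t *_cpu_data_by_mpidr(uint64_t mpidr);

void cm_set_context_by_mpidr(uint64_t mpidr, void *context,
				uint32_t security_state)
{
	assert(security_state <= NON_SECURE);
	_cpu_data_by_mpidr(mpidr)->cpu_context[security_state] = context;
}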