Merge pull request #1402 from glneo/for-upstream-uart

drivers: ti: uart: Add TI specific 16550 initialization
diff --git a/common/bl_common.c b/common/bl_common.c
index 6b979f6..af51c07 100644
--- a/common/bl_common.c
+++ b/common/bl_common.c
@@ -184,7 +184,7 @@
 #endif /* LOAD_IMAGE_V2 */
 
 /* Generic function to return the size of an image */
-size_t image_size(unsigned int image_id)
+size_t get_image_size(unsigned int image_id)
 {
 	uintptr_t dev_handle;
 	uintptr_t image_handle;
diff --git a/common/tf_printf.c b/common/tf_printf.c
index d403983..9d8333a 100644
--- a/common/tf_printf.c
+++ b/common/tf_printf.c
@@ -15,13 +15,13 @@
  * The tf_printf implementation for all BL stages
  ***********************************************************/
 
-#define get_num_va_args(args, lcount) \
-	(((lcount) > 1) ? va_arg(args, long long int) :	\
-	((lcount) ? va_arg(args, long int) : va_arg(args, int)))
+#define get_num_va_args(_args, _lcount) \
+	(((_lcount) > 1) ? va_arg(_args, long long int) :	\
+	((_lcount) ? va_arg(_args, long int) : va_arg(_args, int)))
 
-#define get_unum_va_args(args, lcount) \
-	(((lcount) > 1) ? va_arg(args, unsigned long long int) :	\
-	((lcount) ? va_arg(args, unsigned long int) : va_arg(args, unsigned int)))
+#define get_unum_va_args(_args, _lcount) \
+	(((_lcount) > 1) ? va_arg(_args, unsigned long long int) :	\
+	((_lcount) ? va_arg(_args, unsigned long int) : va_arg(_args, unsigned int)))
 
 void tf_string_print(const char *str)
 {
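
The renamed va_arg macros above encode the usual printf length-modifier rule: a count of two `l` modifiers fetches the argument as `long long`, one fetches `long`, and none fetches `int`. A minimal standalone sketch of the same selection pattern (the `lcount` plumbing here is illustrative, not the tf_printf parser itself):

```c
#include <stdarg.h>
#include <stdio.h>

/* Same selection rule as get_num_va_args(): _lcount counts 'l' modifiers */
#define get_num_va_args(_args, _lcount) \
	(((_lcount) > 1) ? va_arg(_args, long long int) : \
	((_lcount) ? va_arg(_args, long int) : va_arg(_args, int)))

static long long first_arg(int lcount, ...)
{
	va_list args;
	long long val;

	va_start(args, lcount);
	val = get_num_va_args(args, lcount);	/* fetched at the right width */
	va_end(args);
	return val;
}

int main(void)
{
	printf("%lld\n", first_arg(0, 42));		/* read as int */
	printf("%lld\n", first_arg(2, 1LL << 40));	/* read as long long */
	return 0;
}
```
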
diff --git a/docs/firmware-design.rst b/docs/firmware-design.rst
index 51f5b42..e3500c2 100644
--- a/docs/firmware-design.rst
+++ b/docs/firmware-design.rst
@@ -516,8 +516,8 @@
 in memory and changing the address where the system jumps at reset.
 For example:
 
-	-C cluster0.cpu0.RVBAR=0x4020000
-	--data cluster0.cpu0=bl2.bin@0x4020000
+	-C cluster0.cpu0.RVBAR=0x4022000
+	--data cluster0.cpu0=bl2.bin@0x4022000
 
 With this configuration, FVP is like a platform of the first case,
 where the Boot ROM always jumps to the same address. For simplification,
diff --git a/drivers/arm/gic/v3/gicv3_helpers.c b/drivers/arm/gic/v3/gicv3_helpers.c
index 020ec1b..2ea8c72 100644
--- a/drivers/arm/gic/v3/gicv3_helpers.c
+++ b/drivers/arm/gic/v3/gicv3_helpers.c
@@ -342,7 +342,7 @@
 /*******************************************************************************
  * Helper function to configure the default attributes of SPIs.
  ******************************************************************************/
-void gicv3_spis_configure_defaults(uintptr_t gicd_base)
+void gicv3_spis_config_defaults(uintptr_t gicd_base)
 {
 	unsigned int index, num_ints;
 
@@ -375,7 +375,7 @@
 /*******************************************************************************
  * Helper function to configure secure G0 and G1S SPIs.
  ******************************************************************************/
-void gicv3_secure_spis_configure(uintptr_t gicd_base,
+void gicv3_secure_spis_config(uintptr_t gicd_base,
 				     unsigned int num_ints,
 				     const unsigned int *sec_intr_list,
 				     unsigned int int_grp)
@@ -423,7 +423,7 @@
 /*******************************************************************************
  * Helper function to configure properties of secure SPIs
  ******************************************************************************/
-unsigned int gicv3_secure_spis_configure_props(uintptr_t gicd_base,
+unsigned int gicv3_secure_spis_config_props(uintptr_t gicd_base,
 		const interrupt_prop_t *interrupt_props,
 		unsigned int interrupt_props_num)
 {
@@ -478,7 +478,7 @@
 /*******************************************************************************
  * Helper function to configure the default attributes of SPIs.
  ******************************************************************************/
-void gicv3_ppi_sgi_configure_defaults(uintptr_t gicr_base)
+void gicv3_ppi_sgi_config_defaults(uintptr_t gicr_base)
 {
 	unsigned int index;
 
@@ -507,7 +507,7 @@
 /*******************************************************************************
  * Helper function to configure secure G0 and G1S SPIs.
  ******************************************************************************/
-void gicv3_secure_ppi_sgi_configure(uintptr_t gicr_base,
+void gicv3_secure_ppi_sgi_config(uintptr_t gicr_base,
 					unsigned int num_ints,
 					const unsigned int *sec_intr_list,
 					unsigned int int_grp)
@@ -546,7 +546,7 @@
 /*******************************************************************************
  * Helper function to configure properties of secure G0 and G1S PPIs and SGIs.
  ******************************************************************************/
-unsigned int gicv3_secure_ppi_sgi_configure_props(uintptr_t gicr_base,
+unsigned int gicv3_secure_ppi_sgi_config_props(uintptr_t gicr_base,
 		const interrupt_prop_t *interrupt_props,
 		unsigned int interrupt_props_num)
 {
diff --git a/drivers/arm/gic/v3/gicv3_main.c b/drivers/arm/gic/v3/gicv3_main.c
index 82f43d0..83d030a 100644
--- a/drivers/arm/gic/v3/gicv3_main.c
+++ b/drivers/arm/gic/v3/gicv3_main.c
@@ -190,12 +190,12 @@
 			CTLR_ARE_S_BIT | CTLR_ARE_NS_BIT, RWP_TRUE);
 
 	/* Set the default attribute of all SPIs */
-	gicv3_spis_configure_defaults(gicv3_driver_data->gicd_base);
+	gicv3_spis_config_defaults(gicv3_driver_data->gicd_base);
 
 #if !ERROR_DEPRECATED
 	if (gicv3_driver_data->interrupt_props != NULL) {
 #endif
-		bitmap = gicv3_secure_spis_configure_props(
+		bitmap = gicv3_secure_spis_config_props(
 				gicv3_driver_data->gicd_base,
 				gicv3_driver_data->interrupt_props,
 				gicv3_driver_data->interrupt_props_num);
@@ -213,7 +213,7 @@
 
 		/* Configure the G1S SPIs */
 		if (gicv3_driver_data->g1s_interrupt_array) {
-			gicv3_secure_spis_configure(gicv3_driver_data->gicd_base,
+			gicv3_secure_spis_config(gicv3_driver_data->gicd_base,
 					gicv3_driver_data->g1s_interrupt_num,
 					gicv3_driver_data->g1s_interrupt_array,
 					INTR_GROUP1S);
@@ -222,7 +222,7 @@
 
 		/* Configure the G0 SPIs */
 		if (gicv3_driver_data->g0_interrupt_array) {
-			gicv3_secure_spis_configure(gicv3_driver_data->gicd_base,
+			gicv3_secure_spis_config(gicv3_driver_data->gicd_base,
 					gicv3_driver_data->g0_interrupt_num,
 					gicv3_driver_data->g0_interrupt_array,
 					INTR_GROUP0);
@@ -263,12 +263,12 @@
 	gicr_base = gicv3_driver_data->rdistif_base_addrs[proc_num];
 
 	/* Set the default attribute of all SGIs and PPIs */
-	gicv3_ppi_sgi_configure_defaults(gicr_base);
+	gicv3_ppi_sgi_config_defaults(gicr_base);
 
 #if !ERROR_DEPRECATED
 	if (gicv3_driver_data->interrupt_props != NULL) {
 #endif
-		bitmap = gicv3_secure_ppi_sgi_configure_props(gicr_base,
+		bitmap = gicv3_secure_ppi_sgi_config_props(gicr_base,
 				gicv3_driver_data->interrupt_props,
 				gicv3_driver_data->interrupt_props_num);
 #if !ERROR_DEPRECATED
@@ -285,7 +285,7 @@
 
 		/* Configure the G1S SGIs/PPIs */
 		if (gicv3_driver_data->g1s_interrupt_array) {
-			gicv3_secure_ppi_sgi_configure(gicr_base,
+			gicv3_secure_ppi_sgi_config(gicr_base,
 					gicv3_driver_data->g1s_interrupt_num,
 					gicv3_driver_data->g1s_interrupt_array,
 					INTR_GROUP1S);
@@ -294,7 +294,7 @@
 
 		/* Configure the G0 SGIs/PPIs */
 		if (gicv3_driver_data->g0_interrupt_array) {
-			gicv3_secure_ppi_sgi_configure(gicr_base,
+			gicv3_secure_ppi_sgi_config(gicr_base,
 					gicv3_driver_data->g0_interrupt_num,
 					gicv3_driver_data->g0_interrupt_array,
 					INTR_GROUP0);
diff --git a/drivers/arm/gic/v3/gicv3_private.h b/drivers/arm/gic/v3/gicv3_private.h
index c4474a4..e1c0775 100644
--- a/drivers/arm/gic/v3/gicv3_private.h
+++ b/drivers/arm/gic/v3/gicv3_private.h
@@ -27,20 +27,20 @@
  * GICD_IROUTER. Bits[31:24] in the MPIDR are cleared as they are not relevant
  * to GICv3.
  */
-#define gicd_irouter_val_from_mpidr(mpidr, irm)		\
-	((mpidr & ~(0xff << 24)) |			\
-	 (irm & IROUTER_IRM_MASK) << IROUTER_IRM_SHIFT)
+#define gicd_irouter_val_from_mpidr(_mpidr, _irm)		\
+	((_mpidr & ~(0xff << 24)) |			\
+	 (_irm & IROUTER_IRM_MASK) << IROUTER_IRM_SHIFT)
 
 /*
  * Macro to convert a GICR_TYPER affinity value into a MPIDR value. Bits[31:24]
  * are zeroes.
  */
 #ifdef AARCH32
-#define mpidr_from_gicr_typer(typer_val)	(((typer_val) >> 32) & 0xffffff)
+#define mpidr_from_gicr_typer(_typer_val)	(((_typer_val) >> 32) & 0xffffff)
 #else
-#define mpidr_from_gicr_typer(typer_val)				 \
-	(((((typer_val) >> 56) & MPIDR_AFFLVL_MASK) << MPIDR_AFF3_SHIFT) | \
-	 (((typer_val) >> 32) & 0xffffff))
+#define mpidr_from_gicr_typer(_typer_val)				 \
+	(((((_typer_val) >> 56) & MPIDR_AFFLVL_MASK) << MPIDR_AFF3_SHIFT) | \
+	 (((_typer_val) >> 32) & 0xffffff))
 #endif
 
 /*******************************************************************************
@@ -85,22 +85,22 @@
 /*******************************************************************************
  * Private GICv3 helper function prototypes
  ******************************************************************************/
-void gicv3_spis_configure_defaults(uintptr_t gicd_base);
-void gicv3_ppi_sgi_configure_defaults(uintptr_t gicr_base);
+void gicv3_spis_config_defaults(uintptr_t gicd_base);
+void gicv3_ppi_sgi_config_defaults(uintptr_t gicr_base);
 #if !ERROR_DEPRECATED
-void gicv3_secure_spis_configure(uintptr_t gicd_base,
+void gicv3_secure_spis_config(uintptr_t gicd_base,
 				     unsigned int num_ints,
 				     const unsigned int *sec_intr_list,
 				     unsigned int int_grp);
-void gicv3_secure_ppi_sgi_configure(uintptr_t gicr_base,
+void gicv3_secure_ppi_sgi_config(uintptr_t gicr_base,
 					unsigned int num_ints,
 					const unsigned int *sec_intr_list,
 					unsigned int int_grp);
 #endif
-unsigned int gicv3_secure_ppi_sgi_configure_props(uintptr_t gicr_base,
+unsigned int gicv3_secure_ppi_sgi_config_props(uintptr_t gicr_base,
 		const interrupt_prop_t *interrupt_props,
 		unsigned int interrupt_props_num);
-unsigned int gicv3_secure_spis_configure_props(uintptr_t gicd_base,
+unsigned int gicv3_secure_spis_config_props(uintptr_t gicd_base,
 		const interrupt_prop_t *interrupt_props,
 		unsigned int interrupt_props_num);
 void gicv3_rdistif_base_addrs_probe(uintptr_t *rdistif_base_addrs,
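
The `mpidr_from_gicr_typer()` macro above repacks GICR_TYPER's affinity field (Aff3 in bits [63:56], Aff2/Aff1/Aff0 in bits [55:32]) into the MPIDR layout, where Aff3 sits at bits [39:32] above the low 24 affinity bits. A self-contained check of that bit manipulation, with the two constants inlined:

```c
#include <assert.h>
#include <stdint.h>

#define MPIDR_AFFLVL_MASK	0xffULL
#define MPIDR_AFF3_SHIFT	32

/* Same logic as mpidr_from_gicr_typer() in gicv3_private.h (AArch64 form) */
#define mpidr_from_gicr_typer(_typer_val)				\
	(((((_typer_val) >> 56) & MPIDR_AFFLVL_MASK) << MPIDR_AFF3_SHIFT) | \
	 (((_typer_val) >> 32) & 0xffffff))

int main(void)
{
	/* GICR_TYPER.Affinity: Aff3=0x01, Aff2=0x02, Aff1=0x03, Aff0=0x04 */
	uint64_t typer = 0x0102030400000000ULL;

	/* MPIDR: Aff3 lands in bits[39:32], Aff2/1/0 in bits[23:0] */
	assert(mpidr_from_gicr_typer(typer) == 0x0100020304ULL);
	return 0;
}
```
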
diff --git a/drivers/arm/smmu/smmu_v3.c b/drivers/arm/smmu/smmu_v3.c
index cfe8c2a..7b017e3 100644
--- a/drivers/arm/smmu/smmu_v3.c
+++ b/drivers/arm/smmu/smmu_v3.c
@@ -8,8 +8,8 @@
 #include <smmu_v3.h>
 
 /* Test for pending invalidate */
-#define INVAL_PENDING(base)	\
-	smmuv3_read_s_init(base) & SMMU_S_INIT_INV_ALL_MASK
+#define INVAL_PENDING(_base)	\
+	(smmuv3_read_s_init(_base) & SMMU_S_INIT_INV_ALL_MASK)
 
 static inline uint32_t smmuv3_read_s_idr1(uintptr_t base)
 {
diff --git a/drivers/arm/tzc/tzc400.c b/drivers/arm/tzc/tzc400.c
index 0999fa5..db4f88a 100644
--- a/drivers/arm/tzc/tzc400.c
+++ b/drivers/arm/tzc/tzc400.c
@@ -54,7 +54,7 @@
 /*
  * Get the open status information for all filter units.
  */
-#define get_gate_keeper_os(base)	((_tzc400_read_gate_keeper(base) >>	\
+#define get_gate_keeper_os(_base)	((_tzc400_read_gate_keeper(_base) >>  \
 					GATE_KEEPER_OS_SHIFT) &		\
 					GATE_KEEPER_OS_MASK)
 
diff --git a/include/common/bl_common.h b/include/common/bl_common.h
index c7c7487..f64e6ae 100644
--- a/include/common/bl_common.h
+++ b/include/common/bl_common.h
@@ -207,7 +207,7 @@
 /*******************************************************************************
  * Function & variable prototypes
  ******************************************************************************/
-size_t image_size(unsigned int image_id);
+size_t get_image_size(unsigned int image_id);
 
 int is_mem_free(uintptr_t free_base, size_t free_size,
 		uintptr_t addr, size_t size);
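
A hypothetical caller of the renamed `get_image_size()` prototype; the helper below is illustrative and not taken from the tree:

```c
#include <bl_common.h>
#include <debug.h>

/* Hypothetical helper: warn when an image queried by ID has zero size */
static void check_image_present(unsigned int image_id)
{
	size_t size = get_image_size(image_id);

	if (size == 0U)
		WARN("image %u is missing or empty\n", image_id);
}
```
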
diff --git a/include/lib/cpus/aarch64/cortex_a76.h b/include/lib/cpus/aarch64/cortex_a76.h
new file mode 100644
index 0000000..1cb7747
--- /dev/null
+++ b/include/lib/cpus/aarch64/cortex_a76.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __CORTEX_A76_H__
+#define __CORTEX_A76_H__
+
+/* Cortex-A76 MIDR for revision 0 */
+#define CORTEX_A76_MIDR		0x410fd0b0
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A76_CPUPWRCTLR_EL1	S3_0_C15_C2_7
+#define CORTEX_A76_CPUECTLR_EL1	S3_0_C15_C1_4
+
+/*******************************************************************************
+ * CPU Auxiliary Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_A76_CPUACTLR2_EL1	S3_0_C15_C1_1
+
+#define CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE	(1 << 16)
+
+/* Definitions of register field mask in CORTEX_A76_CPUPWRCTLR_EL1 */
+#define CORTEX_A76_CORE_PWRDN_EN_MASK	0x1
+
+#endif /* __CORTEX_A76_H__ */
diff --git a/include/lib/cpus/aarch64/cortex_ares.h b/include/lib/cpus/aarch64/cortex_ares.h
new file mode 100644
index 0000000..84955b1
--- /dev/null
+++ b/include/lib/cpus/aarch64/cortex_ares.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __CORTEX_ARES_H__
+#define __CORTEX_ARES_H__
+
+/* Cortex-ARES MIDR for revision 0 */
+#define CORTEX_ARES_MIDR		0x410fd0c0
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions.
+ ******************************************************************************/
+#define CORTEX_ARES_CPUPWRCTLR_EL1	S3_0_C15_C2_7
+#define CORTEX_ARES_CPUECTLR_EL1	S3_0_C15_C1_4
+
+/* Definitions of register field mask in CORTEX_ARES_CPUPWRCTLR_EL1 */
+#define CORTEX_ARES_CORE_PWRDN_EN_MASK	0x1
+
+#define CORTEX_ARES_ACTLR_AMEN_BIT	(U(1) << 4)
+
+#define CORTEX_ARES_AMU_NR_COUNTERS	U(5)
+#define CORTEX_ARES_AMU_GROUP0_MASK	U(0x1f)
+
+/* Instruction patching registers */
+#define CPUPSELR_EL3	S3_6_C15_C8_0
+#define CPUPCR_EL3	S3_6_C15_C8_1
+#define CPUPOR_EL3	S3_6_C15_C8_2
+#define CPUPMR_EL3	S3_6_C15_C8_3
+
+#endif /* __CORTEX_ARES_H__ */
diff --git a/include/lib/el3_runtime/aarch64/context.h b/include/lib/el3_runtime/aarch64/context.h
index a4f3ea1..a2ce9f8 100644
--- a/include/lib/el3_runtime/aarch64/context.h
+++ b/include/lib/el3_runtime/aarch64/context.h
@@ -271,6 +271,7 @@
 #endif
 #define get_sysregs_ctx(h)	(&((cpu_context_t *) h)->sysregs_ctx)
 #define get_gpregs_ctx(h)	(&((cpu_context_t *) h)->gpregs_ctx)
+#define get_cve_2018_3639_ctx(h)	(&((cpu_context_t *) h)->cve_2018_3639_ctx)
 
 /*
  * Compile time assertions related to the 'cpu_context' structure to
diff --git a/include/lib/xlat_tables/aarch32/xlat_tables_aarch32.h b/include/lib/xlat_tables/aarch32/xlat_tables_aarch32.h
index a418d2d..808589a 100644
--- a/include/lib/xlat_tables/aarch32/xlat_tables_aarch32.h
+++ b/include/lib/xlat_tables/aarch32/xlat_tables_aarch32.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -66,7 +66,7 @@
  * valid. Therefore, the caller is expected to check it is the case using the
  * CHECK_VIRT_ADDR_SPACE_SIZE() macro first.
  */
-#define GET_XLAT_TABLE_LEVEL_BASE(virt_addr_space_size)			\
-	(((virt_addr_space_size) > (ULL(1) << L1_XLAT_ADDRESS_SHIFT)) ? 1 : 2)
+#define GET_XLAT_TABLE_LEVEL_BASE(_virt_addr_space_size)			\
+	(((_virt_addr_space_size) > (ULL(1) << L1_XLAT_ADDRESS_SHIFT)) ? 1 : 2)
 
 #endif /* __XLAT_TABLES_AARCH32_H__ */
diff --git a/include/lib/xlat_tables/aarch64/xlat_tables_aarch64.h b/include/lib/xlat_tables/aarch64/xlat_tables_aarch64.h
index 6021e40..ad48a35 100644
--- a/include/lib/xlat_tables/aarch64/xlat_tables_aarch64.h
+++ b/include/lib/xlat_tables/aarch64/xlat_tables_aarch64.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -74,10 +74,10 @@
  * valid. Therefore, the caller is expected to check it is the case using the
  * CHECK_VIRT_ADDR_SPACE_SIZE() macro first.
  */
-#define GET_XLAT_TABLE_LEVEL_BASE(virt_addr_space_size)				\
-	(((virt_addr_space_size) > (ULL(1) << L0_XLAT_ADDRESS_SHIFT))		\
+#define GET_XLAT_TABLE_LEVEL_BASE(_virt_addr_space_size)				\
+	(((_virt_addr_space_size) > (ULL(1) << L0_XLAT_ADDRESS_SHIFT))		\
 	? 0									\
-	 : (((virt_addr_space_size) > (ULL(1) << L1_XLAT_ADDRESS_SHIFT))	\
+	 : (((_virt_addr_space_size) > (ULL(1) << L1_XLAT_ADDRESS_SHIFT))	\
 	 ? 1 : 2))
 
 #endif /* __XLAT_TABLES_AARCH64_H__ */
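
Concretely, with the 4 KiB granule used here an L0 entry covers 2^39 bytes and an L1 entry 2^30 bytes, so the macro picks the shallowest level whose entries can still span the whole address space. A standalone check with those shift values inlined (see xlat_tables_defs.h for the authoritative definitions):

```c
#include <assert.h>

#define ULL(x)			(x##ULL)
#define L0_XLAT_ADDRESS_SHIFT	39	/* 512 GiB per L0 entry (4K granule) */
#define L1_XLAT_ADDRESS_SHIFT	30	/* 1 GiB per L1 entry */

#define GET_XLAT_TABLE_LEVEL_BASE(_virt_addr_space_size)		\
	(((_virt_addr_space_size) > (ULL(1) << L0_XLAT_ADDRESS_SHIFT))	\
	? 0								\
	 : (((_virt_addr_space_size) > (ULL(1) << L1_XLAT_ADDRESS_SHIFT)) \
	 ? 1 : 2))

int main(void)
{
	assert(GET_XLAT_TABLE_LEVEL_BASE(ULL(1) << 30) == 2);	/* 1 GiB */
	assert(GET_XLAT_TABLE_LEVEL_BASE(ULL(1) << 32) == 1);	/* 4 GiB */
	assert(GET_XLAT_TABLE_LEVEL_BASE(ULL(1) << 40) == 0);	/* 1 TiB */
	return 0;
}
```
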
diff --git a/include/plat/arm/common/arm_def.h b/include/plat/arm/common/arm_def.h
index 1f62ebe..d87fc16 100644
--- a/include/plat/arm/common/arm_def.h
+++ b/include/plat/arm/common/arm_def.h
@@ -339,9 +339,9 @@
  * BL2 specific defines.
  ******************************************************************************/
 #if BL2_AT_EL3
-/* Put BL2 in the middle of the Trusted SRAM */
+/* Put BL2 towards the middle of the Trusted SRAM */
 #define BL2_BASE			(ARM_TRUSTED_SRAM_BASE + \
-						(PLAT_ARM_TRUSTED_SRAM_SIZE >> 1))
+						(PLAT_ARM_TRUSTED_SRAM_SIZE >> 1) + 0x2000)
 #define BL2_LIMIT			(ARM_BL_RAM_BASE + ARM_BL_RAM_SIZE)
 
 #else
@@ -374,8 +374,16 @@
 #define BL31_BASE			((ARM_BL_RAM_BASE + ARM_BL_RAM_SIZE)\
 						- PLAT_ARM_MAX_BL31_SIZE)
 #define BL31_PROGBITS_LIMIT		BL2_BASE
+/*
+ * For BL2_AT_EL3 make sure the BL31 can grow up until BL2_BASE. This is
+ * because in the BL2_AT_EL3 configuration, BL2 is always resident.
+ */
+#if BL2_AT_EL3
+#define BL31_LIMIT			BL2_BASE
+#else
 #define BL31_LIMIT			(ARM_BL_RAM_BASE + ARM_BL_RAM_SIZE)
 #endif
+#endif
 
 #if defined(AARCH32) || JUNO_AARCH32_EL3_RUNTIME
 /*******************************************************************************
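
The hunk above lays out the BL2_AT_EL3 case as follows: BL2 now starts 0x2000 bytes above the Trusted SRAM midpoint, and BL31 is capped at BL2_BASE because a resident BL2 must not be overwritten. A worked sketch assuming the FVP's 256 KiB Trusted SRAM at 0x04000000, which is consistent with the RVBAR change to 0x4022000 in the firmware-design.rst hunk above:

```c
#include <assert.h>

/* Assumed FVP values, for illustration only */
#define ARM_TRUSTED_SRAM_BASE		0x04000000UL
#define PLAT_ARM_TRUSTED_SRAM_SIZE	0x00040000UL	/* 256 KiB */

/* BL2_AT_EL3 placement from the hunk above */
#define BL2_BASE	(ARM_TRUSTED_SRAM_BASE + \
				(PLAT_ARM_TRUSTED_SRAM_SIZE >> 1) + 0x2000)
#define BL31_LIMIT	BL2_BASE	/* resident BL2 must not be clobbered */

int main(void)
{
	assert(BL2_BASE == 0x04022000UL);	/* SRAM midpoint + 0x2000 */
	assert(BL31_LIMIT == BL2_BASE);
	return 0;
}
```
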
diff --git a/lib/cpus/aarch64/cortex_a76.S b/lib/cpus/aarch64/cortex_a76.S
new file mode 100644
index 0000000..14705d7
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_a76.S
@@ -0,0 +1,290 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arm_arch_svc.h>
+#include <asm_macros.S>
+#include <bl_common.h>
+#include <context.h>
+#include <cortex_a76.h>
+#include <cpu_macros.S>
+#include <plat_macros.S>
+
+#if !DYNAMIC_WORKAROUND_CVE_2018_3639
+#error Cortex A76 requires DYNAMIC_WORKAROUND_CVE_2018_3639=1
+#endif
+
+#define ESR_EL3_A64_SMC0	0x5e000000
+#define ESR_EL3_A32_SMC0	0x4e000000
+
+	/*
+	 * This macro applies the mitigation for CVE-2018-3639.
+	 * It implements a fast path where `SMCCC_ARCH_WORKAROUND_2`
+	 * SMC calls from a lower EL running in AArch32 or AArch64
+	 * will go through the fast path and return early.
+	 *
+	 * The macro saves x2-x3 to the context.  In the fast path
+	 * x0-x3 registers do not need to be restored as the calling
+	 * context will have saved them.
+	 */
+	.macro apply_cve_2018_3639_wa _is_sync_exception _esr_el3_val
+	stp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+
+	.if \_is_sync_exception
+		/*
+		 * Ensure SMC is coming from A64/A32 state on #0
+		 * with W0 = SMCCC_ARCH_WORKAROUND_2
+		 *
+		 * This sequence evaluates as:
+		 *    (W0==SMCCC_ARCH_WORKAROUND_2) ? (ESR_EL3==SMC#0) : (NE)
+		 * allowing use of a single branch operation
+		 */
+		orr	w2, wzr, #SMCCC_ARCH_WORKAROUND_2
+		cmp	x0, x2
+		mrs	x3, esr_el3
+		mov_imm	w2, \_esr_el3_val
+		ccmp	w2, w3, #0, eq
+		/*
+		 * Static predictor will predict a fall-through, optimizing
+		 * the `SMCCC_ARCH_WORKAROUND_2` fast path.
+		 */
+		bne	1f
+
+		/*
+		 * The sequence below implements the `SMCCC_ARCH_WORKAROUND_2`
+		 * fast path.
+		 */
+		cmp	x1, xzr /* enable/disable check */
+
+		/*
+		 * When the calling context wants mitigation disabled,
+		 * we program the mitigation disable function in the
+		 * CPU context, which gets invoked on subsequent exits from
+		 * EL3 via the `el3_exit` function.  Otherwise NULL is
+		 * programmed in the CPU context, which results in caller's
+		 * inheriting the EL3 mitigation state (enabled) on subsequent
+		 * `el3_exit`.
+		 */
+		mov	x0, xzr
+		adr	x1, cortex_a76_disable_wa_cve_2018_3639
+		csel	x1, x1, x0, eq
+		str	x1, [sp, #CTX_CVE_2018_3639_OFFSET + CTX_CVE_2018_3639_DISABLE]
+
+		mrs	x2, CORTEX_A76_CPUACTLR2_EL1
+		orr	x1, x2, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
+		bic	x3, x2, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
+		csel	x3, x3, x1, eq
+		msr	CORTEX_A76_CPUACTLR2_EL1, x3
+		eret	/* ERET implies ISB */
+	.endif
+1:
+	/*
+	 * Always enable v4 mitigation during EL3 execution.  This is not
+	 * required for the fast path above because it does not perform any
+	 * memory loads.
+	 */
+	mrs	x2, CORTEX_A76_CPUACTLR2_EL1
+	orr	x2, x2, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
+	msr	CORTEX_A76_CPUACTLR2_EL1, x2
+	isb
+
+	/*
+	 * The caller may have passed arguments to EL3 via x2-x3.
+	 * Restore these registers from the context before jumping to the
+	 * main runtime vector table entry.
+	 */
+	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+	.endm
+
+vector_base cortex_a76_wa_cve_2018_3639_a76_vbar
+
+	/* ---------------------------------------------------------------------
+	 * Current EL with SP_EL0 : 0x0 - 0x200
+	 * ---------------------------------------------------------------------
+	 */
+vector_entry cortex_a76_sync_exception_sp_el0
+	b	sync_exception_sp_el0
+	check_vector_size cortex_a76_sync_exception_sp_el0
+
+vector_entry cortex_a76_irq_sp_el0
+	b	irq_sp_el0
+	check_vector_size cortex_a76_irq_sp_el0
+
+vector_entry cortex_a76_fiq_sp_el0
+	b	fiq_sp_el0
+	check_vector_size cortex_a76_fiq_sp_el0
+
+vector_entry cortex_a76_serror_sp_el0
+	b	serror_sp_el0
+	check_vector_size cortex_a76_serror_sp_el0
+
+	/* ---------------------------------------------------------------------
+	 * Current EL with SP_ELx: 0x200 - 0x400
+	 * ---------------------------------------------------------------------
+	 */
+vector_entry cortex_a76_sync_exception_sp_elx
+	b	sync_exception_sp_elx
+	check_vector_size cortex_a76_sync_exception_sp_elx
+
+vector_entry cortex_a76_irq_sp_elx
+	b	irq_sp_elx
+	check_vector_size cortex_a76_irq_sp_elx
+
+vector_entry cortex_a76_fiq_sp_elx
+	b	fiq_sp_elx
+	check_vector_size cortex_a76_fiq_sp_elx
+
+vector_entry cortex_a76_serror_sp_elx
+	b	serror_sp_elx
+	check_vector_size cortex_a76_serror_sp_elx
+
+	/* ---------------------------------------------------------------------
+	 * Lower EL using AArch64 : 0x400 - 0x600
+	 * ---------------------------------------------------------------------
+	 */
+vector_entry cortex_a76_sync_exception_aarch64
+	apply_cve_2018_3639_wa _is_sync_exception=1 _esr_el3_val=ESR_EL3_A64_SMC0
+	b	sync_exception_aarch64
+	check_vector_size cortex_a76_sync_exception_aarch64
+
+vector_entry cortex_a76_irq_aarch64
+	apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
+	b	irq_aarch64
+	check_vector_size cortex_a76_irq_aarch64
+
+vector_entry cortex_a76_fiq_aarch64
+	apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
+	b	fiq_aarch64
+	check_vector_size cortex_a76_fiq_aarch64
+
+vector_entry cortex_a76_serror_aarch64
+	apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
+	b	serror_aarch64
+	check_vector_size cortex_a76_serror_aarch64
+
+	/* ---------------------------------------------------------------------
+	 * Lower EL using AArch32 : 0x600 - 0x800
+	 * ---------------------------------------------------------------------
+	 */
+vector_entry cortex_a76_sync_exception_aarch32
+	apply_cve_2018_3639_wa _is_sync_exception=1 _esr_el3_val=ESR_EL3_A32_SMC0
+	b	sync_exception_aarch32
+	check_vector_size cortex_a76_sync_exception_aarch32
+
+vector_entry cortex_a76_irq_aarch32
+	apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
+	b	irq_aarch32
+	check_vector_size cortex_a76_irq_aarch32
+
+vector_entry cortex_a76_fiq_aarch32
+	apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
+	b	fiq_aarch32
+	check_vector_size cortex_a76_fiq_aarch32
+
+vector_entry cortex_a76_serror_aarch32
+	apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
+	b	serror_aarch32
+	check_vector_size cortex_a76_serror_aarch32
+
+func check_errata_cve_2018_3639
+#if WORKAROUND_CVE_2018_3639
+	mov	x0, #ERRATA_APPLIES
+#else
+	mov	x0, #ERRATA_MISSING
+#endif
+	ret
+endfunc check_errata_cve_2018_3639
+
+func cortex_a76_disable_wa_cve_2018_3639
+	mrs	x0, CORTEX_A76_CPUACTLR2_EL1
+	bic	x0, x0, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
+	msr	CORTEX_A76_CPUACTLR2_EL1, x0
+	isb
+	ret
+endfunc cortex_a76_disable_wa_cve_2018_3639
+
+func cortex_a76_reset_func
+#if WORKAROUND_CVE_2018_3639
+	mrs	x0, CORTEX_A76_CPUACTLR2_EL1
+	orr	x0, x0, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
+	msr	CORTEX_A76_CPUACTLR2_EL1, x0
+	isb
+#endif
+
+#if IMAGE_BL31 && WORKAROUND_CVE_2018_3639
+	/*
+	 * The Cortex-A76 generic vectors are overwritten to use the vectors
+	 * defined above.  This is required in order to apply mitigation
+	 * against CVE-2018-3639 on exception entry from lower ELs.
+	 */
+	adr	x0, cortex_a76_wa_cve_2018_3639_a76_vbar
+	msr	vbar_el3, x0
+	isb
+#endif
+	ret
+endfunc cortex_a76_reset_func
+
+	/* ---------------------------------------------
+	 * HW will do the cache maintenance while powering down
+	 * ---------------------------------------------
+	 */
+func cortex_a76_core_pwr_dwn
+	/* ---------------------------------------------
+	 * Enable CPU power down bit in power control register
+	 * ---------------------------------------------
+	 */
+	mrs	x0, CORTEX_A76_CPUPWRCTLR_EL1
+	orr	x0, x0, #CORTEX_A76_CORE_PWRDN_EN_MASK
+	msr	CORTEX_A76_CPUPWRCTLR_EL1, x0
+	isb
+	ret
+endfunc cortex_a76_core_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex-A76. Must follow AAPCS.
+ */
+func cortex_a76_errata_report
+	stp	x8, x30, [sp, #-16]!
+
+	bl	cpu_get_rev_var
+	mov	x8, x0
+
+	/*
+	 * Report all errata. The revision-variant information is passed to
+	 * checking functions of each errata.
+	 */
+	report_errata WORKAROUND_CVE_2018_3639, cortex_a76, cve_2018_3639
+
+	ldp	x8, x30, [sp], #16
+	ret
+endfunc cortex_a76_errata_report
+#endif
+
+	/* ---------------------------------------------
+	 * This function provides cortex_a76 specific
+	 * register information for crash reporting.
+	 * It needs to return with x6 pointing to
+	 * a list of register names in ascii and
+	 * x8 - x15 having values of registers to be
+	 * reported.
+	 * ---------------------------------------------
+	 */
+.section .rodata.cortex_a76_regs, "aS"
+cortex_a76_regs:  /* The ascii list of register names to be reported */
+	.asciz	"cpuectlr_el1", ""
+
+func cortex_a76_cpu_reg_dump
+	adr	x6, cortex_a76_regs
+	mrs	x8, CORTEX_A76_CPUECTLR_EL1
+	ret
+endfunc cortex_a76_cpu_reg_dump
+
+declare_cpu_ops_wa cortex_a76, CORTEX_A76_MIDR, \
+	cortex_a76_reset_func, \
+	CPU_NO_EXTRA1_FUNC, \
+	cortex_a76_disable_wa_cve_2018_3639, \
+	cortex_a76_core_pwr_dwn
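
The assembly fast path above is easier to follow as a C model: on an `SMCCC_ARCH_WORKAROUND_2` SMC, the caller's enable/disable request is applied to CPUACTLR2_EL1[16] immediately and latched into the saved context (a non-NULL disable hook makes `el3_exit` switch the mitigation off again; NULL means the caller inherits the EL3 state, i.e. enabled); every other entry falls through with the mitigation forced on. A rough sketch, not the actual implementation:

```c
#include <stdbool.h>
#include <stdint.h>

#define DISABLE_LOAD_PASS_STORE	(1ULL << 16)	/* CPUACTLR2_EL1 bit */

/* Simplified model of the state the real code keeps in registers and in
 * the cve_2018_3639 context area. */
struct cpu_model {
	uint64_t cpuactlr2_el1;
	void (*saved_disable_fn)(struct cpu_model *);	/* run on el3_exit */
};

static void disable_wa_cve_2018_3639(struct cpu_model *cpu)
{
	cpu->cpuactlr2_el1 &= ~DISABLE_LOAD_PASS_STORE;
}

/* Returns true when the fast path handled the call (the early eret) */
static bool fast_path(struct cpu_model *cpu, bool is_smc_wa2, bool enable)
{
	if (is_smc_wa2) {
		/* Latch the caller's choice for subsequent el3_exit */
		cpu->saved_disable_fn = enable ? 0 : disable_wa_cve_2018_3639;
		if (enable)
			cpu->cpuactlr2_el1 |= DISABLE_LOAD_PASS_STORE;
		else
			cpu->cpuactlr2_el1 &= ~DISABLE_LOAD_PASS_STORE;
		return true;
	}
	/* Slow path: mitigation is always on while executing in EL3 */
	cpu->cpuactlr2_el1 |= DISABLE_LOAD_PASS_STORE;
	return false;
}

int main(void)
{
	struct cpu_model cpu = { 0, 0 };

	(void)fast_path(&cpu, true, false);	/* caller asks to disable */
	return (cpu.cpuactlr2_el1 & DISABLE_LOAD_PASS_STORE) ? 1 : 0;
}
```
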
diff --git a/lib/cpus/aarch64/cortex_ares.S b/lib/cpus/aarch64/cortex_ares.S
new file mode 100644
index 0000000..942b6f7
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_ares.S
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <cortex_ares.h>
+#include <cpuamu.h>
+#include <cpu_macros.S>
+
+/* --------------------------------------------------
+ * Errata Workaround for Cortex-Ares Erratum 1043202.
+ * This applies to revisions r0p0 and r1p0 of Cortex-Ares.
+ * Inputs:
+ * x0: variant[4:7] and revision[0:3] of current cpu.
+ * Shall clobber: x0-x17
+ * --------------------------------------------------
+ */
+func errata_ares_1043202_wa
+	/* Check revision; the erratum applies to r0p0 and r1p0 */
+	mov	x17, x30
+	bl	check_errata_1043202
+	cbz	x0, 1f
+
+	/* Apply instruction patching sequence */
+	ldr	x0, =0x0
+	msr	CPUPSELR_EL3, x0
+	ldr	x0, =0xF3BF8F2F
+	msr	CPUPOR_EL3, x0
+	ldr	x0, =0xFFFFFFFF
+	msr	CPUPMR_EL3, x0
+	ldr	x0, =0x800200071
+	msr	CPUPCR_EL3, x0
+	isb
+1:
+	ret	x17
+endfunc errata_ares_1043202_wa
+
+func check_errata_1043202
+	/* Applies to r0p0 and r1p0 */
+	mov	x1, #0x10
+	b	cpu_rev_var_ls
+endfunc check_errata_1043202
+
+func cortex_ares_reset_func
+	mov	x19, x30
+	bl	cpu_get_rev_var
+	mov	x18, x0
+
+#if ERRATA_ARES_1043202
+	mov	x0, x18
+	bl	errata_ares_1043202_wa
+#endif
+
+#if ENABLE_AMU
+	/* Make sure accesses from EL0/EL1 and EL2 are not trapped to EL3 */
+	mrs	x0, actlr_el3
+	orr	x0, x0, #CORTEX_ARES_ACTLR_AMEN_BIT
+	msr	actlr_el3, x0
+	isb
+
+	/* Make sure accesses from EL0/EL1 are not trapped to EL2 */
+	mrs	x0, actlr_el2
+	orr	x0, x0, #CORTEX_ARES_ACTLR_AMEN_BIT
+	msr	actlr_el2, x0
+	isb
+
+	/* Enable group0 counters */
+	mov	x0, #CORTEX_ARES_AMU_GROUP0_MASK
+	msr	CPUAMCNTENSET_EL0, x0
+	isb
+#endif
+	ret	x19
+endfunc cortex_ares_reset_func
+
+	/* ---------------------------------------------
+	 * HW will do the cache maintenance while powering down
+	 * ---------------------------------------------
+	 */
+func cortex_ares_core_pwr_dwn
+	/* ---------------------------------------------
+	 * Enable CPU power down bit in power control register
+	 * ---------------------------------------------
+	 */
+	mrs	x0, CORTEX_ARES_CPUPWRCTLR_EL1
+	orr	x0, x0, #CORTEX_ARES_CORE_PWRDN_EN_MASK
+	msr	CORTEX_ARES_CPUPWRCTLR_EL1, x0
+	isb
+	ret
+endfunc cortex_ares_core_pwr_dwn
+
+#if REPORT_ERRATA
+/*
+ * Errata printing function for Cortex-Ares. Must follow AAPCS.
+ */
+func cortex_ares_errata_report
+	stp	x8, x30, [sp, #-16]!
+
+	bl	cpu_get_rev_var
+	mov	x8, x0
+
+	/*
+	 * Report all errata. The revision-variant information is passed to
+	 * checking functions of each errata.
+	 */
+	report_errata ERRATA_ARES_1043202, cortex_ares, 1043202
+
+	ldp	x8, x30, [sp], #16
+	ret
+endfunc cortex_ares_errata_report
+#endif
+
+	/* ---------------------------------------------
+	 * This function provides cortex_ares specific
+	 * register information for crash reporting.
+	 * It needs to return with x6 pointing to
+	 * a list of register names in ascii and
+	 * x8 - x15 having values of registers to be
+	 * reported.
+	 * ---------------------------------------------
+	 */
+.section .rodata.cortex_ares_regs, "aS"
+cortex_ares_regs:  /* The ascii list of register names to be reported */
+	.asciz	"cpuectlr_el1", ""
+
+func cortex_ares_cpu_reg_dump
+	adr	x6, cortex_ares_regs
+	mrs	x8, CORTEX_ARES_CPUECTLR_EL1
+	ret
+endfunc cortex_ares_cpu_reg_dump
+
+declare_cpu_ops cortex_ares, CORTEX_ARES_MIDR, \
+	cortex_ares_reset_func, \
+	cortex_ares_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_ares_pubsub.c b/lib/cpus/aarch64/cortex_ares_pubsub.c
new file mode 100644
index 0000000..c7d850a
--- /dev/null
+++ b/lib/cpus/aarch64/cortex_ares_pubsub.c
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <cortex_ares.h>
+#include <cpuamu.h>
+#include <pubsub_events.h>
+
+static void *cortex_ares_context_save(const void *arg)
+{
+	if (midr_match(CORTEX_ARES_MIDR) != 0)
+		cpuamu_context_save(CORTEX_ARES_AMU_NR_COUNTERS);
+	return 0;
+}
+
+static void *cortex_ares_context_restore(const void *arg)
+{
+	if (midr_match(CORTEX_ARES_MIDR) != 0)
+		cpuamu_context_restore(CORTEX_ARES_AMU_NR_COUNTERS);
+	return 0;
+}
+
+SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, cortex_ares_context_save);
+SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, cortex_ares_context_restore);
diff --git a/lib/cpus/aarch64/cpuamu.c b/lib/cpus/aarch64/cpuamu.c
index b9bad86..5ad5bf8 100644
--- a/lib/cpus/aarch64/cpuamu.c
+++ b/lib/cpus/aarch64/cpuamu.c
@@ -10,12 +10,12 @@
 
 #define CPUAMU_NR_COUNTERS	5U
 
-struct amu_ctx {
+struct cpuamu_ctx {
 	uint64_t cnts[CPUAMU_NR_COUNTERS];
 	unsigned int mask;
 };
 
-static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];
+static struct cpuamu_ctx cpuamu_ctxs[PLATFORM_CORE_COUNT];
 
 int midr_match(unsigned int cpu_midr)
 {
@@ -29,7 +29,7 @@
 
 void cpuamu_context_save(unsigned int nr_counters)
 {
-	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
+	struct cpuamu_ctx *ctx = &cpuamu_ctxs[plat_my_core_pos()];
 	unsigned int i;
 
 	assert(nr_counters <= CPUAMU_NR_COUNTERS);
@@ -48,7 +48,7 @@
 
 void cpuamu_context_restore(unsigned int nr_counters)
 {
-	struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
+	struct cpuamu_ctx *ctx = &cpuamu_ctxs[plat_my_core_pos()];
 	unsigned int i;
 
 	assert(nr_counters <= CPUAMU_NR_COUNTERS);
diff --git a/lib/cpus/cpu-ops.mk b/lib/cpus/cpu-ops.mk
index 434c13e..456e3e5 100644
--- a/lib/cpus/cpu-ops.mk
+++ b/lib/cpus/cpu-ops.mk
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2014-2018, ARM Limited and Contributors. All rights reserved.
 #
 # SPDX-License-Identifier: BSD-3-Clause
 #
@@ -119,6 +119,10 @@
 # only to revision <= r0p3 of the Cortex A72 cpu.
 ERRATA_A72_859971	?=0
 
+# Flag to apply T32 CLREX workaround during reset. This erratum applies
+# only to r0p0 and r1p0 of the Ares cpu.
+ERRATA_ARES_1043202	?=1
+
 # Process ERRATA_A53_826319 flag
 $(eval $(call assert_boolean,ERRATA_A53_826319))
 $(eval $(call add_define,ERRATA_A53_826319))
@@ -179,6 +183,10 @@
 $(eval $(call assert_boolean,ERRATA_A72_859971))
 $(eval $(call add_define,ERRATA_A72_859971))
 
+# Process ERRATA_ARES_1043202 flag
+$(eval $(call assert_boolean,ERRATA_ARES_1043202))
+$(eval $(call add_define,ERRATA_ARES_1043202))
+
 # Errata build flags
 ifneq (${ERRATA_A53_843419},0)
 TF_LDFLAGS_aarch64	+= --fix-cortex-a53-843419
diff --git a/lib/locks/bakery/bakery_lock_coherent.c b/lib/locks/bakery/bakery_lock_coherent.c
index a857e03..788ba98 100644
--- a/lib/locks/bakery/bakery_lock_coherent.c
+++ b/lib/locks/bakery/bakery_lock_coherent.c
@@ -34,9 +34,9 @@
  * accesses regardless of status of address translation.
  */
 
-#define assert_bakery_entry_valid(entry, bakery) do {	\
-	assert(bakery);					\
-	assert(entry < BAKERY_LOCK_MAX_CPUS);		\
+#define assert_bakery_entry_valid(_entry, _bakery) do {	\
+	assert(_bakery);					\
+	assert(_entry < BAKERY_LOCK_MAX_CPUS);		\
 } while (0)
 
 /* Obtain a ticket for a given CPU */
diff --git a/lib/locks/bakery/bakery_lock_normal.c b/lib/locks/bakery/bakery_lock_normal.c
index 37697f5..630226a 100644
--- a/lib/locks/bakery/bakery_lock_normal.c
+++ b/lib/locks/bakery/bakery_lock_normal.c
@@ -53,18 +53,18 @@
 IMPORT_SYM(uintptr_t, __PERCPU_BAKERY_LOCK_SIZE__, PERCPU_BAKERY_LOCK_SIZE);
 #endif
 
-#define get_bakery_info(cpu_ix, lock)	\
-	(bakery_info_t *)((uintptr_t)lock + cpu_ix * PERCPU_BAKERY_LOCK_SIZE)
+#define get_bakery_info(_cpu_ix, _lock)	\
+	(bakery_info_t *)((uintptr_t)_lock + _cpu_ix * PERCPU_BAKERY_LOCK_SIZE)
 
-#define write_cache_op(addr, cached)	\
+#define write_cache_op(_addr, _cached)	\
 				do {	\
-					(cached ? dccvac((uintptr_t)addr) :\
-						dcivac((uintptr_t)addr));\
+					(_cached ? dccvac((uintptr_t)_addr) :\
+						dcivac((uintptr_t)_addr));\
 						dsbish();\
 				} while (0)
 
-#define read_cache_op(addr, cached)	if (cached) \
-					    dccivac((uintptr_t)addr)
+#define read_cache_op(_addr, _cached)	if (_cached) \
+					    dccivac((uintptr_t)_addr)
 
 /* Helper function to check if the lock is acquired */
 static inline int is_lock_acquired(const bakery_info_t *my_bakery_info,
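
The renamed cache-op macros exist because these bakery locks live in normal (potentially non-coherent) memory: after a write the line is cleaned (`dccvac`) when caches are on, or invalidated (`dcivac`) when they are off, so no stale copy survives; before a read, `dccivac` pushes out and drops the local copy so another CPU's update is observed. A hedged C rendering of the pattern, with no-op stand-ins for the real maintenance primitives:

```c
#include <stdbool.h>
#include <stdint.h>

/* No-op stand-ins for the real cache maintenance helpers */
static void dc_clean(uintptr_t addr)       { (void)addr; /* dccvac  */ }
static void dc_inval(uintptr_t addr)       { (void)addr; /* dcivac  */ }
static void dc_clean_inval(uintptr_t addr) { (void)addr; /* dccivac */ }
static void dsb_ish(void)                  { /* dsb ish */ }

static void write_cache_op(uintptr_t addr, bool cached)
{
	/* Publish the write: clean if caching, else drop any stale line */
	if (cached)
		dc_clean(addr);
	else
		dc_inval(addr);
	dsb_ish();
}

static void read_cache_op(uintptr_t addr, bool cached)
{
	/* Refetch from memory so another CPU's update becomes visible */
	if (cached)
		dc_clean_inval(addr);
}

int main(void)
{
	int lock_word = 0;

	write_cache_op((uintptr_t)&lock_word, true);
	read_cache_op((uintptr_t)&lock_word, true);
	return 0;
}
```
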
diff --git a/lib/optee/optee_utils.c b/lib/optee/optee_utils.c
index ac51265..ecf7cc0 100644
--- a/lib/optee/optee_utils.c
+++ b/lib/optee/optee_utils.c
@@ -43,7 +43,7 @@
 	uint8_t arch;
 	uint16_t flags;
 	uint32_t nb_images;
-	optee_image_t optee_image[];
+	optee_image_t optee_image_list[];
 } optee_header_t;
 
 /*******************************************************************************
@@ -51,11 +51,11 @@
  * Return 1 if valid
  * Return 0 if invalid
  ******************************************************************************/
-static inline int tee_validate_header(optee_header_t *optee_header)
+static inline int tee_validate_header(optee_header_t *header)
 {
-	if ((optee_header->magic == TEE_MAGIC_NUM_OPTEE) &&
-		(optee_header->version == 2) &&
-		(optee_header->nb_images <= OPTEE_MAX_IMAGE_NUM)) {
+	if ((header->magic == TEE_MAGIC_NUM_OPTEE) &&
+		(header->version == 2) &&
+		(header->nb_images <= OPTEE_MAX_IMAGE_NUM)) {
 		return 1;
 	}
 
@@ -68,14 +68,14 @@
  * Return 0 on success or a negative error code otherwise.
  ******************************************************************************/
 static int parse_optee_image(image_info_t *image_info,
-		optee_image_t *optee_image)
+		optee_image_t *image)
 {
 	uintptr_t init_load_addr, free_end, requested_end;
 	size_t init_size;
 
-	init_load_addr = ((uint64_t)optee_image->load_addr_hi << 32) |
-					optee_image->load_addr_lo;
-	init_size = optee_image->size;
+	init_load_addr = ((uint64_t)image->load_addr_hi << 32) |
+					image->load_addr_lo;
+	init_size = image->size;
 
 	/*
 	 * -1 indicates loader decided address; take our pre-mapped area
@@ -133,21 +133,21 @@
 		image_info_t *paged_image_info)
 
 {
-	optee_header_t *optee_header;
+	optee_header_t *header;
 	int num, ret;
 
 	assert(header_ep);
-	optee_header = (optee_header_t *)header_ep->pc;
-	assert(optee_header);
+	header = (optee_header_t *)header_ep->pc;
+	assert(header);
 
 	/* Print the OPTEE header information */
 	INFO("OPTEE ep=0x%x\n", (unsigned int)header_ep->pc);
 	INFO("OPTEE header info:\n");
-	INFO("      magic=0x%x\n", optee_header->magic);
-	INFO("      version=0x%x\n", optee_header->version);
-	INFO("      arch=0x%x\n", optee_header->arch);
-	INFO("      flags=0x%x\n", optee_header->flags);
-	INFO("      nb_images=0x%x\n", optee_header->nb_images);
+	INFO("      magic=0x%x\n", header->magic);
+	INFO("      version=0x%x\n", header->version);
+	INFO("      arch=0x%x\n", header->arch);
+	INFO("      flags=0x%x\n", header->flags);
+	INFO("      nb_images=0x%x\n", header->nb_images);
 
 	/*
 	 * OPTEE image has 3 types:
@@ -166,7 +166,7 @@
 	 *	pager and pageable. Remove skip attr for BL32_EXTRA1_IMAGE_ID
 	 *	and BL32_EXTRA2_IMAGE_ID to load pager and paged bin.
 	 */
-	if (!tee_validate_header(optee_header)) {
+	if (!tee_validate_header(header)) {
 		INFO("Invalid OPTEE header, set legacy mode.\n");
 #ifdef AARCH64
 		header_ep->args.arg0 = MODE_RW_64;
@@ -177,15 +177,15 @@
 	}
 
 	/* Parse OPTEE image */
-	for (num = 0; num < optee_header->nb_images; num++) {
-		if (optee_header->optee_image[num].image_id ==
+	for (num = 0; num < header->nb_images; num++) {
+		if (header->optee_image_list[num].image_id ==
 				OPTEE_PAGER_IMAGE_ID) {
 			ret = parse_optee_image(pager_image_info,
-				&optee_header->optee_image[num]);
-		} else if (optee_header->optee_image[num].image_id ==
+				&header->optee_image_list[num]);
+		} else if (header->optee_image_list[num].image_id ==
 				OPTEE_PAGED_IMAGE_ID) {
 			ret = parse_optee_image(paged_image_info,
-				&optee_header->optee_image[num]);
+				&header->optee_image_list[num]);
 		} else {
 			ERROR("Parse optee image failed.\n");
 			return -1;
@@ -211,7 +211,7 @@
 	header_ep->args.arg2 = paged_image_info->image_size;
 
 	/* Set OPTEE runtime arch - aarch32/aarch64 */
-	if (optee_header->arch == 0) {
+	if (header->arch == 0) {
 		header_ep->args.arg0 = MODE_RW_32;
 	} else {
 #ifdef AARCH64
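
The parsing above walks a small packed header followed by `nb_images` image records. A minimal sketch of the layout and the acceptance rule (field widths follow the struct in this file; the `TEE_MAGIC_NUM_OPTEE` and `OPTEE_MAX_IMAGE_NUM` values below are placeholders):

```c
#include <stdint.h>

#define TEE_MAGIC_NUM_OPTEE	0x4554504f	/* placeholder value */
#define OPTEE_MAX_IMAGE_NUM	2		/* placeholder value */

typedef struct optee_image {
	uint32_t load_addr_hi;
	uint32_t load_addr_lo;
	uint32_t image_id;
	uint32_t size;
} optee_image_t;

typedef struct optee_header {
	uint32_t magic;
	uint8_t version;
	uint8_t arch;
	uint16_t flags;
	uint32_t nb_images;
	optee_image_t optee_image_list[];	/* flexible array member */
} optee_header_t;

/* Same acceptance rule as tee_validate_header() */
static inline int tee_validate_header(const optee_header_t *header)
{
	return (header->magic == TEE_MAGIC_NUM_OPTEE) &&
	       (header->version == 2) &&
	       (header->nb_images <= OPTEE_MAX_IMAGE_NUM);
}

int main(void)
{
	optee_header_t hdr = { TEE_MAGIC_NUM_OPTEE, 2, 0, 0, 1 };

	return tee_validate_header(&hdr) ? 0 : 1;
}
```
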
diff --git a/lib/psci/psci_private.h b/lib/psci/psci_private.h
index c58f329..d452e2a 100644
--- a/lib/psci/psci_private.h
+++ b/lib/psci/psci_private.h
@@ -65,8 +65,8 @@
 
 #endif
 
-#define psci_lock_init(non_cpu_pd_node, idx)			\
-	((non_cpu_pd_node)[(idx)].lock_index = (idx))
+#define psci_lock_init(_non_cpu_pd_node, _idx)			\
+	((_non_cpu_pd_node)[(_idx)].lock_index = (_idx))
 
 /*
 * The PSCI capabilities which are provided by the generic code but do not
@@ -96,35 +96,35 @@
 /*
  * Helper macros to get/set the fields of PSCI per-cpu data.
  */
-#define psci_set_aff_info_state(aff_state) \
-		set_cpu_data(psci_svc_cpu_data.aff_info_state, aff_state)
+#define psci_set_aff_info_state(_aff_state) \
+		set_cpu_data(psci_svc_cpu_data.aff_info_state, _aff_state)
 #define psci_get_aff_info_state() \
 		get_cpu_data(psci_svc_cpu_data.aff_info_state)
-#define psci_get_aff_info_state_by_idx(idx) \
-		get_cpu_data_by_index(idx, psci_svc_cpu_data.aff_info_state)
-#define psci_set_aff_info_state_by_idx(idx, aff_state) \
-		set_cpu_data_by_index(idx, psci_svc_cpu_data.aff_info_state,\
-					aff_state)
+#define psci_get_aff_info_state_by_idx(_idx) \
+		get_cpu_data_by_index(_idx, psci_svc_cpu_data.aff_info_state)
+#define psci_set_aff_info_state_by_idx(_idx, _aff_state) \
+		set_cpu_data_by_index(_idx, psci_svc_cpu_data.aff_info_state,\
+					_aff_state)
 #define psci_get_suspend_pwrlvl() \
 		get_cpu_data(psci_svc_cpu_data.target_pwrlvl)
-#define psci_set_suspend_pwrlvl(target_lvl) \
-		set_cpu_data(psci_svc_cpu_data.target_pwrlvl, target_lvl)
-#define psci_set_cpu_local_state(state) \
-		set_cpu_data(psci_svc_cpu_data.local_state, state)
+#define psci_set_suspend_pwrlvl(_target_lvl) \
+		set_cpu_data(psci_svc_cpu_data.target_pwrlvl, _target_lvl)
+#define psci_set_cpu_local_state(_state) \
+		set_cpu_data(psci_svc_cpu_data.local_state, _state)
 #define psci_get_cpu_local_state() \
 		get_cpu_data(psci_svc_cpu_data.local_state)
-#define psci_get_cpu_local_state_by_idx(idx) \
-		get_cpu_data_by_index(idx, psci_svc_cpu_data.local_state)
+#define psci_get_cpu_local_state_by_idx(_idx) \
+		get_cpu_data_by_index(_idx, psci_svc_cpu_data.local_state)
 
 /*
  * Helper macros for the CPU level spinlocks
  */
-#define psci_spin_lock_cpu(idx)	spin_lock(&psci_cpu_pd_nodes[idx].cpu_lock)
-#define psci_spin_unlock_cpu(idx) spin_unlock(&psci_cpu_pd_nodes[idx].cpu_lock)
+#define psci_spin_lock_cpu(_idx) spin_lock(&psci_cpu_pd_nodes[_idx].cpu_lock)
+#define psci_spin_unlock_cpu(_idx) spin_unlock(&psci_cpu_pd_nodes[_idx].cpu_lock)
 
 /* Helper macro to identify a CPU standby request in PSCI Suspend call */
-#define is_cpu_standby_req(is_power_down_state, retn_lvl) \
-		(((!(is_power_down_state)) && ((retn_lvl) == 0)) ? 1 : 0)
+#define is_cpu_standby_req(_is_power_down_state, _retn_lvl) \
+		(((!(_is_power_down_state)) && ((_retn_lvl) == 0)) ? 1 : 0)
 
 /*******************************************************************************
  * The following two data structures implement the power domain tree. The tree
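
`is_cpu_standby_req()` above classifies a CPU_SUSPEND request as standby exactly when the state is not a power-down state and the retention level is 0. A quick truth-table check:

```c
#include <assert.h>

/* Same predicate as is_cpu_standby_req() */
#define is_cpu_standby_req(_is_power_down_state, _retn_lvl) \
		(((!(_is_power_down_state)) && ((_retn_lvl) == 0)) ? 1 : 0)

int main(void)
{
	assert(is_cpu_standby_req(0, 0) == 1);	/* retention at CPU level */
	assert(is_cpu_standby_req(1, 0) == 0);	/* power-down, not standby */
	assert(is_cpu_standby_req(0, 1) == 0);	/* deeper retention level */
	return 0;
}
```
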
diff --git a/lib/xlat_tables_v2/xlat_tables_internal.c b/lib/xlat_tables_v2/xlat_tables_internal.c
index 8be6d94..31d3365 100644
--- a/lib/xlat_tables_v2/xlat_tables_internal.c
+++ b/lib/xlat_tables_v2/xlat_tables_internal.c
@@ -735,7 +735,7 @@
 
 void mmap_add_region_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
 {
-	mmap_region_t *mm_cursor = ctx->mmap;
+	mmap_region_t *mm_cursor = ctx->mmap, *mm_destination;
 	const mmap_region_t *mm_end = ctx->mmap + ctx->mmap_num;
 	mmap_region_t *mm_last;
 	unsigned long long end_pa = mm->base_pa + mm->size - 1;
@@ -802,9 +802,10 @@
 	 * that there is free space.
 	 */
 	assert(mm_last->size == 0U);
 
 	/* Make room for new region by moving other regions up by one place */
-	memmove(mm_cursor + 1, mm_cursor,
+	mm_destination = mm_cursor + 1;
+	memmove(mm_destination, mm_cursor,
 		(uintptr_t)mm_last - (uintptr_t)mm_cursor);
 
 	/*
diff --git a/plat/arm/board/fvp/platform.mk b/plat/arm/board/fvp/platform.mk
index f807dc6..3a28733 100644
--- a/plat/arm/board/fvp/platform.mk
+++ b/plat/arm/board/fvp/platform.mk
@@ -114,7 +114,9 @@
 				lib/cpus/aarch64/cortex_a57.S			\
 				lib/cpus/aarch64/cortex_a72.S			\
 				lib/cpus/aarch64/cortex_a73.S			\
-				lib/cpus/aarch64/cortex_a75.S
+				lib/cpus/aarch64/cortex_a75.S			\
+				lib/cpus/aarch64/cortex_a76.S			\
+				lib/cpus/aarch64/cortex_ares.S
 else
 FVP_CPU_LIBS		+=	lib/cpus/aarch32/cortex_a32.S
 endif
@@ -204,8 +206,12 @@
 # Enable Activity Monitor Unit extensions by default
 ENABLE_AMU			:=	1
 
+# Enable dynamic mitigation support by default
+DYNAMIC_WORKAROUND_CVE_2018_3639	:=	1
+
 ifeq (${ENABLE_AMU},1)
 BL31_SOURCES		+=	lib/cpus/aarch64/cortex_a75_pubsub.c	\
+				lib/cpus/aarch64/cortex_ares_pubsub.c	\
 				lib/cpus/aarch64/cpuamu.c		\
 				lib/cpus/aarch64/cpuamu_helpers.S
 endif
diff --git a/plat/arm/css/drivers/mhu/css_mhu_doorbell.h b/plat/arm/css/drivers/mhu/css_mhu_doorbell.h
index 3c94536..cb75ed0 100644
--- a/plat/arm/css/drivers/mhu/css_mhu_doorbell.h
+++ b/plat/arm/css/drivers/mhu/css_mhu_doorbell.h
@@ -18,8 +18,8 @@
 #define MHU_V2_ACCESS_REQ_OFFSET		0xF88
 #define MHU_V2_ACCESS_READY_OFFSET		0xF8C
 
-#define SENDER_REG_STAT(CHANNEL)	(0x20 * (CHANNEL))
-#define SENDER_REG_SET(CHANNEL)		(0x20 * (CHANNEL)) + 0xC
+#define SENDER_REG_STAT(_channel)	(0x20 * (_channel))
+#define SENDER_REG_SET(_channel)	((0x20 * (_channel)) + 0xC)
 
 /* Helper macro to ring doorbell */
 #define MHU_RING_DOORBELL(addr, modify_mask, preserve_mask)	do {	\
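
Besides the lower-cased parameter, the `SENDER_REG_SET()` change adds the outer parentheses the old definition lacked; without them, ordinary operator precedence breaks the macro as soon as it is embedded in a larger expression. A minimal demonstration:

```c
#include <assert.h>

#define SENDER_REG_SET_OLD(_channel)	(0x20 * (_channel)) + 0xC
#define SENDER_REG_SET_NEW(_channel)	((0x20 * (_channel)) + 0xC)

int main(void)
{
	/* Simple addition happens to work with the old macro ... */
	assert(0x1000 + SENDER_REG_SET_OLD(1) == 0x102C);
	/* ... but multiplication exposes the missing parentheses: */
	assert(2 * SENDER_REG_SET_OLD(1) == 0x4C);	/* 2*0x20 + 0xC  */
	assert(2 * SENDER_REG_SET_NEW(1) == 0x58);	/* 2*(0x20+0xC) */
	return 0;
}
```
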
diff --git a/plat/arm/css/drivers/scmi/scmi_private.h b/plat/arm/css/drivers/scmi/scmi_private.h
index a07841e..67fe748 100644
--- a/plat/arm/css/drivers/scmi/scmi_private.h
+++ b/plat/arm/css/drivers/scmi/scmi_private.h
@@ -60,14 +60,14 @@
  * Helper macro to create an SCMI message header given protocol, message id
  * and token.
  */
-#define SCMI_MSG_CREATE(protocol, msg_id, token)				\
-	((((protocol) & SCMI_MSG_PROTO_ID_MASK) << SCMI_MSG_PROTO_ID_SHIFT) |	\
-	(((msg_id) & SCMI_MSG_ID_MASK) << SCMI_MSG_ID_SHIFT) |			\
-	(((token) & SCMI_MSG_TOKEN_MASK) << SCMI_MSG_TOKEN_SHIFT))
+#define SCMI_MSG_CREATE(_protocol, _msg_id, _token)				\
+	((((_protocol) & SCMI_MSG_PROTO_ID_MASK) << SCMI_MSG_PROTO_ID_SHIFT) |	\
+	(((_msg_id) & SCMI_MSG_ID_MASK) << SCMI_MSG_ID_SHIFT) |			\
+	(((_token) & SCMI_MSG_TOKEN_MASK) << SCMI_MSG_TOKEN_SHIFT))
 
 /* Helper macro to get the token from a SCMI message header */
-#define SCMI_MSG_GET_TOKEN(msg)				\
-	(((msg) >> SCMI_MSG_TOKEN_SHIFT) & SCMI_MSG_TOKEN_MASK)
+#define SCMI_MSG_GET_TOKEN(_msg)				\
+	(((_msg) >> SCMI_MSG_TOKEN_SHIFT) & SCMI_MSG_TOKEN_MASK)
 
 /* SCMI Channel Status bit fields */
 #define SCMI_CH_STATUS_RES0_MASK	0xFFFFFFFE
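
`SCMI_MSG_CREATE()` just ORs three masked-and-shifted fields into one 32-bit header word, and `SCMI_MSG_GET_TOKEN()` reverses the token part. A self-contained round-trip check (the shift and mask values below are illustrative, not necessarily the driver's real layout):

```c
#include <assert.h>
#include <stdint.h>

/* Illustrative field layout; see scmi_private.h for the real values */
#define SCMI_MSG_ID_SHIFT	0
#define SCMI_MSG_ID_MASK	0xffU
#define SCMI_MSG_PROTO_ID_SHIFT	10
#define SCMI_MSG_PROTO_ID_MASK	0xffU
#define SCMI_MSG_TOKEN_SHIFT	18
#define SCMI_MSG_TOKEN_MASK	0x3ffU

#define SCMI_MSG_CREATE(_protocol, _msg_id, _token)			\
	((((_protocol) & SCMI_MSG_PROTO_ID_MASK) << SCMI_MSG_PROTO_ID_SHIFT) | \
	(((_msg_id) & SCMI_MSG_ID_MASK) << SCMI_MSG_ID_SHIFT) |	\
	(((_token) & SCMI_MSG_TOKEN_MASK) << SCMI_MSG_TOKEN_SHIFT))

#define SCMI_MSG_GET_TOKEN(_msg)				\
	(((_msg) >> SCMI_MSG_TOKEN_SHIFT) & SCMI_MSG_TOKEN_MASK)

int main(void)
{
	uint32_t msg = SCMI_MSG_CREATE(0x11, 0x4, 0x2a);

	assert(SCMI_MSG_GET_TOKEN(msg) == 0x2a);	/* token round-trips */
	return 0;
}
```
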
diff --git a/plat/arm/css/drivers/scp/css_pm_scmi.c b/plat/arm/css/drivers/scp/css_pm_scmi.c
index c0ed487..3a25509 100644
--- a/plat/arm/css/drivers/scp/css_pm_scmi.c
+++ b/plat/arm/css/drivers/scp/css_pm_scmi.c
@@ -36,21 +36,21 @@
 #define SCMI_PWR_STATE_MAX_PWR_LVL_WIDTH	4
 #define SCMI_PWR_STATE_MAX_PWR_LVL_MASK		\
 				((1 << SCMI_PWR_STATE_MAX_PWR_LVL_WIDTH) - 1)
-#define SCMI_SET_PWR_STATE_MAX_PWR_LVL(pwr_state, max_lvl)		\
-		(pwr_state) |= ((max_lvl) & SCMI_PWR_STATE_MAX_PWR_LVL_MASK)	\
+#define SCMI_SET_PWR_STATE_MAX_PWR_LVL(_power_state, _max_level)		\
+		(_power_state) |= ((_max_level) & SCMI_PWR_STATE_MAX_PWR_LVL_MASK)\
 				<< SCMI_PWR_STATE_MAX_PWR_LVL_SHIFT
-#define SCMI_GET_PWR_STATE_MAX_PWR_LVL(pwr_state)		\
-		(((pwr_state) >> SCMI_PWR_STATE_MAX_PWR_LVL_SHIFT)	\
+#define SCMI_GET_PWR_STATE_MAX_PWR_LVL(_power_state)		\
+		(((_power_state) >> SCMI_PWR_STATE_MAX_PWR_LVL_SHIFT)	\
 				& SCMI_PWR_STATE_MAX_PWR_LVL_MASK)
 
 #define SCMI_PWR_STATE_LVL_WIDTH		4
 #define SCMI_PWR_STATE_LVL_MASK			\
 				((1 << SCMI_PWR_STATE_LVL_WIDTH) - 1)
-#define SCMI_SET_PWR_STATE_LVL(pwr_state, lvl, lvl_state)		\
-		(pwr_state) |= ((lvl_state) & SCMI_PWR_STATE_LVL_MASK)	\
-				<< (SCMI_PWR_STATE_LVL_WIDTH * (lvl))
-#define SCMI_GET_PWR_STATE_LVL(pwr_state, lvl)		\
-		(((pwr_state) >> (SCMI_PWR_STATE_LVL_WIDTH * (lvl))) &	\
+#define SCMI_SET_PWR_STATE_LVL(_power_state, _level, _level_state)		\
+		(_power_state) |= ((_level_state) & SCMI_PWR_STATE_LVL_MASK)	\
+				<< (SCMI_PWR_STATE_LVL_WIDTH * (_level))
+#define SCMI_GET_PWR_STATE_LVL(_power_state, _level)		\
+		(((_power_state) >> (SCMI_PWR_STATE_LVL_WIDTH * (_level))) &	\
 				SCMI_PWR_STATE_LVL_MASK)
 
 /*
@@ -69,7 +69,7 @@
 static void *scmi_handle;
 
 /* The SCMI channel global object */
-static scmi_channel_t scmi_channel;
+static scmi_channel_t channel;
 
 ARM_INSTANTIATE_LOCK;
 
@@ -308,9 +308,9 @@
 
 void plat_arm_pwrc_setup(void)
 {
-	scmi_channel.info = &plat_css_scmi_plat_info;
-	scmi_channel.lock = ARM_LOCK_GET_INSTANCE;
-	scmi_handle = scmi_init(&scmi_channel);
+	channel.info = &plat_css_scmi_plat_info;
+	channel.lock = ARM_LOCK_GET_INSTANCE;
+	scmi_handle = scmi_init(&channel);
 	if (scmi_handle == NULL) {
 		ERROR("SCMI Initialization failed\n");
 		panic();
diff --git a/plat/arm/css/drivers/sds/sds_private.h b/plat/arm/css/drivers/sds/sds_private.h
index 649576b..43b97f6 100644
--- a/plat/arm/css/drivers/sds/sds_private.h
+++ b/plat/arm/css/drivers/sds/sds_private.h
@@ -67,18 +67,18 @@
 	uint32_t reg[2];
 } struct_header_t;
 
-#define GET_SDS_HEADER_ID(header)			\
-	((((struct_header_t *)(header))->reg[0]) & SDS_HEADER_ID_MASK)
-#define GET_SDS_HEADER_VERSION(header)			\
-	(((((struct_header_t *)(header))->reg[0]) >> SDS_HEADER_MINOR_VERSION_SHIFT)\
+#define GET_SDS_HEADER_ID(_header)			\
+	((((struct_header_t *)(_header))->reg[0]) & SDS_HEADER_ID_MASK)
+#define GET_SDS_HEADER_VERSION(_header)			\
+	(((((struct_header_t *)(_header))->reg[0]) >> SDS_HEADER_MINOR_VERSION_SHIFT)\
 	& SDS_HEADER_VERSION_MASK)
-#define GET_SDS_HEADER_STRUCT_SIZE(header)		\
-	(((((struct_header_t *)(header))->reg[1]) >> SDS_HEADER_STRUCT_SIZE_SHIFT)\
+#define GET_SDS_HEADER_STRUCT_SIZE(_header)		\
+	(((((struct_header_t *)(_header))->reg[1]) >> SDS_HEADER_STRUCT_SIZE_SHIFT)\
 	& SDS_HEADER_STRUCT_SIZE_MASK)
-#define IS_SDS_HEADER_VALID(header)			\
-	((((struct_header_t *)(header))->reg[1]) & SDS_HEADER_VALID_MASK)
-#define GET_SDS_STRUCT_FIELD(header, field_offset)	\
-	((((uint8_t *)(header)) + sizeof(struct_header_t)) + (field_offset))
+#define IS_SDS_HEADER_VALID(_header)			\
+	((((struct_header_t *)(_header))->reg[1]) & SDS_HEADER_VALID_MASK)
+#define GET_SDS_STRUCT_FIELD(_header, _field_offset)	\
+	((((uint8_t *)(_header)) + sizeof(struct_header_t)) + (_field_offset))
 
 /* Region Descriptor describing the SDS Memory Region */
 typedef struct region_descriptor {
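
Each SDS structure carries a two-word header, and the renamed accessors above just cast, shift and mask. A compact model with placeholder shift/mask values showing how an ID and a size are pulled out of the packed words, and where the payload begins:

```c
#include <assert.h>
#include <stdint.h>

/* Placeholder field layout; the real masks/shifts live in sds_private.h */
#define SDS_HEADER_ID_MASK		0xffffU
#define SDS_HEADER_STRUCT_SIZE_SHIFT	0
#define SDS_HEADER_STRUCT_SIZE_MASK	0xffffffU

typedef struct struct_header {
	uint32_t reg[2];
} struct_header_t;

#define GET_SDS_HEADER_ID(_header)	\
	((((const struct_header_t *)(_header))->reg[0]) & SDS_HEADER_ID_MASK)
#define GET_SDS_HEADER_STRUCT_SIZE(_header)	\
	(((((const struct_header_t *)(_header))->reg[1]) >> \
		SDS_HEADER_STRUCT_SIZE_SHIFT) & SDS_HEADER_STRUCT_SIZE_MASK)
#define GET_SDS_STRUCT_FIELD(_header, _field_offset)	\
	((((const uint8_t *)(_header)) + sizeof(struct_header_t)) + \
		(_field_offset))

int main(void)
{
	struct_header_t hdr = { { 0x00010042U, 0x00000010U } };

	assert(GET_SDS_HEADER_ID(&hdr) == 0x42);
	assert(GET_SDS_HEADER_STRUCT_SIZE(&hdr) == 0x10);
	/* Payload starts right after the two-word header */
	assert(GET_SDS_STRUCT_FIELD(&hdr, 0) == (const uint8_t *)(&hdr + 1));
	return 0;
}
```
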
diff --git a/services/spd/opteed/opteed_main.c b/services/spd/opteed/opteed_main.c
index ac58e04..01ec2a2 100644
--- a/services/spd/opteed/opteed_main.c
+++ b/services/spd/opteed/opteed_main.c
@@ -34,7 +34,7 @@
  * Address of the entrypoint vector table in OPTEE. It is
  * initialised once on the primary core after a cold boot.
  ******************************************************************************/
-optee_vectors_t *optee_vectors;
+optee_vectors_t *optee_vector_table;
 
 /*******************************************************************************
  * Array to keep track of per-cpu OPTEE state
@@ -71,7 +71,7 @@
 	optee_ctx = &opteed_sp_context[linear_id];
 	assert(&optee_ctx->cpu_ctx == cm_get_context(SECURE));
 
-	cm_set_elr_el3(SECURE, (uint64_t)&optee_vectors->fiq_entry);
+	cm_set_elr_el3(SECURE, (uint64_t)&optee_vector_table->fiq_entry);
 	cm_el1_sysregs_context_restore(SECURE);
 	cm_set_next_eret_context(SECURE);
 
@@ -236,10 +236,10 @@
 		 */
 		if (GET_SMC_TYPE(smc_fid) == SMC_TYPE_FAST) {
 			cm_set_elr_el3(SECURE, (uint64_t)
-					&optee_vectors->fast_smc_entry);
+					&optee_vector_table->fast_smc_entry);
 		} else {
 			cm_set_elr_el3(SECURE, (uint64_t)
-					&optee_vectors->yield_smc_entry);
+					&optee_vector_table->yield_smc_entry);
 		}
 
 		cm_el1_sysregs_context_restore(SECURE);
@@ -279,10 +279,10 @@
 		 * Stash the OPTEE entry points information. This is done
 		 * only once on the primary cpu
 		 */
-		assert(optee_vectors == NULL);
-		optee_vectors = (optee_vectors_t *) x1;
+		assert(optee_vector_table == NULL);
+		optee_vector_table = (optee_vectors_t *) x1;
 
-		if (optee_vectors) {
+		if (optee_vector_table) {
 			set_optee_pstate(optee_ctx->state, OPTEE_PSTATE_ON);
 
 			/*
diff --git a/services/spd/opteed/opteed_pm.c b/services/spd/opteed/opteed_pm.c
index 7efc234..bdacf98 100644
--- a/services/spd/opteed/opteed_pm.c
+++ b/services/spd/opteed/opteed_pm.c
@@ -30,11 +30,11 @@
 	uint32_t linear_id = plat_my_core_pos();
 	optee_context_t *optee_ctx = &opteed_sp_context[linear_id];
 
-	assert(optee_vectors);
+	assert(optee_vector_table);
 	assert(get_optee_pstate(optee_ctx->state) == OPTEE_PSTATE_ON);
 
 	/* Program the entry point and enter OPTEE */
-	cm_set_elr_el3(SECURE, (uint64_t) &optee_vectors->cpu_off_entry);
+	cm_set_elr_el3(SECURE, (uint64_t) &optee_vector_table->cpu_off_entry);
 	rc = opteed_synchronous_sp_entry(optee_ctx);
 
 	/*
@@ -63,11 +63,11 @@
 	uint32_t linear_id = plat_my_core_pos();
 	optee_context_t *optee_ctx = &opteed_sp_context[linear_id];
 
-	assert(optee_vectors);
+	assert(optee_vector_table);
 	assert(get_optee_pstate(optee_ctx->state) == OPTEE_PSTATE_ON);
 
 	/* Program the entry point and enter OPTEE */
-	cm_set_elr_el3(SECURE, (uint64_t) &optee_vectors->cpu_suspend_entry);
+	cm_set_elr_el3(SECURE, (uint64_t) &optee_vector_table->cpu_suspend_entry);
 	rc = opteed_synchronous_sp_entry(optee_ctx);
 
 	/*
@@ -94,11 +94,11 @@
 	optee_context_t *optee_ctx = &opteed_sp_context[linear_id];
 	entry_point_info_t optee_on_entrypoint;
 
-	assert(optee_vectors);
+	assert(optee_vector_table);
 	assert(get_optee_pstate(optee_ctx->state) == OPTEE_PSTATE_OFF);
 
 	opteed_init_optee_ep_state(&optee_on_entrypoint, opteed_rw,
-				(uint64_t)&optee_vectors->cpu_on_entry,
+				(uint64_t)&optee_vector_table->cpu_on_entry,
 				0, 0, 0, optee_ctx);
 
 	/* Initialise this cpu's secure context */
@@ -129,14 +129,14 @@
 	uint32_t linear_id = plat_my_core_pos();
 	optee_context_t *optee_ctx = &opteed_sp_context[linear_id];
 
-	assert(optee_vectors);
+	assert(optee_vector_table);
 	assert(get_optee_pstate(optee_ctx->state) == OPTEE_PSTATE_SUSPEND);
 
 	/* Program the entry point, max_off_pwrlvl and enter the SP */
 	write_ctx_reg(get_gpregs_ctx(&optee_ctx->cpu_ctx),
 		      CTX_GPREG_X0,
 		      max_off_pwrlvl);
-	cm_set_elr_el3(SECURE, (uint64_t) &optee_vectors->cpu_resume_entry);
+	cm_set_elr_el3(SECURE, (uint64_t) &optee_vector_table->cpu_resume_entry);
 	rc = opteed_synchronous_sp_entry(optee_ctx);
 
 	/*
@@ -168,11 +168,11 @@
 	uint32_t linear_id = plat_my_core_pos();
 	optee_context_t *optee_ctx = &opteed_sp_context[linear_id];
 
-	assert(optee_vectors);
+	assert(optee_vector_table);
 	assert(get_optee_pstate(optee_ctx->state) == OPTEE_PSTATE_ON);
 
 	/* Program the entry point */
-	cm_set_elr_el3(SECURE, (uint64_t) &optee_vectors->system_off_entry);
+	cm_set_elr_el3(SECURE, (uint64_t) &optee_vector_table->system_off_entry);
 
 	/* Enter OPTEE. We do not care about the return value because we
 	 * must continue the shutdown anyway */
@@ -188,11 +188,11 @@
 	uint32_t linear_id = plat_my_core_pos();
 	optee_context_t *optee_ctx = &opteed_sp_context[linear_id];
 
-	assert(optee_vectors);
+	assert(optee_vector_table);
 	assert(get_optee_pstate(optee_ctx->state) == OPTEE_PSTATE_ON);
 
 	/* Program the entry point */
-	cm_set_elr_el3(SECURE, (uint64_t) &optee_vectors->system_reset_entry);
+	cm_set_elr_el3(SECURE, (uint64_t) &optee_vector_table->system_reset_entry);
 
 	/* Enter OPTEE. We do not care about the return value because we
 	 * must continue the reset anyway */
diff --git a/services/spd/opteed/opteed_private.h b/services/spd/opteed/opteed_private.h
index 6cda2c8..b77b6d3 100644
--- a/services/spd/opteed/opteed_private.h
+++ b/services/spd/opteed/opteed_private.h
@@ -154,7 +154,7 @@
 
 extern optee_context_t opteed_sp_context[OPTEED_CORE_COUNT];
 extern uint32_t opteed_rw;
-extern struct optee_vectors *optee_vectors;
+extern struct optee_vectors *optee_vector_table;
 #endif /*__ASSEMBLY__*/
 
 #endif /* __OPTEED_PRIVATE_H__ */
diff --git a/services/spd/tspd/tspd_private.h b/services/spd/tspd/tspd_private.h
index 692a967..080e757 100644
--- a/services/spd/tspd/tspd_private.h
+++ b/services/spd/tspd/tspd_private.h
@@ -190,14 +190,14 @@
 } tsp_context_t;
 
 /* Helper macros to store and retrieve tsp args from tsp_context */
-#define store_tsp_args(tsp_ctx, x1, x2)		do {\
-				tsp_ctx->saved_tsp_args[0] = x1;\
-				tsp_ctx->saved_tsp_args[1] = x2;\
+#define store_tsp_args(_tsp_ctx, _x1, _x2)		do {\
+				_tsp_ctx->saved_tsp_args[0] = _x1;\
+				_tsp_ctx->saved_tsp_args[1] = _x2;\
 			} while (0)
 
-#define get_tsp_args(tsp_ctx, x1, x2)	do {\
-				x1 = tsp_ctx->saved_tsp_args[0];\
-				x2 = tsp_ctx->saved_tsp_args[1];\
+#define get_tsp_args(_tsp_ctx, _x1, _x2)	do {\
+				_x1 = _tsp_ctx->saved_tsp_args[0];\
+				_x2 = _tsp_ctx->saved_tsp_args[1];\
 			} while (0)
 
 /* TSPD power management handlers */
diff --git a/services/std_svc/sdei/sdei_intr_mgmt.c b/services/std_svc/sdei/sdei_intr_mgmt.c
index 2717ea4..c0bd9de 100644
--- a/services/std_svc/sdei/sdei_intr_mgmt.c
+++ b/services/std_svc/sdei/sdei_intr_mgmt.c
@@ -39,6 +39,11 @@
 	/* Exception state registers */
 	uint64_t elr_el3;
 	uint64_t spsr_el3;
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
+	/* CVE-2018-3639 mitigation state */
+	uint64_t disable_cve_2018_3639;
+#endif
 } sdei_dispatch_context_t;
 
 /* Per-CPU SDEI state data */
@@ -170,6 +175,18 @@
 	memcpy(disp_ctx->x, tgt_gpregs, sizeof(disp_ctx->x));
 	disp_ctx->spsr_el3 = read_ctx_reg(tgt_el3, CTX_SPSR_EL3);
 	disp_ctx->elr_el3 = read_ctx_reg(tgt_el3, CTX_ELR_EL3);
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
+	cve_2018_3639_t *tgt_cve_2018_3639;
+	tgt_cve_2018_3639 = get_cve_2018_3639_ctx(tgt_ctx);
+
+	/* Save CVE-2018-3639 mitigation state */
+	disp_ctx->disable_cve_2018_3639 = read_ctx_reg(tgt_cve_2018_3639,
+		CTX_CVE_2018_3639_DISABLE);
+
+	/* Force SDEI handler to execute with mitigation enabled by default */
+	write_ctx_reg(tgt_cve_2018_3639, CTX_CVE_2018_3639_DISABLE, 0);
+#endif
 }
 
 static void restore_event_ctx(sdei_dispatch_context_t *disp_ctx, void *tgt_ctx)
@@ -188,6 +205,15 @@
 	memcpy(tgt_gpregs, disp_ctx->x, sizeof(disp_ctx->x));
 	write_ctx_reg(tgt_el3, CTX_SPSR_EL3, disp_ctx->spsr_el3);
 	write_ctx_reg(tgt_el3, CTX_ELR_EL3, disp_ctx->elr_el3);
+
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
+	cve_2018_3639_t *tgt_cve_2018_3639;
+	tgt_cve_2018_3639 = get_cve_2018_3639_ctx(tgt_ctx);
+
+	/* Restore CVE-2018-3639 mitigation state */
+	write_ctx_reg(tgt_cve_2018_3639, CTX_CVE_2018_3639_DISABLE,
+		disp_ctx->disable_cve_2018_3639);
+#endif
 }
 
 static void save_secure_context(void)
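
The two hunks above make the SDEI dispatcher treat the CVE-2018-3639 setting like any other piece of interrupted context: on dispatch the client's choice is stashed and the handler is forced to run mitigated; on completion the stashed value is written back. In C-model form, with the real context accessors abstracted as plain structs:

```c
#include <stdint.h>

/* Simplified stand-ins for the real context structures */
typedef struct {
	uint64_t disable_cve_2018_3639;	/* 0 = mitigation enabled */
} cve_2018_3639_t;

typedef struct {
	uint64_t disable_cve_2018_3639;	/* saved client state */
} sdei_dispatch_context_t;

static void save_event_ctx(sdei_dispatch_context_t *disp_ctx,
			   cve_2018_3639_t *tgt)
{
	/* Save the interrupted context's mitigation choice ... */
	disp_ctx->disable_cve_2018_3639 = tgt->disable_cve_2018_3639;
	/* ... and force the SDEI handler to run with mitigation enabled */
	tgt->disable_cve_2018_3639 = 0;
}

static void restore_event_ctx(const sdei_dispatch_context_t *disp_ctx,
			      cve_2018_3639_t *tgt)
{
	/* Hand the client's original choice back on event completion */
	tgt->disable_cve_2018_3639 = disp_ctx->disable_cve_2018_3639;
}

int main(void)
{
	cve_2018_3639_t ctx = { 1 };		/* client had it disabled */
	sdei_dispatch_context_t disp;

	save_event_ctx(&disp, &ctx);		/* handler runs mitigated */
	restore_event_ctx(&disp, &ctx);		/* client state restored */
	return (ctx.disable_cve_2018_3639 == 1) ? 0 : 1;
}
```
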