Workaround for CVE-2017-5715 on Cortex-A57 and Cortex-A72

Invalidate the Branch Target Buffer (BTB) on entry to EL3 by disabling
and re-enabling the MMU.  To achieve this without executing any branch
instruction beforehand, a per-CPU vector table is installed via
VBAR_EL3; each vector entry applies the workaround and then branches to
the corresponding entry in the main vector table.  A side effect of
this change is that the main vector base is now configured before any
reset handling, which allows the per-CPU reset function to override the
VBAR_EL3 setting.

This workaround is enabled by default on the affected CPUs.
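
For reference, the sequence installed in the per-CPU vectors boils down
to the following (a condensed sketch of the apply_workaround macro added
below; the save/restore of x0 and x1 around it is omitted here):

	mrs	x0, sctlr_el3
	bic	x1, x0, #SCTLR_M_BIT	/* Disable MMU */
	msr	sctlr_el3, x1
	isb
	msr	sctlr_el3, x0		/* Restore MMU config */
	isb

The workaround can be disabled at build time by setting
WORKAROUND_CVE_2017_5715=0 on the make command line, for example
(the platform name here is only illustrative):

	make PLAT=fvp WORKAROUND_CVE_2017_5715=0 all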

Change-Id: I97788d38463a5840a410e3cea85ed297a1678265
Signed-off-by: Dimitris Papastamos <dimitris.papastamos@arm.com>
diff --git a/bl31/aarch64/runtime_exceptions.S b/bl31/aarch64/runtime_exceptions.S
index d8fbb9b..9b7735f 100644
--- a/bl31/aarch64/runtime_exceptions.S
+++ b/bl31/aarch64/runtime_exceptions.S
@@ -14,6 +14,26 @@
 
 	.globl	runtime_exceptions
 
+	.globl	sync_exception_sp_el0
+	.globl	irq_sp_el0
+	.globl	fiq_sp_el0
+	.globl	serror_sp_el0
+
+	.globl	sync_exception_sp_elx
+	.globl	irq_sp_elx
+	.globl	fiq_sp_elx
+	.globl	serror_sp_elx
+
+	.globl	sync_exception_aarch64
+	.globl	irq_aarch64
+	.globl	fiq_aarch64
+	.globl	serror_aarch64
+
+	.globl	sync_exception_aarch32
+	.globl	irq_aarch32
+	.globl	fiq_aarch32
+	.globl	serror_aarch32
+
 	/* ---------------------------------------------------------------------
 	 * This macro handles Synchronous exceptions.
 	 * Only SMC exceptions are supported.
diff --git a/bl31/bl31.mk b/bl31/bl31.mk
index fdcc931..0732e05 100644
--- a/bl31/bl31.mk
+++ b/bl31/bl31.mk
@@ -58,6 +58,10 @@
 BL31_SOURCES		+=	lib/extensions/sve/sve.c
 endif
 
+ifeq (${WORKAROUND_CVE_2017_5715},1)
+BL31_SOURCES		+=	lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S
+endif
+
 BL31_LINKERFILE		:=	bl31/bl31.ld.S
 
 # Flag used to indicate if Crash reporting via console should be included
diff --git a/docs/cpu-specific-build-macros.rst b/docs/cpu-specific-build-macros.rst
index f74b459..014817d 100644
--- a/docs/cpu-specific-build-macros.rst
+++ b/docs/cpu-specific-build-macros.rst
@@ -11,6 +11,15 @@
 operations framework to enable errata workarounds and to enable optimizations
 for a specific CPU on a platform.
 
+Security Vulnerability Workarounds
+----------------------------------
+
+ARM Trusted Firmware exports a series of build flags which control which
+security vulnerability workarounds should be applied at runtime.
+
+-  ``WORKAROUND_CVE_2017_5715``: Enables the security workaround for
+   `CVE-2017-5715`_. Defaults to 1.
+
 CPU Errata Workarounds
 ----------------------
 
@@ -142,6 +151,7 @@
 
 *Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.*
 
+.. _CVE-2017-5715: http://www.cve.mitre.org/cgi-bin/cvename.cgi?name=2017-5715
 .. _Cortex-A53 MPCore Software Developers Errata Notice: http://infocenter.arm.com/help/topic/com.arm.doc.epm048406/Cortex_A53_MPCore_Software_Developers_Errata_Notice.pdf
 .. _Cortex-A57 MPCore Software Developers Errata Notice: http://infocenter.arm.com/help/topic/com.arm.doc.epm049219/cortex_a57_mpcore_software_developers_errata_notice.pdf
 .. _Cortex-A72 MPCore Software Developers Errata Notice: http://infocenter.arm.com/help/topic/com.arm.doc.epm012079/index.html
diff --git a/include/common/aarch64/el3_common_macros.S b/include/common/aarch64/el3_common_macros.S
index 63a0fa7..defd4a2 100644
--- a/include/common/aarch64/el3_common_macros.S
+++ b/include/common/aarch64/el3_common_macros.S
@@ -13,7 +13,7 @@
 	/*
 	 * Helper macro to initialise EL3 registers we care about.
 	 */
-	.macro el3_arch_init_common _exception_vectors
+	.macro el3_arch_init_common
 	/* ---------------------------------------------------------------------
 	 * SCTLR_EL3 has already been initialised - read current value before
 	 * modifying.
@@ -50,14 +50,6 @@
 #endif /* IMAGE_BL31 */
 
 	/* ---------------------------------------------------------------------
-	 * Set the exception vectors.
-	 * ---------------------------------------------------------------------
-	 */
-	adr	x0, \_exception_vectors
-	msr	vbar_el3, x0
-	isb
-
-	/* ---------------------------------------------------------------------
 	 * Initialise SCR_EL3, setting all fields rather than relying on hw.
 	 * All fields are architecturally UNKNOWN on reset. The following fields
 	 * do not change during the TF lifetime. The remaining fields are set to
@@ -221,6 +213,14 @@
 	.endif /* _warm_boot_mailbox */
 
 	/* ---------------------------------------------------------------------
+	 * Set the exception vectors.
+	 * ---------------------------------------------------------------------
+	 */
+	adr	x0, \_exception_vectors
+	msr	vbar_el3, x0
+	isb
+
+	/* ---------------------------------------------------------------------
 	 * It is a cold boot.
 	 * Perform any processor specific actions upon reset e.g. cache, TLB
 	 * invalidations etc.
@@ -228,7 +228,7 @@
 	 */
 	bl	reset_handler
 
-	el3_arch_init_common \_exception_vectors
+	el3_arch_init_common
 
 	.if \_secondary_cold_boot
 		/* -------------------------------------------------------------
diff --git a/lib/cpus/aarch64/cortex_a57.S b/lib/cpus/aarch64/cortex_a57.S
index a720e98..683be47 100644
--- a/lib/cpus/aarch64/cortex_a57.S
+++ b/lib/cpus/aarch64/cortex_a57.S
@@ -383,6 +383,11 @@
 	bl	errata_a57_859972_wa
 #endif
 
+#if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
+	adr	x0, workaround_mmu_runtime_exceptions
+	msr	vbar_el3, x0
+#endif
+
 	/* ---------------------------------------------
 	 * Enable the SMP bit.
 	 * ---------------------------------------------
diff --git a/lib/cpus/aarch64/cortex_a72.S b/lib/cpus/aarch64/cortex_a72.S
index b034125..93821b7 100644
--- a/lib/cpus/aarch64/cortex_a72.S
+++ b/lib/cpus/aarch64/cortex_a72.S
@@ -110,6 +110,12 @@
 	mov	x0, x18
 	bl	errata_a72_859971_wa
 #endif
+
+#if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
+	adr	x0, workaround_mmu_runtime_exceptions
+	msr	vbar_el3, x0
+#endif
+
 	/* ---------------------------------------------
 	 * Enable the SMP bit.
 	 * ---------------------------------------------
diff --git a/lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S b/lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S
new file mode 100644
index 0000000..f478148
--- /dev/null
+++ b/lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <context.h>
+
+	.globl	workaround_mmu_runtime_exceptions
+
+vector_base workaround_mmu_runtime_exceptions
+
+	.macro	apply_workaround
+	stp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+	mrs	x0, sctlr_el3
+	/* Disable MMU */
+	bic	x1, x0, #SCTLR_M_BIT
+	msr	sctlr_el3, x1
+	isb
+	/* Restore MMU config */
+	msr	sctlr_el3, x0
+	isb
+	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+	.endm
+
+	/* ---------------------------------------------------------------------
+	 * Current EL with SP_EL0 : 0x0 - 0x200
+	 * ---------------------------------------------------------------------
+	 */
+vector_entry workaround_mmu_sync_exception_sp_el0
+	b	sync_exception_sp_el0
+	check_vector_size workaround_mmu_sync_exception_sp_el0
+
+vector_entry workaround_mmu_irq_sp_el0
+	b	irq_sp_el0
+	check_vector_size workaround_mmu_irq_sp_el0
+
+vector_entry workaround_mmu_fiq_sp_el0
+	b	fiq_sp_el0
+	check_vector_size workaround_mmu_fiq_sp_el0
+
+vector_entry workaround_mmu_serror_sp_el0
+	b	serror_sp_el0
+	check_vector_size workaround_mmu_serror_sp_el0
+
+	/* ---------------------------------------------------------------------
+	 * Current EL with SP_ELx: 0x200 - 0x400
+	 * ---------------------------------------------------------------------
+	 */
+vector_entry workaround_mmu_sync_exception_sp_elx
+	b	sync_exception_sp_elx
+	check_vector_size workaround_mmu_sync_exception_sp_elx
+
+vector_entry workaround_mmu_irq_sp_elx
+	b	irq_sp_elx
+	check_vector_size workaround_mmu_irq_sp_elx
+
+vector_entry workaround_mmu_fiq_sp_elx
+	b	fiq_sp_elx
+	check_vector_size workaround_mmu_fiq_sp_elx
+
+vector_entry workaround_mmu_serror_sp_elx
+	b	serror_sp_elx
+	check_vector_size workaround_mmu_serror_sp_elx
+
+	/* ---------------------------------------------------------------------
+	 * Lower EL using AArch64 : 0x400 - 0x600
+	 * ---------------------------------------------------------------------
+	 */
+vector_entry workaround_mmu_sync_exception_aarch64
+	apply_workaround
+	b	sync_exception_aarch64
+	check_vector_size workaround_mmu_sync_exception_aarch64
+
+vector_entry workaround_mmu_irq_aarch64
+	apply_workaround
+	b	irq_aarch64
+	check_vector_size workaround_mmu_irq_aarch64
+
+vector_entry workaround_mmu_fiq_aarch64
+	apply_workaround
+	b	fiq_aarch64
+	check_vector_size workaround_mmu_fiq_aarch64
+
+vector_entry workaround_mmu_serror_aarch64
+	apply_workaround
+	b	serror_aarch64
+	check_vector_size workaround_mmu_serror_aarch64
+
+	/* ---------------------------------------------------------------------
+	 * Lower EL using AArch32 : 0x600 - 0x800
+	 * ---------------------------------------------------------------------
+	 */
+vector_entry workaround_mmu_sync_exception_aarch32
+	apply_workaround
+	b	sync_exception_aarch32
+	check_vector_size workaround_mmu_sync_exception_aarch32
+
+vector_entry workaround_mmu_irq_aarch32
+	apply_workaround
+	b	irq_aarch32
+	check_vector_size workaround_mmu_irq_aarch32
+
+vector_entry workaround_mmu_fiq_aarch32
+	apply_workaround
+	b	fiq_aarch32
+	check_vector_size workaround_mmu_fiq_aarch32
+
+vector_entry workaround_mmu_serror_aarch32
+	apply_workaround
+	b	serror_aarch32
+	check_vector_size workaround_mmu_serror_aarch32
diff --git a/lib/cpus/cpu-ops.mk b/lib/cpus/cpu-ops.mk
index 31adfb4..3ba8c1f 100644
--- a/lib/cpus/cpu-ops.mk
+++ b/lib/cpus/cpu-ops.mk
@@ -16,6 +16,8 @@
 # It is enabled by default.
 A57_DISABLE_NON_TEMPORAL_HINT	?=1
 
+WORKAROUND_CVE_2017_5715	?=1
+
 # Process SKIP_A57_L1_FLUSH_PWR_DWN flag
 $(eval $(call assert_boolean,SKIP_A57_L1_FLUSH_PWR_DWN))
 $(eval $(call add_define,SKIP_A57_L1_FLUSH_PWR_DWN))
@@ -28,6 +30,9 @@
 $(eval $(call assert_boolean,A57_DISABLE_NON_TEMPORAL_HINT))
 $(eval $(call add_define,A57_DISABLE_NON_TEMPORAL_HINT))
 
+# Process WORKAROUND_CVE_2017_5715 flag
+$(eval $(call assert_boolean,WORKAROUND_CVE_2017_5715))
+$(eval $(call add_define,WORKAROUND_CVE_2017_5715))
 
 # CPU Errata Build flags.
 # These should be enabled by the platform if the erratum workaround needs to be