xtensa: add support for KASAN

Cover kernel addresses above 0x90000000 with the shadow map. Enable
HAVE_ARCH_KASAN when MMU is enabled. Provide kasan_early_init that fills
the shadow map with writable mappings of kasan_zero_page; call
kasan_early_init right after MMU initialization in setup_arch. Provide
kasan_init that allocates proper shadow map pages from memblock and puts
these pages into the shadow map for addresses from the VMALLOC area to
the end of KSEG; call kasan_init right after memblock initialization.
Don't instrument the boot code, the MMU and KASAN initialization code,
or the page fault handler with KASAN. Make the kernel stack 4 times
larger when KASAN is enabled to avoid stack overflows.
GCC 7.3, 8 or newer is required to build the xtensa kernel with KASAN.
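
The KASAN_SHADOW_OFFSET value added to Kconfig follows from the generic
mem-to-shadow transformation, shadow = (addr >> KASAN_SHADOW_SCALE_SHIFT)
+ KASAN_SHADOW_OFFSET. As a quick check with the addresses used in this
patch (KASAN_SHADOW_SCALE_SHIFT is 3, i.e. one shadow byte per 8 bytes):

  0x90000000 >> 3         = 0x12000000    scaled KASAN_START_VADDR
  0x80400000 - 0x12000000 = 0x6e400000    KASAN_SHADOW_START - scaled start

so the 0x70000000 bytes above 0x90000000 map onto a 224 MB shadow region
placed right after the page table and ending at 0x8e400000, and
kasan_init() verifies this relation with a BUILD_BUG_ON.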

Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
diff --git a/Documentation/features/debug/KASAN/arch-support.txt b/Documentation/features/debug/KASAN/arch-support.txt
index 76bbd7f..8abb013 100644
--- a/Documentation/features/debug/KASAN/arch-support.txt
+++ b/Documentation/features/debug/KASAN/arch-support.txt
@@ -35,5 +35,5 @@
     |          um: | TODO |
     |   unicore32: | TODO |
     |         x86: |  ok  |
-    |      xtensa: | TODO |
+    |      xtensa: |  ok  |
     -----------------------
diff --git a/Documentation/xtensa/mmu.txt b/Documentation/xtensa/mmu.txt
index 1692139..318114d 100644
--- a/Documentation/xtensa/mmu.txt
+++ b/Documentation/xtensa/mmu.txt
@@ -71,6 +71,8 @@
 +------------------+
 | Page table       |  XCHAL_PAGE_TABLE_VADDR   0x80000000  XCHAL_PAGE_TABLE_SIZE
 +------------------+
+| KASAN shadow map |  KASAN_SHADOW_START       0x80400000  KASAN_SHADOW_SIZE
++------------------+                           0x8e400000
 +------------------+
 | VMALLOC area     |  VMALLOC_START            0xc0000000  128MB - 64KB
 +------------------+  VMALLOC_END
@@ -111,6 +113,8 @@
 +------------------+
 | Page table       |  XCHAL_PAGE_TABLE_VADDR   0x80000000  XCHAL_PAGE_TABLE_SIZE
 +------------------+
+| KASAN shadow map |  KASAN_SHADOW_START       0x80400000  KASAN_SHADOW_SIZE
++------------------+                           0x8e400000
 +------------------+
 | VMALLOC area     |  VMALLOC_START            0xa0000000  128MB - 64KB
 +------------------+  VMALLOC_END
@@ -152,6 +156,8 @@
 +------------------+
 | Page table       |  XCHAL_PAGE_TABLE_VADDR   0x80000000  XCHAL_PAGE_TABLE_SIZE
 +------------------+
+| KASAN shadow map |  KASAN_SHADOW_START       0x80400000  KASAN_SHADOW_SIZE
++------------------+                           0x8e400000
 +------------------+
 | VMALLOC area     |  VMALLOC_START            0x90000000  128MB - 64KB
 +------------------+  VMALLOC_END
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index fffe05b..f9f95d6 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -15,6 +15,7 @@
 	select GENERIC_IRQ_SHOW
 	select GENERIC_PCI_IOMAP
 	select GENERIC_SCHED_CLOCK
+	select HAVE_ARCH_KASAN if MMU
 	select HAVE_CC_STACKPROTECTOR
 	select HAVE_DEBUG_KMEMLEAK
 	select HAVE_DMA_API_DEBUG
@@ -80,6 +81,10 @@
 config HAVE_XTENSA_GPIO32
 	def_bool n
 
+config KASAN_SHADOW_OFFSET
+	hex
+	default 0x6e400000
+
 menu "Processor type and features"
 
 choice
diff --git a/arch/xtensa/boot/lib/Makefile b/arch/xtensa/boot/lib/Makefile
index 2fe1829..355127f 100644
--- a/arch/xtensa/boot/lib/Makefile
+++ b/arch/xtensa/boot/lib/Makefile
@@ -15,6 +15,8 @@
 CFLAGS_REMOVE_inffast.o = -pg
 endif
 
+KASAN_SANITIZE := n
+
 CFLAGS_REMOVE_inflate.o += -fstack-protector -fstack-protector-strong
 CFLAGS_REMOVE_zmem.o += -fstack-protector -fstack-protector-strong
 CFLAGS_REMOVE_inftrees.o += -fstack-protector -fstack-protector-strong
diff --git a/arch/xtensa/include/asm/kasan.h b/arch/xtensa/include/asm/kasan.h
new file mode 100644
index 0000000..54be808
--- /dev/null
+++ b/arch/xtensa/include/asm/kasan.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_KASAN_H
+#define __ASM_KASAN_H
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_KASAN
+
+#include <linux/kernel.h>
+#include <linux/sizes.h>
+#include <asm/kmem_layout.h>
+
+/* Start of area covered by KASAN */
+#define KASAN_START_VADDR __XTENSA_UL_CONST(0x90000000)
+/* Start of the shadow map */
+#define KASAN_SHADOW_START (XCHAL_PAGE_TABLE_VADDR + XCHAL_PAGE_TABLE_SIZE)
+/* Size of the shadow map */
+#define KASAN_SHADOW_SIZE (-KASAN_START_VADDR >> KASAN_SHADOW_SCALE_SHIFT)
+/* Offset for mem to shadow address transformation */
+#define KASAN_SHADOW_OFFSET __XTENSA_UL_CONST(CONFIG_KASAN_SHADOW_OFFSET)
+
+void __init kasan_early_init(void);
+void __init kasan_init(void);
+
+#else
+
+static inline void kasan_early_init(void)
+{
+}
+
+static inline void kasan_init(void)
+{
+}
+
+#endif
+#endif
+#endif
diff --git a/arch/xtensa/include/asm/kmem_layout.h b/arch/xtensa/include/asm/kmem_layout.h
index 28f9260..2317c83 100644
--- a/arch/xtensa/include/asm/kmem_layout.h
+++ b/arch/xtensa/include/asm/kmem_layout.h
@@ -71,7 +71,11 @@
 
 #endif
 
+#ifndef CONFIG_KASAN
 #define KERNEL_STACK_SHIFT	13
+#else
+#define KERNEL_STACK_SHIFT	15
+#endif
 #define KERNEL_STACK_SIZE	(1 << KERNEL_STACK_SHIFT)
 
 #endif
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index 30dd5b2..3880225 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -12,9 +12,9 @@
 #define _XTENSA_PGTABLE_H
 
 #define __ARCH_USE_5LEVEL_HACK
-#include <asm-generic/pgtable-nopmd.h>
 #include <asm/page.h>
 #include <asm/kmem_layout.h>
+#include <asm-generic/pgtable-nopmd.h>
 
 /*
  * We only use two ring levels, user and kernel space.
@@ -170,6 +170,7 @@
 #define PAGE_SHARED_EXEC \
 	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITABLE | _PAGE_HW_EXEC)
 #define PAGE_KERNEL	   __pgprot(_PAGE_PRESENT | _PAGE_HW_WRITE)
+#define PAGE_KERNEL_RO	   __pgprot(_PAGE_PRESENT)
 #define PAGE_KERNEL_EXEC   __pgprot(_PAGE_PRESENT|_PAGE_HW_WRITE|_PAGE_HW_EXEC)
 
 #if (DCACHE_WAY_SIZE > PAGE_SIZE)
diff --git a/arch/xtensa/include/asm/string.h b/arch/xtensa/include/asm/string.h
index 8d5d9df..586bad9fe 100644
--- a/arch/xtensa/include/asm/string.h
+++ b/arch/xtensa/include/asm/string.h
@@ -108,14 +108,33 @@
 
 #define __HAVE_ARCH_MEMSET
 extern void *memset(void *__s, int __c, size_t __count);
+extern void *__memset(void *__s, int __c, size_t __count);
 
 #define __HAVE_ARCH_MEMCPY
 extern void *memcpy(void *__to, __const__ void *__from, size_t __n);
+extern void *__memcpy(void *__to, __const__ void *__from, size_t __n);
 
 #define __HAVE_ARCH_MEMMOVE
 extern void *memmove(void *__dest, __const__ void *__src, size_t __n);
+extern void *__memmove(void *__dest, __const__ void *__src, size_t __n);
 
 /* Don't build bcopy at all ...  */
 #define __HAVE_ARCH_BCOPY
 
+#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
+
+/*
+ * For files that are not instrumented (e.g. mm/slub.c) we
+ * should use the not-instrumented versions of the mem* functions.
+ */
+
+#define memcpy(dst, src, len) __memcpy(dst, src, len)
+#define memmove(dst, src, len) __memmove(dst, src, len)
+#define memset(s, c, n) __memset(s, c, n)
+
+#ifndef __NO_FORTIFY
+#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
+#endif
+#endif
+
 #endif	/* _XTENSA_STRING_H */
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index 960212e..a931af9 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -36,6 +36,7 @@
 #endif
 
 #include <asm/bootparam.h>
+#include <asm/kasan.h>
 #include <asm/mmu_context.h>
 #include <asm/pgtable.h>
 #include <asm/processor.h>
@@ -251,6 +252,10 @@
 
 	init_mmu();
 
+	/* Initialize initial KASAN shadow map */
+
+	kasan_early_init();
+
 	/* Parse boot parameters */
 
 	if (bp_start)
@@ -388,7 +393,7 @@
 #endif
 	parse_early_param();
 	bootmem_init();
-
+	kasan_init();
 	unflatten_and_copy_device_tree();
 
 #ifdef CONFIG_SMP
diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c
index 6723910..3a443f8 100644
--- a/arch/xtensa/kernel/xtensa_ksyms.c
+++ b/arch/xtensa/kernel/xtensa_ksyms.c
@@ -41,6 +41,9 @@
 EXPORT_SYMBOL(memset);
 EXPORT_SYMBOL(memcpy);
 EXPORT_SYMBOL(memmove);
+EXPORT_SYMBOL(__memset);
+EXPORT_SYMBOL(__memcpy);
+EXPORT_SYMBOL(__memmove);
 EXPORT_SYMBOL(__strncpy_user);
 EXPORT_SYMBOL(clear_page);
 EXPORT_SYMBOL(copy_page);
diff --git a/arch/xtensa/lib/memcopy.S b/arch/xtensa/lib/memcopy.S
index 24d6508..c0f6981 100644
--- a/arch/xtensa/lib/memcopy.S
+++ b/arch/xtensa/lib/memcopy.S
@@ -109,7 +109,8 @@
 	addi	a5, a5,  2
 	j	.Ldstaligned	# dst is now aligned, return to main algorithm
 
-ENTRY(memcpy)
+ENTRY(__memcpy)
+WEAK(memcpy)
 
 	entry	sp, 16		# minimal stack frame
 	# a2/ dst, a3/ src, a4/ len
@@ -271,7 +272,7 @@
 	s8i	a6, a5,  0
 	retw
 
-ENDPROC(memcpy)
+ENDPROC(__memcpy)
 
 /*
  * void bcopy(const void *src, void *dest, size_t n);
@@ -376,7 +377,8 @@
 	j	.Lbackdstaligned	# dst is now aligned,
 					# return to main algorithm
 
-ENTRY(memmove)
+ENTRY(__memmove)
+WEAK(memmove)
 
 	entry	sp, 16		# minimal stack frame
 	# a2/ dst, a3/ src, a4/ len
@@ -548,4 +550,4 @@
 	s8i	a6, a5,  0
 	retw
 
-ENDPROC(memmove)
+ENDPROC(__memmove)
diff --git a/arch/xtensa/lib/memset.S b/arch/xtensa/lib/memset.S
index a6cd04b..276747d 100644
--- a/arch/xtensa/lib/memset.S
+++ b/arch/xtensa/lib/memset.S
@@ -31,7 +31,8 @@
  */
 
 .text
-ENTRY(memset)
+ENTRY(__memset)
+WEAK(memset)
 
 	entry	sp, 16		# minimal stack frame
 	# a2/ dst, a3/ c, a4/ length
@@ -140,7 +141,7 @@
 .Lbytesetdone:
 	retw
 
-ENDPROC(memset)
+ENDPROC(__memset)
 
 	.section .fixup, "ax"
 	.align	4
diff --git a/arch/xtensa/mm/Makefile b/arch/xtensa/mm/Makefile
index 0b3d296..734888a 100644
--- a/arch/xtensa/mm/Makefile
+++ b/arch/xtensa/mm/Makefile
@@ -5,3 +5,8 @@
 obj-y			:= init.o misc.o
 obj-$(CONFIG_MMU)	+= cache.o fault.o ioremap.o mmu.o tlb.o
 obj-$(CONFIG_HIGHMEM)	+= highmem.o
+obj-$(CONFIG_KASAN)	+= kasan_init.o
+
+KASAN_SANITIZE_fault.o := n
+KASAN_SANITIZE_kasan_init.o := n
+KASAN_SANITIZE_mmu.o := n
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index 6fc1cb0..0d980f0 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -100,6 +100,9 @@
 
 	mem_init_print_info(NULL);
 	pr_info("virtual kernel memory layout:\n"
+#ifdef CONFIG_KASAN
+		"    kasan   : 0x%08lx - 0x%08lx  (%5lu MB)\n"
+#endif
 #ifdef CONFIG_MMU
 		"    vmalloc : 0x%08lx - 0x%08lx  (%5lu MB)\n"
 #endif
@@ -108,6 +111,10 @@
 		"    fixmap  : 0x%08lx - 0x%08lx  (%5lu kB)\n"
 #endif
 		"    lowmem  : 0x%08lx - 0x%08lx  (%5lu MB)\n",
+#ifdef CONFIG_KASAN
+		KASAN_SHADOW_START, KASAN_SHADOW_START + KASAN_SHADOW_SIZE,
+		KASAN_SHADOW_SIZE >> 20,
+#endif
 #ifdef CONFIG_MMU
 		VMALLOC_START, VMALLOC_END,
 		(VMALLOC_END - VMALLOC_START) >> 20,
diff --git a/arch/xtensa/mm/kasan_init.c b/arch/xtensa/mm/kasan_init.c
new file mode 100644
index 0000000..6b532b6
--- /dev/null
+++ b/arch/xtensa/mm/kasan_init.c
@@ -0,0 +1,96 @@
+/*
+ * Xtensa KASAN shadow map initialization
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2017 Cadence Design Systems Inc.
+ */
+
+#include <linux/bootmem.h>
+#include <linux/init_task.h>
+#include <linux/kasan.h>
+#include <linux/kernel.h>
+#include <linux/memblock.h>
+#include <asm/initialize_mmu.h>
+#include <asm/tlbflush.h>
+#include <asm/traps.h>
+
+void __init kasan_early_init(void)
+{
+	unsigned long vaddr = KASAN_SHADOW_START;
+	pgd_t *pgd = pgd_offset_k(vaddr);
+	pmd_t *pmd = pmd_offset(pgd, vaddr);
+	int i;
+
+	for (i = 0; i < PTRS_PER_PTE; ++i)
+		set_pte(kasan_zero_pte + i,
+			mk_pte(virt_to_page(kasan_zero_page), PAGE_KERNEL));
+
+	for (vaddr = 0; vaddr < KASAN_SHADOW_SIZE; vaddr += PMD_SIZE, ++pmd) {
+		BUG_ON(!pmd_none(*pmd));
+		set_pmd(pmd, __pmd((unsigned long)kasan_zero_pte));
+	}
+	early_trap_init();
+}
+
+static void __init populate(void *start, void *end)
+{
+	unsigned long n_pages = (end - start) / PAGE_SIZE;
+	unsigned long n_pmds = n_pages / PTRS_PER_PTE;
+	unsigned long i, j;
+	unsigned long vaddr = (unsigned long)start;
+	pgd_t *pgd = pgd_offset_k(vaddr);
+	pmd_t *pmd = pmd_offset(pgd, vaddr);
+	pte_t *pte = memblock_virt_alloc(n_pages * sizeof(pte_t), PAGE_SIZE);
+
+	pr_debug("%s: %p - %p\n", __func__, start, end);
+
+	for (i = j = 0; i < n_pmds; ++i) {
+		int k;
+
+		for (k = 0; k < PTRS_PER_PTE; ++k, ++j) {
+			phys_addr_t phys =
+				memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
+						    MEMBLOCK_ALLOC_ANYWHERE);
+
+			set_pte(pte + j, pfn_pte(PHYS_PFN(phys), PAGE_KERNEL));
+		}
+	}
+
+	for (i = 0; i < n_pmds ; ++i, pte += PTRS_PER_PTE)
+		set_pmd(pmd + i, __pmd((unsigned long)pte));
+
+	local_flush_tlb_all();
+	memset(start, 0, end - start);
+}
+
+void __init kasan_init(void)
+{
+	int i;
+
+	BUILD_BUG_ON(KASAN_SHADOW_OFFSET != KASAN_SHADOW_START -
+		     (KASAN_START_VADDR >> KASAN_SHADOW_SCALE_SHIFT));
+	BUILD_BUG_ON(VMALLOC_START < KASAN_START_VADDR);
+
+	/*
+	 * Replace shadow map pages that cover addresses from VMALLOC area
+	 * start to the end of KSEG with clean writable pages.
+	 */
+	populate(kasan_mem_to_shadow((void *)VMALLOC_START),
+		 kasan_mem_to_shadow((void *)XCHAL_KSEG_BYPASS_VADDR));
+
+	/* Write protect kasan_zero_page and zero-initialize it again. */
+	for (i = 0; i < PTRS_PER_PTE; ++i)
+		set_pte(kasan_zero_pte + i,
+			mk_pte(virt_to_page(kasan_zero_page), PAGE_KERNEL_RO));
+
+	local_flush_tlb_all();
+	memset(kasan_zero_page, 0, PAGE_SIZE);
+
+	/* At this point kasan is fully initialized. Enable error messages. */
+	current->kasan_depth = 0;
+	pr_info("KernelAddressSanitizer initialized\n");
+}