x86/kasan: instrument user memory access API
Exchanges between user and kernel memory are coded in assembly, which
means that such accesses won't be spotted by KASAN, since the compiler
instruments only C code.
Add explicit KASAN checks to the user memory access API to ensure that
userspace writes to (or reads from) valid kernel memory.
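For context, kasan_check_read()/kasan_check_write() come from the new
<linux/kasan-checks.h> header; roughly (a sketch - they compile to
no-ops without CONFIG_KASAN):

	#ifdef CONFIG_KASAN
	void kasan_check_read(const void *p, unsigned int size);
	void kasan_check_write(const void *p, unsigned int size);
	#else
	static inline void kasan_check_read(const void *p,
					    unsigned int size) { }
	static inline void kasan_check_write(const void *p,
					     unsigned int size) { }
	#endif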
Note: unlike the others, strncpy_from_user() is written mostly in C, so
KASAN already sees the memory accesses in it. However, it still makes
sense to add an explicit check for all @count bytes that could
*potentially* be written to the kernel.
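A sketch of where that check lands in lib/strncpy_from_user.c (the hunk
is not part of the diff below, and the surrounding code is abbreviated):

	if (likely(src_addr < max_addr)) {
		unsigned long max = max_addr - src_addr;

		/* check all @count bytes do_strncpy_from_user() may write */
		kasan_check_write(dst, count);
		return do_strncpy_from_user(dst, src, count, max);
	}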
[aryabinin@virtuozzo.com: move kasan check under the condition]
Link: http://lkml.kernel.org/r/1462869209-21096-1-git-send-email-aryabinin@virtuozzo.com
Link: http://lkml.kernel.org/r/1462538722-1574-4-git-send-email-aryabinin@virtuozzo.com
Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 12f9653..2982387 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -5,6 +5,7 @@
*/
#include <linux/errno.h>
#include <linux/compiler.h>
+#include <linux/kasan-checks.h>
#include <linux/thread_info.h>
#include <linux/string.h>
#include <asm/asm.h>
@@ -721,6 +722,8 @@
might_fault();
+ kasan_check_write(to, n);
+
/*
* While we would like to have the compiler do the checking for us
* even in the non-constant size case, any false positives there are
@@ -754,6 +757,8 @@
{
int sz = __compiletime_object_size(from);
+ kasan_check_read(from, n);
+
might_fault();
/* See the comment in copy_from_user() above. */
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 3076986..2eac2aa 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -7,6 +7,7 @@
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/lockdep.h>
+#include <linux/kasan-checks.h>
#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/page.h>
@@ -109,6 +110,7 @@
int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
might_fault();
+ kasan_check_write(dst, size);
return __copy_from_user_nocheck(dst, src, size);
}
@@ -175,6 +177,7 @@
int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
might_fault();
+ kasan_check_read(src, size);
return __copy_to_user_nocheck(dst, src, size);
}
@@ -242,12 +245,14 @@
static __must_check __always_inline int
__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
{
+ kasan_check_write(dst, size);
return __copy_from_user_nocheck(dst, src, size);
}
static __must_check __always_inline int
__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
{
+ kasan_check_read(src, size);
return __copy_to_user_nocheck(dst, src, size);
}
@@ -258,6 +263,7 @@
__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
{
might_fault();
+ kasan_check_write(dst, size);
return __copy_user_nocache(dst, src, size, 1);
}
@@ -265,6 +271,7 @@
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
unsigned size)
{
+ kasan_check_write(dst, size);
return __copy_user_nocache(dst, src, size, 0);
}
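With these checks in place, a bad size passed to the copy helpers is
reported before the assembly copy runs. A hypothetical example (buf,
user_ptr and the size are illustrative, not from this patch):

	char buf[8];

	/*
	 * n = 16 > sizeof(buf): kasan_check_write(buf, 16) now reports
	 * a stack-out-of-bounds write instead of corrupting the stack.
	 */
	if (copy_from_user(buf, user_ptr, 16))
		return -EFAULT;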