x86: some lock annotations for user copy paths, v2
- introduce might_fault()
- handle the atomic user copy paths correctly
[ mingo@elte.hu: move might_sleep() outside of in_atomic(). ]
Signed-off-by: Nick Piggin <npiggin@suse.de>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
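---
For reference, the annotation that was previously open-coded at every
user-copy site now reduces to a single might_fault() call. A minimal
sketch of the resulting pattern (hypothetical primitive, not taken
from the diff below):

	static inline unsigned long
	example_copy_to_user(void __user *to, const void *from,
			     unsigned long n)
	{
		might_fault();	/* may sleep and take mmap_sem for read */
		return __copy_to_user_inatomic(to, from, n);
	}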
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index 8eedde2..7393152 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -32,9 +32,7 @@
#define __do_strncpy_from_user(dst, src, count, res) \
do { \
int __d0, __d1, __d2; \
- might_sleep(); \
- if (current->mm) \
- might_lock_read(&current->mm->mmap_sem); \
+ might_fault(); \
__asm__ __volatile__( \
" testl %1,%1\n" \
" jz 2f\n" \
@@ -121,9 +119,7 @@
#define __do_clear_user(addr,size) \
do { \
int __d0; \
- might_sleep(); \
- if (current->mm) \
- might_lock_read(&current->mm->mmap_sem); \
+ might_fault(); \
__asm__ __volatile__( \
"0: rep; stosl\n" \
" movl %2,%0\n" \
@@ -193,9 +189,7 @@
unsigned long mask = -__addr_ok(s);
unsigned long res, tmp;
- might_sleep();
- if (current->mm)
- might_lock_read(&current->mm->mmap_sem);
+ might_fault();
__asm__ __volatile__(
" testl %0, %0\n"
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index 847d129..64d6c84 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -15,9 +15,7 @@
#define __do_strncpy_from_user(dst,src,count,res) \
do { \
long __d0, __d1, __d2; \
- might_sleep(); \
- if (current->mm) \
- might_lock_read(&current->mm->mmap_sem); \
+ might_fault(); \
__asm__ __volatile__( \
" testq %1,%1\n" \
" jz 2f\n" \
@@ -66,9 +64,7 @@
unsigned long __clear_user(void __user *addr, unsigned long size)
{
long __d0;
- might_sleep();
- if (current->mm)
- might_lock_read(&current->mm->mmap_sem);
+ might_fault();
/* no memory constraint because it doesn't change any memory gcc knows
about */
asm volatile(
diff --git a/include/asm-x86/uaccess.h b/include/asm-x86/uaccess.h
index ad29752..39f8420 100644
--- a/include/asm-x86/uaccess.h
+++ b/include/asm-x86/uaccess.h
@@ -8,8 +8,6 @@
#include <linux/thread_info.h>
#include <linux/prefetch.h>
#include <linux/string.h>
-#include <linux/lockdep.h>
-#include <linux/sched.h>
#include <asm/asm.h>
#include <asm/page.h>
@@ -159,9 +157,7 @@
int __ret_gu; \
unsigned long __val_gu; \
__chk_user_ptr(ptr); \
- might_sleep(); \
- if (current->mm) \
- might_lock_read(&current->mm->mmap_sem); \
+ might_fault(); \
switch (sizeof(*(ptr))) { \
case 1: \
__get_user_x(1, __ret_gu, __val_gu, ptr); \
@@ -246,9 +242,7 @@
int __ret_pu; \
__typeof__(*(ptr)) __pu_val; \
__chk_user_ptr(ptr); \
- might_sleep(); \
- if (current->mm) \
- might_lock_read(&current->mm->mmap_sem); \
+ might_fault(); \
__pu_val = x; \
switch (sizeof(*(ptr))) { \
case 1: \
@@ -273,9 +267,7 @@
#define __put_user_size(x, ptr, size, retval, errret) \
do { \
retval = 0; \
- might_sleep(); \
- if (current->mm) \
- might_lock_read(&current->mm->mmap_sem); \
+ might_fault(); \
__chk_user_ptr(ptr); \
switch (size) { \
case 1: \
@@ -328,9 +320,7 @@
#define __get_user_size(x, ptr, size, retval, errret) \
do { \
retval = 0; \
- might_sleep(); \
- if (current->mm) \
- might_lock_read(&current->mm->mmap_sem); \
+ might_fault(); \
__chk_user_ptr(ptr); \
switch (size) { \
case 1: \
diff --git a/include/asm-x86/uaccess_32.h b/include/asm-x86/uaccess_32.h
index d725e2d..d10e842 100644
--- a/include/asm-x86/uaccess_32.h
+++ b/include/asm-x86/uaccess_32.h
@@ -82,9 +82,7 @@
static __always_inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
- might_sleep();
- if (current->mm)
- might_lock_read(&current->mm->mmap_sem);
+ might_fault();
return __copy_to_user_inatomic(to, from, n);
}
@@ -139,9 +137,7 @@
static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
- might_sleep();
- if (current->mm)
- might_lock_read(&current->mm->mmap_sem);
+ might_fault();
if (__builtin_constant_p(n)) {
unsigned long ret;
@@ -163,9 +159,7 @@
static __always_inline unsigned long __copy_from_user_nocache(void *to,
const void __user *from, unsigned long n)
{
- might_sleep();
- if (current->mm)
- might_lock_read(&current->mm->mmap_sem);
+ might_fault();
if (__builtin_constant_p(n)) {
unsigned long ret;
diff --git a/include/asm-x86/uaccess_64.h b/include/asm-x86/uaccess_64.h
index 40a7205..13fd56f 100644
--- a/include/asm-x86/uaccess_64.h
+++ b/include/asm-x86/uaccess_64.h
@@ -29,9 +29,7 @@
{
int ret = 0;
- might_sleep();
- if (current->mm)
- might_lock_read(&current->mm->mmap_sem);
+ might_fault();
if (!__builtin_constant_p(size))
return copy_user_generic(dst, (__force void *)src, size);
switch (size) {
@@ -75,9 +73,7 @@
{
int ret = 0;
- might_sleep();
- if (current->mm)
- might_lock_read(&current->mm->mmap_sem);
+ might_fault();
if (!__builtin_constant_p(size))
return copy_user_generic((__force void *)dst, src, size);
switch (size) {
@@ -121,9 +117,7 @@
{
int ret = 0;
- might_sleep();
- if (current->mm)
- might_lock_read(&current->mm->mmap_sem);
+ might_fault();
if (!__builtin_constant_p(size))
return copy_user_generic((__force void *)dst,
(__force void *)src, size);
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 2651f80..e580ec0 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -140,6 +140,15 @@
(__x < 0) ? -__x : __x; \
})
+#ifdef CONFIG_PROVE_LOCKING
+void might_fault(void);
+#else
+static inline void might_fault(void)
+{
+ might_sleep();
+}
+#endif
+
extern struct atomic_notifier_head panic_notifier_list;
extern long (*panic_blink)(long time);
NORET_TYPE void panic(const char * fmt, ...)
diff --git a/mm/memory.c b/mm/memory.c
index 1002f47..b8fdf4e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3016,3 +3016,18 @@
}
up_read(&current->mm->mmap_sem);
}
+
+#ifdef CONFIG_PROVE_LOCKING
+void might_fault(void)
+{
+ might_sleep();
+ /*
+ * it would be nicer only to annotate paths which are not under
+ * pagefault_disable, however that requires a larger audit and
+ * providing helpers like get_user_atomic.
+ */
+ if (!in_atomic() && current->mm)
+ might_lock_read(&current->mm->mmap_sem);
+}
+EXPORT_SYMBOL(might_fault);
+#endif
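
The !in_atomic() test above keeps the new lockdep annotation from
firing on paths that reach the annotated primitives with page faults
disabled, as the in-code comment notes. A hedged sketch of such a
caller (hypothetical helper, not part of this patch):

	/* Hypothetical helper, for illustration only. */
	static int peek_user_word(const int __user *uptr, int *val)
	{
		unsigned long ret;

		pagefault_disable();	/* raises the preempt count */
		/*
		 * in_atomic() is now true, so might_fault() skips the
		 * might_lock_read() annotation.  If the access faults,
		 * the fixup path returns the number of uncopied bytes
		 * instead of sleeping in the fault handler.
		 */
		ret = __copy_from_user(val, uptr, sizeof(*val));
		pagefault_enable();

		return ret ? -EFAULT : 0;
	}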