MIPS: uaccess: Switch lock annotations to might_fault().

The user access helpers may fault and therefore sleep, so annotate them
with might_fault() instead of a bare might_sleep().  might_fault()
performs the same might_sleep() check and, under lockdep, additionally
records that mmap_sem may be taken to resolve a fault.  While at it, add
the annotation to the access_ok()-checking get_user()/put_user() paths,
which previously had none, and in the checking copy variants only
annotate once access_ok() has succeeded.
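
For illustration, a hypothetical caller (not part of this patch; the
function, lock and variable names below are made up) of the kind the new
get_user()/put_user() annotation can catch:

  #include <linux/errno.h>
  #include <linux/spinlock.h>
  #include <linux/uaccess.h>

  static DEFINE_SPINLOCK(example_lock);    /* hypothetical lock */
  static int example_state;                /* hypothetical driver state */

  /*
   * put_user() may fault and therefore sleep, so calling it under a
   * spinlock is a bug.  With might_fault() in the checking put_user()
   * path, CONFIG_DEBUG_ATOMIC_SLEEP reports this even when the user
   * page is resident and no fault actually occurs.
   */
  static int example_read_state(int __user *up)
  {
          int err;

          spin_lock(&example_lock);        /* atomic context from here on */
          err = put_user(example_state, up);
          spin_unlock(&example_lock);

          return err;
  }
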
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
diff --git a/arch/mips/include/asm/checksum.h b/arch/mips/include/asm/checksum.h
index 290485a..f2f7c6c 100644
--- a/arch/mips/include/asm/checksum.h
+++ b/arch/mips/include/asm/checksum.h
@@ -40,7 +40,7 @@
 __wsum csum_partial_copy_from_user(const void __user *src, void *dst, int len,
 				   __wsum sum, int *err_ptr)
 {
-	might_sleep();
+	might_fault();
 	return __csum_partial_copy_user((__force void *)src, dst,
 					len, sum, err_ptr);
 }
@@ -53,7 +53,7 @@
 __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len,
 			     __wsum sum, int *err_ptr)
 {
-	might_sleep();
+	might_fault();
 	if (access_ok(VERIFY_WRITE, dst, len))
 		return __csum_partial_copy_user(src, (__force void *)dst,
 						len, sum, err_ptr);
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index 5c08760..8de858f 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -245,6 +245,7 @@
 	int __gu_err = -EFAULT;						\
 	const __typeof__(*(ptr)) __user * __gu_ptr = (ptr);		\
 									\
+	might_fault();							\
 	if (likely(access_ok(VERIFY_READ,  __gu_ptr, size)))		\
 		__get_user_common((x), size, __gu_ptr);			\
 									\
@@ -334,6 +335,7 @@
 	__typeof__(*(ptr)) __pu_val = (x);				\
 	int __pu_err = -EFAULT;						\
 									\
+	might_fault();							\
 	if (likely(access_ok(VERIFY_WRITE,  __pu_addr, size))) {	\
 		switch (size) {						\
 		case 1: __put_user_asm("sb", __pu_addr); break;		\
@@ -708,10 +710,10 @@
 	const void *__cu_from;						\
 	long __cu_len;							\
 									\
-	might_sleep();							\
 	__cu_to = (to);							\
 	__cu_from = (from);						\
 	__cu_len = (n);							\
+	might_fault();							\
 	__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len);	\
 	__cu_len;							\
 })
@@ -764,13 +766,14 @@
 	const void *__cu_from;						\
 	long __cu_len;							\
 									\
-	might_sleep();							\
 	__cu_to = (to);							\
 	__cu_from = (from);						\
 	__cu_len = (n);							\
-	if (access_ok(VERIFY_WRITE, __cu_to, __cu_len))			\
+	if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) {		\
+		might_fault();						\
 		__cu_len = __invoke_copy_to_user(__cu_to, __cu_from,	\
 		                                 __cu_len);		\
+	}								\
 	__cu_len;							\
 })
 
@@ -843,10 +846,10 @@
 	const void __user *__cu_from;					\
 	long __cu_len;							\
 									\
-	might_sleep();							\
 	__cu_to = (to);							\
 	__cu_from = (from);						\
 	__cu_len = (n);							\
+	might_fault();							\
 	__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,		\
 	                                   __cu_len);			\
 	__cu_len;							\
@@ -874,13 +877,14 @@
 	const void __user *__cu_from;					\
 	long __cu_len;							\
 									\
-	might_sleep();							\
 	__cu_to = (to);							\
 	__cu_from = (from);						\
 	__cu_len = (n);							\
-	if (access_ok(VERIFY_READ, __cu_from, __cu_len))		\
+	if (access_ok(VERIFY_READ, __cu_from, __cu_len)) {		\
+		might_fault();						\
 		__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,	\
 		                                   __cu_len);		\
+	}								\
 	__cu_len;							\
 })
 
@@ -890,10 +894,10 @@
 	const void __user *__cu_from;					\
 	long __cu_len;							\
 									\
-	might_sleep();							\
 	__cu_to = (to);							\
 	__cu_from = (from);						\
 	__cu_len = (n);							\
+	might_fault();							\
 	__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,		\
 	                                   __cu_len);			\
 	__cu_len;							\
@@ -905,14 +909,15 @@
 	const void __user *__cu_from;					\
 	long __cu_len;							\
 									\
-	might_sleep();							\
 	__cu_to = (to);							\
 	__cu_from = (from);						\
 	__cu_len = (n);							\
 	if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) &&	\
-	           access_ok(VERIFY_WRITE, __cu_to, __cu_len)))		\
+	           access_ok(VERIFY_WRITE, __cu_to, __cu_len))) {	\
+		might_fault();						\
 		__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,	\
 		                                   __cu_len);		\
+	}								\
 	__cu_len;							\
 })
 
@@ -932,7 +937,7 @@
 {
 	__kernel_size_t res;
 
-	might_sleep();
+	might_fault();
 	__asm__ __volatile__(
 		"move\t$4, %1\n\t"
 		"move\t$5, $0\n\t"
@@ -981,7 +986,7 @@
 {
 	long res;
 
-	might_sleep();
+	might_fault();
 	__asm__ __volatile__(
 		"move\t$4, %1\n\t"
 		"move\t$5, %2\n\t"
@@ -1018,7 +1023,7 @@
 {
 	long res;
 
-	might_sleep();
+	might_fault();
 	__asm__ __volatile__(
 		"move\t$4, %1\n\t"
 		"move\t$5, %2\n\t"
@@ -1037,7 +1042,7 @@
 {
 	long res;
 
-	might_sleep();
+	might_fault();
 	__asm__ __volatile__(
 		"move\t$4, %1\n\t"
 		__MODULE_JAL(__strlen_user_nocheck_asm)
@@ -1067,7 +1072,7 @@
 {
 	long res;
 
-	might_sleep();
+	might_fault();
 	__asm__ __volatile__(
 		"move\t$4, %1\n\t"
 		__MODULE_JAL(__strlen_user_asm)
@@ -1084,7 +1089,7 @@
 {
 	long res;
 
-	might_sleep();
+	might_fault();
 	__asm__ __volatile__(
 		"move\t$4, %1\n\t"
 		"move\t$5, %2\n\t"
@@ -1115,7 +1120,7 @@
 {
 	long res;
 
-	might_sleep();
+	might_fault();
 	__asm__ __volatile__(
 		"move\t$4, %1\n\t"
 		"move\t$5, %2\n\t"