xtensa: fix a6 and a7 handling in fast_syscall_xtensa
Remove the a6 restores on the individual return paths and instead modify
and restore it in a single place, using the symbolic name
SYS_XTENSA_ATOMIC_SET instead of a magic constant.
Correctly restore a7 from PT_AREG7 rather than PT_AREG0 in case of an
illegal a6 value.
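
For reference, a rough C sketch of what the .Lnswp ("atomic set, add, and
exg_add") path computes after this change; the function and variable names
(xtensa_atomic_op, op, uaddr, arg) are illustrative only, not kernel
identifiers, and the value 1 for SYS_XTENSA_ATOMIC_SET is taken from the
removed "assuming SYS_XTENSA_ATOMIC_SET = 1" comment:

    /*
     * Illustrative sketch only: the addi/moveqz/addi triple biases a6 by
     * -SYS_XTENSA_ATOMIC_SET so that moveqz selects "set" exactly when the
     * operation is SYS_XTENSA_ATOMIC_SET, then immediately restores a6, so
     * no return path has to undo the adjustment any more.
     */
    #define SYS_XTENSA_ATOMIC_SET	1

    static unsigned int xtensa_atomic_op(unsigned int op,
                                         unsigned int *uaddr,
                                         unsigned int arg)
    {
            unsigned int old = *uaddr;        /* TRY l32i a7, a3, 0 */
            unsigned int val = old + arg;     /* add  a0, a4, a7    */

            if (op == SYS_XTENSA_ATOMIC_SET)
                    val = arg;                /* moveqz a0, a4, a6  */

            *uaddr = val;                     /* TRY s32i a0, a3, 0 */
            return old;                       /* old value returned in a2 */
    }
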
Cc: stable@vger.kernel.org
Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
index 21917e5..a06b7ef 100644
--- a/arch/xtensa/kernel/entry.S
+++ b/arch/xtensa/kernel/entry.S
@@ -1001,9 +1001,8 @@
movi a7, 4 # sizeof(unsigned int)
access_ok a3, a7, a0, a2, .Leac # a0: scratch reg, a2: sp
- addi a6, a6, -1 # assuming SYS_XTENSA_ATOMIC_SET = 1
- _bgeui a6, SYS_XTENSA_COUNT - 1, .Lill
- _bnei a6, SYS_XTENSA_ATOMIC_CMP_SWP - 1, .Lnswp
+ _bgeui a6, SYS_XTENSA_COUNT, .Lill
+ _bnei a6, SYS_XTENSA_ATOMIC_CMP_SWP, .Lnswp
/* Fall through for ATOMIC_CMP_SWP. */
@@ -1015,27 +1014,26 @@
l32i a7, a2, PT_AREG7 # restore a7
l32i a0, a2, PT_AREG0 # restore a0
movi a2, 1 # and return 1
- addi a6, a6, 1 # restore a6 (really necessary?)
rfe
1: l32i a7, a2, PT_AREG7 # restore a7
l32i a0, a2, PT_AREG0 # restore a0
movi a2, 0 # return 0 (note that we cannot set
- addi a6, a6, 1 # restore a6 (really necessary?)
rfe
.Lnswp: /* Atomic set, add, and exg_add. */
TRY l32i a7, a3, 0 # orig
+ addi a6, a6, -SYS_XTENSA_ATOMIC_SET
add a0, a4, a7 # + arg
moveqz a0, a4, a6 # set
+ addi a6, a6, SYS_XTENSA_ATOMIC_SET
TRY s32i a0, a3, 0 # write new value
mov a0, a2
mov a2, a7
l32i a7, a0, PT_AREG7 # restore a7
l32i a0, a0, PT_AREG0 # restore a0
- addi a6, a6, 1 # restore a6 (really necessary?)
rfe
CATCH
@@ -1044,7 +1042,7 @@
movi a2, -EFAULT
rfe
-.Lill: l32i a7, a2, PT_AREG0 # restore a7
+.Lill: l32i a7, a2, PT_AREG7 # restore a7
l32i a0, a2, PT_AREG0 # restore a0
movi a2, -EINVAL
rfe