arm64: rwlocks: don't fail trylock purely due to contention
STXR can fail for a number of reasons that don't imply the lock is held
(the exclusive monitor can be cleared spuriously, for example), so don't
fail an rwlock trylock operation simply because the STXR reported
failure; retry the LDAXR/STXR sequence instead, and only give up once
the lock is actually observed to be held.

I'm not aware of any issues with the current code, but this makes it
consistent with spin_trylock as well as with other architectures
(e.g. arch/arm).
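
For reference, a sketch of how the write-trylock path reads with this
change applied. The function name, declaration and final return are
assumed from the surrounding file (they are not part of the hunk below);
only the asm body comes from the diff:

	static inline int arch_write_trylock(arch_rwlock_t *rw)
	{
		unsigned int tmp;

		asm volatile(
		/* 1: load the lock word with acquire semantics */
		"1:	ldaxr	%w0, %1\n"
		/* non-zero lock word: lock is held, trylock fails */
		"	cbnz	%w0, 2f\n"
		/* try to claim the write lock (store 0x80000000) */
		"	stxr	%w0, %w2, %1\n"
		/* store-exclusive failed (monitor lost): retry from 1 */
		"	cbnz	%w0, 1b\n"
		"2:\n"
		: "=&r" (tmp), "+Q" (rw->lock)
		: "r" (0x80000000)
		: "memory");

		/* assumed from the surrounding file: tmp == 0 iff we took the lock */
		return !tmp;
	}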
Reported-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index cee1287..0f08ba5 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -140,10 +140,11 @@
unsigned int tmp;
asm volatile(
- " ldaxr %w0, %1\n"
- " cbnz %w0, 1f\n"
+ "1: ldaxr %w0, %1\n"
+ " cbnz %w0, 2f\n"
" stxr %w0, %w2, %1\n"
- "1:\n"
+ " cbnz %w0, 1b\n"
+ "2:\n"
: "=&r" (tmp), "+Q" (rw->lock)
: "r" (0x80000000)
: "memory");
@@ -209,11 +210,12 @@
unsigned int tmp, tmp2 = 1;
asm volatile(
- " ldaxr %w0, %2\n"
+ "1: ldaxr %w0, %2\n"
" add %w0, %w0, #1\n"
- " tbnz %w0, #31, 1f\n"
+ " tbnz %w0, #31, 2f\n"
" stxr %w1, %w0, %2\n"
- "1:\n"
+ " cbnz %w1, 1b\n"
+ "2:\n"
: "=&r" (tmp), "+r" (tmp2), "+Q" (rw->lock)
:
: "memory");