Fix a performance regression from r144565. Positive offsets were being lowered
into registers, rather than encoded directly in the load/store.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@144576 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/lib/Target/ARM/ARMFastISel.cpp b/lib/Target/ARM/ARMFastISel.cpp
index 81a93b1..0b728a9 100644
--- a/lib/Target/ARM/ARMFastISel.cpp
+++ b/lib/Target/ARM/ARMFastISel.cpp
@@ -874,9 +874,9 @@
// Integer loads/stores handle 12-bit offsets.
needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset);
// Handle negative offsets.
- if (isThumb2)
- needsLowering = !(needsLowering && Subtarget->hasV6T2Ops() &&
- Addr.Offset < 0 && Addr.Offset > -256);
+ if (needsLowering && isThumb2)
+ needsLowering = !(Subtarget->hasV6T2Ops() && Addr.Offset < 0 &&
+ Addr.Offset > -256);
} else {
// ARM halfword load/stores and signed byte loads use +/-imm8 offsets.
needsLowering = (Addr.Offset > 255 || Addr.Offset < -255);
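For illustration, here is a minimal standalone sketch (not the real ARMFastISel API; the helper name and the isThumb2/hasV6T2 flags are stand-ins for the subtarget queries in the patch) showing why the corrected predicate keeps in-range positive offsets encoded directly:

#include <cassert>

// Returns true if the offset cannot be encoded directly in a Thumb2 integer
// load/store and must instead be materialized into a register.
static bool offsetNeedsLowering(int Offset, bool isThumb2, bool hasV6T2) {
  // Integer loads/stores encode unsigned 12-bit immediate offsets directly.
  bool needsLowering = ((Offset & 0xfff) != Offset);
  // Only when the 12-bit check fails do we consider the Thumb2 negative
  // imm8 form (offsets in (-256, 0)). A positive in-range offset must keep
  // needsLowering == false, which is exactly what r144565 broke by testing
  // needsLowering inside the negated condition.
  if (needsLowering && isThumb2)
    needsLowering = !(hasV6T2 && Offset < 0 && Offset > -256);
  return needsLowering;
}

int main() {
  // Positive offset within the 12-bit range: encodable, no lowering needed.
  assert(!offsetNeedsLowering(8, /*isThumb2=*/true, /*hasV6T2=*/true));
  // Small negative offset: handled by the imm8 form on v6T2.
  assert(!offsetNeedsLowering(-16, /*isThumb2=*/true, /*hasV6T2=*/true));
  // Out-of-range offsets still require lowering into a register.
  assert(offsetNeedsLowering(4096, /*isThumb2=*/true, /*hasV6T2=*/true));
  assert(offsetNeedsLowering(-300, /*isThumb2=*/true, /*hasV6T2=*/true));
  return 0;
}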