Improve code generation for shifts of long values by exactly 32 bits.
On this testcase:
long %test(long %X) {
        %Y = shr long %X, ubyte 32
        ret long %Y
}
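
(In the old IR syntax above, shr is a right shift; the testcase is roughly
the C++ below. This is a hedged sketch: it uses an unsigned type to match
the zero-filled high result word the emitted code produces.)

    unsigned long long test(unsigned long long X) {
      return X >> 32;  // low result word = high source word; high word = 0
    }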
instead of:
test:
        mov %EAX, DWORD PTR [%ESP + 4]
        mov %EAX, DWORD PTR [%ESP + 8]
        sar %EAX, 0
        mov %EDX, 0
        ret
we now emit:
test:
        mov %EAX, DWORD PTR [%ESP + 4]
        mov %EAX, DWORD PTR [%ESP + 8]
        mov %EDX, 0
        ret
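
The win comes from the Amount -= 32 rewrite: when a long is shifted by
exactly 32, the residual shift on the remaining word is 0, so the
sar %EAX, 0 above is a no-op and a plain register-to-register move
suffices. A minimal C++ sketch of the lowering rule (a hypothetical
standalone helper, not LLVM code; it models the unsigned/SHR case on two
32-bit halves):

    #include <cassert>
    #include <cstdint>

    // Lower DestHi:DestLo = SrcHi:SrcLo >> Amount for a constant
    // Amount in [32, 63], mirroring the case the patch changes.
    // SrcLo is shifted out entirely, so only SrcHi is needed.
    static void lowerLongShr(uint32_t SrcHi, unsigned Amount,
                             uint32_t &DestLo, uint32_t &DestHi) {
      Amount -= 32;                // residual shift on the high word
      if (Amount != 0)
        DestLo = SrcHi >> Amount;  // SHR32ri: a real shift is still needed
      else
        DestLo = SrcHi;            // MOV32rr: shift-by-0 is just a copy
      DestHi = 0;                  // MOV32ri: high result word is zero
    }

    int main() {
      uint64_t X = 0x123456789ABCDEF0ULL;
      uint32_t Lo, Hi;
      lowerLongShr(uint32_t(X >> 32), 32, Lo, Hi);
      assert(((uint64_t(Hi) << 32) | Lo) == X >> 32);
      return 0;
    }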
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@12688 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/lib/Target/X86/InstSelectSimple.cpp b/lib/Target/X86/InstSelectSimple.cpp
index e735f94..4e59ed1 100644
--- a/lib/Target/X86/InstSelectSimple.cpp
+++ b/lib/Target/X86/InstSelectSimple.cpp
@@ -2128,13 +2128,20 @@
       } else {                 // Shifting more than 32 bits
         Amount -= 32;
         if (isLeftShift) {
-          BuildMI(*MBB, IP, X86::SHL32ri, 2,
-                  DestReg + 1).addReg(SrcReg).addImm(Amount);
-          BuildMI(*MBB, IP, X86::MOV32ri, 1,
-                  DestReg).addImm(0);
+          if (Amount != 0) {
+            BuildMI(*MBB, IP, X86::SHL32ri, 2,
+                    DestReg + 1).addReg(SrcReg).addImm(Amount);
+          } else {
+            BuildMI(*MBB, IP, X86::MOV32rr, 1, DestReg+1).addReg(SrcReg);
+          }
+          BuildMI(*MBB, IP, X86::MOV32ri, 1, DestReg).addImm(0);
         } else {
-          unsigned Opcode = isSigned ? X86::SAR32ri : X86::SHR32ri;
-          BuildMI(*MBB, IP, Opcode, 2, DestReg).addReg(SrcReg+1).addImm(Amount);
+          if (Amount != 0) {
+            BuildMI(*MBB, IP, isSigned ? X86::SAR32ri : X86::SHR32ri, 2,
+                    DestReg).addReg(SrcReg+1).addImm(Amount);
+          } else {
+            BuildMI(*MBB, IP, X86::MOV32rr, 1, DestReg).addReg(SrcReg+1);
+          }
           BuildMI(*MBB, IP, X86::MOV32ri, 1, DestReg+1).addImm(0);
         }
       }