Improve codegen for the LLVM offsetof/sizeof "operator". Before we compiled
this LLVM function:
int %foo() {
ret int cast (int** getelementptr (int** null, int 1) to int)
}
into:
foo:
mov %EAX, 0
lea %EAX, DWORD PTR [%EAX + 4]
ret
now we compile it into:
foo:
mov %EAX, 4
ret
This sequence is frequently generated by the MSIL front-end, and will soon be
generated by the malloc lowering pass and the Java front-end as well.
-Chris
-Chris
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@14834 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/lib/Target/X86/InstSelectSimple.cpp b/lib/Target/X86/InstSelectSimple.cpp
index 5cabd48..68a602b 100644
--- a/lib/Target/X86/InstSelectSimple.cpp
+++ b/lib/Target/X86/InstSelectSimple.cpp
@@ -3663,6 +3663,21 @@
if (ConstantPointerRef *CPR = dyn_cast<ConstantPointerRef>(Src))
Src = CPR->getValue();
+ // If this is a getelementptr null, with all constant integer indices, just
+ // replace it with TargetReg = 42.
+ if (isa<ConstantPointerNull>(Src)) {
+ User::op_iterator I = IdxBegin;
+ for (; I != IdxEnd; ++I)
+ if (!isa<ConstantInt>(*I))
+ break;
+ if (I == IdxEnd) { // All constant indices
+ unsigned Offset = TD.getIndexedOffset(Src->getType(),
+ std::vector<Value*>(IdxBegin, IdxEnd));
+ BuildMI(*MBB, IP, X86::MOV32ri, 1, TargetReg).addImm(Offset);
+ return;
+ }
+ }
+
std::vector<Value*> GEPOps;
GEPOps.resize(IdxEnd-IdxBegin+1);
GEPOps[0] = Src;