Use movw+movt in ARMFastISel::ARMMaterializeGV.
This eliminates a large number of constant pool entries in -O0 builds of code
with many global variable accesses.
This speeds up -O0 codegen of consumer-typeset by 2x because the constant
island pass no longer has to scan thousands of constant pool entries.
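
For illustration, materializing the address of @test4g's Darwin non-lazy pointer
in the Thumb2 case of the test below changes roughly like this (a sketch based on
the updated CHECK lines; exact registers and constant-pool labels vary, and the
pool-entry contents shown for the old form are an assumption about what the
previous lowering emitted):

  Before: the pointer address is loaded out of a constant pool entry
    ldr.n   r0, LCPI4_1
    ...
  LCPI4_1:
    .long   L_test4g$non_lazy_ptr

  After: the same 32-bit address is built directly with a movw/movt pair
    movw    r0, :lower16:L_test4g$non_lazy_ptr
    movt    r0, :upper16:L_test4g$non_lazy_ptr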
<rdar://problem/10629774>
llvm-svn: 147712
diff --git a/llvm/test/CodeGen/ARM/fast-isel.ll b/llvm/test/CodeGen/ARM/fast-isel.ll
index 9f33aea..c8e0211 100644
--- a/llvm/test/CodeGen/ARM/fast-isel.ll
+++ b/llvm/test/CodeGen/ARM/fast-isel.ll
@@ -142,19 +142,23 @@
store i32 %b, i32* @test4g
ret void
-; THUMB: ldr.n r0, LCPI4_1
+; THUMB: movw r0, :lower16:L_test4g$non_lazy_ptr
+; THUMB: movt r0, :upper16:L_test4g$non_lazy_ptr
; THUMB: ldr r0, [r0]
; THUMB: ldr r0, [r0]
; THUMB: adds r0, #1
-; THUMB: ldr.n r1, LCPI4_0
+; THUMB: movw r1, :lower16:L_test4g$non_lazy_ptr
+; THUMB: movt r1, :upper16:L_test4g$non_lazy_ptr
; THUMB: ldr r1, [r1]
; THUMB: str r0, [r1]
-; ARM: ldr r0, LCPI4_1
+; ARM: movw r0, :lower16:L_test4g$non_lazy_ptr
+; ARM: movt r0, :upper16:L_test4g$non_lazy_ptr
; ARM: ldr r0, [r0]
; ARM: ldr r0, [r0]
; ARM: add r0, r0, #1
-; ARM: ldr r1, LCPI4_0
+; ARM: movw r1, :lower16:L_test4g$non_lazy_ptr
+; ARM: movt r1, :upper16:L_test4g$non_lazy_ptr
; ARM: ldr r1, [r1]
; ARM: str r0, [r1]
}