AArch64: remove ConstantIslands pass & put literals in a separate section.

This implements the review suggestion to simplify the AArch64 backend. If we
later discover that we *really* need the extra complexity of the
ConstantIslands pass for performance reasons, it can be resurrected.
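
Concretely, a literal load that previously used a single PC-relative LDR
against an inline constant island now goes through an ADRP/LDR pair with a
:lo12: relocation against the literal's new home in a separate section. A
sketch of the pattern shift, based on the CHECK lines updated below (register
numbers are arbitrary):

  Before:
    ldr   x0, .LCPI0_0

  After:
    adrp  x8, .LCPI0_0
    ldr   x0, [x8, #:lo12:.LCPI0_0]

The test changes below just retarget the CHECK lines at this two-instruction
pattern.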

llvm-svn: 175258
diff --git a/llvm/test/CodeGen/AArch64/adrp-relocation.ll b/llvm/test/CodeGen/AArch64/adrp-relocation.ll
index 3eeb53e3..c33b442 100644
--- a/llvm/test/CodeGen/AArch64/adrp-relocation.ll
+++ b/llvm/test/CodeGen/AArch64/adrp-relocation.ll
@@ -1,16 +1,16 @@
 ; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs -filetype=obj < %s | elf-dump | FileCheck %s
 
-define fp128 @testfn() nounwind {
+define i64 @testfn() nounwind {
 entry:
-  ret fp128 0xL00000000000000004004500000000000
+  ret i64 0
 }
 
-define fp128 @foo() nounwind {
+define i64 @foo() nounwind {
 entry:
-  %bar = alloca fp128 ()*, align 8
-  store fp128 ()* @testfn, fp128 ()** %bar, align 8
-  %call = call fp128 @testfn()
-  ret fp128 %call
+  %bar = alloca i64 ()*, align 8
+  store i64 ()* @testfn, i64 ()** %bar, align 8
+  %call = call i64 @testfn()
+  ret i64 %call
 }
 
 ; The above should produce an ADRP/ADD pair to calculate the address of
@@ -22,14 +22,14 @@
 ; CHECK: .rela.text
 
 ; CHECK: # Relocation 0
-; CHECK-NEXT: (('r_offset', 0x0000000000000028)
-; CHECK-NEXT:  ('r_sym', 0x00000009)
+; CHECK-NEXT: (('r_offset', 0x0000000000000010)
+; CHECK-NEXT:  ('r_sym', 0x00000007)
 ; CHECK-NEXT:  ('r_type', 0x00000113)
 ; CHECK-NEXT:  ('r_addend', 0x0000000000000000)
 ; CHECK-NEXT: ),
 ; CHECK-NEXT:  Relocation 1
-; CHECK-NEXT: (('r_offset', 0x000000000000002c)
-; CHECK-NEXT:  ('r_sym', 0x00000009)
+; CHECK-NEXT: (('r_offset', 0x0000000000000014)
+; CHECK-NEXT:  ('r_sym', 0x00000007)
 ; CHECK-NEXT:  ('r_type', 0x00000115)
 ; CHECK-NEXT:  ('r_addend', 0x0000000000000000)
 ; CHECK-NEXT: ),
diff --git a/llvm/test/CodeGen/AArch64/extern-weak.ll b/llvm/test/CodeGen/AArch64/extern-weak.ll
index 54baab2..2989776 100644
--- a/llvm/test/CodeGen/AArch64/extern-weak.ll
+++ b/llvm/test/CodeGen/AArch64/extern-weak.ll
@@ -6,8 +6,9 @@
 ; The usual ADRP/ADD pair can't be used for a weak reference because it must
 ; evaluate to 0 if the symbol is undefined. We use a litpool entry.
   ret i32()* @var
-; CHECK: ldr x0, .LCPI0_0
-
 ; CHECK: .LCPI0_0:
 ; CHECK-NEXT: .xword var
+
+; CHECK: ldr x0, [{{x[0-9]+}}, #:lo12:.LCPI0_0]
+
 }
diff --git a/llvm/test/CodeGen/AArch64/fp-cond-sel.ll b/llvm/test/CodeGen/AArch64/fp-cond-sel.ll
index 0d5882b..56e8f16 100644
--- a/llvm/test/CodeGen/AArch64/fp-cond-sel.ll
+++ b/llvm/test/CodeGen/AArch64/fp-cond-sel.ll
@@ -9,15 +9,15 @@
   %tst1 = icmp ugt i32 %lhs32, %rhs32
   %val1 = select i1 %tst1, float 0.0, float 1.0
   store float %val1, float* @varfloat
+; CHECK: ldr [[FLT0:s[0-9]+]], [{{x[0-9]+}}, #:lo12:.LCPI
 ; CHECK: fmov [[FLT1:s[0-9]+]], #1.0
-; CHECK: ldr [[FLT0:s[0-9]+]], .LCPI
 ; CHECK: fcsel {{s[0-9]+}}, [[FLT0]], [[FLT1]], hi
 
   %rhs64 = sext i32 %rhs32 to i64
   %tst2 = icmp sle i64 %lhs64, %rhs64
   %val2 = select i1 %tst2, double 1.0, double 0.0
   store double %val2, double* @vardouble
-; CHECK: ldr [[FLT0:d[0-9]+]], .LCPI
+; CHECK: ldr [[FLT0:d[0-9]+]], [{{x[0-9]+}}, #:lo12:.LCPI
 ; CHECK: fmov [[FLT1:d[0-9]+]], #1.0
 ; CHECK: fcsel {{d[0-9]+}}, [[FLT1]], [[FLT0]], le
 
diff --git a/llvm/test/CodeGen/AArch64/fp128-folding.ll b/llvm/test/CodeGen/AArch64/fp128-folding.ll
index b2c3040..b5bdcf4 100644
--- a/llvm/test/CodeGen/AArch64/fp128-folding.ll
+++ b/llvm/test/CodeGen/AArch64/fp128-folding.ll
@@ -12,6 +12,6 @@
   %fpval = sitofp i32 %val to fp128
   ; If the value is loaded from a constant pool into an fp128, it's been folded
   ; successfully.
-; CHECK: ldr {{q[0-9]+}}, .LCPI
+; CHECK: ldr {{q[0-9]+}}, [{{x[0-9]+}}, #:lo12:.LCPI
   ret fp128 %fpval
 }
\ No newline at end of file
diff --git a/llvm/test/CodeGen/AArch64/fp128.ll b/llvm/test/CodeGen/AArch64/fp128.ll
index 8fd8a30..258d34b 100644
--- a/llvm/test/CodeGen/AArch64/fp128.ll
+++ b/llvm/test/CodeGen/AArch64/fp128.ll
@@ -261,6 +261,10 @@
 }
 
 define fp128 @test_neg(fp128 %in) {
+; CHECK: [[MINUS0:.LCPI[0-9]+_0]]:
+; Make sure the weird hex constant below *is* -0.0
+; CHECK-NEXT: fp128 -0
+
 ; CHECK: test_neg:
 
   ; Could in principle be optimized to fneg which we can't select, this makes
@@ -268,13 +272,9 @@
   %ret = fsub fp128 0xL00000000000000008000000000000000, %in
 ; CHECK: str q0, [sp, #-16]
 ; CHECK-NEXT: ldr q1, [sp], #16
-; CHECK: ldr q0, [[MINUS0:.LCPI[0-9]+_0]]
+; CHECK: ldr q0, [{{x[0-9]+}}, #:lo12:[[MINUS0]]]
 ; CHECK: bl __subtf3
 
   ret fp128 %ret
 ; CHECK: ret
-
-; CHECK: [[MINUS0]]:
-; Make sure the weird hex constant below *is* -0.0
-; CHECK-NEXT: fp128 -0
 }
diff --git a/llvm/test/CodeGen/AArch64/fpimm.ll b/llvm/test/CodeGen/AArch64/fpimm.ll
index fad2151..fd28aee 100644
--- a/llvm/test/CodeGen/AArch64/fpimm.ll
+++ b/llvm/test/CodeGen/AArch64/fpimm.ll
@@ -13,7 +13,7 @@
 
   %newval2 = fadd float %val, 128.0
   store volatile float %newval2, float* @varf32
-; CHECK: ldr {{s[0-9]+}}, .LCPI0_0
+; CHECK: ldr {{s[0-9]+}}, [{{x[0-9]+}}, #:lo12:.LCPI0_0
 
   ret void
 }
@@ -28,7 +28,7 @@
 
   %newval2 = fadd double %val, 128.0
   store volatile double %newval2, double* @varf64
-; CHECK: ldr {{d[0-9]+}}, .LCPI1_0
+; CHECK: ldr {{d[0-9]+}}, [{{x[0-9]+}}, #:lo12:.LCPI1_0
 
   ret void
 }
diff --git a/llvm/test/CodeGen/AArch64/func-argpassing.ll b/llvm/test/CodeGen/AArch64/func-argpassing.ll
index 760c8d4..5675e5a 100644
--- a/llvm/test/CodeGen/AArch64/func-argpassing.ll
+++ b/llvm/test/CodeGen/AArch64/func-argpassing.ll
@@ -83,7 +83,7 @@
 define double @return_double() {
 ; CHECK: return_double:
     ret double 3.14
-; CHECK: ldr d0, .LCPI
+; CHECK: ldr d0, [{{x[0-9]+}}, #:lo12:.LCPI
 }
 
 ; This is the kind of IR clang will produce for returning a struct
diff --git a/llvm/test/CodeGen/AArch64/func-calls.ll b/llvm/test/CodeGen/AArch64/func-calls.ll
index 8810d1c..abb09a5 100644
--- a/llvm/test/CodeGen/AArch64/func-calls.ll
+++ b/llvm/test/CodeGen/AArch64/func-calls.ll
@@ -90,7 +90,7 @@
   call void @stacked_fpu(float -1.0, double 1.0, float 4.0, float 2.0,
                          float -2.0, float -8.0, float 16.0, float 1.0,
                          float 64.0)
-; CHECK: ldr s[[STACKEDREG:[0-9]+]], .LCPI
+; CHECK: ldr s[[STACKEDREG:[0-9]+]], [{{x[0-9]+}}, #:lo12:.LCPI
 ; CHECK: mov x0, sp
 ; CHECK: str d[[STACKEDREG]], [x0]
 ; CHECK: bl stacked_fpu
diff --git a/llvm/test/CodeGen/AArch64/literal_pools.ll b/llvm/test/CodeGen/AArch64/literal_pools.ll
index a14dfc1..e090841 100644
--- a/llvm/test/CodeGen/AArch64/literal_pools.ll
+++ b/llvm/test/CodeGen/AArch64/literal_pools.ll
@@ -10,19 +10,23 @@
 
     %val32_lit32 = and i32 %val32, 123456785
     store volatile i32 %val32_lit32, i32* @var32
-; CHECK: ldr {{w[0-9]+}}, .LCPI0
+; CHECK: adrp x[[LITBASE:[0-9]+]], [[CURLIT:.LCPI0_[0-9]+]]
+; CHECK: ldr {{w[0-9]+}}, [x[[LITBASE]], #:lo12:[[CURLIT]]]
 
     %val64_lit32 = and i64 %val64, 305402420
     store volatile i64 %val64_lit32, i64* @var64
-; CHECK: ldr {{w[0-9]+}}, .LCPI0
+; CHECK: adrp x[[LITBASE:[0-9]+]], [[CURLIT:.LCPI0_[0-9]+]]
+; CHECK: ldr {{w[0-9]+}}, [x[[LITBASE]], #:lo12:[[CURLIT]]]
 
     %val64_lit32signed = and i64 %val64, -12345678
     store volatile i64 %val64_lit32signed, i64* @var64
-; CHECK: ldrsw {{x[0-9]+}}, .LCPI0
+; CHECK: adrp x[[LITBASE:[0-9]+]], [[CURLIT:.LCPI0_[0-9]+]]
+; CHECK: ldrsw {{x[0-9]+}}, [x[[LITBASE]], #:lo12:[[CURLIT]]]
 
     %val64_lit64 = and i64 %val64, 1234567898765432
     store volatile i64 %val64_lit64, i64* @var64
-; CHECK: ldr {{x[0-9]+}}, .LCPI0
+; CHECK: adrp x[[LITBASE:[0-9]+]], [[CURLIT:.LCPI0_[0-9]+]]
+; CHECK: ldr {{x[0-9]+}}, [x[[LITBASE]], #:lo12:[[CURLIT]]]
 
     ret void
 }
@@ -35,13 +39,15 @@
 
   %floatval = load float* @varfloat
   %newfloat = fadd float %floatval, 128.0
-; CHECK: ldr {{s[0-9]+}}, .LCPI1
+; CHECK: adrp x[[LITBASE:[0-9]+]], [[CURLIT:.LCPI1_[0-9]+]]
+; CHECK: ldr {{s[0-9]+}}, [x[[LITBASE]], #:lo12:[[CURLIT]]]
 ; CHECK: fadd
   store float %newfloat, float* @varfloat
 
   %doubleval = load double* @vardouble
   %newdouble = fadd double %doubleval, 129.0
-; CHECK: ldr {{d[0-9]+}}, .LCPI1
+; CHECK: adrp x[[LITBASE:[0-9]+]], [[CURLIT:.LCPI1_[0-9]+]]
+; CHECK: ldr {{d[0-9]+}}, [x[[LITBASE]], #:lo12:[[CURLIT]]]
 ; CHECK: fadd
   store double %newdouble, double* @vardouble