Don't assume a null GV is local for ELF and MachO.

This is a simplification in itself, and it should help avoid a PLT
reference when calling an intrinsic with -fno-plt.

With this change we return false for a null GV, so the caller only
needs to check the new metadata to decide whether it should use
foo@plt or *foo@got.
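
For illustration only (not part of this patch), a caller lowering a libcall
or intrinsic, where there is no GlobalValue and GV == nullptr, might now
look roughly like the sketch below. shouldAssumeDSOLocal matches the
function changed here; the helper name useDirectLibcall and the
"RtLibUseGOT" module-flag name are placeholders of mine, and the real
-fno-plt metadata check in the lowering code may be named differently.

  #include "llvm/IR/Module.h"
  #include "llvm/Target/TargetMachine.h"
  using namespace llvm;

  // Sketch: choose between a direct call, foo@plt, and *foo@got.
  static bool useDirectLibcall(const TargetMachine &TM, const Module &M) {
    // With this patch a null GV now yields false for ELF and MachO...
    if (TM.shouldAssumeDSOLocal(M, /*GV=*/nullptr))
      return true; // emit "call foo" directly
    // ...so we fall through to the module-level -fno-plt metadata and pick
    // either "call foo@plt" or "call *foo@got(%rip)".
    return !M.getModuleFlag("RtLibUseGOT"); // placeholder flag name
  }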

llvm-svn: 323297
diff --git a/llvm/lib/Target/TargetMachine.cpp b/llvm/lib/Target/TargetMachine.cpp
index ee5b010..76ec541 100644
--- a/llvm/lib/Target/TargetMachine.cpp
+++ b/llvm/lib/Target/TargetMachine.cpp
@@ -137,20 +137,27 @@
   if (TT.isOSBinFormatCOFF() || (TT.isOSWindows() && TT.isOSBinFormatMachO()))
     return true;
 
+  // If GV is null we know that this is a call to an intrinsic. For ELF and
+  // MachO we don't need to assume those are local since the linker can
+  // trivially convert a call via the PLT into a direct call if the target (in
+  // the runtime library) turns out to be local.
+  if (!GV)
+    return false;
+
   // Most PIC code sequences that assume that a symbol is local cannot
   // produce a 0 if it turns out the symbol is undefined. While this
   // is ABI and relocation depended, it seems worth it to handle it
   // here.
-  if (GV && isPositionIndependent() && GV->hasExternalWeakLinkage())
+  if (isPositionIndependent() && GV->hasExternalWeakLinkage())
     return false;
 
-  if (GV && !GV->hasDefaultVisibility())
+  if (!GV->hasDefaultVisibility())
     return true;
 
   if (TT.isOSBinFormatMachO()) {
     if (RM == Reloc::Static)
       return true;
-    return GV && GV->isStrongDefinitionForLinker();
+    return GV->isStrongDefinitionForLinker();
   }
 
   assert(TT.isOSBinFormatELF());
@@ -160,19 +167,19 @@
       RM == Reloc::Static || M.getPIELevel() != PIELevel::Default;
   if (IsExecutable) {
     // If the symbol is defined, it cannot be preempted.
-    if (GV && !GV->isDeclarationForLinker())
+    if (!GV->isDeclarationForLinker())
       return true;
 
     // A symbol marked nonlazybind should not be accessed with a plt. If the
     // symbol turns out to be external, the linker will convert a direct
     // access to an access via the plt, so don't assume it is local.
-    const Function *F = dyn_cast_or_null<Function>(GV);
+    const Function *F = dyn_cast<Function>(GV);
     if (F && F->hasFnAttribute(Attribute::NonLazyBind))
       return false;
 
-    bool IsTLS = GV && GV->isThreadLocal();
+    bool IsTLS = GV->isThreadLocal();
     bool IsAccessViaCopyRelocs =
-        Options.MCOptions.MCPIECopyRelocations && GV && isa<GlobalVariable>(GV);
+        Options.MCOptions.MCPIECopyRelocations && isa<GlobalVariable>(GV);
     Triple::ArchType Arch = TT.getArch();
     bool IsPPC =
         Arch == Triple::ppc || Arch == Triple::ppc64 || Arch == Triple::ppc64le;
diff --git a/llvm/test/CodeGen/X86/finite-libcalls.ll b/llvm/test/CodeGen/X86/finite-libcalls.ll
index d4b79ac..f3d3360 100644
--- a/llvm/test/CodeGen/X86/finite-libcalls.ll
+++ b/llvm/test/CodeGen/X86/finite-libcalls.ll
@@ -9,7 +9,7 @@
 define float @exp_f32(float %x) #0 {
 ; GNU-LABEL: exp_f32:
 ; GNU:       # %bb.0:
-; GNU-NEXT:    jmp __expf_finite # TAILCALL
+; GNU-NEXT:    jmp __expf_finite@PLT # TAILCALL
 ;
 ; WIN-LABEL: exp_f32:
 ; WIN:       # %bb.0:
@@ -25,7 +25,7 @@
 define double @exp_f64(double %x) #0 {
 ; GNU-LABEL: exp_f64:
 ; GNU:       # %bb.0:
-; GNU-NEXT:    jmp __exp_finite # TAILCALL
+; GNU-NEXT:    jmp __exp_finite@PLT # TAILCALL
 ;
 ; WIN-LABEL: exp_f64:
 ; WIN:       # %bb.0:
@@ -72,7 +72,7 @@
 define float @exp2_f32(float %x) #0 {
 ; GNU-LABEL: exp2_f32:
 ; GNU:       # %bb.0:
-; GNU-NEXT:    jmp __exp2f_finite # TAILCALL
+; GNU-NEXT:    jmp __exp2f_finite@PLT # TAILCALL
 ;
 ; WIN-LABEL: exp2_f32:
 ; WIN:       # %bb.0:
@@ -88,7 +88,7 @@
 define double @exp2_f64(double %x) #0 {
 ; GNU-LABEL: exp2_f64:
 ; GNU:       # %bb.0:
-; GNU-NEXT:    jmp __exp2_finite # TAILCALL
+; GNU-NEXT:    jmp __exp2_finite@PLT # TAILCALL
 ;
 ; WIN-LABEL: exp2_f64:
 ; WIN:       # %bb.0:
@@ -135,7 +135,7 @@
 define float @log_f32(float %x) #0 {
 ; GNU-LABEL: log_f32:
 ; GNU:       # %bb.0:
-; GNU-NEXT:    jmp __logf_finite # TAILCALL
+; GNU-NEXT:    jmp __logf_finite@PLT # TAILCALL
 ;
 ; WIN-LABEL: log_f32:
 ; WIN:       # %bb.0:
@@ -151,7 +151,7 @@
 define double @log_f64(double %x) #0 {
 ; GNU-LABEL: log_f64:
 ; GNU:       # %bb.0:
-; GNU-NEXT:    jmp __log_finite # TAILCALL
+; GNU-NEXT:    jmp __log_finite@PLT # TAILCALL
 ;
 ; WIN-LABEL: log_f64:
 ; WIN:       # %bb.0:
@@ -198,7 +198,7 @@
 define float @log2_f32(float %x) #0 {
 ; GNU-LABEL: log2_f32:
 ; GNU:       # %bb.0:
-; GNU-NEXT:    jmp __log2f_finite # TAILCALL
+; GNU-NEXT:    jmp __log2f_finite@PLT # TAILCALL
 ;
 ; WIN-LABEL: log2_f32:
 ; WIN:       # %bb.0:
@@ -214,7 +214,7 @@
 define double @log2_f64(double %x) #0 {
 ; GNU-LABEL: log2_f64:
 ; GNU:       # %bb.0:
-; GNU-NEXT:    jmp __log2_finite # TAILCALL
+; GNU-NEXT:    jmp __log2_finite@PLT # TAILCALL
 ;
 ; WIN-LABEL: log2_f64:
 ; WIN:       # %bb.0:
@@ -261,7 +261,7 @@
 define float @log10_f32(float %x) #0 {
 ; GNU-LABEL: log10_f32:
 ; GNU:       # %bb.0:
-; GNU-NEXT:    jmp __log10f_finite # TAILCALL
+; GNU-NEXT:    jmp __log10f_finite@PLT # TAILCALL
 ;
 ; WIN-LABEL: log10_f32:
 ; WIN:       # %bb.0:
@@ -277,7 +277,7 @@
 define double @log10_f64(double %x) #0 {
 ; GNU-LABEL: log10_f64:
 ; GNU:       # %bb.0:
-; GNU-NEXT:    jmp __log10_finite # TAILCALL
+; GNU-NEXT:    jmp __log10_finite@PLT # TAILCALL
 ;
 ; WIN-LABEL: log10_f64:
 ; WIN:       # %bb.0:
@@ -325,7 +325,7 @@
 ; GNU-LABEL: pow_f32:
 ; GNU:       # %bb.0:
 ; GNU-NEXT:    movaps %xmm0, %xmm1
-; GNU-NEXT:    jmp __powf_finite # TAILCALL
+; GNU-NEXT:    jmp __powf_finite@PLT # TAILCALL
 ;
 ; WIN-LABEL: pow_f32:
 ; WIN:       # %bb.0:
@@ -344,7 +344,7 @@
 ; GNU-LABEL: pow_f64:
 ; GNU:       # %bb.0:
 ; GNU-NEXT:    movaps %xmm0, %xmm1
-; GNU-NEXT:    jmp __pow_finite # TAILCALL
+; GNU-NEXT:    jmp __pow_finite@PLT # TAILCALL
 ;
 ; WIN-LABEL: pow_f64:
 ; WIN:       # %bb.0:
diff --git a/llvm/test/CodeGen/X86/fp-intrinsics.ll b/llvm/test/CodeGen/X86/fp-intrinsics.ll
index eae3955..9816875 100644
--- a/llvm/test/CodeGen/X86/fp-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/fp-intrinsics.ll
@@ -245,7 +245,7 @@
 ; Verify that fma(3.5) isn't simplified when the rounding mode is
 ; unknown.
 ; CHECK-LABEL: f17
-; FMACALL32: jmp fmaf  # TAILCALL
+; FMACALL32: jmp fmaf@PLT  # TAILCALL
 ; FMA32: vfmadd213ss
 define float @f17() {
 entry:
@@ -261,7 +261,7 @@
 ; Verify that fma(42.1) isn't simplified when the rounding mode is
 ; unknown.
 ; CHECK-LABEL: f18
-; FMACALL64: jmp fma  # TAILCALL
+; FMACALL64: jmp fma@PLT  # TAILCALL
 ; FMA64: vfmadd213sd
 define double @f18() {
 entry:
diff --git a/llvm/test/CodeGen/X86/half.ll b/llvm/test/CodeGen/X86/half.ll
index 20db4a5..b438e84 100644
--- a/llvm/test/CodeGen/X86/half.ll
+++ b/llvm/test/CodeGen/X86/half.ll
@@ -75,7 +75,7 @@
 ; CHECK-LIBCALL-LABEL: test_extend32:
 ; CHECK-LIBCALL:       # %bb.0:
 ; CHECK-LIBCALL-NEXT:    movzwl (%rdi), %edi
-; CHECK-LIBCALL-NEXT:    jmp __gnu_h2f_ieee # TAILCALL
+; CHECK-LIBCALL-NEXT:    jmp __gnu_h2f_ieee@PLT # TAILCALL
 ;
 ; BWON-F16C-LABEL: test_extend32:
 ; BWON-F16C:       # %bb.0:
diff --git a/llvm/test/CodeGen/X86/memset-nonzero.ll b/llvm/test/CodeGen/X86/memset-nonzero.ll
index 37b98b4..e7192b0 100644
--- a/llvm/test/CodeGen/X86/memset-nonzero.ll
+++ b/llvm/test/CodeGen/X86/memset-nonzero.ll
@@ -394,7 +394,7 @@
 ; SSE-LABEL: memset_256_nonconst_bytes:
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    movl $256, %edx # imm = 0x100
-; SSE-NEXT:    jmp memset # TAILCALL
+; SSE-NEXT:    jmp memset@PLT # TAILCALL
 ;
 ; SSE2FAST-LABEL: memset_256_nonconst_bytes:
 ; SSE2FAST:       # %bb.0:
diff --git a/llvm/test/CodeGen/X86/negative-sin.ll b/llvm/test/CodeGen/X86/negative-sin.ll
index c30cd27..9f00dbb 100644
--- a/llvm/test/CodeGen/X86/negative-sin.ll
+++ b/llvm/test/CodeGen/X86/negative-sin.ll
@@ -28,7 +28,7 @@
 define double @fast(double %e) nounwind {
 ; CHECK-LABEL: fast:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    jmp sin # TAILCALL
+; CHECK-NEXT:    jmp sin@PLT # TAILCALL
   %f = fsub fast double 0.0, %e
   %g = call double @sin(double %f) readonly
   %h = fsub fast double 0.0, %g
@@ -40,7 +40,7 @@
 define double @nsz(double %e) nounwind {
 ; CHECK-LABEL: nsz:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    jmp sin # TAILCALL
+; CHECK-NEXT:    jmp sin@PLT # TAILCALL
   %f = fsub nsz double 0.0, %e
   %g = call double @sin(double %f) readonly
   %h = fsub nsz double 0.0, %g
@@ -88,7 +88,7 @@
 define double @fn_attr(double %e) nounwind #0 {
 ; CHECK-LABEL: fn_attr:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    jmp sin # TAILCALL
+; CHECK-NEXT:    jmp sin@PLT # TAILCALL
   %f = fsub double 0.0, %e
   %g = call double @sin(double %f) readonly
   %h = fsub double 0.0, %g
diff --git a/llvm/test/CodeGen/X86/vector-half-conversions.ll b/llvm/test/CodeGen/X86/vector-half-conversions.ll
index 249a358..a6d7afb 100644
--- a/llvm/test/CodeGen/X86/vector-half-conversions.ll
+++ b/llvm/test/CodeGen/X86/vector-half-conversions.ll
@@ -2953,7 +2953,7 @@
 define i16 @cvt_f64_to_i16(double %a0) nounwind {
 ; ALL-LABEL: cvt_f64_to_i16:
 ; ALL:       # %bb.0:
-; ALL-NEXT:    jmp __truncdfhf2 # TAILCALL
+; ALL-NEXT:    jmp __truncdfhf2@PLT # TAILCALL
   %1 = fptrunc double %a0 to half
   %2 = bitcast half %1 to i16
   ret i16 %2