Don't fold a load if the other operand is a TLS address.

With this change we generate

movl    %gs:0, %eax
leal    i@NTPOFF(%eax), %eax

instead of

movl    $i@NTPOFF, %eax
addl    %gs:0, %eax
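
The benefit shows up when a function takes the address of more than one
thread-local variable: the test added below checks that %gs:0 is loaded
only once while both i@NTPOFF and j@NTPOFF are formed with leal off that
single load. The test is written directly in LLVM IR, but it roughly
corresponds to C source along these lines (an illustrative sketch, not
part of this change):

__thread int i;
__thread int j;

void f(int **a, int **b) {
  *a = &i;  /* TLS address: i@NTPOFF offset from the thread pointer */
  *b = &j;  /* second TLS address reuses the single %gs:0 load      */
}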



git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@68778 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/test/CodeGen/X86/tls15.ll b/test/CodeGen/X86/tls15.ll
new file mode 100644
index 0000000..5d3ee16
--- /dev/null
+++ b/test/CodeGen/X86/tls15.ll
@@ -0,0 +1,14 @@
+; RUN: llvm-as < %s | llc -march=x86 -mtriple=i386-linux-gnu > %t
+; RUN: grep {movl	%gs:0, %eax} %t | count 1
+; RUN: grep {leal	i@NTPOFF(%eax), %ecx} %t
+; RUN: grep {leal	j@NTPOFF(%eax), %eax} %t
+
+@i = thread_local global i32 0
+@j = thread_local global i32 0
+
+define void @f(i32** %a, i32** %b) {
+entry:
+	store i32* @i, i32** %a, align 8
+	store i32* @j, i32** %b, align 8
+	ret void
+}