Re-land MachineInstr: Reason locally about some memory objects before going to AA.
Summary:
Reverts r311008 to reinstate r310825 with a fix.
Refine alias checking for pseudo source values vs. IR values to be conservative.
This fixes the original failure in the buildbot unit test SingleSource/UnitTests/2003-07-09-SignedArgs.
Reviewers: hfinkel, nemanjai, efriedma
Reviewed By: efriedma
Subscribers: bjope, mcrosier, nhaehnle, javed.absar, llvm-commits
Differential Revision: https://reviews.llvm.org/D36900
llvm-svn: 312126
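
For context, a minimal standalone sketch of the local reasoning this patch enables. The MemOp struct and mayAliasLocally below are illustrative names, not the LLVM API: two accesses that share a base object and have disjoint offset ranges can be proven non-overlapping without querying AliasAnalysis, while a pseudo-source-value operand paired with an IR-value operand is handled conservatively (assumed to alias), which is the fix folded into this re-land.

  #include <algorithm>
  #include <cstdint>
  #include <optional>

  // Illustrative model of a machine memory operand: either backed by an IR
  // value or by a pseudo source value (stack slot, GOT, constant pool, ...),
  // identified here by an opaque id, plus an offset and an access size.
  struct MemOp {
    bool IsPseudo = false;        // pseudo source value vs. IR value
    int BaseId = 0;               // identity of the underlying object
    int64_t Offset = 0;           // byte offset from the base
    std::optional<uint64_t> Size; // access size in bytes, if known
  };

  // Returns false only when the two accesses provably do not overlap; any
  // other case answers "may alias" so the caller can fall back to AA.
  bool mayAliasLocally(const MemOp &A, const MemOp &B) {
    // Mixed pseudo/IR pairs: be conservative.
    if (A.IsPseudo != B.IsPseudo)
      return true;
    // Different underlying objects: no local conclusion, defer to AA.
    if (A.BaseId != B.BaseId)
      return true;
    // Unknown access size: cannot prove disjointness.
    if (!A.Size || !B.Size)
      return true;
    // Same object: disjoint [Offset, Offset+Size) ranges cannot alias.
    int64_t LowOff = std::min(A.Offset, B.Offset);
    uint64_t LowSz = (A.Offset <= B.Offset) ? *A.Size : *B.Size;
    int64_t HighOff = std::max(A.Offset, B.Offset);
    return LowOff + (int64_t)LowSz > HighOff;
  }

The test updates below reflect this: loads and stores to provably disjoint offsets of the same base are now free to be reordered by the scheduler, so several CHECK lines move.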
diff --git a/llvm/test/CodeGen/X86/illegal-bitfield-loadstore.ll b/llvm/test/CodeGen/X86/illegal-bitfield-loadstore.ll
index 5425670..8059e4a 100644
--- a/llvm/test/CodeGen/X86/illegal-bitfield-loadstore.ll
+++ b/llvm/test/CodeGen/X86/illegal-bitfield-loadstore.ll
@@ -118,17 +118,17 @@
; X64: # BB#0:
; X64-NEXT: movzwl 4(%rdi), %eax
; X64-NEXT: movzbl 6(%rdi), %ecx
-; X64-NEXT: movl (%rdi), %edx
; X64-NEXT: movb %cl, 6(%rdi)
; X64-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<kill> %RCX<def>
; X64-NEXT: shll $16, %ecx
; X64-NEXT: orl %eax, %ecx
; X64-NEXT: shlq $32, %rcx
-; X64-NEXT: orq %rcx, %rdx
-; X64-NEXT: orq $384, %rdx # imm = 0x180
-; X64-NEXT: movl %edx, (%rdi)
-; X64-NEXT: shrq $32, %rdx
-; X64-NEXT: movw %dx, 4(%rdi)
+; X64-NEXT: movl (%rdi), %eax
+; X64-NEXT: orq %rcx, %rax
+; X64-NEXT: orq $384, %rax # imm = 0x180
+; X64-NEXT: movl %eax, (%rdi)
+; X64-NEXT: shrq $32, %rax
+; X64-NEXT: movw %ax, 4(%rdi)
; X64-NEXT: retq
%aa = load i56, i56* %a, align 1
%b = or i56 %aa, 384
@@ -150,19 +150,19 @@
; X64: # BB#0:
; X64-NEXT: movzwl 4(%rdi), %eax
; X64-NEXT: movzbl 6(%rdi), %ecx
-; X64-NEXT: movl (%rdi), %edx
; X64-NEXT: movb %cl, 6(%rdi)
; X64-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<kill> %RCX<def>
; X64-NEXT: shll $16, %ecx
; X64-NEXT: orl %eax, %ecx
; X64-NEXT: shlq $32, %rcx
-; X64-NEXT: orq %rcx, %rdx
-; X64-NEXT: orq $384, %rdx # imm = 0x180
-; X64-NEXT: movabsq $72057594037927808, %rax # imm = 0xFFFFFFFFFFFF80
-; X64-NEXT: andq %rdx, %rax
-; X64-NEXT: movl %eax, (%rdi)
-; X64-NEXT: shrq $32, %rax
-; X64-NEXT: movw %ax, 4(%rdi)
+; X64-NEXT: movl (%rdi), %eax
+; X64-NEXT: orq %rcx, %rax
+; X64-NEXT: orq $384, %rax # imm = 0x180
+; X64-NEXT: movabsq $72057594037927808, %rcx # imm = 0xFFFFFFFFFFFF80
+; X64-NEXT: andq %rax, %rcx
+; X64-NEXT: movl %ecx, (%rdi)
+; X64-NEXT: shrq $32, %rcx
+; X64-NEXT: movw %cx, 4(%rdi)
; X64-NEXT: retq
%b = load i56, i56* %a, align 1
%c = and i56 %b, -128
@@ -188,20 +188,20 @@
; X64-NEXT: movzbl %sil, %eax
; X64-NEXT: movzwl 4(%rdi), %ecx
; X64-NEXT: movzbl 6(%rdi), %edx
-; X64-NEXT: movl (%rdi), %esi
; X64-NEXT: movb %dl, 6(%rdi)
; X64-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<kill> %RDX<def>
; X64-NEXT: shll $16, %edx
; X64-NEXT: orl %ecx, %edx
; X64-NEXT: shlq $32, %rdx
-; X64-NEXT: orq %rdx, %rsi
+; X64-NEXT: movl (%rdi), %ecx
+; X64-NEXT: orq %rdx, %rcx
; X64-NEXT: shlq $13, %rax
-; X64-NEXT: movabsq $72057594037919743, %rcx # imm = 0xFFFFFFFFFFDFFF
-; X64-NEXT: andq %rsi, %rcx
-; X64-NEXT: orq %rax, %rcx
-; X64-NEXT: movl %ecx, (%rdi)
-; X64-NEXT: shrq $32, %rcx
-; X64-NEXT: movw %cx, 4(%rdi)
+; X64-NEXT: movabsq $72057594037919743, %rdx # imm = 0xFFFFFFFFFFDFFF
+; X64-NEXT: andq %rcx, %rdx
+; X64-NEXT: orq %rax, %rdx
+; X64-NEXT: movl %edx, (%rdi)
+; X64-NEXT: shrq $32, %rdx
+; X64-NEXT: movw %dx, 4(%rdi)
; X64-NEXT: retq
%extbit = zext i1 %bit to i56
%b = load i56, i56* %a, align 1
diff --git a/llvm/test/CodeGen/X86/memcpy-2.ll b/llvm/test/CodeGen/X86/memcpy-2.ll
index 7ef61c9..bd8f6e91 100644
--- a/llvm/test/CodeGen/X86/memcpy-2.ll
+++ b/llvm/test/CodeGen/X86/memcpy-2.ll
@@ -12,23 +12,23 @@
define void @t1(i32 %argc, i8** %argv) nounwind {
entry:
; SSE2-Darwin-LABEL: t1:
-; SSE2-Darwin: movsd _.str+16, %xmm0
-; SSE2-Darwin: movsd %xmm0, 16(%esp)
; SSE2-Darwin: movaps _.str, %xmm0
; SSE2-Darwin: movaps %xmm0
+; SSE2-Darwin: movsd _.str+16, %xmm0
+; SSE2-Darwin: movsd %xmm0, 16(%esp)
; SSE2-Darwin: movb $0, 24(%esp)
; SSE2-Mingw32-LABEL: t1:
-; SSE2-Mingw32: movsd _.str+16, %xmm0
-; SSE2-Mingw32: movsd %xmm0, 16(%esp)
; SSE2-Mingw32: movaps _.str, %xmm0
; SSE2-Mingw32: movups %xmm0
+; SSE2-Mingw32: movsd _.str+16, %xmm0
+; SSE2-Mingw32: movsd %xmm0, 16(%esp)
; SSE2-Mingw32: movb $0, 24(%esp)
; SSE1-LABEL: t1:
; SSE1: movaps _.str, %xmm0
-; SSE1: movaps %xmm0
; SSE1: movb $0, 24(%esp)
+; SSE1: movaps %xmm0
; SSE1: movl $0, 20(%esp)
; SSE1: movl $0, 16(%esp)
diff --git a/llvm/test/CodeGen/X86/pr34088.ll b/llvm/test/CodeGen/X86/pr34088.ll
index d3667e3..259c735 100644
--- a/llvm/test/CodeGen/X86/pr34088.ll
+++ b/llvm/test/CodeGen/X86/pr34088.ll
@@ -25,8 +25,8 @@
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: movaps %xmm0, (%esp)
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: movaps %xmm1, (%esp)
; CHECK-NEXT: movl $-842150451, {{[0-9]+}}(%esp) # imm = 0xCDCDCDCD
+; CHECK-NEXT: movaps %xmm1, (%esp)
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%esp)
; CHECK-NEXT: movl %ebp, %esp
; CHECK-NEXT: popl %ebp
diff --git a/llvm/test/CodeGen/X86/select.ll b/llvm/test/CodeGen/X86/select.ll
index 34a2d22..8c5c0ed 100644
--- a/llvm/test/CodeGen/X86/select.ll
+++ b/llvm/test/CodeGen/X86/select.ll
@@ -349,8 +349,8 @@
; ATOM-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
; ATOM-NEXT: paddd %xmm2, %xmm0
; ATOM-NEXT: paddd %xmm2, %xmm1
-; ATOM-NEXT: movq %xmm1, 16(%rsi)
; ATOM-NEXT: movdqa %xmm0, (%rsi)
+; ATOM-NEXT: movq %xmm1, 16(%rsi)
; ATOM-NEXT: retq
; ATOM-NEXT: ## -- End function
;
diff --git a/llvm/test/CodeGen/X86/widen_arith-3.ll b/llvm/test/CodeGen/X86/widen_arith-3.ll
index e363a82..d53e828 100644
--- a/llvm/test/CodeGen/X86/widen_arith-3.ll
+++ b/llvm/test/CodeGen/X86/widen_arith-3.ll
@@ -16,9 +16,9 @@
; CHECK-NEXT: movl {{\.LCPI.*}}, %eax
; CHECK-NEXT: movdqa {{.*#+}} xmm1 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; CHECK-NEXT: pcmpeqd %xmm0, %xmm0
+; CHECK-NEXT: movw $1, {{[0-9]+}}(%esp)
; CHECK-NEXT: movl $0, {{[0-9]+}}(%esp)
; CHECK-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; CHECK-NEXT: movw $1, {{[0-9]+}}(%esp)
; CHECK-NEXT: jmp .LBB0_1
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: .LBB0_2: # %forbody