Revert r307026, "[AMDGPU] Switch scalarize global loads ON by default"

It broke a test case:

  Failing Tests (1):
      LLVM :: CodeGen/AMDGPU/alignbit-pat.ll

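For reference, the pattern the tests below exercise (a minimal sketch, not taken from
the patch; the function name is made up): a load through a kernel-argument pointer is
uniform, so with scalarized global loads it can be selected to an SMEM load (e.g.
s_load_dwordx2) and kept in SGPRs, letting the 64-bit add use s_add_u32/s_addc_u32.
With the feature switched back off, the loaded value lives in VGPRs and the add goes
through the VALU, which is what the restored v_add_i32/v_addc_u32 CHECK lines expect.

  ; Illustrative IR only, mirroring the tests below.
  define amdgpu_kernel void @uniform_load_add(i64 addrspace(1)* %out, i64 addrspace(1)* %in, i64 %a) {
    %v = load i64, i64 addrspace(1)* %in, align 8    ; uniform address (kernel argument)
    %sum = add i64 %v, %a                            ; scalarized: s_add_u32/s_addc_u32; reverted: v_add_i32/v_addc_u32
    store i64 %sum, i64 addrspace(1)* %out, align 8
    ret void
  }
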
llvm-svn: 307054
diff --git a/llvm/test/CodeGen/AMDGPU/add_i64.ll b/llvm/test/CodeGen/AMDGPU/add_i64.ll
index f673d91..62733d5 100644
--- a/llvm/test/CodeGen/AMDGPU/add_i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/add_i64.ll
@@ -19,8 +19,8 @@
 
 ; Check that the SGPR add operand is correctly moved to a VGPR.
 ; SI-LABEL: {{^}}sgpr_operand:
-; SI: s_add_u32
-; SI: s_addc_u32
+; SI: v_add_i32
+; SI: v_addc_u32
 define amdgpu_kernel void @sgpr_operand(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in, i64 addrspace(1)* noalias %in_bar, i64 %a) {
   %foo = load i64, i64 addrspace(1)* %in, align 8
   %result = add i64 %foo, %a
@@ -32,8 +32,8 @@
 ; SGPR as other operand.
 ;
 ; SI-LABEL: {{^}}sgpr_operand_reversed:
-; SI: s_add_u32
-; SI: s_addc_u32
+; SI: v_add_i32
+; SI: v_addc_u32
 define amdgpu_kernel void @sgpr_operand_reversed(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in, i64 %a) {
   %foo = load i64, i64 addrspace(1)* %in, align 8
   %result = add i64 %a, %foo