R600/SI: Change all instruction assembly names to lowercase.

This matches the format produced by the AMD proprietary driver.
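
For example, a FileCheck line that previously matched the upper-case
mnemonic

  ; SI: S_ENDPGM

now checks for the lower-case form:

  ; SI: s_endpgm

(Both forms appear in the diff below.)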

//==================================================================//
// Shell script for converting .ll test cases. (Pass the .ll files
// you want to convert to this script as arguments.)
//==================================================================//

# This was necessary on my system so that A-Z in sed would match only
# upper case, presumably because bracket ranges follow collation order
# in non-C locales and can also match lower-case letters.
export LC_ALL='C'
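# (Illustrative only.) Under LC_ALL=C the following should print nothing,
# since [A-Z] is then a plain byte range and cannot match lower case:
#   echo 'v_lshlrev_b32' | sed -n '/[A-Z]/p'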

TEST_FILES="$*"

MATCHES=`grep -v Patterns SIInstructions.td | grep -o '"[A-Z0-9_]\+["e]' | grep -o '[A-Z0-9_]\+' | sort -r`

for f in $TEST_FILES; do
  # Check that there are SI tests:
  grep -q -e 'verde' -e 'bonaire' -e 'SI' -e 'tahiti' $f
  if [ $? -eq 0 ]; then
    for match in $MATCHES; do
      sed -i -e "s/\([ :]$match\)/\L\1/" $f
    done

    # Try to get check lines with partial instruction names
    sed -i 's/\(;[ ]*SI[A-Z\\-]*: \)\([A-Z_0-9]\+\)/\1\L\2/' $f
  fi
done

sed -i -e 's/bb0_1/BB0_1/g' ../../../test/CodeGen/R600/infinite-loop.ll
sed -i -e 's/SI-NOT: bfe/SI-NOT: {{[^@]}}bfe/g' ../../../test/CodeGen/R600/llvm.AMDGPU.bfe.*32.ll ../../../test/CodeGen/R600/sext-in-reg.ll
sed -i -e 's/exp_IEEE/EXP_IEEE/g' ../../../test/CodeGen/R600/llvm.exp2.ll
sed -i -e 's/numVgprs/NumVgprs/g' ../../../test/CodeGen/R600/register-count-comments.ll
sed -i 's/\(; CHECK[-NOT]*: \)\([A-Z_0-9]\+\)/\1\L\2/' ../../../test/CodeGen/R600/select64.ll ../../../test/CodeGen/R600/sgpr-copy.ll
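
To apply the conversion, run the script above from the directory that
contains SIInstructions.td, so that both the .td grep and the relative
../../../test/CodeGen/R600 paths resolve; for example (the script file
name here is hypothetical):

  bash convert_si_tests.sh ../../../test/CodeGen/R600/*.ll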

//==================================================================//
// Shell script for converting .td files (run this last)
//==================================================================//

export LC_ALL='C'
sed -i -e '/Patterns/!s/\("[A-Z0-9_]\+[ "e]\)/\L\1/g' SIInstructions.td
sed -i -e 's/"EXP/"exp/g' SIInstrInfo.td
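
After both scripts have run, a rough sanity check is to look for SI check
lines whose pattern still starts with an upper-case, underscore-containing
token (a likely unconverted mnemonic). This is only an approximation and
will produce false positives (e.g. the BB0_1 label restored above), so
review the hits by hand:

  # Flag SI check lines that still begin with an upper-case mnemonic-like token.
  grep -n '; SI.*: [A-Z][A-Z_0-9]*_' ../../../test/CodeGen/R600/*.ll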

llvm-svn: 221350
diff --git a/llvm/test/CodeGen/R600/shl_add_ptr.ll b/llvm/test/CodeGen/R600/shl_add_ptr.ll
index 344d313..e6dfc58 100644
--- a/llvm/test/CodeGen/R600/shl_add_ptr.ll
+++ b/llvm/test/CodeGen/R600/shl_add_ptr.ll
@@ -15,9 +15,9 @@
 ; Make sure the (add tid, 2) << 2 gets folded into the ds's offset as (tid << 2) + 8
 
 ; SI-LABEL: {{^}}load_shl_base_lds_0:
-; SI: V_LSHLREV_B32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
-; SI: DS_READ_B32 {{v[0-9]+}}, [[PTR]] offset:8 [M0]
-; SI: S_ENDPGM
+; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
+; SI: ds_read_b32 {{v[0-9]+}}, [[PTR]] offset:8 [M0]
+; SI: s_endpgm
 define void @load_shl_base_lds_0(float addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
   %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
   %idx.0 = add nsw i32 %tid.x, 2
@@ -32,12 +32,12 @@
 ; remaining add use goes through the normal shl + add constant fold.
 
 ; SI-LABEL: {{^}}load_shl_base_lds_1:
-; SI: V_LSHLREV_B32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
-; SI: DS_READ_B32 [[RESULT:v[0-9]+]], [[PTR]] offset:8 [M0]
-; SI: V_ADD_I32_e32 [[ADDUSE:v[0-9]+]], 8, v{{[0-9]+}}
-; SI-DAG: BUFFER_STORE_DWORD [[RESULT]]
-; SI-DAG: BUFFER_STORE_DWORD [[ADDUSE]]
-; SI: S_ENDPGM
+; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
+; SI: ds_read_b32 [[RESULT:v[0-9]+]], [[PTR]] offset:8 [M0]
+; SI: v_add_i32_e32 [[ADDUSE:v[0-9]+]], 8, v{{[0-9]+}}
+; SI-DAG: buffer_store_dword [[RESULT]]
+; SI-DAG: buffer_store_dword [[ADDUSE]]
+; SI: s_endpgm
 define void @load_shl_base_lds_1(float addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
   %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
   %idx.0 = add nsw i32 %tid.x, 2
@@ -52,8 +52,8 @@
 @maxlds = addrspace(3) global [65536 x i8] zeroinitializer, align 4
 
 ; SI-LABEL: {{^}}load_shl_base_lds_max_offset
-; SI: DS_READ_U8 v{{[0-9]+}}, v{{[0-9]+}} offset:65535
-; SI: S_ENDPGM
+; SI: ds_read_u8 v{{[0-9]+}}, v{{[0-9]+}} offset:65535
+; SI: s_endpgm
 define void @load_shl_base_lds_max_offset(i8 addrspace(1)* %out, i8 addrspace(3)* %lds, i32 addrspace(1)* %add_use) #0 {
   %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
   %idx.0 = add nsw i32 %tid.x, 65535
@@ -68,9 +68,9 @@
 ; pointer can be used with an offset into the second one.
 
 ; SI-LABEL: {{^}}load_shl_base_lds_2:
-; SI: V_LSHLREV_B32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
-; SI-NEXT: DS_READ2ST64_B32 {{v\[[0-9]+:[0-9]+\]}}, [[PTR]] offset0:1 offset1:9 [M0]
-; SI: S_ENDPGM
+; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
+; SI-NEXT: ds_read2st64_b32 {{v\[[0-9]+:[0-9]+\]}}, [[PTR]] offset0:1 offset1:9 [M0]
+; SI: s_endpgm
 define void @load_shl_base_lds_2(float addrspace(1)* %out) #0 {
   %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
   %idx.0 = add nsw i32 %tid.x, 64
@@ -84,9 +84,9 @@
 }
 
 ; SI-LABEL: {{^}}store_shl_base_lds_0:
-; SI: V_LSHLREV_B32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
-; SI: DS_WRITE_B32 [[PTR]], {{v[0-9]+}} offset:8 [M0]
-; SI: S_ENDPGM
+; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
+; SI: ds_write_b32 [[PTR]], {{v[0-9]+}} offset:8 [M0]
+; SI: s_endpgm
 define void @store_shl_base_lds_0(float addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
   %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
   %idx.0 = add nsw i32 %tid.x, 2
@@ -114,9 +114,9 @@
 
 
 ; SI-LABEL: {{^}}atomic_cmpxchg_shl_base_lds_0:
-; SI: V_LSHLREV_B32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
-; SI: DS_CMPST_RTN_B32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}}, {{v[0-9]+}} offset:8
-; SI: S_ENDPGM
+; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
+; SI: ds_cmpst_rtn_b32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}}, {{v[0-9]+}} offset:8
+; SI: s_endpgm
 define void @atomic_cmpxchg_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use, i32 %swap) #0 {
   %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
   %idx.0 = add nsw i32 %tid.x, 2
@@ -129,9 +129,9 @@
 }
 
 ; SI-LABEL: {{^}}atomic_swap_shl_base_lds_0:
-; SI: V_LSHLREV_B32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
-; SI: DS_WRXCHG_RTN_B32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}} offset:8
-; SI: S_ENDPGM
+; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
+; SI: ds_wrxchg_rtn_b32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}} offset:8
+; SI: s_endpgm
 define void @atomic_swap_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
   %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
   %idx.0 = add nsw i32 %tid.x, 2
@@ -143,9 +143,9 @@
 }
 
 ; SI-LABEL: {{^}}atomic_add_shl_base_lds_0:
-; SI: V_LSHLREV_B32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
-; SI: DS_ADD_RTN_U32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}} offset:8
-; SI: S_ENDPGM
+; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
+; SI: ds_add_rtn_u32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}} offset:8
+; SI: s_endpgm
 define void @atomic_add_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
   %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
   %idx.0 = add nsw i32 %tid.x, 2
@@ -157,9 +157,9 @@
 }
 
 ; SI-LABEL: {{^}}atomic_sub_shl_base_lds_0:
-; SI: V_LSHLREV_B32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
-; SI: DS_SUB_RTN_U32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}} offset:8
-; SI: S_ENDPGM
+; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
+; SI: ds_sub_rtn_u32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}} offset:8
+; SI: s_endpgm
 define void @atomic_sub_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
   %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
   %idx.0 = add nsw i32 %tid.x, 2
@@ -171,9 +171,9 @@
 }
 
 ; SI-LABEL: {{^}}atomic_and_shl_base_lds_0:
-; SI: V_LSHLREV_B32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
-; SI: DS_AND_RTN_B32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}} offset:8
-; SI: S_ENDPGM
+; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
+; SI: ds_and_rtn_b32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}} offset:8
+; SI: s_endpgm
 define void @atomic_and_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
   %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
   %idx.0 = add nsw i32 %tid.x, 2
@@ -185,9 +185,9 @@
 }
 
 ; SI-LABEL: {{^}}atomic_or_shl_base_lds_0:
-; SI: V_LSHLREV_B32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
-; SI: DS_OR_RTN_B32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}} offset:8
-; SI: S_ENDPGM
+; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
+; SI: ds_or_rtn_b32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}} offset:8
+; SI: s_endpgm
 define void @atomic_or_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
   %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
   %idx.0 = add nsw i32 %tid.x, 2
@@ -199,9 +199,9 @@
 }
 
 ; SI-LABEL: {{^}}atomic_xor_shl_base_lds_0:
-; SI: V_LSHLREV_B32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
-; SI: DS_XOR_RTN_B32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}} offset:8
-; SI: S_ENDPGM
+; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
+; SI: ds_xor_rtn_b32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}} offset:8
+; SI: s_endpgm
 define void @atomic_xor_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
   %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
   %idx.0 = add nsw i32 %tid.x, 2
@@ -223,9 +223,9 @@
 ; }
 
 ; SI-LABEL: {{^}}atomic_min_shl_base_lds_0:
-; SI: V_LSHLREV_B32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
-; SI: DS_MIN_RTN_I32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}} offset:8
-; SI: S_ENDPGM
+; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
+; SI: ds_min_rtn_i32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}} offset:8
+; SI: s_endpgm
 define void @atomic_min_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
   %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
   %idx.0 = add nsw i32 %tid.x, 2
@@ -237,9 +237,9 @@
 }
 
 ; SI-LABEL: {{^}}atomic_max_shl_base_lds_0:
-; SI: V_LSHLREV_B32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
-; SI: DS_MAX_RTN_I32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}} offset:8
-; SI: S_ENDPGM
+; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
+; SI: ds_max_rtn_i32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}} offset:8
+; SI: s_endpgm
 define void @atomic_max_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
   %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
   %idx.0 = add nsw i32 %tid.x, 2
@@ -251,9 +251,9 @@
 }
 
 ; SI-LABEL: {{^}}atomic_umin_shl_base_lds_0:
-; SI: V_LSHLREV_B32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
-; SI: DS_MIN_RTN_U32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}} offset:8
-; SI: S_ENDPGM
+; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
+; SI: ds_min_rtn_u32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}} offset:8
+; SI: s_endpgm
 define void @atomic_umin_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
   %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
   %idx.0 = add nsw i32 %tid.x, 2
@@ -265,9 +265,9 @@
 }
 
 ; SI-LABEL: {{^}}atomic_umax_shl_base_lds_0:
-; SI: V_LSHLREV_B32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
-; SI: DS_MAX_RTN_U32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}} offset:8
-; SI: S_ENDPGM
+; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
+; SI: ds_max_rtn_u32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}} offset:8
+; SI: s_endpgm
 define void @atomic_umax_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
   %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
   %idx.0 = add nsw i32 %tid.x, 2