AMDGPU: Improve load/store of illegal types.
There was previously a combine that handled only the simple copy case.
Split this into separate combines for loads and stores.
We may want to change how this handles some of the vector
extloads, since this can result in large code size increases.
llvm-svn: 274394
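
For context, here is a minimal sketch (plain LLVM IR, hypothetical
function name) of the kind of illegal-type access the split combines
target: an i32 load zero-extended to i64, which the GCN checks below
expect to lower to a zeroed high half plus a single 64-bit store
(v_mov_b32 0 + store_dwordx2):

    define void @zextload_sketch(i64 addrspace(1)* %out, i32 addrspace(2)* %in) {
      ; i64 is illegal and handled as two 32-bit halves; the zext
      ; supplies a known-zero high half, so no second load is needed.
      %ld = load i32, i32 addrspace(2)* %in
      %ext = zext i32 %ld to i64
      store i64 %ext, i64 addrspace(1)* %out
      ret void
    }
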
diff --git a/llvm/test/CodeGen/AMDGPU/load-constant-i32.ll b/llvm/test/CodeGen/AMDGPU/load-constant-i32.ll
index 5f6ec65..40c29be 100644
--- a/llvm/test/CodeGen/AMDGPU/load-constant-i32.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-constant-i32.ll
@@ -78,8 +78,9 @@
; GCN-DAG: v_mov_b32_e32 v[[SHI:[0-9]+]], 0{{$}}
; GCN: store_dwordx2
-; EG: MEM_RAT
-; EG: MEM_RAT
+; EG: MEM_RAT_CACHELESS STORE_RAW T{{[0-9]+}}.XY
+; EG: CF_END
+; EG: VTX_READ_32
define void @constant_zextload_i32_to_i64(i64 addrspace(1)* %out, i32 addrspace(2)* %in) #0 {
%ld = load i32, i32 addrspace(2)* %in
%ext = zext i32 %ld to i64
@@ -92,9 +93,10 @@
; GCN: s_ashr_i32 s[[HI:[0-9]+]], s[[SLO]], 31
; GCN: store_dwordx2
-; EG: MEM_RAT
-; EG: MEM_RAT
-; EG: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, T{{[0-9]\.[XYZW]}}, literal.x
+; EG: MEM_RAT_CACHELESS STORE_RAW T{{[0-9]+}}.XY
+; EG: CF_END
+; EG: VTX_READ_32
+; EG: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, T{{[0-9]\.[XYZW]}}, literal.
; EG: 31
define void @constant_sextload_i32_to_i64(i64 addrspace(1)* %out, i32 addrspace(2)* %in) #0 {
%ld = load i32, i32 addrspace(2)* %in
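
For reference, the EG check prefix in this file corresponds to the
R600/Evergreen backend. Assuming RUN lines of the usual form for these
tests (not shown in this excerpt), the EG checks are exercised with an
invocation along the lines of:

    llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG %s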