AMDGPU/R600: Implement allowsMisalignedMemoryAccesses
This avoids some test regressions in a future commit
when unaligned operations that have custom lowering
are expanded.
llvm-svn: 261570
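
The diff below only updates the test expectations; the C++ change to
R600's lowering code is not shown in this excerpt. For context, here
is a minimal sketch of the kind of override this commit adds, assuming
the TargetLowering::allowsMisalignedMemoryAccesses signature of this
era (EVT, address space, alignment, bool *IsFast out-parameter). The
body and thresholds below are illustrative assumptions, not the
verbatim implementation:

// Sketch only: an R600TargetLowering override of the generic
// TargetLowering::allowsMisalignedMemoryAccesses hook. The exact
// conditions in the real change may differ.
bool R600TargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
                                                        unsigned AddrSpace,
                                                        unsigned Align,
                                                        bool *IsFast) const {
  if (IsFast)
    *IsFast = false;

  // Reject non-simple types and anything narrower than a dword;
  // those still go through the generic expansion.
  if (!VT.isSimple() || VT == MVT::Other || VT.bitsLT(MVT::i32))
    return false;

  if (IsFast)
    *IsFast = true;

  // Allow wider-than-dword accesses as long as they keep dword
  // alignment.
  return VT.bitsGT(MVT::i32) && Align % 4 == 0;
}

With unaligned wide accesses reported as legal, the i128 store in
store.ll is no longer split into four dword stores, which is why the
EG and CM check lines below collapse to a single STORE_RAW /
STORE_DWORD.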
diff --git a/llvm/test/CodeGen/AMDGPU/store.ll b/llvm/test/CodeGen/AMDGPU/store.ll
index d22f43f..e403409 100644
--- a/llvm/test/CodeGen/AMDGPU/store.ll
+++ b/llvm/test/CodeGen/AMDGPU/store.ll
@@ -358,20 +358,13 @@
ret void
}
-attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
; When i128 was a legal type this program generated cannot select errors:
; FUNC-LABEL: {{^}}"i128-const-store":
-; FIXME: We should be able to do this with one store instruction
-; EG: STORE_RAW
-; EG: STORE_RAW
-; EG: STORE_RAW
-; EG: STORE_RAW
-; CM: STORE_DWORD
-; CM: STORE_DWORD
-; CM: STORE_DWORD
-; CM: STORE_DWORD
+; EG: MEM_RAT_CACHELESS STORE_RAW T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 1
+
+; CM: MEM_RAT_CACHELESS STORE_DWORD T{{[0-9]+}}, T{{[0-9]+}}.X
+
; SI: buffer_store_dwordx4
define void @i128-const-store(i32 addrspace(1)* %out) {
entry:
@@ -384,3 +377,5 @@
store i32 2, i32 addrspace(1)* %arrayidx6, align 4
ret void
}
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }