[AArch64] Prefer UZP for concat_vector of illegal truncs.
Follow-up to r232459: instead of lowering the concat through two intermediate (illegal) truncates, emit a single UZP1 that picks out the low halves of both inputs, followed by one legal truncate. This turns the xtn + xtn2 + xtn sequence into uzp1 + xtn.
llvm-svn: 232871
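
Sketch of the equivalent form the combine targets, written as a standalone IR
function (illustrative only, not part of the patch; the function name is made
up, and the even-lane shuffle assumes little-endian, where the low 32-bit half
of each i64 lane sits in the even i32 lane):

  define <4 x i16> @concat_truncate_as_uzp(<2 x i64> %a, <2 x i64> %b) {
  entry:
    ; View each <2 x i64> input as <4 x i32> and pick the even (low-half)
    ; lanes of both inputs with one shuffle -- this is uzp1.4s.
    %a32 = bitcast <2 x i64> %a to <4 x i32>
    %b32 = bitcast <2 x i64> %b to <4 x i32>
    %uzp = shufflevector <4 x i32> %a32, <4 x i32> %b32, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
    ; A single, legal truncate finishes the narrowing -- this is xtn.4h.
    %r = trunc <4 x i32> %uzp to <4 x i16>
    ret <4 x i16> %r
  }

The new test added below covers the case where the intermediate type is
already legal (v4i32 -> v4i16), which keeps the existing xtn/xtn2 lowering.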
diff --git a/llvm/test/CodeGen/AArch64/concat_vector-truncate-combine.ll b/llvm/test/CodeGen/AArch64/concat_vector-truncate-combine.ll
index 468aa42..c510e27 100644
--- a/llvm/test/CodeGen/AArch64/concat_vector-truncate-combine.ll
+++ b/llvm/test/CodeGen/AArch64/concat_vector-truncate-combine.ll
@@ -5,9 +5,8 @@
define <4 x i16> @test_concat_truncate_v2i64_to_v4i16(<2 x i64> %a, <2 x i64> %b) #0 {
entry:
; CHECK-LABEL: test_concat_truncate_v2i64_to_v4i16:
-; CHECK-NEXT: xtn.2s v0, v0
-; CHECK-NEXT: xtn2.4s v0, v1
-; CHECK-NEXT: xtn.4h v0, v0
+; CHECK-NEXT: uzp1.4s v0, v0, v1
+; CHECK-NEXT: xtn.4h v0, v0
; CHECK-NEXT: ret
%at = trunc <2 x i64> %a to <2 x i16>
%bt = trunc <2 x i64> %b to <2 x i16>
@@ -18,9 +17,8 @@
define <8 x i8> @test_concat_truncate_v4i32_to_v8i8(<4 x i32> %a, <4 x i32> %b) #0 {
entry:
; CHECK-LABEL: test_concat_truncate_v4i32_to_v8i8:
-; CHECK-NEXT: xtn.4h v0, v0
-; CHECK-NEXT: xtn2.8h v0, v1
-; CHECK-NEXT: xtn.8b v0, v0
+; CHECK-NEXT: uzp1.8h v0, v0, v1
+; CHECK-NEXT: xtn.8b v0, v0
; CHECK-NEXT: ret
%at = trunc <4 x i32> %a to <4 x i8>
%bt = trunc <4 x i32> %b to <4 x i8>
@@ -28,4 +26,16 @@
ret <8 x i8> %shuffle
}
+define <8 x i16> @test_concat_truncate_v4i32_to_v8i16(<4 x i32> %a, <4 x i32> %b) #0 {
+entry:
+; CHECK-LABEL: test_concat_truncate_v4i32_to_v8i16:
+; CHECK-NEXT: xtn.4h v0, v0
+; CHECK-NEXT: xtn2.8h v0, v1
+; CHECK-NEXT: ret
+ %at = trunc <4 x i32> %a to <4 x i16>
+ %bt = trunc <4 x i32> %b to <4 x i16>
+ %shuffle = shufflevector <4 x i16> %at, <4 x i16> %bt, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x i16> %shuffle
+}
+
attributes #0 = { nounwind }