[ARM] MVE VMULL patterns
This adds MVE vmull patterns, which are conceptually the same as
mul(vmovl, vmovl), and so the tablegen patterns follow the same
structure.
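As a rough sketch (mirroring the sext_0246 test updated below; the exact
value names are illustrative), the i16 flavour matches IR of the form:

  ; Multiply of the sign-extended even (bottom) i16 lanes: previously
  ; selected as vmovlb.s16 + vmovlb.s16 + vmul.i32, now a single vmullb.s16.
  define arm_aapcs_vfpcc <4 x i32> @sext_0246(<8 x i16> %src1, <8 x i16> %src2) {
  entry:
    %shuf1 = shufflevector <8 x i16> %src1, <8 x i16> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
    %shuf2 = shufflevector <8 x i16> %src2, <8 x i16> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
    %ext1 = sext <4 x i16> %shuf1 to <4 x i32>
    %ext2 = sext <4 x i16> %shuf2 to <4 x i32>
    %mul = mul <4 x i32> %ext1, %ext2
    ret <4 x i32> %mul
  }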
For i8 and i16 this is simple enough, but for the i32 version the multiply
(in 64 bits) is illegal, meaning we need to catch the pattern earlier in a
DAG fold. Because bitcasts are involved in the zext versions, the patterns
are a little different in little and big endian; I have only added little
endian support in this patch.
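For example, the bottom-lane i32 case (a sketch along the lines of the
sext_02 test in mve-vmull.ll; value names are illustrative) multiplies on
an illegal <2 x i64> type, and the new fold turns it into a single
vmullb.s32:

  ; The <2 x i64> mul is not legal for MVE, so this pattern is caught
  ; earlier in a DAG fold rather than in a tablegen pattern.
  define arm_aapcs_vfpcc <2 x i64> @sext_02(<4 x i32> %src1, <4 x i32> %src2) {
  entry:
    %shuf1 = shufflevector <4 x i32> %src1, <4 x i32> undef, <2 x i32> <i32 0, i32 2>
    %shuf2 = shufflevector <4 x i32> %src2, <4 x i32> undef, <2 x i32> <i32 0, i32 2>
    %ext1 = sext <2 x i32> %shuf1 to <2 x i64>
    %ext2 = sext <2 x i32> %shuf2 to <2 x i64>
    %mul = mul <2 x i64> %ext1, %ext2
    ret <2 x i64> %mul
  }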
Differential Revision: https://reviews.llvm.org/D76740
diff --git a/llvm/test/CodeGen/Thumb2/mve-vecreduce-mla.ll b/llvm/test/CodeGen/Thumb2/mve-vecreduce-mla.ll
index 690e2c3..dea70df 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vecreduce-mla.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vecreduce-mla.ll
@@ -41,12 +41,13 @@
define arm_aapcs_vfpcc i64 @add_v2i32_v2i64_zext(<2 x i32> %x, <2 x i32> %y) {
; CHECK-LABEL: add_v2i32_v2i64_zext:
; CHECK: @ %bb.0: @ %entry
-; CHECK-NEXT: vmov r0, s4
-; CHECK-NEXT: vmov r1, s0
-; CHECK-NEXT: vmov r2, s6
-; CHECK-NEXT: vmov r3, s2
-; CHECK-NEXT: umull r0, r1, r1, r0
-; CHECK-NEXT: umlal r0, r1, r3, r2
+; CHECK-NEXT: vmullb.u32 q2, q0, q1
+; CHECK-NEXT: vmov r0, s10
+; CHECK-NEXT: vmov r3, s8
+; CHECK-NEXT: vmov r1, s11
+; CHECK-NEXT: vmov r2, s9
+; CHECK-NEXT: adds r0, r0, r3
+; CHECK-NEXT: adcs r1, r2
; CHECK-NEXT: bx lr
entry:
%xx = zext <2 x i32> %x to <2 x i64>
@@ -59,12 +60,13 @@
define arm_aapcs_vfpcc i64 @add_v2i32_v2i64_sext(<2 x i32> %x, <2 x i32> %y) {
; CHECK-LABEL: add_v2i32_v2i64_sext:
; CHECK: @ %bb.0: @ %entry
-; CHECK-NEXT: vmov r0, s4
-; CHECK-NEXT: vmov r1, s0
-; CHECK-NEXT: vmov r2, s6
-; CHECK-NEXT: vmov r3, s2
-; CHECK-NEXT: smull r0, r1, r1, r0
-; CHECK-NEXT: smlal r0, r1, r3, r2
+; CHECK-NEXT: vmullb.s32 q2, q0, q1
+; CHECK-NEXT: vmov r0, s10
+; CHECK-NEXT: vmov r3, s8
+; CHECK-NEXT: vmov r1, s11
+; CHECK-NEXT: vmov r2, s9
+; CHECK-NEXT: adds r0, r0, r3
+; CHECK-NEXT: adcs r1, r2
; CHECK-NEXT: bx lr
entry:
%xx = sext <2 x i32> %x to <2 x i64>
@@ -306,10 +308,8 @@
; CHECK-NEXT: vmov.16 q3[6], r0
; CHECK-NEXT: vmov.u8 r0, q0[15]
; CHECK-NEXT: vmov.16 q3[7], r0
-; CHECK-NEXT: vmovlb.u8 q2, q2
-; CHECK-NEXT: vmovlb.u8 q3, q3
; CHECK-NEXT: vmov.u8 r0, q1[0]
-; CHECK-NEXT: vmul.i16 q2, q3, q2
+; CHECK-NEXT: vmullb.u8 q2, q3, q2
; CHECK-NEXT: vmov.16 q3[0], r0
; CHECK-NEXT: vmov.u8 r0, q1[1]
; CHECK-NEXT: vmov.16 q3[1], r0
@@ -326,24 +326,22 @@
; CHECK-NEXT: vmov.u8 r0, q1[7]
; CHECK-NEXT: vmov.16 q3[7], r0
; CHECK-NEXT: vmov.u8 r0, q0[0]
-; CHECK-NEXT: vmovlb.u8 q1, q3
-; CHECK-NEXT: vmov.16 q3[0], r0
+; CHECK-NEXT: vmov.16 q1[0], r0
; CHECK-NEXT: vmov.u8 r0, q0[1]
-; CHECK-NEXT: vmov.16 q3[1], r0
+; CHECK-NEXT: vmov.16 q1[1], r0
; CHECK-NEXT: vmov.u8 r0, q0[2]
-; CHECK-NEXT: vmov.16 q3[2], r0
+; CHECK-NEXT: vmov.16 q1[2], r0
; CHECK-NEXT: vmov.u8 r0, q0[3]
-; CHECK-NEXT: vmov.16 q3[3], r0
+; CHECK-NEXT: vmov.16 q1[3], r0
; CHECK-NEXT: vmov.u8 r0, q0[4]
-; CHECK-NEXT: vmov.16 q3[4], r0
+; CHECK-NEXT: vmov.16 q1[4], r0
; CHECK-NEXT: vmov.u8 r0, q0[5]
-; CHECK-NEXT: vmov.16 q3[5], r0
+; CHECK-NEXT: vmov.16 q1[5], r0
; CHECK-NEXT: vmov.u8 r0, q0[6]
-; CHECK-NEXT: vmov.16 q3[6], r0
+; CHECK-NEXT: vmov.16 q1[6], r0
; CHECK-NEXT: vmov.u8 r0, q0[7]
-; CHECK-NEXT: vmov.16 q3[7], r0
-; CHECK-NEXT: vmovlb.u8 q0, q3
-; CHECK-NEXT: vmul.i16 q0, q0, q1
+; CHECK-NEXT: vmov.16 q1[7], r0
+; CHECK-NEXT: vmullb.u8 q0, q1, q3
; CHECK-NEXT: vadd.i16 q0, q0, q2
; CHECK-NEXT: vaddv.u16 r0, q0
; CHECK-NEXT: uxth r0, r0
@@ -391,10 +389,8 @@
; CHECK-NEXT: vmov.16 q3[6], r0
; CHECK-NEXT: vmov.u8 r0, q0[15]
; CHECK-NEXT: vmov.16 q3[7], r0
-; CHECK-NEXT: vmovlb.s8 q2, q2
-; CHECK-NEXT: vmovlb.s8 q3, q3
; CHECK-NEXT: vmov.u8 r0, q1[0]
-; CHECK-NEXT: vmul.i16 q2, q3, q2
+; CHECK-NEXT: vmullb.s8 q2, q3, q2
; CHECK-NEXT: vmov.16 q3[0], r0
; CHECK-NEXT: vmov.u8 r0, q1[1]
; CHECK-NEXT: vmov.16 q3[1], r0
@@ -411,24 +407,22 @@
; CHECK-NEXT: vmov.u8 r0, q1[7]
; CHECK-NEXT: vmov.16 q3[7], r0
; CHECK-NEXT: vmov.u8 r0, q0[0]
-; CHECK-NEXT: vmovlb.s8 q1, q3
-; CHECK-NEXT: vmov.16 q3[0], r0
+; CHECK-NEXT: vmov.16 q1[0], r0
; CHECK-NEXT: vmov.u8 r0, q0[1]
-; CHECK-NEXT: vmov.16 q3[1], r0
+; CHECK-NEXT: vmov.16 q1[1], r0
; CHECK-NEXT: vmov.u8 r0, q0[2]
-; CHECK-NEXT: vmov.16 q3[2], r0
+; CHECK-NEXT: vmov.16 q1[2], r0
; CHECK-NEXT: vmov.u8 r0, q0[3]
-; CHECK-NEXT: vmov.16 q3[3], r0
+; CHECK-NEXT: vmov.16 q1[3], r0
; CHECK-NEXT: vmov.u8 r0, q0[4]
-; CHECK-NEXT: vmov.16 q3[4], r0
+; CHECK-NEXT: vmov.16 q1[4], r0
; CHECK-NEXT: vmov.u8 r0, q0[5]
-; CHECK-NEXT: vmov.16 q3[5], r0
+; CHECK-NEXT: vmov.16 q1[5], r0
; CHECK-NEXT: vmov.u8 r0, q0[6]
-; CHECK-NEXT: vmov.16 q3[6], r0
+; CHECK-NEXT: vmov.16 q1[6], r0
; CHECK-NEXT: vmov.u8 r0, q0[7]
-; CHECK-NEXT: vmov.16 q3[7], r0
-; CHECK-NEXT: vmovlb.s8 q0, q3
-; CHECK-NEXT: vmul.i16 q0, q0, q1
+; CHECK-NEXT: vmov.16 q1[7], r0
+; CHECK-NEXT: vmullb.s8 q0, q1, q3
; CHECK-NEXT: vadd.i16 q0, q0, q2
; CHECK-NEXT: vaddv.u16 r0, q0
; CHECK-NEXT: sxth r0, r0
@@ -444,9 +438,7 @@
define arm_aapcs_vfpcc zeroext i16 @add_v8i8_v8i16_zext(<8 x i8> %x, <8 x i8> %y) {
; CHECK-LABEL: add_v8i8_v8i16_zext:
; CHECK: @ %bb.0: @ %entry
-; CHECK-NEXT: vmovlb.u8 q1, q1
-; CHECK-NEXT: vmovlb.u8 q0, q0
-; CHECK-NEXT: vmul.i16 q0, q0, q1
+; CHECK-NEXT: vmullb.u8 q0, q0, q1
; CHECK-NEXT: vaddv.u16 r0, q0
; CHECK-NEXT: uxth r0, r0
; CHECK-NEXT: bx lr
@@ -461,9 +453,7 @@
define arm_aapcs_vfpcc signext i16 @add_v8i8_v8i16_sext(<8 x i8> %x, <8 x i8> %y) {
; CHECK-LABEL: add_v8i8_v8i16_sext:
; CHECK: @ %bb.0: @ %entry
-; CHECK-NEXT: vmovlb.s8 q1, q1
-; CHECK-NEXT: vmovlb.s8 q0, q0
-; CHECK-NEXT: vmul.i16 q0, q0, q1
+; CHECK-NEXT: vmullb.s8 q0, q0, q1
; CHECK-NEXT: vaddv.u16 r0, q0
; CHECK-NEXT: sxth r0, r0
; CHECK-NEXT: bx lr
@@ -990,14 +980,15 @@
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: .save {r7, lr}
; CHECK-NEXT: push {r7, lr}
-; CHECK-NEXT: vmov r2, s4
-; CHECK-NEXT: vmov r3, s0
-; CHECK-NEXT: vmov r12, s6
-; CHECK-NEXT: umull r2, lr, r3, r2
-; CHECK-NEXT: vmov r3, s2
-; CHECK-NEXT: umlal r2, lr, r3, r12
+; CHECK-NEXT: vmullb.u32 q2, q0, q1
+; CHECK-NEXT: vmov r2, s10
+; CHECK-NEXT: vmov r3, s8
+; CHECK-NEXT: vmov r12, s11
+; CHECK-NEXT: vmov lr, s9
+; CHECK-NEXT: adds r2, r2, r3
+; CHECK-NEXT: adc.w r3, lr, r12
; CHECK-NEXT: adds r0, r0, r2
-; CHECK-NEXT: adc.w r1, r1, lr
+; CHECK-NEXT: adcs r1, r3
; CHECK-NEXT: pop {r7, pc}
entry:
%xx = zext <2 x i32> %x to <2 x i64>
@@ -1013,14 +1004,15 @@
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: .save {r7, lr}
; CHECK-NEXT: push {r7, lr}
-; CHECK-NEXT: vmov r2, s4
-; CHECK-NEXT: vmov r3, s0
-; CHECK-NEXT: vmov r12, s6
-; CHECK-NEXT: smull r2, lr, r3, r2
-; CHECK-NEXT: vmov r3, s2
-; CHECK-NEXT: smlal r2, lr, r3, r12
+; CHECK-NEXT: vmullb.s32 q2, q0, q1
+; CHECK-NEXT: vmov r2, s10
+; CHECK-NEXT: vmov r3, s8
+; CHECK-NEXT: vmov r12, s11
+; CHECK-NEXT: vmov lr, s9
+; CHECK-NEXT: adds r2, r2, r3
+; CHECK-NEXT: adc.w r3, lr, r12
; CHECK-NEXT: adds r0, r0, r2
-; CHECK-NEXT: adc.w r1, r1, lr
+; CHECK-NEXT: adcs r1, r3
; CHECK-NEXT: pop {r7, pc}
entry:
%xx = sext <2 x i32> %x to <2 x i64>
@@ -1284,10 +1276,8 @@
; CHECK-NEXT: vmov.16 q3[6], r1
; CHECK-NEXT: vmov.u8 r1, q0[15]
; CHECK-NEXT: vmov.16 q3[7], r1
-; CHECK-NEXT: vmovlb.u8 q2, q2
-; CHECK-NEXT: vmovlb.u8 q3, q3
; CHECK-NEXT: vmov.u8 r1, q1[0]
-; CHECK-NEXT: vmul.i16 q2, q3, q2
+; CHECK-NEXT: vmullb.u8 q2, q3, q2
; CHECK-NEXT: vmov.16 q3[0], r1
; CHECK-NEXT: vmov.u8 r1, q1[1]
; CHECK-NEXT: vmov.16 q3[1], r1
@@ -1304,24 +1294,22 @@
; CHECK-NEXT: vmov.u8 r1, q1[7]
; CHECK-NEXT: vmov.16 q3[7], r1
; CHECK-NEXT: vmov.u8 r1, q0[0]
-; CHECK-NEXT: vmovlb.u8 q1, q3
-; CHECK-NEXT: vmov.16 q3[0], r1
+; CHECK-NEXT: vmov.16 q1[0], r1
; CHECK-NEXT: vmov.u8 r1, q0[1]
-; CHECK-NEXT: vmov.16 q3[1], r1
+; CHECK-NEXT: vmov.16 q1[1], r1
; CHECK-NEXT: vmov.u8 r1, q0[2]
-; CHECK-NEXT: vmov.16 q3[2], r1
+; CHECK-NEXT: vmov.16 q1[2], r1
; CHECK-NEXT: vmov.u8 r1, q0[3]
-; CHECK-NEXT: vmov.16 q3[3], r1
+; CHECK-NEXT: vmov.16 q1[3], r1
; CHECK-NEXT: vmov.u8 r1, q0[4]
-; CHECK-NEXT: vmov.16 q3[4], r1
+; CHECK-NEXT: vmov.16 q1[4], r1
; CHECK-NEXT: vmov.u8 r1, q0[5]
-; CHECK-NEXT: vmov.16 q3[5], r1
+; CHECK-NEXT: vmov.16 q1[5], r1
; CHECK-NEXT: vmov.u8 r1, q0[6]
-; CHECK-NEXT: vmov.16 q3[6], r1
+; CHECK-NEXT: vmov.16 q1[6], r1
; CHECK-NEXT: vmov.u8 r1, q0[7]
-; CHECK-NEXT: vmov.16 q3[7], r1
-; CHECK-NEXT: vmovlb.u8 q0, q3
-; CHECK-NEXT: vmul.i16 q0, q0, q1
+; CHECK-NEXT: vmov.16 q1[7], r1
+; CHECK-NEXT: vmullb.u8 q0, q1, q3
; CHECK-NEXT: vadd.i16 q0, q0, q2
; CHECK-NEXT: vaddva.u16 r0, q0
; CHECK-NEXT: uxth r0, r0
@@ -1370,10 +1358,8 @@
; CHECK-NEXT: vmov.16 q3[6], r1
; CHECK-NEXT: vmov.u8 r1, q0[15]
; CHECK-NEXT: vmov.16 q3[7], r1
-; CHECK-NEXT: vmovlb.s8 q2, q2
-; CHECK-NEXT: vmovlb.s8 q3, q3
; CHECK-NEXT: vmov.u8 r1, q1[0]
-; CHECK-NEXT: vmul.i16 q2, q3, q2
+; CHECK-NEXT: vmullb.s8 q2, q3, q2
; CHECK-NEXT: vmov.16 q3[0], r1
; CHECK-NEXT: vmov.u8 r1, q1[1]
; CHECK-NEXT: vmov.16 q3[1], r1
@@ -1390,24 +1376,22 @@
; CHECK-NEXT: vmov.u8 r1, q1[7]
; CHECK-NEXT: vmov.16 q3[7], r1
; CHECK-NEXT: vmov.u8 r1, q0[0]
-; CHECK-NEXT: vmovlb.s8 q1, q3
-; CHECK-NEXT: vmov.16 q3[0], r1
+; CHECK-NEXT: vmov.16 q1[0], r1
; CHECK-NEXT: vmov.u8 r1, q0[1]
-; CHECK-NEXT: vmov.16 q3[1], r1
+; CHECK-NEXT: vmov.16 q1[1], r1
; CHECK-NEXT: vmov.u8 r1, q0[2]
-; CHECK-NEXT: vmov.16 q3[2], r1
+; CHECK-NEXT: vmov.16 q1[2], r1
; CHECK-NEXT: vmov.u8 r1, q0[3]
-; CHECK-NEXT: vmov.16 q3[3], r1
+; CHECK-NEXT: vmov.16 q1[3], r1
; CHECK-NEXT: vmov.u8 r1, q0[4]
-; CHECK-NEXT: vmov.16 q3[4], r1
+; CHECK-NEXT: vmov.16 q1[4], r1
; CHECK-NEXT: vmov.u8 r1, q0[5]
-; CHECK-NEXT: vmov.16 q3[5], r1
+; CHECK-NEXT: vmov.16 q1[5], r1
; CHECK-NEXT: vmov.u8 r1, q0[6]
-; CHECK-NEXT: vmov.16 q3[6], r1
+; CHECK-NEXT: vmov.16 q1[6], r1
; CHECK-NEXT: vmov.u8 r1, q0[7]
-; CHECK-NEXT: vmov.16 q3[7], r1
-; CHECK-NEXT: vmovlb.s8 q0, q3
-; CHECK-NEXT: vmul.i16 q0, q0, q1
+; CHECK-NEXT: vmov.16 q1[7], r1
+; CHECK-NEXT: vmullb.s8 q0, q1, q3
; CHECK-NEXT: vadd.i16 q0, q0, q2
; CHECK-NEXT: vaddva.u16 r0, q0
; CHECK-NEXT: sxth r0, r0
@@ -1424,9 +1408,7 @@
define arm_aapcs_vfpcc zeroext i16 @add_v8i8_v8i16_acc_zext(<8 x i8> %x, <8 x i8> %y, i16 %a) {
; CHECK-LABEL: add_v8i8_v8i16_acc_zext:
; CHECK: @ %bb.0: @ %entry
-; CHECK-NEXT: vmovlb.u8 q1, q1
-; CHECK-NEXT: vmovlb.u8 q0, q0
-; CHECK-NEXT: vmul.i16 q0, q0, q1
+; CHECK-NEXT: vmullb.u8 q0, q0, q1
; CHECK-NEXT: vaddva.u16 r0, q0
; CHECK-NEXT: uxth r0, r0
; CHECK-NEXT: bx lr
@@ -1442,9 +1424,7 @@
define arm_aapcs_vfpcc signext i16 @add_v8i8_v8i16_acc_sext(<8 x i8> %x, <8 x i8> %y, i16 %a) {
; CHECK-LABEL: add_v8i8_v8i16_acc_sext:
; CHECK: @ %bb.0: @ %entry
-; CHECK-NEXT: vmovlb.s8 q1, q1
-; CHECK-NEXT: vmovlb.s8 q0, q0
-; CHECK-NEXT: vmul.i16 q0, q0, q1
+; CHECK-NEXT: vmullb.s8 q0, q0, q1
; CHECK-NEXT: vaddva.u16 r0, q0
; CHECK-NEXT: sxth r0, r0
; CHECK-NEXT: bx lr
diff --git a/llvm/test/CodeGen/Thumb2/mve-vmull-loop.ll b/llvm/test/CodeGen/Thumb2/mve-vmull-loop.ll
index c32abb2..927a912 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vmull-loop.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vmull-loop.ll
@@ -6,38 +6,33 @@
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: .save {r5, lr}
; CHECK-NEXT: push {r5, lr}
-; CHECK-NEXT: .vsave {d8, d9}
-; CHECK-NEXT: vpush {d8, d9}
; CHECK-NEXT: cmp r3, #1
-; CHECK-NEXT: blt .LBB0_2
+; CHECK-NEXT: it lt
+; CHECK-NEXT: poplt {r5, pc}
; CHECK-NEXT: .LBB0_1: @ %vector.body
; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: vldrw.u32 q2, [r0], #16
-; CHECK-NEXT: vldrw.u32 q3, [r1], #16
+; CHECK-NEXT: vldrw.u32 q1, [r0], #16
+; CHECK-NEXT: vldrw.u32 q2, [r1], #16
; CHECK-NEXT: subs r3, #4
-; CHECK-NEXT: vrev64.32 q1, q2
-; CHECK-NEXT: vrev64.32 q4, q3
-; CHECK-NEXT: vmov r12, s4
-; CHECK-NEXT: vmov lr, s16
-; CHECK-NEXT: smull r12, r5, lr, r12
+; CHECK-NEXT: vmullt.s32 q3, q2, q1
+; CHECK-NEXT: vmov r5, s13
+; CHECK-NEXT: vmov r12, s12
; CHECK-NEXT: lsrl r12, r5, #31
; CHECK-NEXT: vmov.32 q0[0], r12
-; CHECK-NEXT: vmov r12, s6
+; CHECK-NEXT: vmov r12, s14
; CHECK-NEXT: vmov.32 q0[1], r5
-; CHECK-NEXT: vmov r5, s18
-; CHECK-NEXT: smull r12, r5, r5, r12
+; CHECK-NEXT: vmov r5, s15
; CHECK-NEXT: lsrl r12, r5, #31
+; CHECK-NEXT: vmullb.s32 q3, q2, q1
; CHECK-NEXT: vmov.32 q0[2], r12
-; CHECK-NEXT: vmov r12, s8
+; CHECK-NEXT: vmov r12, s12
; CHECK-NEXT: vmov.32 q0[3], r5
-; CHECK-NEXT: vmov r5, s12
-; CHECK-NEXT: smull r12, r5, r5, r12
+; CHECK-NEXT: vmov r5, s13
; CHECK-NEXT: lsrl r12, r5, #31
; CHECK-NEXT: vmov.32 q1[0], r12
-; CHECK-NEXT: vmov r12, s10
+; CHECK-NEXT: vmov r12, s14
; CHECK-NEXT: vmov.32 q1[1], r5
-; CHECK-NEXT: vmov r5, s14
-; CHECK-NEXT: smull r12, r5, r5, r12
+; CHECK-NEXT: vmov r5, s15
; CHECK-NEXT: lsrl r12, r5, #31
; CHECK-NEXT: vmov.32 q1[2], r12
; CHECK-NEXT: vmov.32 q1[3], r5
@@ -52,8 +47,7 @@
; CHECK-NEXT: vmov.f32 s7, s10
; CHECK-NEXT: vstrb.8 q1, [r2], #16
; CHECK-NEXT: bne .LBB0_1
-; CHECK-NEXT: .LBB0_2: @ %for.cond.cleanup
-; CHECK-NEXT: vpop {d8, d9}
+; CHECK-NEXT: @ %bb.2: @ %for.cond.cleanup
; CHECK-NEXT: pop {r5, pc}
entry:
%0 = and i32 %n, 3
@@ -103,17 +97,13 @@
; CHECK-NEXT: .LBB1_1: @ %vector.body
; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vldrh.u16 q0, [r0], #16
-; CHECK-NEXT: vldrh.u16 q2, [r1], #16
+; CHECK-NEXT: vldrh.u16 q1, [r1], #16
; CHECK-NEXT: subs r3, #8
-; CHECK-NEXT: vmovlt.s16 q1, q0
-; CHECK-NEXT: vmovlt.s16 q3, q2
-; CHECK-NEXT: vmovlb.s16 q0, q0
-; CHECK-NEXT: vmovlb.s16 q2, q2
-; CHECK-NEXT: vmul.i32 q1, q3, q1
-; CHECK-NEXT: vmul.i32 q0, q2, q0
-; CHECK-NEXT: vshr.u32 q1, q1, #15
+; CHECK-NEXT: vmullt.s16 q2, q1, q0
+; CHECK-NEXT: vmullb.s16 q0, q1, q0
+; CHECK-NEXT: vshr.u32 q2, q2, #15
; CHECK-NEXT: vshr.u32 q0, q0, #15
-; CHECK-NEXT: vmovnt.i32 q0, q1
+; CHECK-NEXT: vmovnt.i32 q0, q2
; CHECK-NEXT: vstrb.8 q0, [r2], #16
; CHECK-NEXT: bne .LBB1_1
; CHECK-NEXT: @ %bb.2: @ %for.cond.cleanup
@@ -166,17 +156,13 @@
; CHECK-NEXT: .LBB2_1: @ %vector.body
; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vldrb.u8 q0, [r0], #16
-; CHECK-NEXT: vldrb.u8 q2, [r1], #16
+; CHECK-NEXT: vldrb.u8 q1, [r1], #16
; CHECK-NEXT: subs r3, #16
-; CHECK-NEXT: vmovlt.u8 q1, q0
-; CHECK-NEXT: vmovlt.u8 q3, q2
-; CHECK-NEXT: vmovlb.u8 q0, q0
-; CHECK-NEXT: vmovlb.u8 q2, q2
-; CHECK-NEXT: vmul.i16 q1, q3, q1
-; CHECK-NEXT: vmul.i16 q0, q2, q0
-; CHECK-NEXT: vshr.u16 q1, q1, #7
+; CHECK-NEXT: vmullt.u8 q2, q1, q0
+; CHECK-NEXT: vmullb.u8 q0, q1, q0
+; CHECK-NEXT: vshr.u16 q2, q2, #7
; CHECK-NEXT: vshr.u16 q0, q0, #7
-; CHECK-NEXT: vmovnt.i16 q0, q1
+; CHECK-NEXT: vmovnt.i16 q0, q2
; CHECK-NEXT: vstrb.8 q0, [r2], #16
; CHECK-NEXT: bne .LBB2_1
; CHECK-NEXT: @ %bb.2: @ %for.cond.cleanup
diff --git a/llvm/test/CodeGen/Thumb2/mve-vmull.ll b/llvm/test/CodeGen/Thumb2/mve-vmull.ll
index c1e720f..6d0b662 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vmull.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vmull.ll
@@ -4,16 +4,7 @@
define arm_aapcs_vfpcc <2 x i64> @sext_02(<4 x i32> %src1, <4 x i32> %src2) {
; CHECK-LABEL: sext_02:
; CHECK: @ %bb.0: @ %entry
-; CHECK-NEXT: vmov r0, s4
-; CHECK-NEXT: vmov r1, s0
-; CHECK-NEXT: smull r0, r1, r1, r0
-; CHECK-NEXT: vmov.32 q2[0], r0
-; CHECK-NEXT: vmov r0, s6
-; CHECK-NEXT: vmov.32 q2[1], r1
-; CHECK-NEXT: vmov r1, s2
-; CHECK-NEXT: smull r0, r1, r1, r0
-; CHECK-NEXT: vmov.32 q2[2], r0
-; CHECK-NEXT: vmov.32 q2[3], r1
+; CHECK-NEXT: vmullb.s32 q2, q0, q1
; CHECK-NEXT: vmov q0, q2
; CHECK-NEXT: bx lr
entry:
@@ -28,18 +19,8 @@
define arm_aapcs_vfpcc <2 x i64> @sext_13(<4 x i32> %src1, <4 x i32> %src2) {
; CHECK-LABEL: sext_13:
; CHECK: @ %bb.0: @ %entry
-; CHECK-NEXT: vrev64.32 q2, q1
-; CHECK-NEXT: vrev64.32 q1, q0
-; CHECK-NEXT: vmov r0, s8
-; CHECK-NEXT: vmov r1, s4
-; CHECK-NEXT: smull r0, r1, r1, r0
-; CHECK-NEXT: vmov.32 q0[0], r0
-; CHECK-NEXT: vmov r0, s10
-; CHECK-NEXT: vmov.32 q0[1], r1
-; CHECK-NEXT: vmov r1, s6
-; CHECK-NEXT: smull r0, r1, r1, r0
-; CHECK-NEXT: vmov.32 q0[2], r0
-; CHECK-NEXT: vmov.32 q0[3], r1
+; CHECK-NEXT: vmullt.s32 q2, q0, q1
+; CHECK-NEXT: vmov q0, q2
; CHECK-NEXT: bx lr
entry:
%shuf1 = shufflevector <4 x i32> %src1, <4 x i32> undef, <2 x i32> <i32 1, i32 3>
@@ -53,16 +34,7 @@
define arm_aapcs_vfpcc <2 x i64> @zext_02(<4 x i32> %src1, <4 x i32> %src2) {
; CHECK-LABEL: zext_02:
; CHECK: @ %bb.0: @ %entry
-; CHECK-NEXT: vmov r0, s4
-; CHECK-NEXT: vmov r1, s0
-; CHECK-NEXT: umull r0, r1, r1, r0
-; CHECK-NEXT: vmov.32 q2[0], r0
-; CHECK-NEXT: vmov r0, s6
-; CHECK-NEXT: vmov.32 q2[1], r1
-; CHECK-NEXT: vmov r1, s2
-; CHECK-NEXT: umull r0, r1, r1, r0
-; CHECK-NEXT: vmov.32 q2[2], r0
-; CHECK-NEXT: vmov.32 q2[3], r1
+; CHECK-NEXT: vmullb.u32 q2, q0, q1
; CHECK-NEXT: vmov q0, q2
; CHECK-NEXT: bx lr
entry:
@@ -77,18 +49,8 @@
define arm_aapcs_vfpcc <2 x i64> @zext_13(<4 x i32> %src1, <4 x i32> %src2) {
; CHECK-LABEL: zext_13:
; CHECK: @ %bb.0: @ %entry
-; CHECK-NEXT: vrev64.32 q2, q1
-; CHECK-NEXT: vrev64.32 q1, q0
-; CHECK-NEXT: vmov r0, s8
-; CHECK-NEXT: vmov r1, s4
-; CHECK-NEXT: umull r0, r1, r1, r0
-; CHECK-NEXT: vmov.32 q0[0], r0
-; CHECK-NEXT: vmov r0, s10
-; CHECK-NEXT: vmov.32 q0[1], r1
-; CHECK-NEXT: vmov r1, s6
-; CHECK-NEXT: umull r0, r1, r1, r0
-; CHECK-NEXT: vmov.32 q0[2], r0
-; CHECK-NEXT: vmov.32 q0[3], r1
+; CHECK-NEXT: vmullt.u32 q2, q0, q1
+; CHECK-NEXT: vmov q0, q2
; CHECK-NEXT: bx lr
entry:
%shuf1 = shufflevector <4 x i32> %src1, <4 x i32> undef, <2 x i32> <i32 1, i32 3>
@@ -103,9 +65,7 @@
define arm_aapcs_vfpcc <4 x i32> @sext_0246(<8 x i16> %src1, <8 x i16> %src2) {
; CHECK-LABEL: sext_0246:
; CHECK: @ %bb.0: @ %entry
-; CHECK-NEXT: vmovlb.s16 q1, q1
-; CHECK-NEXT: vmovlb.s16 q0, q0
-; CHECK-NEXT: vmul.i32 q0, q0, q1
+; CHECK-NEXT: vmullb.s16 q0, q0, q1
; CHECK-NEXT: bx lr
entry:
%shuf1 = shufflevector <8 x i16> %src1, <8 x i16> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
@@ -119,9 +79,7 @@
define arm_aapcs_vfpcc <4 x i32> @sext_1357(<8 x i16> %src1, <8 x i16> %src2) {
; CHECK-LABEL: sext_1357:
; CHECK: @ %bb.0: @ %entry
-; CHECK-NEXT: vmovlt.s16 q1, q1
-; CHECK-NEXT: vmovlt.s16 q0, q0
-; CHECK-NEXT: vmul.i32 q0, q0, q1
+; CHECK-NEXT: vmullt.s16 q0, q0, q1
; CHECK-NEXT: bx lr
entry:
%shuf1 = shufflevector <8 x i16> %src1, <8 x i16> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
@@ -135,9 +93,7 @@
define arm_aapcs_vfpcc <4 x i32> @zext_0246(<8 x i16> %src1, <8 x i16> %src2) {
; CHECK-LABEL: zext_0246:
; CHECK: @ %bb.0: @ %entry
-; CHECK-NEXT: vmovlb.u16 q1, q1
-; CHECK-NEXT: vmovlb.u16 q0, q0
-; CHECK-NEXT: vmul.i32 q0, q0, q1
+; CHECK-NEXT: vmullb.u16 q0, q0, q1
; CHECK-NEXT: bx lr
entry:
%shuf1 = shufflevector <8 x i16> %src1, <8 x i16> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
@@ -151,9 +107,7 @@
define arm_aapcs_vfpcc <4 x i32> @zext_1357(<8 x i16> %src1, <8 x i16> %src2) {
; CHECK-LABEL: zext_1357:
; CHECK: @ %bb.0: @ %entry
-; CHECK-NEXT: vmovlt.u16 q1, q1
-; CHECK-NEXT: vmovlt.u16 q0, q0
-; CHECK-NEXT: vmul.i32 q0, q0, q1
+; CHECK-NEXT: vmullt.u16 q0, q0, q1
; CHECK-NEXT: bx lr
entry:
%shuf1 = shufflevector <8 x i16> %src1, <8 x i16> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
@@ -167,9 +121,7 @@
define arm_aapcs_vfpcc <8 x i16> @sext_02468101214(<16 x i8> %src1, <16 x i8> %src2) {
; CHECK-LABEL: sext_02468101214:
; CHECK: @ %bb.0: @ %entry
-; CHECK-NEXT: vmovlb.s8 q1, q1
-; CHECK-NEXT: vmovlb.s8 q0, q0
-; CHECK-NEXT: vmul.i16 q0, q0, q1
+; CHECK-NEXT: vmullb.s8 q0, q0, q1
; CHECK-NEXT: bx lr
entry:
%shuf1 = shufflevector <16 x i8> %src1, <16 x i8> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
@@ -183,9 +135,7 @@
define arm_aapcs_vfpcc <8 x i16> @sext_13579111315(<16 x i8> %src1, <16 x i8> %src2) {
; CHECK-LABEL: sext_13579111315:
; CHECK: @ %bb.0: @ %entry
-; CHECK-NEXT: vmovlt.s8 q1, q1
-; CHECK-NEXT: vmovlt.s8 q0, q0
-; CHECK-NEXT: vmul.i16 q0, q0, q1
+; CHECK-NEXT: vmullt.s8 q0, q0, q1
; CHECK-NEXT: bx lr
entry:
%shuf1 = shufflevector <16 x i8> %src1, <16 x i8> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
@@ -199,9 +149,7 @@
define arm_aapcs_vfpcc <8 x i16> @zext_02468101214(<16 x i8> %src1, <16 x i8> %src2) {
; CHECK-LABEL: zext_02468101214:
; CHECK: @ %bb.0: @ %entry
-; CHECK-NEXT: vmovlb.u8 q1, q1
-; CHECK-NEXT: vmovlb.u8 q0, q0
-; CHECK-NEXT: vmul.i16 q0, q0, q1
+; CHECK-NEXT: vmullb.u8 q0, q0, q1
; CHECK-NEXT: bx lr
entry:
%shuf1 = shufflevector <16 x i8> %src1, <16 x i8> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
@@ -215,9 +163,7 @@
define arm_aapcs_vfpcc <8 x i16> @zext_13579111315(<16 x i8> %src1, <16 x i8> %src2) {
; CHECK-LABEL: zext_13579111315:
; CHECK: @ %bb.0: @ %entry
-; CHECK-NEXT: vmovlt.u8 q1, q1
-; CHECK-NEXT: vmovlt.u8 q0, q0
-; CHECK-NEXT: vmul.i16 q0, q0, q1
+; CHECK-NEXT: vmullt.u8 q0, q0, q1
; CHECK-NEXT: bx lr
entry:
%shuf1 = shufflevector <16 x i8> %src1, <16 x i8> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>