; RUN: llc -mtriple=arm-eabi %s -o - | FileCheck %s -check-prefix=CHECK --check-prefix=CHECK-LE
; RUN: llc -mtriple=armv7-eabi %s -o - | FileCheck %s --check-prefix=CHECK-V7-LE
; RUN: llc -mtriple=armeb-eabi %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-BE
; RUN: llc -mtriple=armebv7-eabi %s -o - | FileCheck %s -check-prefix=CHECK-V7-BE
; Check generated signed and unsigned multiply accumulate long.
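; For reference: umlal/smlal compute RdHi:RdLo = RdHi:RdLo + Rn * Rm, i.e. a
; 32x32->64-bit unsigned (umlal) or signed (smlal) multiply added into a
; 64-bit accumulator held in a register pair.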

define i64 @MACLongTest1(i32 %a, i32 %b, i64 %c) {
;CHECK-LABEL: MACLongTest1:
;CHECK: umlal
  %conv = zext i32 %a to i64
  %conv1 = zext i32 %b to i64
  %mul = mul i64 %conv1, %conv
  %add = add i64 %mul, %c
  ret i64 %add
}

define i64 @MACLongTest2(i32 %a, i32 %b, i64 %c) {
;CHECK-LABEL: MACLongTest2:
;CHECK: smlal
  %conv = sext i32 %a to i64
  %conv1 = sext i32 %b to i64
  %mul = mul nsw i64 %conv1, %conv
  %add = add nsw i64 %mul, %c
  ret i64 %add
}

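; In the next two tests the accumulate operand is itself a 32-bit value that is
; zero- or sign-extended to i64 rather than a native i64 argument, checking
; that umlal/smlal is still formed for the widened multiply-add.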
define i64 @MACLongTest3(i32 %a, i32 %b, i32 %c) {
;CHECK-LABEL: MACLongTest3:
;CHECK: umlal
  %conv = zext i32 %b to i64
  %conv1 = zext i32 %a to i64
  %mul = mul i64 %conv, %conv1
  %conv2 = zext i32 %c to i64
  %add = add i64 %mul, %conv2
  ret i64 %add
}

define i64 @MACLongTest4(i32 %a, i32 %b, i32 %c) {
;CHECK-LABEL: MACLongTest4:
;CHECK: smlal
  %conv = sext i32 %b to i64
  %conv1 = sext i32 %a to i64
  %mul = mul nsw i64 %conv, %conv1
  %conv2 = sext i32 %c to i64
  %add = add nsw i64 %mul, %conv2
  ret i64 %add
}

; Two things to check here: the @earlyclobber constraint (on <= v5) and the "$Rd = $R" ones.
;    + Without @earlyclobber the v7 code is natural. With it, the first two
;      registers must be distinct from the third.
;    + Without "$Rd = $R", this can be satisfied without a mov before the umlal
;      by trying to use 6 different registers in the MachineInstr. The natural
;      evolution of this attempt currently leaves only two movs in the final
;      function, both after the umlal. With it, *some* move has to happen
;      before the umlal.
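; (For context on the constraints above: an @earlyclobber output may be written
; before all inputs have been read, so the register allocator must keep it in a
; register distinct from every input; a "$Rd = $R" constraint ties an output
; operand to an input operand so both are assigned the same register.)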
define i64 @MACLongTest5(i64 %c, i32 %a, i32 %b) {
; CHECK-V7-LE-LABEL: MACLongTest5:
; CHECK-V7-LE: umlal r0, r1, r0, r0
; CHECK-V7-BE-LABEL: MACLongTest5:
; CHECK-V7-BE: umlal r1, r0, r1, r1

; CHECK-LABEL: MACLongTest5:
; CHECK-LE: mov [[RDLO:r[0-9]+]], r0
; CHECK-LE: umlal [[RDLO]], r1, r0, r0
; CHECK-LE: mov r0, [[RDLO]]
; CHECK-BE: mov [[RDLO:r[0-9]+]], r1
; CHECK-BE: umlal [[RDLO]], r0, r1, r1
; CHECK-BE: mov r1, [[RDLO]]

  %conv.trunc = trunc i64 %c to i32
  %conv = zext i32 %conv.trunc to i64
  %conv1 = zext i32 %b to i64
  %mul = mul i64 %conv, %conv
  %add = add i64 %mul, %c
  ret i64 %add
}