; RUN: llc < %s -march=arm -mattr=+vfp2 | FileCheck %s -check-prefix=VFP2
; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s -check-prefix=NEON
; RUN: llc < %s -march=arm -mcpu=cortex-a8 | FileCheck %s -check-prefix=A8
; t1: -(a * b) - acc, with the negation written as (fsub -0.0, x).
; VFP2/NEON should fold the whole expression into a single vnmla.f32;
; Cortex-A8 instead emits vnmul + vsub (per the A8 CHECK lines below).
define float @t1(float %acc, float %a, float %b) nounwind {
entry:
; VFP2: t1:
; VFP2: vnmla.f32

; NEON: t1:
; NEON: vnmla.f32

; A8: t1:
; A8: vnmul.f32 s0, s1, s0
; A8: vsub.f32 d0, d0, d1
	%0 = fmul float %a, %b
	%1 = fsub float -0.0, %0
	%2 = fsub float %1, %acc
	ret float %2
}

; t2: same as t1 but the negation is written as a multiply by -1.0.
; Both negation forms should reach the same codegen: vnmla.f32 on
; VFP2/NEON, vnmul + vsub on Cortex-A8.
define float @t2(float %acc, float %a, float %b) nounwind {
entry:
; VFP2: t2:
; VFP2: vnmla.f32

; NEON: t2:
; NEON: vnmla.f32

; A8: t2:
; A8: vnmul.f32 s0, s1, s0
; A8: vsub.f32 d0, d0, d1
	%0 = fmul float %a, %b
	%1 = fmul float -1.0, %0
	%2 = fsub float %1, %acc
	ret float %2
}

; t3: double-precision version of t1 (-(a * b) - acc via fsub -0.0).
; Expect a single vnmla.f64 on VFP2/NEON; vnmul.f64 + vsub.f64 on A8.
define double @t3(double %acc, double %a, double %b) nounwind {
entry:
; VFP2: t3:
; VFP2: vnmla.f64

; NEON: t3:
; NEON: vnmla.f64

; A8: t3:
; A8: vnmul.f64 d16, d16, d17
; A8: vsub.f64 d16, d16, d17
	%0 = fmul double %a, %b
	%1 = fsub double -0.0, %0
	%2 = fsub double %1, %acc
	ret double %2
}

; t4: double-precision version of t2 (negation via multiply by -1.0).
; Expect a single vnmla.f64 on VFP2/NEON; vnmul.f64 + vsub.f64 on A8.
define double @t4(double %acc, double %a, double %b) nounwind {
entry:
; VFP2: t4:
; VFP2: vnmla.f64

; NEON: t4:
; NEON: vnmla.f64

; A8: t4:
; A8: vnmul.f64 d16, d16, d17
; A8: vsub.f64 d16, d16, d17
	%0 = fmul double %a, %b
	%1 = fmul double -1.0, %0
	%2 = fsub double %1, %acc
	ret double %2
}