; RUN: llc < %s -march=arm64 -aarch64-stp-suppress=false -verify-machineinstrs -mcpu=cyclone | FileCheck %s
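; Each function below should have its two adjacent stores merged into a single
; stp by the AArch64 load/store optimizer (pairing is enabled explicitly via
; -aarch64-stp-suppress=false).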

; CHECK-LABEL: stp_int
; CHECK: stp w0, w1, [x2]
define void @stp_int(i32 %a, i32 %b, i32* nocapture %p) nounwind {
  store i32 %a, i32* %p, align 4
  %add.ptr = getelementptr inbounds i32, i32* %p, i64 1
  store i32 %b, i32* %add.ptr, align 4
  ret void
}

; CHECK-LABEL: stp_long
; CHECK: stp x0, x1, [x2]
define void @stp_long(i64 %a, i64 %b, i64* nocapture %p) nounwind {
  store i64 %a, i64* %p, align 8
  %add.ptr = getelementptr inbounds i64, i64* %p, i64 1
  store i64 %b, i64* %add.ptr, align 8
  ret void
}

; CHECK-LABEL: stp_float
; CHECK: stp s0, s1, [x0]
define void @stp_float(float %a, float %b, float* nocapture %p) nounwind {
  store float %a, float* %p, align 4
  %add.ptr = getelementptr inbounds float, float* %p, i64 1
  store float %b, float* %add.ptr, align 4
  ret void
}

; CHECK-LABEL: stp_double
; CHECK: stp d0, d1, [x0]
define void @stp_double(double %a, double %b, double* nocapture %p) nounwind {
  store double %a, double* %p, align 8
  %add.ptr = getelementptr inbounds double, double* %p, i64 1
  store double %b, double* %add.ptr, align 8
  ret void
}

; Test the load/store optimizer: combine sturs into an stp, if appropriate
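; The negative offsets below force unscaled stur stores; the optimizer should
; still pair them into a single stp with a negative immediate offset.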
define void @stur_int(i32 %a, i32 %b, i32* nocapture %p) nounwind {
; CHECK-LABEL: stur_int
; CHECK: stp w{{[0-9]+}}, {{w[0-9]+}}, [x{{[0-9]+}}, #-8]
; CHECK-NEXT: ret
  %p1 = getelementptr inbounds i32, i32* %p, i32 -1
  store i32 %a, i32* %p1, align 2
  %p2 = getelementptr inbounds i32, i32* %p, i32 -2
  store i32 %b, i32* %p2, align 2
  ret void
}

define void @stur_long(i64 %a, i64 %b, i64* nocapture %p) nounwind {
; CHECK-LABEL: stur_long
; CHECK: stp x{{[0-9]+}}, {{x[0-9]+}}, [x{{[0-9]+}}, #-16]
; CHECK-NEXT: ret
  %p1 = getelementptr inbounds i64, i64* %p, i32 -1
  store i64 %a, i64* %p1, align 2
  %p2 = getelementptr inbounds i64, i64* %p, i32 -2
  store i64 %b, i64* %p2, align 2
  ret void
}

define void @stur_float(float %a, float %b, float* nocapture %p) nounwind {
; CHECK-LABEL: stur_float
; CHECK: stp s{{[0-9]+}}, {{s[0-9]+}}, [x{{[0-9]+}}, #-8]
; CHECK-NEXT: ret
  %p1 = getelementptr inbounds float, float* %p, i32 -1
  store float %a, float* %p1, align 2
  %p2 = getelementptr inbounds float, float* %p, i32 -2
  store float %b, float* %p2, align 2
  ret void
}

define void @stur_double(double %a, double %b, double* nocapture %p) nounwind {
; CHECK-LABEL: stur_double
; CHECK: stp d{{[0-9]+}}, {{d[0-9]+}}, [x{{[0-9]+}}, #-16]
; CHECK-NEXT: ret
  %p1 = getelementptr inbounds double, double* %p, i32 -1
  store double %a, double* %p1, align 2
  %p2 = getelementptr inbounds double, double* %p, i32 -2
  store double %b, double* %p2, align 2
  ret void
}

define void @splat_v4i32(i32 %v, i32 *%p) {
entry:

; CHECK-LABEL: splat_v4i32
; CHECK-DAG: stp w0, w0, [x1]
; CHECK-DAG: stp w0, w0, [x1, #8]
; CHECK: ret

  %p17 = insertelement <4 x i32> undef, i32 %v, i32 0
  %p18 = insertelement <4 x i32> %p17, i32 %v, i32 1
  %p19 = insertelement <4 x i32> %p18, i32 %v, i32 2
  %p20 = insertelement <4 x i32> %p19, i32 %v, i32 3
  %p21 = bitcast i32* %p to <4 x i32>*
  store <4 x i32> %p20, <4 x i32>* %p21, align 4
  ret void
}

; Read of %b to compute %tmp2 shouldn't prevent formation of stp
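; The intervening load reads [x2, #8], which does not overlap either store, and
; the add only reads w1, so the two stores can still be paired.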
; CHECK-LABEL: stp_int_rar_hazard
; CHECK: ldr [[REG:w[0-9]+]], [x2, #8]
; CHECK: add w8, [[REG]], w1
; CHECK: stp w0, w1, [x2]
; CHECK: ret
define i32 @stp_int_rar_hazard(i32 %a, i32 %b, i32* nocapture %p) nounwind {
  store i32 %a, i32* %p, align 4
  %ld.ptr = getelementptr inbounds i32, i32* %p, i64 2
  %tmp = load i32, i32* %ld.ptr, align 4
  %tmp2 = add i32 %tmp, %b
  %add.ptr = getelementptr inbounds i32, i32* %p, i64 1
  store i32 %b, i32* %add.ptr, align 4
  ret i32 %tmp2
}

; Read of %b to compute %tmp2 shouldn't prevent formation of stp
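; Unlike the previous test, the load here reads the same address the second
; store writes ([x3, #4]), so it must stay ahead of the merged stp, and its
; result feeds the return value in w0.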
; CHECK-LABEL: stp_int_rar_hazard_after
; CHECK: ldr [[REG:w[0-9]+]], [x3, #4]
; CHECK: add w0, [[REG]], w2
; CHECK: stp w1, w2, [x3]
; CHECK: ret
define i32 @stp_int_rar_hazard_after(i32 %w0, i32 %a, i32 %b, i32* nocapture %p) nounwind {
  store i32 %a, i32* %p, align 4
  %ld.ptr = getelementptr inbounds i32, i32* %p, i64 1
  %tmp = load i32, i32* %ld.ptr, align 4
  %tmp2 = add i32 %tmp, %b
  %add.ptr = getelementptr inbounds i32, i32* %p, i64 1
  store i32 %b, i32* %add.ptr, align 4
  ret i32 %tmp2
}