; RUN: llc < %s -march=sparcv9 | FileCheck %s

; CHECK: ret2:
; CHECK: or %g0, %i1, %i0
define i64 @ret2(i64 %a, i64 %b) {
  ret i64 %b
}

; CHECK: shl_imm
; CHECK: sllx %i0, 7, %i0
define i64 @shl_imm(i64 %a) {
  %x = shl i64 %a, 7
  ret i64 %x
}

; CHECK: sra_reg
; CHECK: srax %i0, %i1, %i0
define i64 @sra_reg(i64 %a, i64 %b) {
  %x = ashr i64 %a, %b
  ret i64 %x
}

; Immediate materialization. Many of these patterns could actually be merged
; into the restore instruction:
;
;   restore %g0, %g0, %o0
;
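; For ret_imm0 below, that would fold the "or %g0, %g0, %i0" and the epilogue
; restore into the single instruction shown above (a possible follow-up; not
; checked here).
;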
; CHECK: ret_imm0
; CHECK: or %g0, %g0, %i0
define i64 @ret_imm0() {
  ret i64 0
}

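; -4096 is the most negative simm13, so a single "or %g0, -4096" materializes it.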
; CHECK: ret_simm13
; CHECK: or %g0, -4096, %i0
define i64 @ret_simm13() {
  ret i64 -4096
}

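; 4096 == 4 << 10, so a bare sethi covers it; no or of low bits should appear
; before the restore.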
; CHECK: ret_sethi
; CHECK: sethi 4, %i0
; CHECK-NOT: or
; CHECK: restore
define i64 @ret_sethi() {
  ret i64 4096
}

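; 4097 == (4 << 10) | 1, so this takes a sethi followed by an or of the low bits.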
; CHECK: ret_sethi_or
; CHECK: sethi 4, [[R:%[goli][0-7]]]
; CHECK: or [[R]], 1, %i0
define i64 @ret_sethi_or() {
  ret i64 4097
}

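; -4100 == 4096 ^ -4, so a negative immediate in this range can be built with a
; sethi followed by an xor against a simm13.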
; CHECK: ret_nimm33
; CHECK: sethi 4, [[R:%[goli][0-7]]]
; CHECK: xor [[R]], -4, %i0
define i64 @ret_nimm33() {
  ret i64 -4100
}

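; A full 64-bit constant needs both halves materialized, hence two sethi
; instructions.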
; CHECK: ret_bigimm
; CHECK: sethi
; CHECK: sethi
define i64 @ret_bigimm() {
  ret i64 6800754272627607872
}

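; 0x4000000000000000 == 0x40000000 << 32, and 0x40000000 is exactly
; sethi 1048576 (0x100000 << 10), so the high half only needs one sethi.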
; CHECK: ret_bigimm2
; CHECK: sethi 1048576
define i64 @ret_bigimm2() {
  ret i64 4611686018427387904 ; 0x4000000000000000
}

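; The xor with -1 plus the and should fold into a single andn.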
; CHECK: reg_reg_alu
; CHECK: add %i0, %i1, [[R0:%[goli][0-7]]]
; CHECK: sub [[R0]], %i2, [[R1:%[goli][0-7]]]
; CHECK: andn [[R1]], %i0, %i0
define i64 @reg_reg_alu(i64 %x, i64 %y, i64 %z) {
  %a = add i64 %x, %y
  %b = sub i64 %a, %z
  %c = xor i64 %x, -1
  %d = and i64 %b, %c
  ret i64 %d
}

; CHECK: reg_imm_alu
; CHECK: add %i0, -5, [[R0:%[goli][0-7]]]
; CHECK: xor [[R0]], 2, %i0
define i64 @reg_imm_alu(i64 %x, i64 %y, i64 %z) {
  %a = add i64 %x, -5
  %b = xor i64 %a, 2
  ret i64 %b
}

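; Loads of several widths with sign- and zero-extension, plus truncating stores
; of the updated value.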
; CHECK: loads
; CHECK: ldx [%i0]
; CHECK: stx %
; CHECK: ld [%i1]
; CHECK: st %
; CHECK: ldsw [%i2]
; CHECK: stx %
; CHECK: ldsh [%i3]
; CHECK: sth %
define i64 @loads(i64* %p, i32* %q, i32* %r, i16* %s) {
  %a = load i64* %p
  %ai = add i64 1, %a
  store i64 %ai, i64* %p
  %b = load i32* %q
  %b2 = zext i32 %b to i64
  %bi = trunc i64 %ai to i32
  store i32 %bi, i32* %q
  %c = load i32* %r
  %c2 = sext i32 %c to i64
  store i64 %ai, i64* %p
  %d = load i16* %s
  %d2 = sext i16 %d to i64
  %di = trunc i64 %ai to i16
  store i16 %di, i16* %s

  %x1 = add i64 %a, %b2
  %x2 = add i64 %c2, %d2
  %x3 = add i64 %x1, %x2
  ret i64 %x3
}

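; Reg+imm addressing should be folded into the memory operands.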
; CHECK: stores
; CHECK: ldx [%i0+8], [[R:%[goli][0-7]]]
; CHECK: stx [[R]], [%i0+16]
; CHECK: st [[R]], [%i1+-8]
; CHECK: sth [[R]], [%i2+40]
; CHECK: stb [[R]], [%i3+-20]
define void @stores(i64* %p, i32* %q, i16* %r, i8* %s) {
  %p1 = getelementptr i64* %p, i64 1
  %p2 = getelementptr i64* %p, i64 2
  %pv = load i64* %p1
  store i64 %pv, i64* %p2

  %q2 = getelementptr i32* %q, i32 -2
  %qv = trunc i64 %pv to i32
  store i32 %qv, i32* %q2

  %r2 = getelementptr i16* %r, i16 20
  %rv = trunc i64 %pv to i16
  store i16 %rv, i16* %r2

  %s2 = getelementptr i8* %s, i8 -20
  %sv = trunc i64 %pv to i8
  store i8 %sv, i8* %s2

  ret void
}

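; Both i8 loads read the same address, so a single ldub should feed both shift
; operands once the i8 shift is promoted.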
; CHECK: promote_shifts
; CHECK: ldub [%i0], [[R:%[goli][0-7]]]
; CHECK: sll [[R]], [[R]], %i0
define i8 @promote_shifts(i8* %p) {
  %L24 = load i8* %p
  %L32 = load i8* %p
  %B36 = shl i8 %L24, %L32
  ret i8 %B36
}

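; 64-bit multiply and divide map directly to mulx, sdivx, and udivx.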
; CHECK: multiply
; CHECK: mulx %i0, %i1, %i0
define i64 @multiply(i64 %a, i64 %b) {
  %r = mul i64 %a, %b
  ret i64 %r
}

; CHECK: signed_divide
; CHECK: sdivx %i0, %i1, %i0
define i64 @signed_divide(i64 %a, i64 %b) {
  %r = sdiv i64 %a, %b
  ret i64 %r
}

; CHECK: unsigned_divide
; CHECK: udivx %i0, %i1, %i0
define i64 @unsigned_divide(i64 %a, i64 %b) {
  %r = udiv i64 %a, %b
  ret i64 %r
}