; RUN: llc < %s -march=sparc | FileCheck %s

; Basic instruction selection: add-with-immediate folds into a single add.
define i32 @test0(i32 %X) {
  %tmp.1 = add i32 %X, 1
  ret i32 %tmp.1
; CHECK-LABEL: test0:
; CHECK: add %o0, 1, %o0
}


;; xnor tests.
; xor followed by not (xor -1) should select to a single xnor.
define i32 @test1(i32 %X, i32 %Y) {
  %A = xor i32 %X, %Y
  %B = xor i32 %A, -1
  ret i32 %B
; CHECK-LABEL: test1:
; CHECK: xnor %o0, %o1, %o0
}

; Same xnor pattern with the not applied to the first operand instead.
define i32 @test2(i32 %X, i32 %Y) {
  %A = xor i32 %X, -1
  %B = xor i32 %A, %Y
  ret i32 %B
; CHECK-LABEL: test2:
; CHECK: xnor %o0, %o1, %o0
}

Stephen Lind24ab202013-07-14 06:24:09 +000028; CHECK-LABEL: store_zero:
Venkatraman Govindaraju774fe2e22013-06-03 00:21:54 +000029; CHECK: st %g0, [%o0]
30; CHECK: st %g0, [%o1+4]
31define i32 @store_zero(i32* %a, i32* %b) {
32entry:
33 store i32 0, i32* %a, align 4
David Blaikie79e6c742015-02-27 19:29:02 +000034 %0 = getelementptr inbounds i32, i32* %b, i32 1
Venkatraman Govindaraju774fe2e22013-06-03 00:21:54 +000035 store i32 0, i32* %0, align 4
36 ret i32 0
37}
38
James Y Knight807563d2015-05-18 16:29:48 +000039; CHECK-LABEL: signed_divide:
40; CHECK: sra %o0, 31, %o2
41; CHECK: wr %o2, %g0, %y
42; CHECK: sdiv %o0, %o1, %o0
43define i32 @signed_divide(i32 %a, i32 %b) {
44 %r = sdiv i32 %a, %b
45 ret i32 %r
46}

; udiv requires %y to be cleared (write %g0) before the divide.
; CHECK-LABEL: unsigned_divide:
; CHECK: wr %g0, %g0, %y
; CHECK: udiv %o0, %o1, %o0
define i32 @unsigned_divide(i32 %a, i32 %b) {
  %r = udiv i32 %a, %b
  ret i32 %r
}

; 32x32->32 multiply: a single smul, with the %y high half ignored.
; CHECK-LABEL: multiply_32x32:
; CHECK: smul %o0, %o1, %o0
define i32 @multiply_32x32(i32 %a, i32 %b) {
  %r = mul i32 %a, %b
  ret i32 %r
}

; 32x32->64 signed multiply: smul leaves the high 32 bits in %y,
; which is read back with rd to form the 64-bit result.
; CHECK-LABEL: signed_multiply_32x32_64:
; CHECK: smul %o0, %o1, %o1
; CHECK: rd %y, %o0
define i64 @signed_multiply_32x32_64(i32 %a, i32 %b) {
  %xa = sext i32 %a to i64
  %xb = sext i32 %b to i64
  %r = mul i64 %xa, %xb
  ret i64 %r
}

; 32x32->64 unsigned multiply: umul + rd %y for the high half.
; CHECK-LABEL: unsigned_multiply_32x32_64:
; CHECK: umul %o0, %o1, %o2
; CHECK: rd %y, %o2
; FIXME: the smul in the output is totally redundant and should not be there.
; CHECK: smul %o0, %o1, %o1
; CHECK: retl
; CHECK: mov %o2, %o0
define i64 @unsigned_multiply_32x32_64(i32 %a, i32 %b) {
  %xa = zext i32 %a to i64
  %xb = zext i32 %b to i64
  %r = mul i64 %xa, %xb
  ret i64 %r
}
