; RUN: llc -march=msp430 < %s | FileCheck %s
; XFAIL: *
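; Check that i16 comparison results (icmp followed by zext to i16) are lowered
; to the expected MSP430 flag-extraction sequences.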
target datalayout = "e-p:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:16:32"
target triple = "msp430-generic-generic"

define i16 @sccweqand(i16 %a, i16 %b) nounwind {
 %t1 = and i16 %a, %b
 %t2 = icmp eq i16 %t1, 0
 %t3 = zext i1 %t2 to i16
 ret i16 %t3
}
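; r2 is the MSP430 status register; the expected sequences below form the
; boolean result by masking (and, where needed, inverting) its low flag bits.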
; CHECK: sccweqand:
; CHECK: bit.w r14, r15
; CHECK-NEXT: mov.w r2, r15
; CHECK-NEXT: and.w #1, r15
; CHECK-NEXT: xor.w #1, r15

define i16 @sccwneand(i16 %a, i16 %b) nounwind {
 %t1 = and i16 %a, %b
 %t2 = icmp ne i16 %t1, 0
 %t3 = zext i1 %t2 to i16
 ret i16 %t3
}
; CHECK: sccwneand:
; CHECK: bit.w r14, r15
; CHECK-NEXT: mov.w r2, r15
; CHECK-NEXT: and.w #1, r15

define i16 @sccwne(i16 %a, i16 %b) nounwind {
 %t1 = icmp ne i16 %a, %b
 %t2 = zext i1 %t1 to i16
 ret i16 %t2
}
; CHECK: sccwne:
; CHECK: cmp.w r15, r14
; CHECK-NEXT: mov.w r2, r15
; CHECK-NEXT: rra.w r15
; CHECK-NEXT: and.w #1, r15

define i16 @sccweq(i16 %a, i16 %b) nounwind {
 %t1 = icmp eq i16 %a, %b
 %t2 = zext i1 %t1 to i16
 ret i16 %t2
}
; CHECK: sccweq:
; CHECK: cmp.w r15, r14
; CHECK-NEXT: mov.w r2, r15
; CHECK-NEXT: rra.w r15
; CHECK-NEXT: and.w #1, r15
; CHECK-NEXT: xor.w #1, r15

define i16 @sccwugt(i16 %a, i16 %b) nounwind {
 %t1 = icmp ugt i16 %a, %b
 %t2 = zext i1 %t1 to i16
 ret i16 %t2
}
; CHECK: sccwugt:
; CHECK: cmp.w r14, r15
; CHECK-NEXT: mov.w r2, r15
; CHECK-NEXT: and.w #1, r15
; CHECK-NEXT: xor.w #1, r15

define i16 @sccwuge(i16 %a, i16 %b) nounwind {
 %t1 = icmp uge i16 %a, %b
 %t2 = zext i1 %t1 to i16
 ret i16 %t2
}
; CHECK: sccwuge:
; CHECK: cmp.w r15, r14
; CHECK-NEXT: mov.w r2, r15
; CHECK-NEXT: and.w #1, r15

define i16 @sccwult(i16 %a, i16 %b) nounwind {
 %t1 = icmp ult i16 %a, %b
 %t2 = zext i1 %t1 to i16
 ret i16 %t2
}
; CHECK: sccwult:
; CHECK: cmp.w r15, r14
; CHECK-NEXT: mov.w r2, r15
; CHECK-NEXT: and.w #1, r15
; CHECK-NEXT: xor.w #1, r15

define i16 @sccwule(i16 %a, i16 %b) nounwind {
 %t1 = icmp ule i16 %a, %b
 %t2 = zext i1 %t1 to i16
 ret i16 %t2
}
; CHECK: sccwule:
; CHECK: cmp.w r14, r15
; CHECK-NEXT: mov.w r2, r15
; CHECK-NEXT: and.w #1, r15

define i16 @sccwsgt(i16 %a, i16 %b) nounwind {
 %t1 = icmp sgt i16 %a, %b
 %t2 = zext i1 %t1 to i16
 ret i16 %t2
}

define i16 @sccwsge(i16 %a, i16 %b) nounwind {
 %t1 = icmp sge i16 %a, %b
 %t2 = zext i1 %t1 to i16
 ret i16 %t2
}

define i16 @sccwslt(i16 %a, i16 %b) nounwind {
 %t1 = icmp slt i16 %a, %b
 %t2 = zext i1 %t1 to i16
 ret i16 %t2
}

define i16 @sccwsle(i16 %a, i16 %b) nounwind {
 %t1 = icmp sle i16 %a, %b
 %t2 = zext i1 %t1 to i16
 ret i16 %t2
}