; RUN: llc < %s -march=x86 | FileCheck %s -check-prefix=X32
; RUN: llc < %s -march=x86-64 | FileCheck %s -check-prefix=X64

; The immediate can be encoded more compactly if the
; instruction is a sub instead of an add.
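;
; For example (illustrative encodings, not checked by this test): +128 does not
; fit a sign-extended 8-bit immediate, but -128 does, so
;   addl $128, %eax    encodes as 05 80 00 00 00  (5 bytes, imm32 form)
;   subl $-128, %eax   encodes as 83 e8 80        (3 bytes, imm8 form)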

define i32 @test1(i32 inreg %a) nounwind {
  %b = add i32 %a, 128
  ret i32 %b
; X32: subl $-128, %eax
; X64: subl $-128,
}
define i64 @test2(i64 inreg %a) nounwind {
  %b = add i64 %a, 2147483648
  ret i64 %b
; X32: addl $-2147483648, %eax
; X64: subq $-2147483648,
}
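
; Note on test2 (inferred from the CHECK lines): +2147483648 cannot be encoded as
; a sign-extended 32-bit immediate on x86-64, but -2147483648 can, hence subq.
; On x86-32 the i64 add is split into 32-bit halves, and the low half just adds
; 0x80000000, which the printer shows as -2147483648.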
define i64 @test3(i64 inreg %a) nounwind {
  %b = add i64 %a, 128
  ret i64 %b

; X32: addl $128, %eax
; X64: subq $-128,
}
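
; Note on test3 (inferred): on x86-32 the i64 add is lowered to an addl/adcl pair,
; and the add-to-sub immediate trick is not applied to that carry-chained form, so
; the low half keeps the plain addl $128; the native 64-bit add on x86-64 still
; becomes subq $-128.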

define i1 @test4(i32 %v1, i32 %v2, i32* %X) nounwind {
entry:
  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
  %sum = extractvalue {i32, i1} %t, 0
  %obit = extractvalue {i32, i1} %t, 1
  br i1 %obit, label %overflow, label %normal

normal:
  store i32 0, i32* %X
  br label %overflow

overflow:
  ret i1 false

; X32: test4:
; X32: addl
; X32-NEXT: jo

; X64: test4:
; X64: addl %esi, %edi
; X64-NEXT: jo
}
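
; Note on test4 (inferred): llvm.sadd.with.overflow lowers to a plain addl, and
; the overflow bit is taken straight from the OF flag, so the branch is a jo
; immediately after the add.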

define i1 @test5(i32 %v1, i32 %v2, i32* %X) nounwind {
entry:
  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
  %sum = extractvalue {i32, i1} %t, 0
  %obit = extractvalue {i32, i1} %t, 1
  br i1 %obit, label %carry, label %normal

normal:
  store i32 0, i32* %X
  br label %carry

carry:
  ret i1 false

; X32: test5:
; X32: addl
; X32-NEXT: jb

; X64: test5:
; X64: addl %esi, %edi
; X64-NEXT: jb
}
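
; Note on test5 (inferred): the unsigned variant reads the carry flag instead of
; the overflow flag, so the same addl is followed by jb (jump if CF set) rather
; than jo.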

declare {i32, i1} @llvm.sadd.with.overflow.i32(i32, i32)
declare {i32, i1} @llvm.uadd.with.overflow.i32(i32, i32)


define i64 @test6(i64 %A, i32 %B) nounwind {
  %tmp12 = zext i32 %B to i64   ; <i64> [#uses=1]
  %tmp3 = shl i64 %tmp12, 32    ; <i64> [#uses=1]
  %tmp5 = add i64 %tmp3, %A     ; <i64> [#uses=1]
  ret i64 %tmp5

; X32: test6:
; X32: movl 12(%esp), %edx
; X32-NEXT: addl 8(%esp), %edx
; X32-NEXT: movl 4(%esp), %eax
; X32-NEXT: ret

; X64: test6:
; X64: shlq $32, %rsi
; X64: leaq (%rsi,%rdi), %rax
; X64: ret
}
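
; Note on test6 (inferred from the CHECK lines): adding (zext %B) << 32 can only
; change the upper 32 bits of %A, so on x86-32 %B is added directly into the
; high-half register (%edx) while the low half is passed through untouched in
; %eax; on x86-64 the shift-plus-add is matched into shlq followed by leaq.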