; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s

; The fundamental problem: an add separated from other arithmetic by a sext can't
; be combined with the later instructions. However, if the first add is 'nsw',
; then we can promote the sext ahead of that add to allow optimizations.
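;
; A sketch of the intended transform (illustrative only, not checked by this
; test):
;   %add = add nsw i32 %i, 5
;   %ext = sext i32 %add to i64
; can be rewritten as the following, because 'nsw' guarantees that the narrow
; add has no signed overflow:
;   %ext = sext i32 %i to i64
;   %add = add nsw i64 %ext, 5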

define i64 @add_nsw_consts(i32 %i) {
; CHECK-LABEL: add_nsw_consts:
; CHECK: # BB#0:
; CHECK-NEXT: movslq %edi, %rax
; CHECK-NEXT: addq $12, %rax
; CHECK-NEXT: retq

  %add = add nsw i32 %i, 5
  %ext = sext i32 %add to i64
  %idx = add i64 %ext, 7
  ret i64 %idx
}
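
; With the sext promoted, the constants from both adds fold:
; (i + 5) + 7 = i + 12, which is the single 'addq $12' checked above.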

; An x86 bonus: If we promote the sext ahead of the 'add nsw',
; we allow LEA formation and eliminate an add instruction.
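;
; The LEA folds the base register, index register, and constant into one
; instruction: 5(%rax,%rsi) computes %rax + %rsi + 5.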

define i64 @add_nsw_sext_add(i32 %i, i64 %x) {
; CHECK-LABEL: add_nsw_sext_add:
; CHECK: # BB#0:
; CHECK-NEXT: movslq %edi, %rax
; CHECK-NEXT: leaq 5(%rax,%rsi), %rax
; CHECK-NEXT: retq

  %add = add nsw i32 %i, 5
  %ext = sext i32 %add to i64
  %idx = add i64 %x, %ext
  ret i64 %idx
}

; Throw in a scale (left shift) because an LEA can do that too.
; Use a negative constant (LEA displacement) to verify that's handled correctly.
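;
; Worked out: (i + (-5)) << 3 = i*8 - 40, so the expected address computation
; is -40(%rsi,%rax,8), i.e. %rsi + %rax*8 - 40.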

define i64 @add_nsw_sext_lsh_add(i32 %i, i64 %x) {
; CHECK-LABEL: add_nsw_sext_lsh_add:
; CHECK: # BB#0:
; CHECK-NEXT: movslq %edi, %rax
; CHECK-NEXT: leaq -40(%rsi,%rax,8), %rax
; CHECK-NEXT: retq

  %add = add nsw i32 %i, -5
  %ext = sext i32 %add to i64
  %shl = shl i64 %ext, 3
  %idx = add i64 %x, %shl
  ret i64 %idx
}

; Don't promote the sext if its result has no other uses to combine with;
; the wider 64-bit add instruction needs an extra byte to encode.
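;
; Instruction-size arithmetic (asserted from the x86 encoding rules, not
; checked by this test): 'addl $5, %edi' encodes as 83 C7 05 (3 bytes), while
; a promoted 'addq $5, %rax' would encode as 48 83 C0 05 (4 bytes) because of
; the REX.W prefix.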

define i64 @add_nsw_sext(i32 %i, i64 %x) {
; CHECK-LABEL: add_nsw_sext:
; CHECK: # BB#0:
; CHECK-NEXT: addl $5, %edi
; CHECK-NEXT: movslq %edi, %rax
; CHECK-NEXT: retq

  %add = add nsw i32 %i, 5
  %ext = sext i32 %add to i64
  ret i64 %ext
}

; The typical use case: a 64-bit system where an 'int' is used as an index into an array.
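;
; A rough C equivalent (an assumption about the source, shown for context):
;   char *gep8(int i, char *x) { return &x[i + 5]; }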

define i8* @gep8(i32 %i, i8* %x) {
; CHECK-LABEL: gep8:
; CHECK: # BB#0:
; CHECK-NEXT: movslq %edi, %rax
; CHECK-NEXT: leaq 5(%rax,%rsi), %rax
; CHECK-NEXT: retq

  %add = add nsw i32 %i, 5
  %ext = sext i32 %add to i64
  %idx = getelementptr i8, i8* %x, i64 %ext
  ret i8* %idx
}
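
; In the gep variants below, the element size becomes the LEA scale, and the
; byte displacement is the IR constant times that size: -5*2 = -10 for gep16,
; 5*4 = 20 for gep32, and -5*8 = -40 for gep64.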

define i16* @gep16(i32 %i, i16* %x) {
; CHECK-LABEL: gep16:
; CHECK: # BB#0:
; CHECK-NEXT: movslq %edi, %rax
; CHECK-NEXT: leaq -10(%rsi,%rax,2), %rax
; CHECK-NEXT: retq

  %add = add nsw i32 %i, -5
  %ext = sext i32 %add to i64
  %idx = getelementptr i16, i16* %x, i64 %ext
  ret i16* %idx
}

define i32* @gep32(i32 %i, i32* %x) {
; CHECK-LABEL: gep32:
; CHECK: # BB#0:
; CHECK-NEXT: movslq %edi, %rax
; CHECK-NEXT: leaq 20(%rsi,%rax,4), %rax
; CHECK-NEXT: retq

  %add = add nsw i32 %i, 5
  %ext = sext i32 %add to i64
  %idx = getelementptr i32, i32* %x, i64 %ext
  ret i32* %idx
}

define i64* @gep64(i32 %i, i64* %x) {
; CHECK-LABEL: gep64:
; CHECK: # BB#0:
; CHECK-NEXT: movslq %edi, %rax
; CHECK-NEXT: leaq -40(%rsi,%rax,8), %rax
; CHECK-NEXT: retq

  %add = add nsw i32 %i, -5
  %ext = sext i32 %add to i64
  %idx = getelementptr i64, i64* %x, i64 %ext
  ret i64* %idx
}

; LEA can't scale by 16, but the adds can still be combined into an LEA.
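;
; An i128 element is 16 bytes, past the LEA scale limit of 8, so a separate
; 'shlq $4' performs the *16 and the LEA supplies base plus displacement:
; 5*16 = 80 bytes.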

define i128* @gep128(i32 %i, i128* %x) {
; CHECK-LABEL: gep128:
; CHECK: # BB#0:
; CHECK-NEXT: movslq %edi, %rax
; CHECK-NEXT: shlq $4, %rax
; CHECK-NEXT: leaq 80(%rax,%rsi), %rax
; CHECK-NEXT: retq

  %add = add nsw i32 %i, 5
  %ext = sext i32 %add to i64
  %idx = getelementptr i128, i128* %x, i64 %ext
  ret i128* %idx
}

; A bigger win can be achieved when there is more than one use of the
; sign-extended value. In this case, we can eliminate the sign-extension
; instructions and use more efficient addressing modes for the memory ops.
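;
; A rough C equivalent (an assumption, shown for context):
;   void PR20134(int *a, int i) { a[i] = a[i+1] + a[i+2]; }
; One movslq covers all three accesses; the +1 and +2 indices become the byte
; displacements 4 and 8 in the scaled addressing modes below.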

define void @PR20134(i32* %a, i32 %i) {
; CHECK-LABEL: PR20134:
; CHECK: # BB#0:
; CHECK-NEXT: movslq %esi, %rax
; CHECK-NEXT: movl 4(%rdi,%rax,4), %ecx
; CHECK-NEXT: addl 8(%rdi,%rax,4), %ecx
; CHECK-NEXT: movl %ecx, (%rdi,%rax,4)
; CHECK-NEXT: retq

  %add1 = add nsw i32 %i, 1
  %idx1 = sext i32 %add1 to i64
  %gep1 = getelementptr i32, i32* %a, i64 %idx1
  %load1 = load i32, i32* %gep1, align 4

  %add2 = add nsw i32 %i, 2
  %idx2 = sext i32 %add2 to i64
  %gep2 = getelementptr i32, i32* %a, i64 %idx2
  %load2 = load i32, i32* %gep2, align 4

  %add3 = add i32 %load1, %load2
  %idx3 = sext i32 %i to i64
  %gep3 = getelementptr i32, i32* %a, i64 %idx3
  store i32 %add3, i32* %gep3, align 4
  ret void
}