; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu | FileCheck %s

; Check that multiple instances of 64-bit constants encodable as
; 32-bit immediates are merged for code size savings.
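;
; As a rough sketch of the savings (byte counts assume the usual x86-64
; encodings): storing a sign-extended 32-bit immediate directly, e.g.
; "movq $-1, g1(%rip)", costs 11 bytes (REX.W + C7 + ModRM + disp32 + imm32),
; while "movq $-1, %rax" is 7 bytes and each reuse "movq %rax, g1(%rip)" is
; 7 bytes. With two or more uses of the same constant, materializing it once
; in a register comes out smaller: 7 + 2*7 = 21 bytes versus 2*11 = 22.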

@g1 = common global i64 0, align 8
@g2 = common global i64 0, align 8
@g3 = common global i64 0, align 8
@g4 = common global i64 0, align 8

; Immediates with multiple users should not be pulled into instructions when
; optimizing for code size.
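; Here, -1 has three users (the store to @g1, the icmp, and the shl), so the
; intent is that it be materialized once rather than folded as a 4-byte
; immediate into each instruction (a sketch of the rationale; the CHECK lines
; below record the actual output at this revision).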
define void @imm_multiple_users(i64 %l1, i64 %l2, i64 %l3, i64 %l4) optsize {
; CHECK-LABEL: imm_multiple_users:
; CHECK:       # BB#0:
; CHECK-NEXT:    movq $-1, {{.*}}(%rip)
; CHECK-NEXT:    cmpq $-1, %rdx
; CHECK-NEXT:    cmovneq %rsi, %rdi
; CHECK-NEXT:    movq %rdi, {{.*}}(%rip)
; CHECK-NEXT:    movq $-1, %rax
; CHECK-NEXT:    # kill: %CL<def> %CL<kill> %RCX<kill>
; CHECK-NEXT:    shlq %cl, %rax
; CHECK-NEXT:    movq %rax, {{.*}}(%rip)
; CHECK-NEXT:    movq $0, {{.*}}(%rip)
; CHECK-NEXT:    retq
;
  store i64 -1, i64* @g1, align 8
  %cmp = icmp eq i64 %l3, -1
  %sel = select i1 %cmp, i64 %l1, i64 %l2
  store i64 %sel, i64* @g2, align 8
  %and = and i64 %l4, 63
  %shl = shl i64 -1, %and
  store i64 %shl, i64* @g3, align 8
  store i64 0, i64* @g4, align 8
  ret void
}

declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1)

; Inlined memsets requiring multiple same-sized stores should be lowered using
; the register, rather than immediate, form of stores when optimizing for
; code size.
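; For the 15-byte memset below, the inlined lowering is two overlapping 8-byte
; stores covering [0,8) and [7,15). As a rough byte count, the immediate forms
; "movq $0, (%rdi)" and "movq $0, 7(%rdi)" are 7 and 8 bytes, while zeroing a
; register once ("xorl %eax, %eax", 2 bytes) and storing it twice (3 and 4
; bytes) would total 9 bytes.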
define void @memset_zero(i8* noalias nocapture %D) optsize {
; CHECK-LABEL: memset_zero:
; CHECK:       # BB#0:
; CHECK-NEXT:    movq $0, 7(%rdi)
; CHECK-NEXT:    movq $0, (%rdi)
; CHECK-NEXT:    retq
;
  tail call void @llvm.memset.p0i8.i64(i8* %D, i8 0, i64 15, i32 1, i1 false)
  ret void
}