; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu | FileCheck %s

; Check that multiple instances of 64-bit constants encodable as
; 32-bit immediates are merged for code size savings.

; Immediates with multiple users should not be pulled into instructions when
; optimizing for code size.
define i1 @imm_multiple_users(i64 %a, i64* %b) optsize {
; CHECK-LABEL: imm_multiple_users:
; CHECK:       # BB#0:
; CHECK-NEXT:    movq $-1, %rax
; CHECK-NEXT:    movq %rax, (%rsi)
; CHECK-NEXT:    cmpq %rax, %rdi
; CHECK-NEXT:    sete %al
; CHECK-NEXT:    retq
;
  store i64 -1, i64* %b, align 8
  %cmp = icmp eq i64 %a, -1
  ret i1 %cmp
}
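
; A sketch of the size reasoning (instruction encodings assumed from the
; x86-64 manual, not re-measured here): -1 is encodable as a sign-extended
; imm32, so without the merging each user instruction would carry its own
; embedded immediate payload, e.g.:
;
;   movq $-1, (%rsi)   ; store with an embedded 4-byte imm32
;   cmpq $-1, %rdi     ; compare with an embedded immediate
;
; Loading the constant into %rax once lets both the store and the compare use
; shorter register-operand encodings, a net win once an imm32-encodable
; constant has multiple users.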

declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1)
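; (In this era's form of the intrinsic, the i32 argument is the alignment and
; the trailing i1 is the volatile flag; later LLVM releases dropped the
; alignment argument in favor of parameter attributes.)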

; Inlined memsets requiring multiple same-sized stores should be lowered using
; the register form of stores, rather than the immediate form, when optimizing
; for code size.
define void @memset_zero(i8* noalias nocapture %D) optsize {
; CHECK-LABEL: memset_zero:
; CHECK:       # BB#0:
; CHECK-NEXT:    xorl %eax, %eax
; CHECK-NEXT:    movq %rax, 7(%rdi)
; CHECK-NEXT:    movq %rax, (%rdi)
; CHECK-NEXT:    retq
;
  tail call void @llvm.memset.p0i8.i64(i8* %D, i8 0, i64 15, i32 1, i1 false)
  ret void
}
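
; A sketch of the lowering (instruction lengths assumed from standard x86-64
; encodings, not re-measured here): the 15-byte memset is covered by two
; overlapping 8-byte stores, bytes 0-7 via movq %rax, (%rdi) and bytes 7-14
; via movq %rax, 7(%rdi). With zero held in %rax (set up once by the short
; xorl %eax, %eax idiom), each store uses the register form; the immediate
; form, movq $0, 7(%rdi), would embed a 4-byte imm32 in every store
; instruction.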