; RUN: opt < %s -sroa -S | FileCheck %s
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n8:16:32:64"

declare void @llvm.memcpy.p0i8.p0i8.i32(i8*, i8*, i32, i32, i1)

define void @test1({ i8, i8 }* %a, { i8, i8 }* %b) {
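; Test that the memcpys through the over-aligned alloca are rewritten into
; element-wise loads and stores which carry align 16 for the element at
; offset 0 and align 1 for the element at offset 1.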
; CHECK: @test1
; CHECK: %[[gep_a0:.*]] = getelementptr inbounds { i8, i8 }* %a, i64 0, i32 0
; CHECK: %[[a0:.*]] = load i8* %[[gep_a0]], align 16
; CHECK: %[[gep_a1:.*]] = getelementptr inbounds { i8, i8 }* %a, i64 0, i32 1
; CHECK: %[[a1:.*]] = load i8* %[[gep_a1]], align 1
; CHECK: %[[gep_b0:.*]] = getelementptr inbounds { i8, i8 }* %b, i64 0, i32 0
; CHECK: store i8 %[[a0]], i8* %[[gep_b0]], align 16
; CHECK: %[[gep_b1:.*]] = getelementptr inbounds { i8, i8 }* %b, i64 0, i32 1
; CHECK: store i8 %[[a1]], i8* %[[gep_b1]], align 1
; CHECK: ret void

entry:
  %alloca = alloca { i8, i8 }, align 16
  %gep_a = getelementptr { i8, i8 }* %a, i32 0, i32 0
  %gep_alloca = getelementptr { i8, i8 }* %alloca, i32 0, i32 0
  %gep_b = getelementptr { i8, i8 }* %b, i32 0, i32 0

  store i8 420, i8* %gep_alloca, align 16

  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %gep_alloca, i8* %gep_a, i32 2, i32 16, i1 false)
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %gep_b, i8* %gep_alloca, i32 2, i32 16, i1 false)
  ret void
}

define void @test2() {
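; Test that loads and stores rewritten against the new, smaller alloca get
; their alignment recomputed: the i8 accesses below start out with align 2 but
; are checked to end up with align 1.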
; CHECK: @test2
; CHECK: alloca i16, align 2
; CHECK: load i8* %{{.*}}, align 1
; CHECK: store i8 42, i8* %{{.*}}, align 1
; CHECK: ret void

entry:
  %a = alloca { i8, i8, i8, i8 }, align 2
  %gep1 = getelementptr { i8, i8, i8, i8 }* %a, i32 0, i32 1
  %cast1 = bitcast i8* %gep1 to i16*
  store volatile i16 0, i16* %cast1
  %gep2 = getelementptr { i8, i8, i8, i8 }* %a, i32 0, i32 2
  %result = load i8* %gep2, align 2
  store i8 42, i8* %gep2, align 2
  ret void
}

define void @PR13920(<2 x i64>* %a, i16* %b) {
; Test that alignments on memcpy intrinsics get propagated to loads and stores.
; CHECK: @PR13920
; CHECK: load <2 x i64>* %a, align 2
; CHECK: store <2 x i64> {{.*}}, <2 x i64>* {{.*}}, align 2
; CHECK: ret void

entry:
  %aa = alloca <2 x i64>, align 16
  %aptr = bitcast <2 x i64>* %a to i8*
  %aaptr = bitcast <2 x i64>* %aa to i8*
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %aaptr, i8* %aptr, i32 16, i32 2, i1 false)
  %bptr = bitcast i16* %b to i8*
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %bptr, i8* %aaptr, i32 16, i32 2, i1 false)
  ret void
}