; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s

declare void @llvm.AMDGPU.barrier.local() noduplicate nounwind
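
; The noduplicate barrier call is presumably here so the stored value cannot
; simply be forwarded to the load; both the indirect write and the indirect
; read have to survive into the generated code.

; Dynamic indexing into a private (scratch) f64 alloca: the movrel
; instructions move a single 32-bit VGPR, so the 64-bit access is expected
; to be split into two V_MOVRELD_B32 writes and two V_MOVRELS_B32 reads.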
; SI-LABEL: @private_access_f64_alloca:
; SI: V_MOVRELD_B32_e32
; SI: V_MOVRELD_B32_e32
; SI: V_MOVRELS_B32_e32
; SI: V_MOVRELS_B32_e32
define void @private_access_f64_alloca(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in, i32 %b) nounwind {
  %val = load double addrspace(1)* %in, align 8
  %array = alloca double, i32 16, align 8
  %ptr = getelementptr double* %array, i32 %b
  store double %val, double* %ptr, align 8
  call void @llvm.AMDGPU.barrier.local() noduplicate nounwind
  %result = load double* %ptr, align 8
  store double %result, double addrspace(1)* %out, align 8
  ret void
}
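
; Same pattern for a v2f64 element: 128 bits moved 32 bits at a time, so
; four V_MOVRELD_B32 writes and four V_MOVRELS_B32 reads are expected.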
; SI-LABEL: @private_access_v2f64_alloca:
; SI: V_MOVRELD_B32_e32
; SI: V_MOVRELD_B32_e32
; SI: V_MOVRELD_B32_e32
; SI: V_MOVRELD_B32_e32
; SI: V_MOVRELS_B32_e32
; SI: V_MOVRELS_B32_e32
; SI: V_MOVRELS_B32_e32
; SI: V_MOVRELS_B32_e32
define void @private_access_v2f64_alloca(<2 x double> addrspace(1)* noalias %out, <2 x double> addrspace(1)* noalias %in, i32 %b) nounwind {
  %val = load <2 x double> addrspace(1)* %in, align 16
  %array = alloca <2 x double>, i32 16, align 16
  %ptr = getelementptr <2 x double>* %array, i32 %b
  store <2 x double> %val, <2 x double>* %ptr, align 16
  call void @llvm.AMDGPU.barrier.local() noduplicate nounwind
  %result = load <2 x double>* %ptr, align 16
  store <2 x double> %result, <2 x double> addrspace(1)* %out, align 16
  ret void
}
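
; i64 should be handled exactly like f64: the element type makes no
; difference once the access is split into 32-bit halves.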
; SI-LABEL: @private_access_i64_alloca:
; SI: V_MOVRELD_B32_e32
; SI: V_MOVRELD_B32_e32
; SI: V_MOVRELS_B32_e32
; SI: V_MOVRELS_B32_e32
define void @private_access_i64_alloca(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in, i32 %b) nounwind {
  %val = load i64 addrspace(1)* %in, align 8
  %array = alloca i64, i32 16, align 8
  %ptr = getelementptr i64* %array, i32 %b
  store i64 %val, i64* %ptr, align 8
  call void @llvm.AMDGPU.barrier.local() noduplicate nounwind
  %result = load i64* %ptr, align 8
  store i64 %result, i64 addrspace(1)* %out, align 8
  ret void
}
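
; v2i64 likewise matches v2f64: four movreld/movrels pairs for the 128-bit
; element.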
; SI-LABEL: @private_access_v2i64_alloca:
; SI: V_MOVRELD_B32_e32
; SI: V_MOVRELD_B32_e32
; SI: V_MOVRELD_B32_e32
; SI: V_MOVRELD_B32_e32
; SI: V_MOVRELS_B32_e32
; SI: V_MOVRELS_B32_e32
; SI: V_MOVRELS_B32_e32
; SI: V_MOVRELS_B32_e32
define void @private_access_v2i64_alloca(<2 x i64> addrspace(1)* noalias %out, <2 x i64> addrspace(1)* noalias %in, i32 %b) nounwind {
  %val = load <2 x i64> addrspace(1)* %in, align 16
  %array = alloca <2 x i64>, i32 16, align 16
  %ptr = getelementptr <2 x i64>* %array, i32 %b
  store <2 x i64> %val, <2 x i64>* %ptr, align 16
  call void @llvm.AMDGPU.barrier.local() noduplicate nounwind
  %result = load <2 x i64>* %ptr, align 16
  store <2 x i64> %result, <2 x i64> addrspace(1)* %out, align 16
  ret void
}