; Test that 64-bit (and 2 x 64-bit) private-memory accesses through an alloca
; are lowered correctly on SI, both with promote-alloca disabled (scratch
; buffer instructions) and enabled (LDS instructions).
; RUN: llc -march=r600 -mcpu=SI -mattr=-promote-alloca -verify-machineinstrs < %s | FileCheck -check-prefix=SI-ALLOCA -check-prefix=SI %s
; RUN: llc -march=r600 -mcpu=SI -mattr=+promote-alloca -verify-machineinstrs < %s | FileCheck -check-prefix=SI-PROMOTE -check-prefix=SI %s


; Workgroup barrier; noduplicate keeps the call from being cloned, so the
; store/load pair around it cannot be folded away.
declare void @llvm.AMDGPU.barrier.local() noduplicate nounwind
|  | 6 |  | 
; Scalar f64 round-trip through a private alloca, indexed by a dynamic value %b.
; SI-LABEL: @private_access_f64_alloca:

; Without promote-alloca the access stays in scratch: one 64-bit buffer
; store followed by a 64-bit buffer load.
; SI-ALLOCA: BUFFER_STORE_DWORDX2
; SI-ALLOCA: BUFFER_LOAD_DWORDX2

; With promote-alloca the array is moved to LDS and accessed with 64-bit
; DS operations.
; SI-PROMOTE: DS_WRITE_B64
; SI-PROMOTE: DS_READ_B64
define void @private_access_f64_alloca(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in, i32 %b) nounwind {
%val = load double addrspace(1)* %in, align 8
%array = alloca double, i32 16, align 8
%ptr = getelementptr double* %array, i32 %b
store double %val, double* %ptr, align 8
call void @llvm.AMDGPU.barrier.local() noduplicate nounwind
%result = load double* %ptr, align 8
store double %result, double addrspace(1)* %out, align 8
ret void
}
|  | 24 |  | 
; <2 x double> round-trip through a private alloca, indexed by a dynamic %b.
; SI-LABEL: @private_access_v2f64_alloca:

; Scratch path: the 128-bit vector uses a single x4 buffer store and load.
; SI-ALLOCA: BUFFER_STORE_DWORDX4
; SI-ALLOCA: BUFFER_LOAD_DWORDX4

; LDS path: the vector is split into four 32-bit DS writes and four
; 32-bit DS reads rather than wider DS operations.
; SI-PROMOTE: DS_WRITE_B32
; SI-PROMOTE: DS_WRITE_B32
; SI-PROMOTE: DS_WRITE_B32
; SI-PROMOTE: DS_WRITE_B32
; SI-PROMOTE: DS_READ_B32
; SI-PROMOTE: DS_READ_B32
; SI-PROMOTE: DS_READ_B32
; SI-PROMOTE: DS_READ_B32
define void @private_access_v2f64_alloca(<2 x double> addrspace(1)* noalias %out, <2 x double> addrspace(1)* noalias %in, i32 %b) nounwind {
%val = load <2 x double> addrspace(1)* %in, align 16
%array = alloca <2 x double>, i32 16, align 16
%ptr = getelementptr <2 x double>* %array, i32 %b
store <2 x double> %val, <2 x double>* %ptr, align 16
call void @llvm.AMDGPU.barrier.local() noduplicate nounwind
%result = load <2 x double>* %ptr, align 16
store <2 x double> %result, <2 x double> addrspace(1)* %out, align 16
ret void
}
| Matt Arsenault | ad41d7b | 2014-03-24 17:50:46 +0000 | [diff] [blame] | 48 |  | 
|  | 49 | ; SI-LABEL: @private_access_i64_alloca: | 
| Matt Arsenault | 7d5e2cb | 2014-07-13 02:46:17 +0000 | [diff] [blame] | 50 |  | 
| Tom Stellard | b02094e | 2014-07-21 15:45:01 +0000 | [diff] [blame] | 51 | ; SI-ALLOCA: BUFFER_STORE_DWORDX2 | 
| Tom Stellard | e812f2f | 2014-07-21 15:45:06 +0000 | [diff] [blame] | 52 | ; SI-ALLOCA: BUFFER_LOAD_DWORDX2 | 
| Matt Arsenault | 7d5e2cb | 2014-07-13 02:46:17 +0000 | [diff] [blame] | 53 |  | 
|  | 54 | ; SI-PROMOTE: DS_WRITE_B64 | 
|  | 55 | ; SI-PROMOTE: DS_READ_B64 | 
| Matt Arsenault | ad41d7b | 2014-03-24 17:50:46 +0000 | [diff] [blame] | 56 | define void @private_access_i64_alloca(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in, i32 %b) nounwind { | 
|  | 57 | %val = load i64 addrspace(1)* %in, align 8 | 
|  | 58 | %array = alloca i64, i32 16, align 8 | 
|  | 59 | %ptr = getelementptr i64* %array, i32 %b | 
|  | 60 | store i64 %val, i64* %ptr, align 8 | 
|  | 61 | call void @llvm.AMDGPU.barrier.local() noduplicate nounwind | 
|  | 62 | %result = load i64* %ptr, align 8 | 
|  | 63 | store i64 %result, i64 addrspace(1)* %out, align 8 | 
|  | 64 | ret void | 
|  | 65 | } | 
|  | 66 |  | 
|  | 67 | ; SI-LABEL: @private_access_v2i64_alloca: | 
| Matt Arsenault | 7d5e2cb | 2014-07-13 02:46:17 +0000 | [diff] [blame] | 68 |  | 
| Tom Stellard | b02094e | 2014-07-21 15:45:01 +0000 | [diff] [blame] | 69 | ; SI-ALLOCA: BUFFER_STORE_DWORDX4 | 
| Tom Stellard | e812f2f | 2014-07-21 15:45:06 +0000 | [diff] [blame] | 70 | ; SI-ALLOCA: BUFFER_LOAD_DWORDX4 | 
| Matt Arsenault | 7d5e2cb | 2014-07-13 02:46:17 +0000 | [diff] [blame] | 71 |  | 
| Matt Arsenault | ca3976f | 2014-07-15 02:06:31 +0000 | [diff] [blame] | 72 | ; SI-PROMOTE: DS_WRITE_B32 | 
|  | 73 | ; SI-PROMOTE: DS_WRITE_B32 | 
|  | 74 | ; SI-PROMOTE: DS_WRITE_B32 | 
|  | 75 | ; SI-PROMOTE: DS_WRITE_B32 | 
|  | 76 | ; SI-PROMOTE: DS_READ_B32 | 
|  | 77 | ; SI-PROMOTE: DS_READ_B32 | 
|  | 78 | ; SI-PROMOTE: DS_READ_B32 | 
|  | 79 | ; SI-PROMOTE: DS_READ_B32 | 
| Matt Arsenault | ad41d7b | 2014-03-24 17:50:46 +0000 | [diff] [blame] | 80 | define void @private_access_v2i64_alloca(<2 x i64> addrspace(1)* noalias %out, <2 x i64> addrspace(1)* noalias %in, i32 %b) nounwind { | 
|  | 81 | %val = load <2 x i64> addrspace(1)* %in, align 16 | 
|  | 82 | %array = alloca <2 x i64>, i32 16, align 16 | 
|  | 83 | %ptr = getelementptr <2 x i64>* %array, i32 %b | 
|  | 84 | store <2 x i64> %val, <2 x i64>* %ptr, align 16 | 
|  | 85 | call void @llvm.AMDGPU.barrier.local() noduplicate nounwind | 
|  | 86 | %result = load <2 x i64>* %ptr, align 16 | 
|  | 87 | store <2 x i64> %result, <2 x i64> addrspace(1)* %out, align 16 | 
|  | 88 | ret void | 
|  | 89 | } |