; RUN: llc -march=amdgcn -mcpu=bonaire -show-mc-encoding < %s | FileCheck -check-prefix=GCN -check-prefix=CI -check-prefix=ALL %s
; RUN: llc -march=amdgcn -mcpu=carrizo --show-mc-encoding < %s | FileCheck -check-prefix=GCN -check-prefix=VI -check-prefix=ALL %s
; RUN: llc -march=amdgcn -mcpu=gfx900 --show-mc-encoding < %s | FileCheck -check-prefix=GCN -check-prefix=GFX9 -check-prefix=ALL %s
; RUN: llc -march=amdgcn -mcpu=bonaire -mtriple=amdgcn-unknown-amdhsa < %s -mattr=-flat-for-global | FileCheck -check-prefix=GCNHSA -check-prefix=CIHSA -check-prefix=ALL %s
; RUN: llc -march=amdgcn -mcpu=carrizo -mtriple=amdgcn-unknown-amdhsa -mattr=-flat-for-global < %s | FileCheck -check-prefix=GCNHSA -check-prefix=VIHSA -check-prefix=ALL %s

; FIXME: align on alloca seems to be ignored for private_segment_alignment

; ALL-LABEL: {{^}}large_alloca_compute_shader:

; GCN-DAG: s_mov_b32 s{{[0-9]+}}, SCRATCH_RSRC_DWORD0
; GCN-DAG: ; fixup A - offset: 4, value: SCRATCH_RSRC_DWORD0
; GCN-DAG: s_mov_b32 s{{[0-9]+}}, SCRATCH_RSRC_DWORD1
; GCN-DAG: ; fixup A - offset: 4, value: SCRATCH_RSRC_DWORD1
; GCN-DAG: s_mov_b32 s{{[0-9]+}}, -1
; CI-DAG: s_mov_b32 s{{[0-9]+}}, 0xe8f000
; VI-DAG: s_mov_b32 s{{[0-9]+}}, 0xe80000
; GFX9-DAG: s_mov_b32 s{{[0-9]+}}, 0xe00000

; GCNHSA: .amd_kernel_code_t

; GCNHSA: enable_sgpr_private_segment_wave_byte_offset = 1
; GCNHSA: user_sgpr_count = 8
; GCNHSA: enable_sgpr_workgroup_id_x = 1
; GCNHSA: enable_sgpr_workgroup_id_y = 0
; GCNHSA: enable_sgpr_workgroup_id_z = 0
; GCNHSA: enable_sgpr_workgroup_info = 0
; GCNHSA: enable_vgpr_workitem_id = 0

; GCNHSA: enable_sgpr_private_segment_buffer = 1
; GCNHSA: enable_sgpr_dispatch_ptr = 0
; GCNHSA: enable_sgpr_queue_ptr = 0
; GCNHSA: enable_sgpr_kernarg_segment_ptr = 1
; GCNHSA: enable_sgpr_dispatch_id = 0
; GCNHSA: enable_sgpr_flat_scratch_init = 1
; GCNHSA: enable_sgpr_private_segment_size = 0
; GCNHSA: enable_sgpr_grid_workgroup_count_x = 0
; GCNHSA: enable_sgpr_grid_workgroup_count_y = 0
; GCNHSA: enable_sgpr_grid_workgroup_count_z = 0
; GCNHSA: workitem_private_segment_byte_size = 32772
; GCNHSA: private_segment_alignment = 4
; GCNHSA: .end_amd_kernel_code_t


; GCNHSA: buffer_store_dword {{v[0-9]+}}, {{v[0-9]+}}, s[0:3], s9 offen
; GCNHSA: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, s[0:3], s9 offen

; Scratch size = alloca size + emergency stack slot
; ALL: ; ScratchSize: 32772
; Kernel body: a 32 KiB (8192 x i32) private-memory (addrspace(5)) alloca,
; with volatile accesses at a constant index (8191) and a dynamic index (%y)
; so the scratch buffer and its addressing cannot be optimized away.
define amdgpu_kernel void @large_alloca_compute_shader(i32 %x, i32 %y) #0 {
  %large = alloca [8192 x i32], align 4, addrspace(5)
  %gep = getelementptr [8192 x i32], [8192 x i32] addrspace(5)* %large, i32 0, i32 8191
  store volatile i32 %x, i32 addrspace(5)* %gep
  %gep1 = getelementptr [8192 x i32], [8192 x i32] addrspace(5)* %large, i32 0, i32 %y
  %val = load volatile i32, i32 addrspace(5)* %gep1
  store volatile i32 %val, i32 addrspace(1)* undef
  ret void
}

attributes #0 = { nounwind }