blob: 2a120bdd57e99a2679f86dbf9d7af0ec057d674f [file] [log] [blame]
; RUN: llc -march=amdgcn -mcpu=bonaire < %s | FileCheck -check-prefix=GCN -check-prefix=CI -check-prefix=ALL %s
; RUN: llc -march=amdgcn -mcpu=carrizo < %s | FileCheck -check-prefix=GCN -check-prefix=VI -check-prefix=ALL %s
; RUN: llc -march=amdgcn -mcpu=bonaire -mtriple=amdgcn-unknown-amdhsa < %s -mattr=-flat-for-global | FileCheck -check-prefix=GCNHSA -check-prefix=CIHSA -check-prefix=ALL %s
; RUN: llc -march=amdgcn -mcpu=carrizo -mtriple=amdgcn-unknown-amdhsa -mattr=-flat-for-global < %s | FileCheck -check-prefix=GCNHSA -check-prefix=VIHSA -check-prefix=ALL %s

; FIXME: align on alloca seems to be ignored for private_segment_alignment

; ALL-LABEL: {{^}}large_alloca_compute_shader:

; GCN: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
; GCN: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
; GCN: s_mov_b32 s10, -1
; CI: s_mov_b32 s11, 0x98f000
; VI: s_mov_b32 s11, 0x980000


; GCNHSA: .amd_kernel_code_t

; GCNHSA: compute_pgm_rsrc2_scratch_en = 1
; GCNHSA: compute_pgm_rsrc2_user_sgpr = 8
; GCNHSA: compute_pgm_rsrc2_tgid_x_en = 1
; GCNHSA: compute_pgm_rsrc2_tgid_y_en = 0
; GCNHSA: compute_pgm_rsrc2_tgid_z_en = 0
; GCNHSA: compute_pgm_rsrc2_tg_size_en = 0
; GCNHSA: compute_pgm_rsrc2_tidig_comp_cnt = 0

; GCNHSA: enable_sgpr_private_segment_buffer = 1
; GCNHSA: enable_sgpr_dispatch_ptr = 0
; GCNHSA: enable_sgpr_queue_ptr = 0
; GCNHSA: enable_sgpr_kernarg_segment_ptr = 1
; GCNHSA: enable_sgpr_dispatch_id = 0
; GCNHSA: enable_sgpr_flat_scratch_init = 1
; GCNHSA: enable_sgpr_private_segment_size = 0
; GCNHSA: enable_sgpr_grid_workgroup_count_x = 0
; GCNHSA: enable_sgpr_grid_workgroup_count_y = 0
; GCNHSA: enable_sgpr_grid_workgroup_count_z = 0
; GCNHSA: workitem_private_segment_byte_size = 32772
; GCNHSA: private_segment_alignment = 4
; GCNHSA: .end_amd_kernel_code_t


; GCNHSA: buffer_store_dword {{v[0-9]+}}, {{v[0-9]+}}, s[0:3], s9 offen
; GCNHSA: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, s[0:3], s9 offen

; Scratch size = alloca size + emergency stack slot
; ALL: ; ScratchSize: 32772
; Allocate a 32 KiB (8192 x i32) stack object to force scratch-buffer setup.
; The volatile store/load pair and the dynamic index %y keep the alloca live
; and prevent the accesses from being folded away, so the backend must emit
; the scratch resource descriptor and buffer_store/load checked above.
define void @large_alloca_compute_shader(i32 %x, i32 %y) #0 {
  %large = alloca [8192 x i32], align 4
  ; Store to the last element so the full 32768-byte extent is addressable.
  %gep = getelementptr [8192 x i32], [8192 x i32]* %large, i32 0, i32 8191
  store volatile i32 %x, i32* %gep
  ; Dynamically-indexed load: offset is not a compile-time constant.
  %gep1 = getelementptr [8192 x i32], [8192 x i32]* %large, i32 0, i32 %y
  %val = load volatile i32, i32* %gep1
  ; Volatile store to global memory keeps the loaded value observable.
  store volatile i32 %val, i32 addrspace(1)* undef
  ret void
}

attributes #0 = { nounwind }