; RUN: llc -mattr=+code-object-v3 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx700 -enable-misched=0 -filetype=obj -o - < %s | llvm-readobj -elf-output-style=GNU -notes | FileCheck --check-prefix=CHECK --check-prefix=GFX700 --check-prefix=NOTES %s
; RUN: llc -mattr=+code-object-v3 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx803 -enable-misched=0 -filetype=obj -o - < %s | llvm-readobj -elf-output-style=GNU -notes | FileCheck --check-prefix=CHECK --check-prefix=GFX803 --check-prefix=NOTES %s
; RUN: llc -mattr=+code-object-v3 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -enable-misched=0 -filetype=obj -o - < %s | llvm-readobj -elf-output-style=GNU -notes | FileCheck --check-prefix=CHECK --check-prefix=GFX900 --check-prefix=NOTES %s

| 5 | @var = addrspace(1) global float 0.0 |

; CHECK: ---
; CHECK: amdhsa.kernels:

; CHECK: - .max_flat_workgroup_size: 256
; CHECK: .kernarg_segment_size: 24
; CHECK: .private_segment_fixed_size: 0
; CHECK: .wavefront_size: 64
; CHECK: .symbol: test.kd
; CHECK: .name: test
; CHECK: .sgpr_count: 8
; CHECK: .kernarg_segment_align: 8
; CHECK: .vgpr_count: 6
; CHECK: .group_segment_fixed_size: 0
| 20 | define amdgpu_kernel void @test( |
| 21 | half addrspace(1)* %r, |
| 22 | half addrspace(1)* %a, |
| 23 | half addrspace(1)* %b) { |
| 24 | entry: |
| 25 | %a.val = load half, half addrspace(1)* %a |
| 26 | %b.val = load half, half addrspace(1)* %b |
| 27 | %r.val = fadd half %a.val, %b.val |
| 28 | store half %r.val, half addrspace(1)* %r |
| 29 | ret void |
| 30 | } |

; CHECK: .symbol: num_spilled_sgprs.kd
; CHECK: .name: num_spilled_sgprs
; GFX700: .sgpr_spill_count: 40
; GFX803: .sgpr_spill_count: 24
; GFX900: .sgpr_spill_count: 24
| 37 | define amdgpu_kernel void @num_spilled_sgprs( |
| 38 | i32 addrspace(1)* %out0, i32 addrspace(1)* %out1, [8 x i32], |
| 39 | i32 addrspace(1)* %out2, i32 addrspace(1)* %out3, [8 x i32], |
| 40 | i32 addrspace(1)* %out4, i32 addrspace(1)* %out5, [8 x i32], |
| 41 | i32 addrspace(1)* %out6, i32 addrspace(1)* %out7, [8 x i32], |
| 42 | i32 addrspace(1)* %out8, i32 addrspace(1)* %out9, [8 x i32], |
| 43 | i32 addrspace(1)* %outa, i32 addrspace(1)* %outb, [8 x i32], |
| 44 | i32 addrspace(1)* %outc, i32 addrspace(1)* %outd, [8 x i32], |
| 45 | i32 addrspace(1)* %oute, i32 addrspace(1)* %outf, [8 x i32], |
| 46 | i32 %in0, i32 %in1, i32 %in2, i32 %in3, [8 x i32], |
| 47 | i32 %in4, i32 %in5, i32 %in6, i32 %in7, [8 x i32], |
| 48 | i32 %in8, i32 %in9, i32 %ina, i32 %inb, [8 x i32], |
| 49 | i32 %inc, i32 %ind, i32 %ine, i32 %inf) #0 { |
| 50 | entry: |
| 51 | store i32 %in0, i32 addrspace(1)* %out0 |
| 52 | store i32 %in1, i32 addrspace(1)* %out1 |
| 53 | store i32 %in2, i32 addrspace(1)* %out2 |
| 54 | store i32 %in3, i32 addrspace(1)* %out3 |
| 55 | store i32 %in4, i32 addrspace(1)* %out4 |
| 56 | store i32 %in5, i32 addrspace(1)* %out5 |
| 57 | store i32 %in6, i32 addrspace(1)* %out6 |
| 58 | store i32 %in7, i32 addrspace(1)* %out7 |
| 59 | store i32 %in8, i32 addrspace(1)* %out8 |
| 60 | store i32 %in9, i32 addrspace(1)* %out9 |
| 61 | store i32 %ina, i32 addrspace(1)* %outa |
| 62 | store i32 %inb, i32 addrspace(1)* %outb |
| 63 | store i32 %inc, i32 addrspace(1)* %outc |
| 64 | store i32 %ind, i32 addrspace(1)* %outd |
| 65 | store i32 %ine, i32 addrspace(1)* %oute |
| 66 | store i32 %inf, i32 addrspace(1)* %outf |
| 67 | ret void |
| 68 | } |

; CHECK: .symbol: num_spilled_vgprs.kd
; CHECK: .name: num_spilled_vgprs
; CHECK: .vgpr_spill_count: 14
| 73 | define amdgpu_kernel void @num_spilled_vgprs() #1 { |
| 74 | %val0 = load volatile float, float addrspace(1)* @var |
| 75 | %val1 = load volatile float, float addrspace(1)* @var |
| 76 | %val2 = load volatile float, float addrspace(1)* @var |
| 77 | %val3 = load volatile float, float addrspace(1)* @var |
| 78 | %val4 = load volatile float, float addrspace(1)* @var |
| 79 | %val5 = load volatile float, float addrspace(1)* @var |
| 80 | %val6 = load volatile float, float addrspace(1)* @var |
| 81 | %val7 = load volatile float, float addrspace(1)* @var |
| 82 | %val8 = load volatile float, float addrspace(1)* @var |
| 83 | %val9 = load volatile float, float addrspace(1)* @var |
| 84 | %val10 = load volatile float, float addrspace(1)* @var |
| 85 | %val11 = load volatile float, float addrspace(1)* @var |
| 86 | %val12 = load volatile float, float addrspace(1)* @var |
| 87 | %val13 = load volatile float, float addrspace(1)* @var |
| 88 | %val14 = load volatile float, float addrspace(1)* @var |
| 89 | %val15 = load volatile float, float addrspace(1)* @var |
| 90 | %val16 = load volatile float, float addrspace(1)* @var |
| 91 | %val17 = load volatile float, float addrspace(1)* @var |
| 92 | %val18 = load volatile float, float addrspace(1)* @var |
| 93 | %val19 = load volatile float, float addrspace(1)* @var |
| 94 | %val20 = load volatile float, float addrspace(1)* @var |
| 95 | %val21 = load volatile float, float addrspace(1)* @var |
| 96 | %val22 = load volatile float, float addrspace(1)* @var |
| 97 | %val23 = load volatile float, float addrspace(1)* @var |
| 98 | %val24 = load volatile float, float addrspace(1)* @var |
| 99 | %val25 = load volatile float, float addrspace(1)* @var |
| 100 | %val26 = load volatile float, float addrspace(1)* @var |
| 101 | %val27 = load volatile float, float addrspace(1)* @var |
| 102 | %val28 = load volatile float, float addrspace(1)* @var |
| 103 | %val29 = load volatile float, float addrspace(1)* @var |
| 104 | %val30 = load volatile float, float addrspace(1)* @var |
| 105 | |
| 106 | store volatile float %val0, float addrspace(1)* @var |
| 107 | store volatile float %val1, float addrspace(1)* @var |
| 108 | store volatile float %val2, float addrspace(1)* @var |
| 109 | store volatile float %val3, float addrspace(1)* @var |
| 110 | store volatile float %val4, float addrspace(1)* @var |
| 111 | store volatile float %val5, float addrspace(1)* @var |
| 112 | store volatile float %val6, float addrspace(1)* @var |
| 113 | store volatile float %val7, float addrspace(1)* @var |
| 114 | store volatile float %val8, float addrspace(1)* @var |
| 115 | store volatile float %val9, float addrspace(1)* @var |
| 116 | store volatile float %val10, float addrspace(1)* @var |
| 117 | store volatile float %val11, float addrspace(1)* @var |
| 118 | store volatile float %val12, float addrspace(1)* @var |
| 119 | store volatile float %val13, float addrspace(1)* @var |
| 120 | store volatile float %val14, float addrspace(1)* @var |
| 121 | store volatile float %val15, float addrspace(1)* @var |
| 122 | store volatile float %val16, float addrspace(1)* @var |
| 123 | store volatile float %val17, float addrspace(1)* @var |
| 124 | store volatile float %val18, float addrspace(1)* @var |
| 125 | store volatile float %val19, float addrspace(1)* @var |
| 126 | store volatile float %val20, float addrspace(1)* @var |
| 127 | store volatile float %val21, float addrspace(1)* @var |
| 128 | store volatile float %val22, float addrspace(1)* @var |
| 129 | store volatile float %val23, float addrspace(1)* @var |
| 130 | store volatile float %val24, float addrspace(1)* @var |
| 131 | store volatile float %val25, float addrspace(1)* @var |
| 132 | store volatile float %val26, float addrspace(1)* @var |
| 133 | store volatile float %val27, float addrspace(1)* @var |
| 134 | store volatile float %val28, float addrspace(1)* @var |
| 135 | store volatile float %val29, float addrspace(1)* @var |
| 136 | store volatile float %val30, float addrspace(1)* @var |
| 137 | |
| 138 | ret void |
| 139 | } |

; CHECK: amdhsa.version:
; CHECK-NEXT: - 1
; CHECK-NEXT: - 0

| 145 | attributes #0 = { "amdgpu-num-sgpr"="14" } |
| 146 | attributes #1 = { "amdgpu-num-vgpr"="20" } |