/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "amdgpu_amdkfd.h"
#include "amd_shared.h"
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include <linux/module.h>

const struct kgd2kfd_calls *kgd2kfd;
bool (*kgd2kfd_init_p)(unsigned int, const struct kgd2kfd_calls**);

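/* VMIDs 8-15 (mask 0xFF00) are set aside for KFD compute queues;
 * amdgpu keeps the lower VMIDs for graphics.
 */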
static const unsigned int compute_vmid_bitmap = 0xFF00;

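/* Load-time initialization of the amdgpu<->amdkfd interface. Three build
 * configurations are handled below:
 *  - CONFIG_HSA_AMD_MODULE: amdkfd is a module, so its entry point must be
 *    resolved at runtime with symbol_request().
 *  - CONFIG_HSA_AMD: amdkfd is built in, kgd2kfd_init() is called directly.
 *  - neither: KFD support is compiled out and kgd2kfd stays NULL.
 */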
int amdgpu_amdkfd_init(void)
{
        int ret;

#if defined(CONFIG_HSA_AMD_MODULE)
        int (*kgd2kfd_init_p)(unsigned int, const struct kgd2kfd_calls**);

        kgd2kfd_init_p = symbol_request(kgd2kfd_init);

        if (kgd2kfd_init_p == NULL)
                return -ENOENT;

        ret = kgd2kfd_init_p(KFD_INTERFACE_VERSION, &kgd2kfd);
        if (ret) {
                symbol_put(kgd2kfd_init);
                kgd2kfd = NULL;
        }

#elif defined(CONFIG_HSA_AMD)

        ret = kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd);
        if (ret)
                kgd2kfd = NULL;

#else
        kgd2kfd = NULL;
        ret = -ENOENT;
#endif

#if defined(CONFIG_HSA_AMD_MODULE) || defined(CONFIG_HSA_AMD)
        amdgpu_amdkfd_gpuvm_init_mem_limits();
#endif

        return ret;
}

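/* Unload-time teardown: notify amdkfd and, if it was loaded as a module,
 * drop the reference taken by symbol_request().
 */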
void amdgpu_amdkfd_fini(void)
{
        if (kgd2kfd) {
                kgd2kfd->exit();
                symbol_put(kgd2kfd_init);
        }
}

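/* Pick the ASIC-generation specific kfd2kgd function table and let amdkfd
 * probe the device. adev->kfd stays NULL for unsupported ASICs.
 */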
void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
{
        const struct kfd2kgd_calls *kfd2kgd;

        if (!kgd2kfd)
                return;

        switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
        case CHIP_KAVERI:
        case CHIP_HAWAII:
                kfd2kgd = amdgpu_amdkfd_gfx_7_get_functions();
                break;
#endif
        case CHIP_CARRIZO:
        case CHIP_TONGA:
        case CHIP_FIJI:
        case CHIP_POLARIS10:
        case CHIP_POLARIS11:
                kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions();
                break;
        case CHIP_VEGA10:
        case CHIP_RAVEN:
                kfd2kgd = amdgpu_amdkfd_gfx_9_0_get_functions();
                break;
        default:
                dev_info(adev->dev, "kfd not supported on this ASIC\n");
                return;
        }

        adev->kfd = kgd2kfd->probe((struct kgd_dev *)adev,
                                   adev->pdev, kfd2kgd);
}

/**
 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
 * setup amdkfd
 *
 * @adev: amdgpu_device pointer
 * @aperture_base: output returning doorbell aperture base physical address
 * @aperture_size: output returning doorbell aperture size in bytes
 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
 *
 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
 * takes doorbells required for its own rings and reports the setup to amdkfd.
 * amdgpu reserved doorbells are at the start of the doorbell aperture.
 */
static void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
                                         phys_addr_t *aperture_base,
                                         size_t *aperture_size,
                                         size_t *start_offset)
{
        /*
         * The first num_doorbells are used by amdgpu.
         * amdkfd takes whatever's left in the aperture.
         */
        if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
                *aperture_base = adev->doorbell.base;
                *aperture_size = adev->doorbell.size;
                *start_offset = adev->doorbell.num_doorbells * sizeof(u32);
        } else {
                *aperture_base = 0;
                *aperture_size = 0;
                *start_offset = 0;
        }
}

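/* Describe the resources amdgpu shares with amdkfd (compute VMIDs, MEC
 * queues, doorbells, GPUVM range) and hand them to kgd2kfd->device_init().
 */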
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
{
        int i;
        int last_valid_bit;

        if (adev->kfd) {
                struct kgd2kfd_shared_resources gpu_resources = {
                        .compute_vmid_bitmap = compute_vmid_bitmap,
                        .num_pipe_per_mec = adev->gfx.mec.num_pipe_per_mec,
                        .num_queue_per_pipe = adev->gfx.mec.num_queue_per_pipe,
                        .gpuvm_size = min(adev->vm_manager.max_pfn
                                          << AMDGPU_GPU_PAGE_SHIFT,
                                          AMDGPU_VA_HOLE_START),
                        .drm_render_minor = adev->ddev->render->index
                };

                /* This is going to have a few of the MSBs set that we need to
                 * clear.
                 */
                bitmap_complement(gpu_resources.queue_bitmap,
                                  adev->gfx.mec.queue_bitmap,
                                  KGD_MAX_QUEUES);

                /* Remove the KIQ bit as well */
                if (adev->gfx.kiq.ring.ready)
                        clear_bit(amdgpu_gfx_queue_to_bit(adev,
                                          adev->gfx.kiq.ring.me - 1,
                                          adev->gfx.kiq.ring.pipe,
                                          adev->gfx.kiq.ring.queue),
                                  gpu_resources.queue_bitmap);

                /* According to linux/bitmap.h we shouldn't use bitmap_clear if
                 * nbits is not a compile-time constant.
                 */
                last_valid_bit = 1 /* only first MEC can have compute queues */
                                * adev->gfx.mec.num_pipe_per_mec
                                * adev->gfx.mec.num_queue_per_pipe;
                for (i = last_valid_bit; i < KGD_MAX_QUEUES; ++i)
                        clear_bit(i, gpu_resources.queue_bitmap);

                amdgpu_doorbell_get_kfd_info(adev,
                                &gpu_resources.doorbell_physical_address,
                                &gpu_resources.doorbell_aperture_size,
                                &gpu_resources.doorbell_start_offset);
                if (adev->asic_type >= CHIP_VEGA10) {
                        /* On SOC15 the BIF is involved in routing
                         * doorbells using the low 12 bits of the
                         * address. Communicate the assignments to
                         * KFD. KFD uses two doorbell pages per
                         * process in case of 64-bit doorbells so we
                         * can use each doorbell assignment twice.
                         */
                        gpu_resources.sdma_doorbell[0][0] =
                                AMDGPU_DOORBELL64_sDMA_ENGINE0;
                        gpu_resources.sdma_doorbell[0][1] =
                                AMDGPU_DOORBELL64_sDMA_ENGINE0 + 0x200;
                        gpu_resources.sdma_doorbell[1][0] =
                                AMDGPU_DOORBELL64_sDMA_ENGINE1;
                        gpu_resources.sdma_doorbell[1][1] =
                                AMDGPU_DOORBELL64_sDMA_ENGINE1 + 0x200;
                        /* Doorbells 0x0f0-0ff and 0x2f0-2ff are reserved for
                         * SDMA, IH and VCN. So don't use them for the CP.
                         */
                        gpu_resources.reserved_doorbell_mask = 0x1f0;
                        gpu_resources.reserved_doorbell_val = 0x0f0;
                }

                kgd2kfd->device_init(adev->kfd, &gpu_resources);
        }
}

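/* Detach amdkfd from the device on teardown. */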
void amdgpu_amdkfd_device_fini(struct amdgpu_device *adev)
{
        if (adev->kfd) {
                kgd2kfd->device_exit(adev->kfd);
                adev->kfd = NULL;
        }
}

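/* Pass an IH ring entry on to amdkfd for processing. */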
void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
                             const void *ih_ring_entry)
{
        if (adev->kfd)
                kgd2kfd->interrupt(adev->kfd, ih_ring_entry);
}

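/* Quiesce KFD compute activity before the device suspends. */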
void amdgpu_amdkfd_suspend(struct amdgpu_device *adev)
{
        if (adev->kfd)
                kgd2kfd->suspend(adev->kfd);
}

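/* Restart KFD after resume. Returns 0 on success or the error from
 * kgd2kfd->resume().
 */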
int amdgpu_amdkfd_resume(struct amdgpu_device *adev)
{
        int r = 0;

        if (adev->kfd)
                r = kgd2kfd->resume(adev->kfd);

        return r;
}

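/* Allocate a pinned, kernel-mapped GTT buffer object on behalf of amdkfd.
 * The sequence is create -> reserve -> pin -> kmap, with each step unwound
 * on failure. On success *mem_obj holds the amdgpu_bo, *gpu_addr its GPU
 * virtual address and *cpu_ptr its kernel mapping.
 *
 * A minimal usage sketch (hypothetical caller, error handling elided):
 *
 *      void *mem_obj, *cpu_ptr;
 *      uint64_t gpu_addr;
 *
 *      if (!alloc_gtt_mem(kgd, PAGE_SIZE, &mem_obj, &gpu_addr, &cpu_ptr))
 *              memset(cpu_ptr, 0, PAGE_SIZE);
 */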
int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
                  void **mem_obj, uint64_t *gpu_addr,
                  void **cpu_ptr)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
        struct amdgpu_bo *bo = NULL;
        struct amdgpu_bo_param bp;
        int r;
        uint64_t gpu_addr_tmp = 0;
        void *cpu_ptr_tmp = NULL;

        memset(&bp, 0, sizeof(bp));
        bp.size = size;
        bp.byte_align = PAGE_SIZE;
        bp.domain = AMDGPU_GEM_DOMAIN_GTT;
        bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
        bp.type = ttm_bo_type_kernel;
        bp.resv = NULL;
        r = amdgpu_bo_create(adev, &bp, &bo);
        if (r) {
                dev_err(adev->dev,
                        "failed to allocate BO for amdkfd (%d)\n", r);
                return r;
        }

        /* map the buffer */
        r = amdgpu_bo_reserve(bo, true);
        if (r) {
                dev_err(adev->dev, "(%d) failed to reserve bo for amdkfd\n", r);
                goto allocate_mem_reserve_bo_failed;
        }

        r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT,
                          &gpu_addr_tmp);
        if (r) {
                dev_err(adev->dev, "(%d) failed to pin bo for amdkfd\n", r);
                goto allocate_mem_pin_bo_failed;
        }

        r = amdgpu_bo_kmap(bo, &cpu_ptr_tmp);
        if (r) {
                dev_err(adev->dev,
                        "(%d) failed to map bo to kernel for amdkfd\n", r);
                goto allocate_mem_kmap_bo_failed;
        }

        *mem_obj = bo;
        *gpu_addr = gpu_addr_tmp;
        *cpu_ptr = cpu_ptr_tmp;

        amdgpu_bo_unreserve(bo);

        return 0;

allocate_mem_kmap_bo_failed:
        amdgpu_bo_unpin(bo);
allocate_mem_pin_bo_failed:
        amdgpu_bo_unreserve(bo);
allocate_mem_reserve_bo_failed:
        amdgpu_bo_unref(&bo);

        return r;
}

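/* Release a buffer allocated with alloc_gtt_mem(): unmap, unpin and drop
 * the reference, in reverse order of allocation.
 */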
void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
{
        struct amdgpu_bo *bo = (struct amdgpu_bo *) mem_obj;

        amdgpu_bo_reserve(bo, true);
        amdgpu_bo_kunmap(bo);
        amdgpu_bo_unpin(bo);
        amdgpu_bo_unreserve(bo);
        amdgpu_bo_unref(&(bo));
}

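/* Report VRAM sizes to amdkfd. "Public" memory is the CPU-visible part of
 * VRAM, "private" the remainder; if the aperture lies outside the device's
 * DMA mask, all of VRAM is reported as private.
 */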
void get_local_mem_info(struct kgd_dev *kgd,
                        struct kfd_local_mem_info *mem_info)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
        uint64_t address_mask = adev->dev->dma_mask ? ~*adev->dev->dma_mask :
                                             ~((1ULL << 32) - 1);
        resource_size_t aper_limit = adev->gmc.aper_base + adev->gmc.aper_size;

        memset(mem_info, 0, sizeof(*mem_info));
        if (!(adev->gmc.aper_base & address_mask || aper_limit & address_mask)) {
                mem_info->local_mem_size_public = adev->gmc.visible_vram_size;
                mem_info->local_mem_size_private = adev->gmc.real_vram_size -
                                adev->gmc.visible_vram_size;
        } else {
                mem_info->local_mem_size_public = 0;
                mem_info->local_mem_size_private = adev->gmc.real_vram_size;
        }
        mem_info->vram_width = adev->gmc.vram_width;

        pr_debug("Address base: %pap limit %pap public 0x%llx private 0x%llx\n",
                        &adev->gmc.aper_base, &aper_limit,
                        mem_info->local_mem_size_public,
                        mem_info->local_mem_size_private);

        if (amdgpu_emu_mode == 1) {
                mem_info->mem_clk_max = 100;
                return;
        }

        if (amdgpu_sriov_vf(adev))
                mem_info->mem_clk_max = adev->clock.default_mclk / 100;
        else
                mem_info->mem_clk_max = amdgpu_dpm_get_mclk(adev, false) / 100;
}

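/* Return the current GPU clock counter, or 0 if the ASIC doesn't expose one. */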
uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

        if (adev->gfx.funcs->get_gpu_clock_counter)
                return adev->gfx.funcs->get_gpu_clock_counter(adev);
        return 0;
}

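/* Report the maximum engine (shader) clock in MHz. */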
uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

        /* the sclk is in quanta of 10 kHz */
        if (amdgpu_emu_mode == 1)
                return 100;

        if (amdgpu_sriov_vf(adev))
                return adev->clock.default_sclk / 100;

        return amdgpu_dpm_get_sclk(adev, false) / 100;
}

void get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
        struct amdgpu_cu_info acu_info = adev->gfx.cu_info;

        memset(cu_info, 0, sizeof(*cu_info));
        if (sizeof(cu_info->cu_bitmap) != sizeof(acu_info.bitmap))
                return;

        cu_info->cu_active_number = acu_info.number;
        cu_info->cu_ao_mask = acu_info.ao_cu_mask;
        memcpy(&cu_info->cu_bitmap[0], &acu_info.bitmap[0],
               sizeof(acu_info.bitmap));
        cu_info->num_shader_engines = adev->gfx.config.max_shader_engines;
        cu_info->num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
        cu_info->num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
        cu_info->simd_per_cu = acu_info.simd_per_cu;
        cu_info->max_waves_per_simd = acu_info.max_waves_per_simd;
        cu_info->wave_front_size = acu_info.wave_front_size;
        cu_info->max_scratch_slots_per_cu = acu_info.max_scratch_slots_per_cu;
        cu_info->lds_size = acu_info.lds_size;
}

uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

        return amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
}

int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
                            uint32_t vmid, uint64_t gpu_addr,
                            uint32_t *ib_cmd, uint32_t ib_len)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct amdgpu_ring *ring;
        struct dma_fence *f = NULL;
        int ret;

        switch (engine) {
        case KGD_ENGINE_MEC1:
                ring = &adev->gfx.compute_ring[0];
                break;
        case KGD_ENGINE_SDMA1:
                ring = &adev->sdma.instance[0].ring;
                break;
        case KGD_ENGINE_SDMA2:
                ring = &adev->sdma.instance[1].ring;
                break;
        default:
                pr_err("Invalid engine in IB submission: %d\n", engine);
                ret = -EINVAL;
                goto err;
        }

        ret = amdgpu_job_alloc(adev, 1, &job, NULL);
        if (ret)
                goto err;

        ib = &job->ibs[0];
        memset(ib, 0, sizeof(struct amdgpu_ib));

        ib->gpu_addr = gpu_addr;
        ib->ptr = ib_cmd;
        ib->length_dw = ib_len;
        /* This works for NO_HWS. TODO: need to handle without knowing VMID */
        job->vmid = vmid;

        ret = amdgpu_ib_schedule(ring, 1, ib, job, &f);
        if (ret) {
                DRM_ERROR("amdgpu: failed to schedule IB.\n");
                goto err_ib_sched;
        }

        ret = dma_fence_wait(f, false);

err_ib_sched:
        dma_fence_put(f);
        amdgpu_job_free(job);
err:
        return ret;
}

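/* True if @vmid is one of the VMIDs reserved for KFD compute on this device. */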
bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
{
        if (adev->kfd) {
                if ((1 << vmid) & compute_vmid_bitmap)
                        return true;
        }

        return false;
}

#if !defined(CONFIG_HSA_AMD_MODULE) && !defined(CONFIG_HSA_AMD)
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
{
        return false;
}

void amdgpu_amdkfd_unreserve_system_memory_limit(struct amdgpu_bo *bo)
{
}

void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
                                    struct amdgpu_vm *vm)
{
}

struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
{
        return NULL;
}

int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm)
{
        return 0;
}

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
{
        return NULL;
}

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
{
        return NULL;
}

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void)
{
        return NULL;
}
#endif