/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "amdgpu_amdkfd.h"
#include "amd_shared.h"
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include <linux/module.h>

const struct kgd2kfd_calls *kgd2kfd;
bool (*kgd2kfd_init_p)(unsigned int, const struct kgd2kfd_calls**);

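/* VMIDs 8-15 (the upper half of the 16-bit bitmap) are reserved for KFD
 * compute contexts; amdgpu keeps the lower VMIDs for its own graphics
 * contexts.
 */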
static const unsigned int compute_vmid_bitmap = 0xFF00;

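/* Resolve the kgd2kfd interface at init time.  When the KFD is built as a
 * module (CONFIG_HSA_AMD_MODULE), kgd2kfd_init is looked up with
 * symbol_request(); when it is built in (CONFIG_HSA_AMD), it is called
 * directly.  In either case a failed handshake leaves kgd2kfd NULL, which
 * every later entry point checks for.
 */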
int amdgpu_amdkfd_init(void)
{
	int ret;

#if defined(CONFIG_HSA_AMD_MODULE)
	int (*kgd2kfd_init_p)(unsigned int, const struct kgd2kfd_calls**);

	kgd2kfd_init_p = symbol_request(kgd2kfd_init);

	if (kgd2kfd_init_p == NULL)
		return -ENOENT;

	ret = kgd2kfd_init_p(KFD_INTERFACE_VERSION, &kgd2kfd);
	if (ret) {
		symbol_put(kgd2kfd_init);
		kgd2kfd = NULL;
	}

#elif defined(CONFIG_HSA_AMD)

	ret = kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd);
	if (ret)
		kgd2kfd = NULL;

#else
	kgd2kfd = NULL;
	ret = -ENOENT;
#endif

#if defined(CONFIG_HSA_AMD_MODULE) || defined(CONFIG_HSA_AMD)
	amdgpu_amdkfd_gpuvm_init_mem_limits();
#endif

	return ret;
}

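/* Unwind amdgpu_amdkfd_init(): let the KFD clean up, then drop the module
 * reference taken by symbol_request(), if any.
 */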
void amdgpu_amdkfd_fini(void)
{
	if (kgd2kfd) {
		kgd2kfd->exit();
		symbol_put(kgd2kfd_init);
	}
}

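/* Pick the ASIC-specific kfd2kgd function table for this device and hand it
 * to the KFD's probe routine.  ASICs without a table are skipped, leaving
 * adev->kfd NULL.
 */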
void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
{
	const struct kfd2kgd_calls *kfd2kgd;

	if (!kgd2kfd)
		return;

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_KAVERI:
	case CHIP_HAWAII:
		kfd2kgd = amdgpu_amdkfd_gfx_7_get_functions();
		break;
#endif
	case CHIP_CARRIZO:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
		kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions();
		break;
	case CHIP_VEGA10:
	case CHIP_RAVEN:
		kfd2kgd = amdgpu_amdkfd_gfx_9_0_get_functions();
		break;
	default:
		dev_info(adev->dev, "kfd not supported on this ASIC\n");
		return;
	}

	adev->kfd = kgd2kfd->probe((struct kgd_dev *)adev,
				   adev->pdev, kfd2kgd);
}

/**
 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
 *                                setup amdkfd
 *
 * @adev: amdgpu_device pointer
 * @aperture_base: output returning doorbell aperture base physical address
 * @aperture_size: output returning doorbell aperture size in bytes
 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
 *
 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
 * takes doorbells required for its own rings and reports the setup to amdkfd.
 * amdgpu reserved doorbells are at the start of the doorbell aperture.
 */
static void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
					 phys_addr_t *aperture_base,
					 size_t *aperture_size,
					 size_t *start_offset)
{
	/*
	 * The first num_doorbells are used by amdgpu.
	 * amdkfd takes whatever's left in the aperture.
	 */
	if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
		*aperture_base = adev->doorbell.base;
		*aperture_size = adev->doorbell.size;
		*start_offset = adev->doorbell.num_doorbells * sizeof(u32);
	} else {
		*aperture_base = 0;
		*aperture_size = 0;
		*start_offset = 0;
	}
}

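/* Publish the resources this device shares with the KFD: the compute VMIDs,
 * whichever MEC queues amdgpu does not use itself (only the first MEC
 * carries compute queues, and the KIQ is removed from the set), the unused
 * tail of the doorbell aperture, and the GPUVM address range the KFD may
 * manage.
 */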
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
{
	int i;
	int last_valid_bit;
	if (adev->kfd) {
		struct kgd2kfd_shared_resources gpu_resources = {
			.compute_vmid_bitmap = compute_vmid_bitmap,
			.num_pipe_per_mec = adev->gfx.mec.num_pipe_per_mec,
			.num_queue_per_pipe = adev->gfx.mec.num_queue_per_pipe,
			.gpuvm_size = min(adev->vm_manager.max_pfn
					  << AMDGPU_GPU_PAGE_SHIFT,
					  AMDGPU_VA_HOLE_START),
			.drm_render_minor = adev->ddev->render->index
		};

		/* this is going to have a few of the MSBs set that we need to
		 * clear */
		bitmap_complement(gpu_resources.queue_bitmap,
				  adev->gfx.mec.queue_bitmap,
				  KGD_MAX_QUEUES);

		/* remove the KIQ bit as well */
		if (adev->gfx.kiq.ring.ready)
			clear_bit(amdgpu_gfx_queue_to_bit(adev,
							  adev->gfx.kiq.ring.me - 1,
							  adev->gfx.kiq.ring.pipe,
							  adev->gfx.kiq.ring.queue),
				  gpu_resources.queue_bitmap);

		/* According to linux/bitmap.h we shouldn't use bitmap_clear if
		 * nbits is not compile time constant */
		last_valid_bit = 1 /* only first MEC can have compute queues */
				* adev->gfx.mec.num_pipe_per_mec
				* adev->gfx.mec.num_queue_per_pipe;
		for (i = last_valid_bit; i < KGD_MAX_QUEUES; ++i)
			clear_bit(i, gpu_resources.queue_bitmap);

		amdgpu_doorbell_get_kfd_info(adev,
				&gpu_resources.doorbell_physical_address,
				&gpu_resources.doorbell_aperture_size,
				&gpu_resources.doorbell_start_offset);
		if (adev->asic_type >= CHIP_VEGA10) {
			/* On SOC15 the BIF is involved in routing
			 * doorbells using the low 12 bits of the
			 * address. Communicate the assignments to
			 * KFD. KFD uses two doorbell pages per
			 * process in case of 64-bit doorbells so we
			 * can use each doorbell assignment twice.
			 */
			gpu_resources.sdma_doorbell[0][0] =
				AMDGPU_DOORBELL64_sDMA_ENGINE0;
			gpu_resources.sdma_doorbell[0][1] =
				AMDGPU_DOORBELL64_sDMA_ENGINE0 + 0x200;
			gpu_resources.sdma_doorbell[1][0] =
				AMDGPU_DOORBELL64_sDMA_ENGINE1;
			gpu_resources.sdma_doorbell[1][1] =
				AMDGPU_DOORBELL64_sDMA_ENGINE1 + 0x200;
			/* Doorbells 0x0f0-0ff and 0x2f0-2ff are reserved for
			 * SDMA, IH and VCN. So don't use them for the CP.
			 */
			gpu_resources.reserved_doorbell_mask = 0x1f0;
			gpu_resources.reserved_doorbell_val = 0x0f0;
		}

		kgd2kfd->device_init(adev->kfd, &gpu_resources);
	}
}

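/* The thin wrappers below forward device teardown, interrupts and
 * power-management transitions to the KFD.  Each is guarded by the
 * adev->kfd check, so they are no-ops on devices without KFD support.
 */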
void amdgpu_amdkfd_device_fini(struct amdgpu_device *adev)
{
	if (adev->kfd) {
		kgd2kfd->device_exit(adev->kfd);
		adev->kfd = NULL;
	}
}

void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
			     const void *ih_ring_entry)
{
	if (adev->kfd)
		kgd2kfd->interrupt(adev->kfd, ih_ring_entry);
}

void amdgpu_amdkfd_suspend(struct amdgpu_device *adev)
{
	if (adev->kfd)
		kgd2kfd->suspend(adev->kfd);
}

int amdgpu_amdkfd_resume(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->kfd)
		r = kgd2kfd->resume(adev->kfd);

	return r;
}

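/* Allocate a pinned, kernel-mapped GTT buffer on behalf of the KFD (used
 * e.g. for queue descriptors).  The output pointers are only written once
 * the whole create/reserve/pin/kmap sequence has succeeded; each failure
 * path unwinds exactly the steps already taken.
 */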
int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
			void **mem_obj, uint64_t *gpu_addr,
			void **cpu_ptr)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct amdgpu_bo *bo = NULL;
	struct amdgpu_bo_param bp;
	int r;
	uint64_t gpu_addr_tmp = 0;
	void *cpu_ptr_tmp = NULL;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;
	r = amdgpu_bo_create(adev, &bp, &bo);
	if (r) {
		dev_err(adev->dev,
			"failed to allocate BO for amdkfd (%d)\n", r);
		return r;
	}

	/* map the buffer */
	r = amdgpu_bo_reserve(bo, true);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve bo for amdkfd\n", r);
		goto allocate_mem_reserve_bo_failed;
	}

	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT,
				&gpu_addr_tmp);
	if (r) {
		dev_err(adev->dev, "(%d) failed to pin bo for amdkfd\n", r);
		goto allocate_mem_pin_bo_failed;
	}

	r = amdgpu_bo_kmap(bo, &cpu_ptr_tmp);
	if (r) {
		dev_err(adev->dev,
			"(%d) failed to map bo to kernel for amdkfd\n", r);
		goto allocate_mem_kmap_bo_failed;
	}

	*mem_obj = bo;
	*gpu_addr = gpu_addr_tmp;
	*cpu_ptr = cpu_ptr_tmp;

	amdgpu_bo_unreserve(bo);

	return 0;

allocate_mem_kmap_bo_failed:
	amdgpu_bo_unpin(bo);
allocate_mem_pin_bo_failed:
	amdgpu_bo_unreserve(bo);
allocate_mem_reserve_bo_failed:
	amdgpu_bo_unref(&bo);

	return r;
}

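/* Release a buffer obtained from alloc_gtt_mem(): unmap, unpin and drop the
 * last reference.
 */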
void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
{
	struct amdgpu_bo *bo = (struct amdgpu_bo *) mem_obj;

	amdgpu_bo_reserve(bo, true);
	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unpin(bo);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&(bo));
}

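/* Report local-memory (VRAM) properties to the KFD.  VRAM that is CPU-
 * visible through the aperture and addressable within the device's DMA mask
 * is reported as "public"; the rest as "private".  The memory clock is
 * reported in MHz, with a fixed placeholder value under emulation.
 */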
void get_local_mem_info(struct kgd_dev *kgd,
			struct kfd_local_mem_info *mem_info)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	uint64_t address_mask = adev->dev->dma_mask ? ~*adev->dev->dma_mask :
					     ~((1ULL << 32) - 1);
	resource_size_t aper_limit = adev->gmc.aper_base + adev->gmc.aper_size;

	memset(mem_info, 0, sizeof(*mem_info));
	if (!(adev->gmc.aper_base & address_mask || aper_limit & address_mask)) {
		mem_info->local_mem_size_public = adev->gmc.visible_vram_size;
		mem_info->local_mem_size_private = adev->gmc.real_vram_size -
				adev->gmc.visible_vram_size;
	} else {
		mem_info->local_mem_size_public = 0;
		mem_info->local_mem_size_private = adev->gmc.real_vram_size;
	}
	mem_info->vram_width = adev->gmc.vram_width;

	pr_debug("Address base: %pap limit %pap public 0x%llx private 0x%llx\n",
			&adev->gmc.aper_base, &aper_limit,
			mem_info->local_mem_size_public,
			mem_info->local_mem_size_private);

	if (amdgpu_emu_mode == 1) {
		mem_info->mem_clk_max = 100;
		return;
	}

	if (amdgpu_sriov_vf(adev))
		mem_info->mem_clk_max = adev->clock.default_mclk / 100;
	else
		mem_info->mem_clk_max = amdgpu_dpm_get_mclk(adev, false) / 100;
}

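/* Snapshot of the free-running GPU clock counter; returns 0 if the ASIC
 * does not implement the query.
 */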
uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	if (adev->gfx.funcs->get_gpu_clock_counter)
		return adev->gfx.funcs->get_gpu_clock_counter(adev);
	return 0;
}

uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	/* the sclk is in quanta of 10kHz */
	if (amdgpu_emu_mode == 1)
		return 100;

	if (amdgpu_sriov_vf(adev))
		return adev->clock.default_sclk / 100;

	return amdgpu_dpm_get_sclk(adev, false) / 100;
}

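/* Copy the compute-unit topology (active CU count and bitmap, shader
 * engine/array layout, SIMD and wavefront properties) out of amdgpu's gfx
 * configuration for the KFD's topology reporting.
 */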
void get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct amdgpu_cu_info acu_info = adev->gfx.cu_info;

	memset(cu_info, 0, sizeof(*cu_info));
	if (sizeof(cu_info->cu_bitmap) != sizeof(acu_info.bitmap))
		return;

	cu_info->cu_active_number = acu_info.number;
	cu_info->cu_ao_mask = acu_info.ao_cu_mask;
	memcpy(&cu_info->cu_bitmap[0], &acu_info.bitmap[0],
	       sizeof(acu_info.bitmap));
	cu_info->num_shader_engines = adev->gfx.config.max_shader_engines;
	cu_info->num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
	cu_info->num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
	cu_info->simd_per_cu = acu_info.simd_per_cu;
	cu_info->max_waves_per_simd = acu_info.max_waves_per_simd;
	cu_info->wave_front_size = acu_info.wave_front_size;
	cu_info->max_scratch_slots_per_cu = acu_info.max_scratch_slots_per_cu;
	cu_info->lds_size = acu_info.lds_size;
}

uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	return amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
}

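/* Synchronously submit an indirect buffer on behalf of the KFD on the first
 * compute or SDMA ring and wait for it to complete.  As the TODO below
 * notes, this only works in no-HWS mode, where the caller knows the VMID.
 */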
int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
				uint32_t vmid, uint64_t gpu_addr,
				uint32_t *ib_cmd, uint32_t ib_len)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct amdgpu_ring *ring;
	struct dma_fence *f = NULL;
	int ret;

	switch (engine) {
	case KGD_ENGINE_MEC1:
		ring = &adev->gfx.compute_ring[0];
		break;
	case KGD_ENGINE_SDMA1:
		ring = &adev->sdma.instance[0].ring;
		break;
	case KGD_ENGINE_SDMA2:
		ring = &adev->sdma.instance[1].ring;
		break;
	default:
		pr_err("Invalid engine in IB submission: %d\n", engine);
		ret = -EINVAL;
		goto err;
	}

	ret = amdgpu_job_alloc(adev, 1, &job, NULL);
	if (ret)
		goto err;

	ib = &job->ibs[0];
	memset(ib, 0, sizeof(struct amdgpu_ib));

	ib->gpu_addr = gpu_addr;
	ib->ptr = ib_cmd;
	ib->length_dw = ib_len;
	/* This works for NO_HWS. TODO: need to handle without knowing VMID */
	job->vmid = vmid;

	ret = amdgpu_ib_schedule(ring, 1, ib, job, &f);
	if (ret) {
		DRM_ERROR("amdgpu: failed to schedule IB.\n");
		goto err_ib_sched;
	}

	ret = dma_fence_wait(f, false);

err_ib_sched:
	dma_fence_put(f);
	amdgpu_job_free(job);
err:
	return ret;
}

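/* True if @vmid is one of the VMIDs reserved for KFD compute on a device
 * where the KFD was probed.
 */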
bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
{
	if (adev->kfd) {
		if ((1 << vmid) & compute_vmid_bitmap)
			return true;
	}

	return false;
}

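/* Stubs so that amdgpu still links when the KFD is not configured: the
 * amdkfd entry points referenced elsewhere in the driver collapse to
 * harmless defaults.
 */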
#if !defined(CONFIG_HSA_AMD_MODULE) && !defined(CONFIG_HSA_AMD)
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
{
	return false;
}

void amdgpu_amdkfd_unreserve_system_memory_limit(struct amdgpu_bo *bo)
{
}

void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
					struct amdgpu_vm *vm)
{
}

struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
{
	return NULL;
}

int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm)
{
	return 0;
}

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
{
	return NULL;
}

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
{
	return NULL;
}

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void)
{
	return NULL;
}
#endif