/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "amdgpu_amdkfd.h"
#include "amd_shared.h"
#include <drm/drmP.h>
#include "amdgpu.h"
#include <linux/module.h>

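/*
 * Interface function tables shared between amdgpu (KGD) and amdkfd (KFD):
 * kfd2kgd holds the callbacks KFD uses to drive the GPU through amdgpu,
 * kgd2kfd holds the callbacks amdgpu uses to notify KFD of device events.
 */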
const struct kfd2kgd_calls *kfd2kgd;
const struct kgd2kfd_calls *kgd2kfd;
bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);

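/*
 * When amdkfd is built as a module, verify that its kgd2kfd_init symbol
 * can be resolved before amdgpu claims any KFD-capable device.
 */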
bool amdgpu_amdkfd_init(void)
{
#if defined(CONFIG_HSA_AMD_MODULE)
	bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);

	kgd2kfd_init_p = symbol_request(kgd2kfd_init);

	if (kgd2kfd_init_p == NULL)
		return false;
#endif
	return true;
}

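/*
 * Select the ASIC-specific kfd2kgd function table and, depending on how
 * amdkfd was built, resolve and call kgd2kfd_init() to obtain the kgd2kfd
 * callbacks.
 */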
bool amdgpu_amdkfd_load_interface(struct amdgpu_device *rdev)
{
#if defined(CONFIG_HSA_AMD_MODULE)
	bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);
#endif

	switch (rdev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_KAVERI:
		kfd2kgd = amdgpu_amdkfd_gfx_7_get_functions();
		break;
#endif
	case CHIP_CARRIZO:
		kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions();
		break;
	default:
		return false;
	}

#if defined(CONFIG_HSA_AMD_MODULE)
	kgd2kfd_init_p = symbol_request(kgd2kfd_init);

	if (kgd2kfd_init_p == NULL) {
		kfd2kgd = NULL;
		return false;
	}

	if (!kgd2kfd_init_p(KFD_INTERFACE_VERSION, &kgd2kfd)) {
		symbol_put(kgd2kfd_init);
		kfd2kgd = NULL;
		kgd2kfd = NULL;

		return false;
	}

	return true;
#elif defined(CONFIG_HSA_AMD)
	if (!kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd)) {
		kfd2kgd = NULL;
		kgd2kfd = NULL;
		return false;
	}

	return true;
#else
	kfd2kgd = NULL;
	return false;
#endif
}

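/* Shut down amdkfd and drop the module reference taken on kgd2kfd_init. */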
void amdgpu_amdkfd_fini(void)
{
	if (kgd2kfd) {
		kgd2kfd->exit();
		symbol_put(kgd2kfd_init);
	}
}

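/* Let amdkfd probe this GPU and create its per-device KFD instance. */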
void amdgpu_amdkfd_device_probe(struct amdgpu_device *rdev)
{
	if (kgd2kfd)
		rdev->kfd = kgd2kfd->probe((struct kgd_dev *)rdev,
					rdev->pdev, kfd2kgd);
}

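/*
 * Hand amdkfd the GPU resources it is allowed to use: the compute VMIDs,
 * the MEC pipes not used by amdgpu, and the doorbell aperture.
 */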
void amdgpu_amdkfd_device_init(struct amdgpu_device *rdev)
{
	if (rdev->kfd) {
		struct kgd2kfd_shared_resources gpu_resources = {
			/* VMIDs 8-15 are reserved for KFD compute contexts */
			.compute_vmid_bitmap = 0xFF00,

			/* KFD gets pipes 1-3; pipe 0 is left to amdgpu */
			.first_compute_pipe = 1,
			.compute_pipe_count = 4 - 1,
		};

		amdgpu_doorbell_get_kfd_info(rdev,
				&gpu_resources.doorbell_physical_address,
				&gpu_resources.doorbell_aperture_size,
				&gpu_resources.doorbell_start_offset);

		kgd2kfd->device_init(rdev->kfd, &gpu_resources);
	}
}

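/* Detach amdkfd from this GPU and release its per-device instance. */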
void amdgpu_amdkfd_device_fini(struct amdgpu_device *rdev)
{
	if (rdev->kfd) {
		kgd2kfd->device_exit(rdev->kfd);
		rdev->kfd = NULL;
	}
}

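/* Forward an IH ring entry to amdkfd so it can handle compute interrupts. */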
void amdgpu_amdkfd_interrupt(struct amdgpu_device *rdev,
		const void *ih_ring_entry)
{
	if (rdev->kfd)
		kgd2kfd->interrupt(rdev->kfd, ih_ring_entry);
}

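/* Quiesce amdkfd on this GPU before the device is suspended. */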
void amdgpu_amdkfd_suspend(struct amdgpu_device *rdev)
{
	if (rdev->kfd)
		kgd2kfd->suspend(rdev->kfd);
}

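/* Resume amdkfd on this GPU; returns 0 on success. */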
int amdgpu_amdkfd_resume(struct amdgpu_device *rdev)
{
	int r = 0;

	if (rdev->kfd)
		r = kgd2kfd->resume(rdev->kfd);

	return r;
}

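/* Map a KFD memory pool to the amdgpu GEM domain backing it. */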
u32 pool_to_domain(enum kgd_memory_pool p)
{
	switch (p) {
	case KGD_POOL_FRAMEBUFFER: return AMDGPU_GEM_DOMAIN_VRAM;
	default: return AMDGPU_GEM_DOMAIN_GTT;
	}
}

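/*
 * Allocate a pinned, kernel-mapped GTT buffer object on behalf of amdkfd
 * and return both its GPU address and CPU pointer.
 */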
int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
			void **mem_obj, uint64_t *gpu_addr,
			void **cpu_ptr)
{
	struct amdgpu_device *rdev = (struct amdgpu_device *)kgd;
	struct kgd_mem **mem = (struct kgd_mem **) mem_obj;
	int r;

	BUG_ON(kgd == NULL);
	BUG_ON(gpu_addr == NULL);
	BUG_ON(cpu_ptr == NULL);

	*mem = kmalloc(sizeof(struct kgd_mem), GFP_KERNEL);
	if ((*mem) == NULL)
		return -ENOMEM;

	r = amdgpu_bo_create(rdev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT,
			AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, NULL, &(*mem)->bo);
	if (r) {
		dev_err(rdev->dev,
			"failed to allocate BO for amdkfd (%d)\n", r);
		kfree(*mem);
		return r;
	}

	/* map the buffer */
	r = amdgpu_bo_reserve((*mem)->bo, true);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to reserve bo for amdkfd\n", r);
		goto allocate_mem_reserve_bo_failed;
	}

	r = amdgpu_bo_pin((*mem)->bo, AMDGPU_GEM_DOMAIN_GTT,
				&(*mem)->gpu_addr);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to pin bo for amdkfd\n", r);
		goto allocate_mem_pin_bo_failed;
	}
	*gpu_addr = (*mem)->gpu_addr;

	r = amdgpu_bo_kmap((*mem)->bo, &(*mem)->cpu_ptr);
	if (r) {
		dev_err(rdev->dev,
			"(%d) failed to map bo to kernel for amdkfd\n", r);
		goto allocate_mem_kmap_bo_failed;
	}
	*cpu_ptr = (*mem)->cpu_ptr;

	amdgpu_bo_unreserve((*mem)->bo);

	return 0;

allocate_mem_kmap_bo_failed:
	amdgpu_bo_unpin((*mem)->bo);
allocate_mem_pin_bo_failed:
	amdgpu_bo_unreserve((*mem)->bo);
allocate_mem_reserve_bo_failed:
	amdgpu_bo_unref(&(*mem)->bo);
	/* don't leak the kgd_mem wrapper on error */
	kfree(*mem);

	return r;
}

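/* Unmap, unpin and free a buffer object allocated by alloc_gtt_mem. */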
void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
{
	struct kgd_mem *mem = (struct kgd_mem *) mem_obj;

	BUG_ON(mem == NULL);

	amdgpu_bo_reserve(mem->bo, true);
	amdgpu_bo_kunmap(mem->bo);
	amdgpu_bo_unpin(mem->bo);
	amdgpu_bo_unreserve(mem->bo);
	amdgpu_bo_unref(&(mem->bo));
	kfree(mem);
}

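/* Report the amount of VRAM managed by this GPU, in bytes. */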
uint64_t get_vmem_size(struct kgd_dev *kgd)
{
	struct amdgpu_device *rdev =
		(struct amdgpu_device *)kgd;

	BUG_ON(kgd == NULL);

	return rdev->mc.real_vram_size;
}

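/* Read the GPU clock counter, or 0 if the ASIC does not expose one. */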
uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
{
	struct amdgpu_device *rdev = (struct amdgpu_device *)kgd;

	if (rdev->asic_funcs->get_gpu_clock_counter)
		return rdev->asic_funcs->get_gpu_clock_counter(rdev);
	return 0;
}

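/* Return the maximum shader engine clock (sclk) on AC power, in MHz. */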
uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
{
	struct amdgpu_device *rdev = (struct amdgpu_device *)kgd;

	/* the sclk is reported in quanta of 10 kHz, so divide by 100 for MHz */
	return rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk / 100;
}
269}