blob: 7aa5ab09ed099157c5140fc7fab407c90a20d282 [file] [log] [blame]
Oded Gabbay130e0372015-06-12 21:35:14 +03001/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#include "amdgpu_amdkfd.h"
24#include "amdgpu_family.h"
25#include <drm/drmP.h>
26#include "amdgpu.h"
27#include <linux/module.h>
28
/* Interface tables exchanged between amdgpu (kgd) and the amdkfd driver:
 * kfd2kgd is the callback table amdgpu exposes to amdkfd (per-ASIC, chosen
 * in amdgpu_amdkfd_load_interface()); kgd2kfd is the table amdkfd returns
 * from kgd2kfd_init(). Both are NULL until the interface is loaded.
 */
const struct kfd2kgd_calls *kfd2kgd;
const struct kgd2kfd_calls *kgd2kfd;
/* NOTE(review): this file-scope pointer is shadowed by same-named locals in
 * amdgpu_amdkfd_init() and amdgpu_amdkfd_load_interface(), so it appears to
 * never be written — confirm it is still needed before relying on it.
 */
bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);
32
33bool amdgpu_amdkfd_init(void)
34{
35#if defined(CONFIG_HSA_AMD_MODULE)
36 bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);
37
38 kgd2kfd_init_p = symbol_request(kgd2kfd_init);
39
40 if (kgd2kfd_init_p == NULL)
41 return false;
42#endif
43 return true;
44}
45
/*
 * amdgpu_amdkfd_load_interface - bind amdgpu to the amdkfd driver.
 * @rdev: amdgpu device being initialized
 *
 * Selects the per-ASIC kfd2kgd callback table, then calls amdkfd's
 * kgd2kfd_init() to obtain the kgd2kfd table. Exactly one of the three
 * preprocessor branches below is compiled in, depending on whether amdkfd
 * is a module, built in, or disabled.
 *
 * Return: true on success; false if the ASIC is unsupported or amdkfd
 * cannot be initialized (in which case the global tables are reset).
 */
bool amdgpu_amdkfd_load_interface(struct amdgpu_device *rdev)
{
#if defined(CONFIG_HSA_AMD_MODULE)
	/* Shadows the file-scope kgd2kfd_init_p on purpose-unclear grounds;
	 * NOTE(review): confirm the global is still required.
	 */
	bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);
#endif

	/* Pick the kfd2kgd callback table for this ASIC generation. */
	switch (rdev->asic_type) {
	case CHIP_KAVERI:
		kfd2kgd = amdgpu_amdkfd_gfx_7_get_functions();
		break;
	default:
		return false;
	}

#if defined(CONFIG_HSA_AMD_MODULE)
	/* amdkfd is modular: resolve its entry point, loading the module if
	 * necessary. On success this holds a module reference that
	 * amdgpu_amdkfd_fini() releases via symbol_put().
	 */
	kgd2kfd_init_p = symbol_request(kgd2kfd_init);

	if (kgd2kfd_init_p == NULL) {
		kfd2kgd = NULL;
		return false;
	}

	if (!kgd2kfd_init_p(KFD_INTERFACE_VERSION, &kgd2kfd)) {
		/* Version handshake failed: drop the module reference and
		 * reset both interface tables.
		 */
		symbol_put(kgd2kfd_init);
		kfd2kgd = NULL;
		kgd2kfd = NULL;

		return false;
	}

	return true;
#elif defined(CONFIG_HSA_AMD)
	/* amdkfd is built in: call its init entry point directly. */
	if (!kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd)) {
		kfd2kgd = NULL;
		kgd2kfd = NULL;
		return false;
	}

	return true;
#else
	/* amdkfd support compiled out entirely. */
	kfd2kgd = NULL;
	return false;
#endif
}
90
91void amdgpu_amdkfd_fini(void)
92{
93 if (kgd2kfd) {
94 kgd2kfd->exit();
95 symbol_put(kgd2kfd_init);
96 }
97}
98
99void amdgpu_amdkfd_device_probe(struct amdgpu_device *rdev)
100{
101 if (kgd2kfd)
102 rdev->kfd = kgd2kfd->probe((struct kgd_dev *)rdev,
103 rdev->pdev, kfd2kgd);
104}
105
106void amdgpu_amdkfd_device_init(struct amdgpu_device *rdev)
107{
108 if (rdev->kfd) {
109 struct kgd2kfd_shared_resources gpu_resources = {
110 .compute_vmid_bitmap = 0xFF00,
111
112 .first_compute_pipe = 1,
113 .compute_pipe_count = 4 - 1,
114 };
115
116 amdgpu_doorbell_get_kfd_info(rdev,
117 &gpu_resources.doorbell_physical_address,
118 &gpu_resources.doorbell_aperture_size,
119 &gpu_resources.doorbell_start_offset);
120
121 kgd2kfd->device_init(rdev->kfd, &gpu_resources);
122 }
123}
124
125void amdgpu_amdkfd_device_fini(struct amdgpu_device *rdev)
126{
127 if (rdev->kfd) {
128 kgd2kfd->device_exit(rdev->kfd);
129 rdev->kfd = NULL;
130 }
131}
132
133void amdgpu_amdkfd_interrupt(struct amdgpu_device *rdev,
134 const void *ih_ring_entry)
135{
136 if (rdev->kfd)
137 kgd2kfd->interrupt(rdev->kfd, ih_ring_entry);
138}
139
140void amdgpu_amdkfd_suspend(struct amdgpu_device *rdev)
141{
142 if (rdev->kfd)
143 kgd2kfd->suspend(rdev->kfd);
144}
145
146int amdgpu_amdkfd_resume(struct amdgpu_device *rdev)
147{
148 int r = 0;
149
150 if (rdev->kfd)
151 r = kgd2kfd->resume(rdev->kfd);
152
153 return r;
154}
155
156u32 pool_to_domain(enum kgd_memory_pool p)
157{
158 switch (p) {
159 case KGD_POOL_FRAMEBUFFER: return AMDGPU_GEM_DOMAIN_VRAM;
160 default: return AMDGPU_GEM_DOMAIN_GTT;
161 }
162}
163
164int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
165 void **mem_obj, uint64_t *gpu_addr,
166 void **cpu_ptr)
167{
168 struct amdgpu_device *rdev = (struct amdgpu_device *)kgd;
169 struct kgd_mem **mem = (struct kgd_mem **) mem_obj;
170 int r;
171
172 BUG_ON(kgd == NULL);
173 BUG_ON(gpu_addr == NULL);
174 BUG_ON(cpu_ptr == NULL);
175
176 *mem = kmalloc(sizeof(struct kgd_mem), GFP_KERNEL);
177 if ((*mem) == NULL)
178 return -ENOMEM;
179
180 r = amdgpu_bo_create(rdev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT,
181 AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, &(*mem)->bo);
182 if (r) {
183 dev_err(rdev->dev,
184 "failed to allocate BO for amdkfd (%d)\n", r);
185 return r;
186 }
187
188 /* map the buffer */
189 r = amdgpu_bo_reserve((*mem)->bo, true);
190 if (r) {
191 dev_err(rdev->dev, "(%d) failed to reserve bo for amdkfd\n", r);
192 goto allocate_mem_reserve_bo_failed;
193 }
194
195 r = amdgpu_bo_pin((*mem)->bo, AMDGPU_GEM_DOMAIN_GTT,
196 &(*mem)->gpu_addr);
197 if (r) {
198 dev_err(rdev->dev, "(%d) failed to pin bo for amdkfd\n", r);
199 goto allocate_mem_pin_bo_failed;
200 }
201 *gpu_addr = (*mem)->gpu_addr;
202
203 r = amdgpu_bo_kmap((*mem)->bo, &(*mem)->cpu_ptr);
204 if (r) {
205 dev_err(rdev->dev,
206 "(%d) failed to map bo to kernel for amdkfd\n", r);
207 goto allocate_mem_kmap_bo_failed;
208 }
209 *cpu_ptr = (*mem)->cpu_ptr;
210
211 amdgpu_bo_unreserve((*mem)->bo);
212
213 return 0;
214
215allocate_mem_kmap_bo_failed:
216 amdgpu_bo_unpin((*mem)->bo);
217allocate_mem_pin_bo_failed:
218 amdgpu_bo_unreserve((*mem)->bo);
219allocate_mem_reserve_bo_failed:
220 amdgpu_bo_unref(&(*mem)->bo);
221
222 return r;
223}
224
225void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
226{
227 struct kgd_mem *mem = (struct kgd_mem *) mem_obj;
228
229 BUG_ON(mem == NULL);
230
231 amdgpu_bo_reserve(mem->bo, true);
232 amdgpu_bo_kunmap(mem->bo);
233 amdgpu_bo_unpin(mem->bo);
234 amdgpu_bo_unreserve(mem->bo);
235 amdgpu_bo_unref(&(mem->bo));
236 kfree(mem);
237}
238
239uint64_t get_vmem_size(struct kgd_dev *kgd)
240{
241 struct amdgpu_device *rdev =
242 (struct amdgpu_device *)kgd;
243
244 BUG_ON(kgd == NULL);
245
246 return rdev->mc.real_vram_size;
247}
248
249uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
250{
251 struct amdgpu_device *rdev = (struct amdgpu_device *)kgd;
252
253 if (rdev->asic_funcs->get_gpu_clock_counter)
254 return rdev->asic_funcs->get_gpu_clock_counter(rdev);
255 return 0;
256}
257
258uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
259{
260 struct amdgpu_device *rdev = (struct amdgpu_device *)kgd;
261
262 /* The sclk is in quantas of 10kHz */
263 return rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk / 100;
264}