/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"

#define MAX_KIQ_REG_WAIT	100000000 /* in usecs */

bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
{
	/*
	 * By now all MMIO pages except the mailbox are blocked if
	 * blocking is enabled in the hypervisor. Use SCRATCH_REG0
	 * (0xc040) to test.
	 */
	return RREG32_NO_KIQ(0xc040) == 0xffffffff;
}

int amdgpu_allocate_static_csa(struct amdgpu_device *adev)
{
	int r;
	void *ptr;

	r = amdgpu_bo_create_kernel(adev, AMDGPU_CSA_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->virt.csa_obj,
				    &adev->virt.csa_vmid0_addr, &ptr);
	if (r)
		return r;

	memset(ptr, 0, AMDGPU_CSA_SIZE);
	return 0;
}

void amdgpu_free_static_csa(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->virt.csa_obj,
			      &adev->virt.csa_vmid0_addr,
			      NULL);
}

/*
 * amdgpu_map_static_csa() should be called during amdgpu_vm_init().
 * It maps the virtual address "AMDGPU_VA_RESERVED_SIZE - AMDGPU_CSA_SIZE"
 * into this VM, and each GFX command submission should use this virtual
 * address within its META_DATA init package to support SR-IOV gfx
 * preemption.
 */

int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			  struct amdgpu_bo_va **bo_va)
{
	struct ww_acquire_ctx ticket;
	struct list_head list;
	struct amdgpu_bo_list_entry pd;
	struct ttm_validate_buffer csa_tv;
	int r;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&csa_tv.head);
	csa_tv.bo = &adev->virt.csa_obj->tbo;
	csa_tv.shared = true;

	list_add(&csa_tv.head, &list);
	amdgpu_vm_get_pd_bo(vm, &list, &pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r) {
		DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
		return r;
	}

	*bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj);
	if (!*bo_va) {
		ttm_eu_backoff_reservation(&ticket, &list);
		DRM_ERROR("failed to create bo_va for static CSA\n");
		return -ENOMEM;
	}

	r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, AMDGPU_CSA_VADDR,
				AMDGPU_CSA_SIZE);
	if (r) {
		DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
		amdgpu_vm_bo_rmv(adev, *bo_va);
		ttm_eu_backoff_reservation(&ticket, &list);
		return r;
	}

	r = amdgpu_vm_bo_map(adev, *bo_va, AMDGPU_CSA_VADDR, 0, AMDGPU_CSA_SIZE,
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
			     AMDGPU_PTE_EXECUTABLE);
	if (r) {
		DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
		amdgpu_vm_bo_rmv(adev, *bo_va);
		ttm_eu_backoff_reservation(&ticket, &list);
		return r;
	}

	ttm_eu_backoff_reservation(&ticket, &list);
	return 0;
}
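
/*
 * Usage sketch (illustrative, assuming the conventional SR-IOV flow; the
 * caller-side names below are placeholders, not quotes of the driver):
 * the static CSA is allocated once at device init time and then mapped
 * into each process VM right after amdgpu_vm_init() succeeds.
 *
 *	struct amdgpu_bo_va *csa_va;
 *	int r;
 *
 *	r = amdgpu_allocate_static_csa(adev);          (once, at device init)
 *	if (r)
 *		return r;
 *	...
 *	r = amdgpu_map_static_csa(adev, vm, &csa_va);  (per-VM)
 *	if (r)
 *		goto error_vm;
 *
 * Each GFX submission from that VM then references AMDGPU_CSA_VADDR in
 * its META_DATA init package, as described in the comment above.
 */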

void amdgpu_virt_init_setting(struct amdgpu_device *adev)
{
	/* enable virtual display */
	adev->mode_info.num_crtc = 1;
	adev->enable_virtual_display = true;
	adev->cg_flags = 0;
	adev->pg_flags = 0;
}

uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	signed long r;
	unsigned long flags;
	uint32_t val, seq;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;

	BUG_ON(!ring->funcs->emit_rreg);

	spin_lock_irqsave(&kiq->ring_lock, flags);
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_rreg(ring, reg);
	amdgpu_fence_emit_polling(ring, &seq);
	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	if (r < 1) {
		DRM_ERROR("wait for kiq fence error: %ld\n", r);
		return ~0;
	}
	val = adev->wb.wb[adev->virt.reg_val_offs];

	return val;
}

void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	signed long r;
	unsigned long flags;
	uint32_t seq;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;

	BUG_ON(!ring->funcs->emit_wreg);

	spin_lock_irqsave(&kiq->ring_lock, flags);
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_wreg(ring, reg, v);
	amdgpu_fence_emit_polling(ring, &seq);
	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	if (r < 1)
		DRM_ERROR("wait for kiq fence error: %ld\n", r);
}
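
/*
 * A minimal sketch of how the two KIQ helpers above are used: on a VF in
 * runtime mode, plain MMIO access to most registers is blocked, so the
 * generic register accessors fall back to the KIQ ring. Roughly (the
 * exact dispatch lives in the amdgpu_mm_rreg()/amdgpu_mm_wreg() paths
 * and may differ in detail):
 *
 *	if (amdgpu_sriov_runtime(adev))
 *		val = amdgpu_virt_kiq_rreg(adev, reg);
 *	else
 *		val = readl(((void __iomem *)adev->rmmio) + (reg * 4));
 *
 * The helpers are synchronous: they emit the access plus a polling fence
 * under kiq->ring_lock, then busy-wait up to MAX_KIQ_REG_WAIT usecs.
 */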

/**
 * amdgpu_virt_request_full_gpu() - request full GPU access
 * @adev: amdgpu device.
 * @init: whether the driver is in its init path.
 *
 * When starting driver init/fini, full GPU access needs to be requested
 * first.
 * Return: Zero on success, otherwise an error code.
 */
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->req_full_gpu) {
		r = virt->ops->req_full_gpu(adev, init);
		if (r)
			return r;

		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	}

	return 0;
}

/**
 * amdgpu_virt_release_full_gpu() - release full GPU access
 * @adev: amdgpu device.
 * @init: whether the driver is in its init path.
 *
 * When driver init/fini finishes, full GPU access needs to be released.
 * Return: Zero on success, otherwise an error code.
 */
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->rel_full_gpu) {
		r = virt->ops->rel_full_gpu(adev, init);
		if (r)
			return r;

		adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
	}
	return 0;
}
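
/*
 * The request/release pair is expected to bracket any init/fini sequence
 * that needs exclusive hardware access; a hedged sketch of the pairing
 * (error handling trimmed):
 *
 *	r = amdgpu_virt_request_full_gpu(adev, true);
 *	if (r)
 *		return r;
 *	... access hardware directly ...
 *	amdgpu_virt_release_full_gpu(adev, true);
 *
 * While full access is held, AMDGPU_SRIOV_CAPS_RUNTIME is cleared, so
 * register access does not have to detour through the KIQ helpers above.
 */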

/**
 * amdgpu_virt_reset_gpu() - reset the GPU
 * @adev: amdgpu device.
 *
 * Send a reset command to the GPU hypervisor to reset the GPU that the VM
 * is using.
 * Return: Zero on success, otherwise an error code.
 */
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->reset_gpu) {
		r = virt->ops->reset_gpu(adev);
		if (r)
			return r;

		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	}

	return 0;
}

/**
 * amdgpu_virt_wait_reset() - wait for the GPU reset to complete
 * @adev: amdgpu device.
 *
 * Wait until the GPU reset has completed.
 * Return: Zero on success, otherwise an error code.
 */
int amdgpu_virt_wait_reset(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;

	if (!virt->ops || !virt->ops->wait_reset)
		return -EINVAL;

	return virt->ops->wait_reset(adev);
}

/**
 * amdgpu_virt_alloc_mm_table() - allocate memory for the MM table
 * @adev: amdgpu device.
 *
 * The MM table is used by UVD and VCE for their initialization.
 * Return: Zero on success, otherwise an error code.
 */
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_sriov_vf(adev) || adev->virt.mm_table.gpu_addr)
		return 0;

	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM,
				    &adev->virt.mm_table.bo,
				    &adev->virt.mm_table.gpu_addr,
				    (void *)&adev->virt.mm_table.cpu_addr);
	if (r) {
		DRM_ERROR("failed to alloc mm table and error = %d.\n", r);
		return r;
	}

	memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
	DRM_INFO("MM table gpu addr = 0x%llx, cpu addr = %p.\n",
		 adev->virt.mm_table.gpu_addr,
		 adev->virt.mm_table.cpu_addr);
	return 0;
}

/**
 * amdgpu_virt_free_mm_table() - free the MM table memory
 * @adev: amdgpu device.
 *
 * Free the memory that was allocated for the MM table.
 */
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
{
	if (!amdgpu_sriov_vf(adev) || !adev->virt.mm_table.gpu_addr)
		return;

	amdgpu_bo_free_kernel(&adev->virt.mm_table.bo,
			      &adev->virt.mm_table.gpu_addr,
			      (void *)&adev->virt.mm_table.cpu_addr);
	adev->virt.mm_table.gpu_addr = 0;
}

int amdgpu_virt_fw_reserve_get_checksum(void *obj,
					unsigned long obj_size,
					unsigned int key,
					unsigned int chksum)
{
	unsigned int ret = key;
	unsigned long i = 0;
	unsigned char *pos;

	pos = (unsigned char *)obj;
	/* calculate checksum */
	for (i = 0; i < obj_size; ++i)
		ret += *(pos + i);
	/* subtract the chksum bytes themselves */
	pos = (unsigned char *)&chksum;
	for (i = 0; i < sizeof(chksum); ++i)
		ret -= *(pos + i);
	return ret;
}
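
/*
 * Subtracting the chksum bytes makes verification symmetric with
 * generation: the producer computes the value with the checksum field
 * zeroed, and the consumer recomputes over the whole buffer (stored
 * checksum included) while passing the stored value as @chksum. A worked
 * toy example with made-up numbers:
 *
 *	key = 100, payload bytes (checksum field zeroed) sum to 156
 *	generation:   checksum = 100 + 156 = 256, stored in the field
 *	              (256 = 0x00000100, so its bytes add 1 to the sum)
 *	verification: ret = 100 + (156 + 1) - 1 = 256 == stored checksum
 *
 * This is exactly the checkval == checksum test performed in
 * amdgpu_virt_init_data_exchange() below.
 */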

void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
{
	uint32_t pf2vf_size = 0;
	uint32_t checksum = 0;
	uint32_t checkval;
	char *str;

	adev->virt.fw_reserve.p_pf2vf = NULL;
	adev->virt.fw_reserve.p_vf2pf = NULL;

	if (adev->fw_vram_usage.va != NULL) {
		adev->virt.fw_reserve.p_pf2vf =
			(struct amdgim_pf2vf_info_header *)(
			adev->fw_vram_usage.va + AMDGIM_DATAEXCHANGE_OFFSET);
		AMDGPU_FW_VRAM_PF2VF_READ(adev, header.size, &pf2vf_size);
		AMDGPU_FW_VRAM_PF2VF_READ(adev, checksum, &checksum);
		AMDGPU_FW_VRAM_PF2VF_READ(adev, feature_flags, &adev->virt.gim_feature);

		/* the pf2vf message must fit within one 4K page */
		if (pf2vf_size > 0 && pf2vf_size < 4096) {
			checkval = amdgpu_virt_fw_reserve_get_checksum(
				adev->virt.fw_reserve.p_pf2vf, pf2vf_size,
				adev->virt.fw_reserve.checksum_key, checksum);
			if (checkval == checksum) {
				adev->virt.fw_reserve.p_vf2pf =
					((void *)adev->virt.fw_reserve.p_pf2vf +
					pf2vf_size);
				memset((void *)adev->virt.fw_reserve.p_vf2pf, 0,
				       sizeof(amdgim_vf2pf_info));
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.version,
					AMDGPU_FW_VRAM_VF2PF_VER);
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.size,
					sizeof(amdgim_vf2pf_info));
				AMDGPU_FW_VRAM_VF2PF_READ(adev, driver_version,
					&str);
#ifdef MODULE
				if (THIS_MODULE->version != NULL)
					strcpy(str, THIS_MODULE->version);
				else
#endif
					strcpy(str, "N/A");
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, driver_cert,
					0);
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, checksum,
					amdgpu_virt_fw_reserve_get_checksum(
					adev->virt.fw_reserve.p_vf2pf,
					pf2vf_size,
					adev->virt.fw_reserve.checksum_key, 0));
			}
		}
	}
}
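
/*
 * Layout of the exchange region as implied by the code above (offsets
 * relative to adev->fw_vram_usage.va; this is a reading of this file,
 * not a normative description of the GIM protocol):
 *
 *	+AMDGIM_DATAEXCHANGE_OFFSET               pf2vf info, PF-written,
 *	                                          checksummed, size < 4K
 *	+AMDGIM_DATAEXCHANGE_OFFSET + pf2vf_size  vf2pf info, zeroed and
 *	                                          filled in by the VF here
 */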