/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#define MAX_KIQ_REG_WAIT	100000000 /* in usecs */

uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev)
{
	uint64_t addr = adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT;

	addr -= AMDGPU_VA_RESERVED_SIZE;

	if (addr >= AMDGPU_VA_HOLE_START)
		addr |= AMDGPU_VA_HOLE_END;

	return addr;
}
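
/*
 * Worked example for amdgpu_csa_vaddr() (a sketch; the concrete values are
 * hypothetical and depend on the ASIC's VA configuration):
 *
 *	top  = max_pfn << AMDGPU_GPU_PAGE_SHIFT;   // end of the VM space
 *	addr = top - AMDGPU_VA_RESERVED_SIZE;      // CSA in the reserved tail
 *
 * On ASICs with a canonical-address hole, anything at or above
 * AMDGPU_VA_HOLE_START must live in the upper range, which the OR with
 * AMDGPU_VA_HOLE_END achieves by setting the high sign-extension bits.
 */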

bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
{
	/*
	 * By now all MMIO pages except the mailbox are blocked if blocking
	 * is enabled in the hypervisor. Use SCRATCH_REG0 for the test.
	 */
	return RREG32_NO_KIQ(0xc040) == 0xffffffff;
}

int amdgpu_allocate_static_csa(struct amdgpu_device *adev)
{
	int r;
	void *ptr;

	r = amdgpu_bo_create_kernel(adev, AMDGPU_CSA_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->virt.csa_obj,
				    &adev->virt.csa_vmid0_addr, &ptr);
	if (r)
		return r;

	memset(ptr, 0, AMDGPU_CSA_SIZE);
	return 0;
}

void amdgpu_free_static_csa(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->virt.csa_obj,
			      &adev->virt.csa_vmid0_addr,
			      NULL);
}

/*
 * amdgpu_map_static_csa() should be called during amdgpu_vm_init().
 * It maps the virtual address returned by amdgpu_csa_vaddr() into this VM,
 * and each GFX command submission must use that virtual address within its
 * META_DATA init package to support SR-IOV gfx preemption.
 */
int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			  struct amdgpu_bo_va **bo_va)
{
	uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_VA_HOLE_MASK;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	struct amdgpu_bo_list_entry pd;
	struct ttm_validate_buffer csa_tv;
	int r;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&csa_tv.head);
	csa_tv.bo = &adev->virt.csa_obj->tbo;
	csa_tv.shared = true;

	list_add(&csa_tv.head, &list);
	amdgpu_vm_get_pd_bo(vm, &list, &pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r) {
		DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
		return r;
	}

	*bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj);
	if (!*bo_va) {
		ttm_eu_backoff_reservation(&ticket, &list);
		DRM_ERROR("failed to create bo_va for static CSA\n");
		return -ENOMEM;
	}

	r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, csa_addr,
				AMDGPU_CSA_SIZE);
	if (r) {
		DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
		amdgpu_vm_bo_rmv(adev, *bo_va);
		ttm_eu_backoff_reservation(&ticket, &list);
		return r;
	}

	r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, AMDGPU_CSA_SIZE,
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
			     AMDGPU_PTE_EXECUTABLE);

	if (r) {
		DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
		amdgpu_vm_bo_rmv(adev, *bo_va);
		ttm_eu_backoff_reservation(&ticket, &list);
		return r;
	}

	ttm_eu_backoff_reservation(&ticket, &list);
	return 0;
}
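
/*
 * Typical SR-IOV init-time sequence (a sketch, not code from this file;
 * error handling omitted):
 *
 *	r = amdgpu_allocate_static_csa(adev);        // once per device
 *	...
 *	r = amdgpu_map_static_csa(adev, vm, &bo_va); // once per VM
 *
 * The CSA buffer object is allocated a single time, while the mapping at
 * amdgpu_csa_vaddr() is established for each VM during amdgpu_vm_init().
 */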

void amdgpu_virt_init_setting(struct amdgpu_device *adev)
{
	/* enable virtual display */
	adev->mode_info.num_crtc = 1;
	adev->enable_virtual_display = true;
	adev->cg_flags = 0;
	adev->pg_flags = 0;
}

uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	signed long r;
	unsigned long flags;
	uint32_t val, seq;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;

	BUG_ON(!ring->funcs->emit_rreg);

	spin_lock_irqsave(&kiq->ring_lock, flags);
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_rreg(ring, reg);
	amdgpu_fence_emit_polling(ring, &seq);
	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	if (r < 1) {
		DRM_ERROR("wait for kiq fence error: %ld\n", r);
		return ~0;
	}
	val = adev->wb.wb[adev->virt.reg_val_offs];

	return val;
}
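
/*
 * How this is reached (a sketch, paraphrasing the register accessors rather
 * than quoting them): under SR-IOV in runtime mode, the ordinary MMIO
 * accessors route reads through the KIQ, roughly:
 *
 *	if (amdgpu_sriov_runtime(adev))
 *		return amdgpu_virt_kiq_rreg(adev, reg);
 *
 * amdgpu_virt_kiq_wreg() below is the write-side counterpart.
 */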

void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	signed long r;
	unsigned long flags;
	uint32_t seq;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;

	BUG_ON(!ring->funcs->emit_wreg);

	spin_lock_irqsave(&kiq->ring_lock, flags);
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_wreg(ring, reg, v);
	amdgpu_fence_emit_polling(ring, &seq);
	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	if (r < 1)
		DRM_ERROR("wait for kiq fence error: %ld\n", r);
}

/**
 * amdgpu_virt_request_full_gpu() - request full gpu access
 * @adev:	amdgpu device.
 * @init:	is driver init time.
 * When starting driver init/fini, full gpu access must be requested first.
 * Return: Zero on success, otherwise an error code.
 */
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->req_full_gpu) {
		r = virt->ops->req_full_gpu(adev, init);
		if (r)
			return r;

		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	}

	return 0;
}
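
/*
 * Usage sketch (hypothetical caller; init/fini paths pair the request with
 * amdgpu_virt_release_full_gpu() below):
 *
 *	r = amdgpu_virt_request_full_gpu(adev, true);
 *	if (r)
 *		return r;
 *	... work that needs exclusive gpu access ...
 *	amdgpu_virt_release_full_gpu(adev, true);
 */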

/**
 * amdgpu_virt_release_full_gpu() - release full gpu access
 * @adev:	amdgpu device.
 * @init:	is driver init time.
 * When driver init/fini finishes, full gpu access must be released.
 * Return: Zero on success, otherwise an error code.
 */
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->rel_full_gpu) {
		r = virt->ops->rel_full_gpu(adev, init);
		if (r)
			return r;

		adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
	}
	return 0;
}

/**
 * amdgpu_virt_reset_gpu() - reset gpu
 * @adev:	amdgpu device.
 * Send a reset command to the GPU hypervisor to reset the GPU that the VM
 * is using.
 * Return: Zero on success, otherwise an error code.
 */
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->reset_gpu) {
		r = virt->ops->reset_gpu(adev);
		if (r)
			return r;

		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	}

	return 0;
}

/**
 * amdgpu_virt_wait_reset() - wait for gpu reset to complete
 * @adev:	amdgpu device.
 * Wait until the GPU reset has completed.
 * Return: Zero on success, otherwise an error code.
 */
int amdgpu_virt_wait_reset(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;

	if (!virt->ops || !virt->ops->wait_reset)
		return -EINVAL;

	return virt->ops->wait_reset(adev);
}
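
/*
 * Recovery-flow sketch (an assumption about a caller driving a VF-initiated
 * reset, not code from this file):
 *
 *	r = amdgpu_virt_reset_gpu(adev);	// ask the hypervisor to reset
 *	if (!r)
 *		r = amdgpu_virt_wait_reset(adev);	// block until done
 */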

/**
 * amdgpu_virt_alloc_mm_table() - alloc memory for mm table
 * @adev:	amdgpu device.
 * The MM table is used by UVD and VCE for their initialization.
 * Return: Zero on success, otherwise an error code.
 */
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_sriov_vf(adev) || adev->virt.mm_table.gpu_addr)
		return 0;

	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM,
				    &adev->virt.mm_table.bo,
				    &adev->virt.mm_table.gpu_addr,
				    (void *)&adev->virt.mm_table.cpu_addr);
	if (r) {
		DRM_ERROR("failed to alloc mm table, error = %d.\n", r);
		return r;
	}

	memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
	DRM_INFO("MM table gpu addr = 0x%llx, cpu addr = %p.\n",
		 adev->virt.mm_table.gpu_addr,
		 adev->virt.mm_table.cpu_addr);
	return 0;
}

/**
 * amdgpu_virt_free_mm_table() - free mm table memory
 * @adev:	amdgpu device.
 * Free the MM table memory.
 */
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
{
	if (!amdgpu_sriov_vf(adev) || !adev->virt.mm_table.gpu_addr)
		return;

	amdgpu_bo_free_kernel(&adev->virt.mm_table.bo,
			      &adev->virt.mm_table.gpu_addr,
			      (void *)&adev->virt.mm_table.cpu_addr);
	adev->virt.mm_table.gpu_addr = 0;
}

int amdgpu_virt_fw_reserve_get_checksum(void *obj,
					unsigned long obj_size,
					unsigned int key,
					unsigned int chksum)
{
	unsigned int ret = key;
	unsigned long i = 0;
	unsigned char *pos;

	pos = (unsigned char *)obj;
	/* calculate the checksum over the whole object */
	for (i = 0; i < obj_size; ++i)
		ret += *(pos + i);
	/* subtract the bytes of the checksum field itself */
	pos = (unsigned char *)&chksum;
	for (i = 0; i < sizeof(chksum); ++i)
		ret -= *(pos + i);
	return ret;
}
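
/*
 * Worked example (hypothetical values): for obj = { 0x01, 0x02, 0x03, 0x04 },
 * obj_size = 4, key = 0x10 and chksum = 0, the result is
 *
 *	0x10 + 0x01 + 0x02 + 0x03 + 0x04 - (0 + 0 + 0 + 0) = 0x1a
 *
 * Passing the previously stored checksum as @chksum cancels its own bytes
 * out of the sum, so a recomputation over a buffer that embeds its checksum
 * field can be compared directly against the stored value, as done below.
 */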

void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
{
	uint32_t pf2vf_size = 0;
	uint32_t checksum = 0;
	uint32_t checkval;
	char *str;

	adev->virt.fw_reserve.p_pf2vf = NULL;
	adev->virt.fw_reserve.p_vf2pf = NULL;

	if (adev->fw_vram_usage.va != NULL) {
		adev->virt.fw_reserve.p_pf2vf =
			(struct amdgim_pf2vf_info_header *)(
			adev->fw_vram_usage.va + AMDGIM_DATAEXCHANGE_OFFSET);
		AMDGPU_FW_VRAM_PF2VF_READ(adev, header.size, &pf2vf_size);
		AMDGPU_FW_VRAM_PF2VF_READ(adev, checksum, &checksum);
		AMDGPU_FW_VRAM_PF2VF_READ(adev, feature_flags, &adev->virt.gim_feature);

		/* the pf2vf message must fit within 4K */
		if (pf2vf_size > 0 && pf2vf_size < 4096) {
			checkval = amdgpu_virt_fw_reserve_get_checksum(
				adev->virt.fw_reserve.p_pf2vf, pf2vf_size,
				adev->virt.fw_reserve.checksum_key, checksum);
			if (checkval == checksum) {
				adev->virt.fw_reserve.p_vf2pf =
					((void *)adev->virt.fw_reserve.p_pf2vf +
					pf2vf_size);
				memset((void *)adev->virt.fw_reserve.p_vf2pf, 0,
					sizeof(amdgim_vf2pf_info));
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.version,
					AMDGPU_FW_VRAM_VF2PF_VER);
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.size,
					sizeof(amdgim_vf2pf_info));
				AMDGPU_FW_VRAM_VF2PF_READ(adev, driver_version,
					&str);
#ifdef MODULE
				if (THIS_MODULE->version != NULL)
					strcpy(str, THIS_MODULE->version);
				else
#endif
					strcpy(str, "N/A");
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, driver_cert,
					0);
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, checksum,
					amdgpu_virt_fw_reserve_get_checksum(
					adev->virt.fw_reserve.p_vf2pf,
					pf2vf_size,
					adev->virt.fw_reserve.checksum_key, 0));
			}
		}
	}
}
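
/*
 * Reserved-VRAM layout implied by the exchange above (a sketch):
 *
 *	fw_vram_usage.va + AMDGIM_DATAEXCHANGE_OFFSET
 *	+------------------------+  <- p_pf2vf, written by the host (GIM)
 *	| amdgim_pf2vf_info      |     header.size == pf2vf_size (< 4K)
 *	+------------------------+  <- p_vf2pf = (void *)p_pf2vf + pf2vf_size
 *	| amdgim_vf2pf_info      |     written by this guest driver
 *	+------------------------+
 */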