/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#define MAX_KIQ_REG_WAIT	100000000 /* in usecs, 100s */

bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
{
	/*
	 * By now all MMIO pages except the mailbox are blocked if
	 * blocking is enabled in the hypervisor.  Use SCRATCH_REG0
	 * to test: a blocked read returns all ones.
	 */
	return RREG32_NO_KIQ(0xc040) == 0xffffffff;
}
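
/*
 * Illustrative sketch (compiled out, not part of the driver): a caller
 * would typically probe for blocked MMIO early in VF bring-up and bail
 * out, since only the mailbox remains usable.  The helper name and
 * error code below are hypothetical.
 */
#if 0
static int example_check_mmio(struct amdgpu_device *adev)
{
	if (amdgpu_virt_mmio_blocked(adev)) {
		DRM_ERROR("VF MMIO access blocked by hypervisor\n");
		return -EACCES;
	}
	return 0;
}
#endif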

int amdgpu_allocate_static_csa(struct amdgpu_device *adev)
{
	int r;
	void *ptr;

	r = amdgpu_bo_create_kernel(adev, AMDGPU_CSA_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->virt.csa_obj,
				    &adev->virt.csa_vmid0_addr, &ptr);
	if (r)
		return r;

	memset(ptr, 0, AMDGPU_CSA_SIZE);
	return 0;
}

/*
 * amdgpu_map_static_csa() should be called during amdgpu_vm_init().
 * It maps the virtual address "AMDGPU_VA_RESERVED_SIZE - AMDGPU_CSA_SIZE"
 * into this VM, and every GFX command submission should use this virtual
 * address within its META_DATA init package so that SR-IOV gfx preemption
 * works.
 */

int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			  struct amdgpu_bo_va **bo_va)
{
	struct ww_acquire_ctx ticket;
	struct list_head list;
	struct amdgpu_bo_list_entry pd;
	struct ttm_validate_buffer csa_tv;
	int r;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&csa_tv.head);
	csa_tv.bo = &adev->virt.csa_obj->tbo;
	csa_tv.shared = true;

	list_add(&csa_tv.head, &list);
	amdgpu_vm_get_pd_bo(vm, &list, &pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r) {
		DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
		return r;
	}

	*bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj);
	if (!*bo_va) {
		ttm_eu_backoff_reservation(&ticket, &list);
		DRM_ERROR("failed to create bo_va for static CSA\n");
		return -ENOMEM;
	}

	r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, AMDGPU_CSA_VADDR,
				AMDGPU_CSA_SIZE);
	if (r) {
		DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
		amdgpu_vm_bo_rmv(adev, *bo_va);
		ttm_eu_backoff_reservation(&ticket, &list);
		return r;
	}

	r = amdgpu_vm_bo_map(adev, *bo_va, AMDGPU_CSA_VADDR, 0, AMDGPU_CSA_SIZE,
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
			     AMDGPU_PTE_EXECUTABLE);
	if (r) {
		DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
		amdgpu_vm_bo_rmv(adev, *bo_va);
		ttm_eu_backoff_reservation(&ticket, &list);
		return r;
	}

	ttm_eu_backoff_reservation(&ticket, &list);
	return 0;
}
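
/*
 * Illustrative sketch (compiled out): the expected call sequence during
 * per-process VM creation under SR-IOV, as described in the comment
 * above amdgpu_map_static_csa().  The caller name and its locals are
 * hypothetical.
 */
#if 0
static int example_vm_init_csa(struct amdgpu_device *adev,
			       struct amdgpu_vm *vm)
{
	struct amdgpu_bo_va *csa_va = NULL;
	int r;

	if (!amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_map_static_csa(adev, vm, &csa_va);
	if (r)
		return r;

	/* GFX submissions for this VM now reference AMDGPU_CSA_VADDR */
	return 0;
}
#endif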

void amdgpu_virt_init_setting(struct amdgpu_device *adev)
{
	/* enable virtual display */
	adev->mode_info.num_crtc = 1;
	adev->enable_virtual_display = true;
	/* clock/power gating is handled by the host, so disable it in the VF */
	adev->cg_flags = 0;
	adev->pg_flags = 0;

	mutex_init(&adev->virt.lock_reset);
}

uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	signed long r;
	unsigned long flags;
	uint32_t val, seq;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;

	BUG_ON(!ring->funcs->emit_rreg);

	spin_lock_irqsave(&kiq->ring_lock, flags);
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_rreg(ring, reg);
	amdgpu_fence_emit_polling(ring, &seq);
	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	if (r < 1) {
		DRM_ERROR("wait for kiq fence error: %ld\n", r);
		return ~0;
	}
	/* the KIQ has written the register value to the writeback slot */
	val = adev->wb.wb[adev->virt.reg_val_offs];

	return val;
}

void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	signed long r;
	unsigned long flags;
	uint32_t seq;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;

	BUG_ON(!ring->funcs->emit_wreg);

	spin_lock_irqsave(&kiq->ring_lock, flags);
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_wreg(ring, reg, v);
	amdgpu_fence_emit_polling(ring, &seq);
	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	if (r < 1)
		DRM_ERROR("wait for kiq fence error: %ld\n", r);
}
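
/*
 * Illustrative sketch (compiled out): a read-modify-write through the
 * two KIQ helpers above.  On a VF in runtime mode, register access must
 * go through the KIQ ring instead of direct MMIO.  The register offset,
 * mask, and helper name are hypothetical.
 */
#if 0
static void example_kiq_rmw(struct amdgpu_device *adev,
			    uint32_t reg, uint32_t mask, uint32_t bits)
{
	uint32_t val = amdgpu_virt_kiq_rreg(adev, reg);

	val = (val & ~mask) | (bits & mask);
	amdgpu_virt_kiq_wreg(adev, reg, val);
}
#endif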

/**
 * amdgpu_virt_request_full_gpu() - request full gpu access
 * @adev: amdgpu device.
 * @init: true if called at driver init/fini time.
 * Before driver init/fini starts, full gpu access must be requested first.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->req_full_gpu) {
		r = virt->ops->req_full_gpu(adev, init);
		if (r)
			return r;

		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	}

	return 0;
}

/**
 * amdgpu_virt_release_full_gpu() - release full gpu access
 * @adev: amdgpu device.
 * @init: true if called at driver init/fini time.
 * When driver init/fini finishes, full gpu access must be released again.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->rel_full_gpu) {
		r = virt->ops->rel_full_gpu(adev, init);
		if (r)
			return r;

		adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
	}
	return 0;
}
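
/*
 * Illustrative sketch (compiled out): driver init under SR-IOV brackets
 * the hardware bring-up with a request/release pair, so the hypervisor
 * grants exclusive access only while it is actually needed.  The caller
 * name is hypothetical.
 */
#if 0
static int example_sriov_init(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_virt_request_full_gpu(adev, true);
	if (r)
		return r;

	/* ... program the hardware while exclusive access is held ... */

	return amdgpu_virt_release_full_gpu(adev, true);
}
#endif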

/**
 * amdgpu_virt_reset_gpu() - reset gpu
 * @adev: amdgpu device.
 * Send a reset command to the GPU hypervisor to reset the GPU that the
 * VM is using.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->reset_gpu) {
		r = virt->ops->reset_gpu(adev);
		if (r)
			return r;

		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	}

	return 0;
}

/**
 * amdgpu_virt_alloc_mm_table() - alloc memory for mm table
 * @adev: amdgpu device.
 * The MM table is used by UVD and VCE for their initialization.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_sriov_vf(adev) || adev->virt.mm_table.gpu_addr)
		return 0;

	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM,
				    &adev->virt.mm_table.bo,
				    &adev->virt.mm_table.gpu_addr,
				    (void *)&adev->virt.mm_table.cpu_addr);
	if (r) {
		DRM_ERROR("failed to alloc mm table, error = %d.\n", r);
		return r;
	}

	memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
	DRM_INFO("MM table gpu addr = 0x%llx, cpu addr = %p.\n",
		 adev->virt.mm_table.gpu_addr,
		 adev->virt.mm_table.cpu_addr);
	return 0;
}

/**
 * amdgpu_virt_free_mm_table() - free mm table memory
 * @adev: amdgpu device.
 * Free the MM table memory.
 */
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
{
	if (!amdgpu_sriov_vf(adev) || !adev->virt.mm_table.gpu_addr)
		return;

	amdgpu_bo_free_kernel(&adev->virt.mm_table.bo,
			      &adev->virt.mm_table.gpu_addr,
			      (void *)&adev->virt.mm_table.cpu_addr);
	adev->virt.mm_table.gpu_addr = 0;
}
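
/*
 * Illustrative sketch (compiled out): a media IP block (UVD/VCE) would
 * allocate the MM table in its sw_init hook and free it again in
 * sw_fini; both helpers are no-ops except on an SR-IOV VF.  The hook
 * names are hypothetical.
 */
#if 0
static int example_uvd_sw_init(struct amdgpu_device *adev)
{
	return amdgpu_virt_alloc_mm_table(adev);
}

static void example_uvd_sw_fini(struct amdgpu_device *adev)
{
	amdgpu_virt_free_mm_table(adev);
}
#endif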

int amdgpu_virt_fw_reserve_get_checksum(void *obj,
					unsigned long obj_size,
					unsigned int key,
					unsigned int chksum)
{
	unsigned int ret = key;
	unsigned long i = 0;
	unsigned char *pos;

	pos = (unsigned char *)obj;
	/* calculate checksum */
	for (i = 0; i < obj_size; ++i)
		ret += *(pos + i);
	/* minus the checksum itself */
	pos = (unsigned char *)&chksum;
	for (i = 0; i < sizeof(chksum); ++i)
		ret -= *(pos + i);
	return ret;
}
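
/*
 * Illustrative sketch (compiled out): verifying a received block with
 * amdgpu_virt_fw_reserve_get_checksum().  Because the stored checksum
 * bytes are subtracted back out, recomputing over the whole object with
 * the shared key must reproduce the stored value.  The helper name is
 * hypothetical.
 */
#if 0
static bool example_checksum_ok(struct amdgim_pf2vf_info_header *hdr,
				unsigned int key, unsigned int stored)
{
	return amdgpu_virt_fw_reserve_get_checksum(hdr, hdr->size,
						   key, stored) == stored;
}
#endif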

void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
{
	uint32_t pf2vf_ver = 0;
	uint32_t pf2vf_size = 0;
	uint32_t checksum = 0;
	uint32_t checkval;
	char *str;

	adev->virt.fw_reserve.p_pf2vf = NULL;
	adev->virt.fw_reserve.p_vf2pf = NULL;

	if (adev->fw_vram_usage.va != NULL) {
		adev->virt.fw_reserve.p_pf2vf =
			(struct amdgim_pf2vf_info_header *)(
			adev->fw_vram_usage.va + AMDGIM_DATAEXCHANGE_OFFSET);
		pf2vf_ver = adev->virt.fw_reserve.p_pf2vf->version;
		AMDGPU_FW_VRAM_PF2VF_READ(adev, header.size, &pf2vf_size);
		AMDGPU_FW_VRAM_PF2VF_READ(adev, checksum, &checksum);

		/* the pf2vf message must fit within one 4K page */
		if (pf2vf_size > 0 && pf2vf_size < 4096) {
			checkval = amdgpu_virt_fw_reserve_get_checksum(
				adev->virt.fw_reserve.p_pf2vf, pf2vf_size,
				adev->virt.fw_reserve.checksum_key, checksum);
			if (checkval == checksum) {
				/* the vf2pf area follows the pf2vf message */
				adev->virt.fw_reserve.p_vf2pf =
					((void *)adev->virt.fw_reserve.p_pf2vf +
					pf2vf_size);
				memset((void *)adev->virt.fw_reserve.p_vf2pf, 0,
				       sizeof(amdgim_vf2pf_info));
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.version,
							   AMDGPU_FW_VRAM_VF2PF_VER);
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.size,
							   sizeof(amdgim_vf2pf_info));
				AMDGPU_FW_VRAM_VF2PF_READ(adev, driver_version,
							  &str);
#ifdef MODULE
				if (THIS_MODULE->version != NULL)
					strcpy(str, THIS_MODULE->version);
				else
#endif
					strcpy(str, "N/A");
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, driver_cert,
							   0);
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, checksum,
					amdgpu_virt_fw_reserve_get_checksum(
					adev->virt.fw_reserve.p_vf2pf,
					pf2vf_size,
					adev->virt.fw_reserve.checksum_key, 0));
			}
		}
	}
}
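
/*
 * Illustrative sketch (compiled out): once init_data_exchange() has
 * validated the region, other code can read PF-provided fields through
 * the accessor macro.  The helper name is hypothetical; the field read
 * here mirrors the header.version access above.
 */
#if 0
static uint32_t example_read_pf2vf_version(struct amdgpu_device *adev)
{
	uint32_t ver = 0;

	if (adev->virt.fw_reserve.p_pf2vf)
		AMDGPU_FW_VRAM_PF2VF_READ(adev, header.version, &ver);
	return ver;
}
#endif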