/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
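
/*
 * amdgpu_allocate_static_csa() - allocate the per-device static CSA
 * Allocates the Context Save Area buffer as a pinned kernel BO in VRAM,
 * records its VMID0 address and zero-fills it. The CSA is needed for
 * SR-IOV GFX preemption (see amdgpu_map_static_csa() below).
 */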
int amdgpu_allocate_static_csa(struct amdgpu_device *adev)
{
	int r;
	void *ptr;

	r = amdgpu_bo_create_kernel(adev, AMDGPU_CSA_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->virt.csa_obj,
				    &adev->virt.csa_vmid0_addr, &ptr);
	if (r)
		return r;

	memset(ptr, 0, AMDGPU_CSA_SIZE);
	return 0;
}
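
/*
 * Illustrative sketch only (assumption, not code from this file): the device
 * init path for an SR-IOV VF would allocate the static CSA once, before any
 * VM can map it:
 *
 *	if (amdgpu_sriov_vf(adev)) {
 *		r = amdgpu_allocate_static_csa(adev);
 *		if (r)
 *			return r;
 *	}
 */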

/*
 * amdgpu_map_static_csa() should be called during amdgpu_vm_init(); it maps
 * the virtual address "AMDGPU_VA_RESERVED_SIZE - AMDGPU_CSA_SIZE"
 * (AMDGPU_CSA_VADDR) into this VM, and each GFX command submission must use
 * this virtual address in its META_DATA init package to support SR-IOV GFX
 * preemption.
 */
int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	int r;
	struct amdgpu_bo_va *bo_va;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	struct amdgpu_bo_list_entry pd;
	struct ttm_validate_buffer csa_tv;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&csa_tv.head);
	csa_tv.bo = &adev->virt.csa_obj->tbo;
	csa_tv.shared = true;

	list_add(&csa_tv.head, &list);
	amdgpu_vm_get_pd_bo(vm, &list, &pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r) {
		DRM_ERROR("failed to reserve CSA, PD BOs: err=%d\n", r);
		return r;
	}

	bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj);
	if (!bo_va) {
		ttm_eu_backoff_reservation(&ticket, &list);
		DRM_ERROR("failed to create bo_va for static CSA\n");
		return -ENOMEM;
	}

	r = amdgpu_vm_alloc_pts(adev, bo_va->vm, AMDGPU_CSA_VADDR,
				AMDGPU_CSA_SIZE);
	if (r) {
		DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
		amdgpu_vm_bo_rmv(adev, bo_va);
		ttm_eu_backoff_reservation(&ticket, &list);
		return r;
	}

	r = amdgpu_vm_bo_map(adev, bo_va, AMDGPU_CSA_VADDR, 0, AMDGPU_CSA_SIZE,
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
			     AMDGPU_PTE_EXECUTABLE);
	if (r) {
		DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
		amdgpu_vm_bo_rmv(adev, bo_va);
		ttm_eu_backoff_reservation(&ticket, &list);
		return r;
	}

	vm->csa_bo_va = bo_va;
	ttm_eu_backoff_reservation(&ticket, &list);
	return 0;
}
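
/*
 * Illustrative sketch only (assumption; "fpriv" is a hypothetical per-open
 * private struct): a per-fd open path would initialize the VM and then map
 * the static CSA into it on SR-IOV VFs:
 *
 *	r = amdgpu_vm_init(adev, &fpriv->vm);
 *	if (r)
 *		goto error;
 *	if (amdgpu_sriov_vf(adev)) {
 *		r = amdgpu_map_static_csa(adev, &fpriv->vm);
 *		if (r)
 *			goto error;
 *	}
 */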

void amdgpu_virt_init_setting(struct amdgpu_device *adev)
{
	/* enable virtual display */
	adev->mode_info.num_crtc = 1;
	adev->enable_virtual_display = true;
	/* disable clock- and powergating for the virtual function */
	adev->cg_flags = 0;
	adev->pg_flags = 0;

	mutex_init(&adev->virt.lock_kiq);
	mutex_init(&adev->virt.lock_reset);
}

uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	signed long r;
	uint32_t val;
	struct dma_fence *f;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;

	BUG_ON(!ring->funcs->emit_rreg);

	mutex_lock(&adev->virt.lock_kiq);
	amdgpu_ring_alloc(ring, 32);
	/* ask the KIQ to read the register and write the value into the
	 * writeback slot at adev->virt.reg_val_offs */
	amdgpu_ring_emit_rreg(ring, reg);
	amdgpu_fence_emit(ring, &f);
	amdgpu_ring_commit(ring);
	mutex_unlock(&adev->virt.lock_kiq);

	r = dma_fence_wait(f, false);
	if (r)
		DRM_ERROR("wait for kiq fence error: %ld.\n", r);
	dma_fence_put(f);

	val = adev->wb.wb[adev->virt.reg_val_offs];

	return val;
}

void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	signed long r;
	struct dma_fence *f;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;

	BUG_ON(!ring->funcs->emit_wreg);

	mutex_lock(&adev->virt.lock_kiq);
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_wreg(ring, reg, v);
	amdgpu_fence_emit(ring, &f);
	amdgpu_ring_commit(ring);
	mutex_unlock(&adev->virt.lock_kiq);

	r = dma_fence_wait(f, false);
	if (r)
		DRM_ERROR("wait for kiq fence error: %ld.\n", r);
	dma_fence_put(f);
}
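
/*
 * Illustrative sketch only (assumption about the callers, not code from this
 * file): under runtime SR-IOV the generic MMIO accessors are expected to
 * route through the KIQ helpers above instead of touching registers
 * directly, roughly:
 *
 *	if (amdgpu_sriov_runtime(adev))
 *		return amdgpu_virt_kiq_rreg(adev, reg);
 *	return readl(((void __iomem *)adev->rmmio) + (reg * 4));
 */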

/**
 * amdgpu_virt_request_full_gpu() - request full gpu access
 * @adev: amdgpu device.
 * @init: is driver init time.
 * When starting driver init/fini, full gpu access must be requested first.
 * Return: Zero if the request succeeds, otherwise an error.
 */
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->req_full_gpu) {
		r = virt->ops->req_full_gpu(adev, init);
		if (r)
			return r;

		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	}

	return 0;
}

/**
 * amdgpu_virt_release_full_gpu() - release full gpu access
 * @adev: amdgpu device.
 * @init: is driver init time.
 * When driver init/fini finishes, full gpu access must be released.
 * Return: Zero if the release succeeds, otherwise an error.
 */
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->rel_full_gpu) {
		r = virt->ops->rel_full_gpu(adev, init);
		if (r)
			return r;

		adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
	}
	return 0;
}
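
/*
 * Illustrative sketch only (assumption): driver init on a VF brackets the
 * whole hardware bring-up with an exclusive-access window, clearing and then
 * restoring AMDGPU_SRIOV_CAPS_RUNTIME via the two helpers above:
 *
 *	r = amdgpu_virt_request_full_gpu(adev, true);
 *	if (r)
 *		return r;
 *	... program the hardware directly ...
 *	amdgpu_virt_release_full_gpu(adev, true);
 */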

/**
 * amdgpu_virt_reset_gpu() - reset gpu
 * @adev: amdgpu device.
 * Send a reset command to the GPU hypervisor to reset the GPU that the VM
 * is using.
 * Return: Zero if the reset succeeds, otherwise an error.
 */
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->reset_gpu) {
		r = virt->ops->reset_gpu(adev);
		if (r)
			return r;

		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	}

	return 0;
}
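
/*
 * Illustrative sketch only (assumption): a VF recovery path that has decided
 * the GPU is hung would ask the hypervisor for a reset before
 * re-initializing its IP blocks:
 *
 *	r = amdgpu_virt_reset_gpu(adev);
 *	if (r)
 *		DRM_ERROR("hypervisor GPU reset failed: %d\n", r);
 */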

/**
 * amdgpu_virt_alloc_mm_table() - alloc memory for the mm table
 * @adev: amdgpu device.
 * The MM table is used by UVD and VCE for their initialization under SR-IOV.
 * Return: Zero if the allocation succeeds, otherwise an error.
 */
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_sriov_vf(adev) || adev->virt.mm_table.gpu_addr)
		return 0;

	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM,
				    &adev->virt.mm_table.bo,
				    &adev->virt.mm_table.gpu_addr,
				    (void *)&adev->virt.mm_table.cpu_addr);
	if (r) {
		DRM_ERROR("failed to alloc mm table, error = %d.\n", r);
		return r;
	}

	memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
	DRM_INFO("MM table gpu addr = 0x%llx, cpu addr = %p.\n",
		 adev->virt.mm_table.gpu_addr,
		 adev->virt.mm_table.cpu_addr);
	return 0;
}
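
/*
 * Illustrative sketch only (assumption about the multimedia init path;
 * "init_table_addr" is a hypothetical local): a UVD/VCE hw_init for an
 * SR-IOV VF would allocate the table once and then hand its GPU address to
 * the multimedia scheduler (MMSCH) firmware:
 *
 *	r = amdgpu_virt_alloc_mm_table(adev);
 *	if (r)
 *		return r;
 *	init_table_addr = adev->virt.mm_table.gpu_addr;	// consumed by MMSCH
 */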

/**
 * amdgpu_virt_free_mm_table() - free mm table memory
 * @adev: amdgpu device.
 * Free the MM table memory.
 */
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
{
	if (!amdgpu_sriov_vf(adev) || !adev->virt.mm_table.gpu_addr)
		return;

	amdgpu_bo_free_kernel(&adev->virt.mm_table.bo,
			      &adev->virt.mm_table.gpu_addr,
			      (void *)&adev->virt.mm_table.cpu_addr);
	adev->virt.mm_table.gpu_addr = 0;
}
275}