/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
#include "amdgpu.h"

/*
 * GART
 * The GART (Graphics Address Remapping Table) is an aperture
 * in the GPU's address space. System pages can be mapped into
 * the aperture and look like contiguous pages from the GPU's
 * perspective. A page table maps the pages in the aperture
 * to the actual backing pages in system memory.
 *
 * Radeon GPUs support both an internal GART, as described above,
 * and AGP. AGP works similarly, but the GART table is configured
 * and maintained by the northbridge rather than the driver.
 * Radeon hw has a separate AGP aperture that is programmed to
 * point to the AGP aperture provided by the northbridge and the
 * requests are passed through to the northbridge aperture.
 * Both AGP and the internal GART can be used at the same time;
 * however, that is not currently supported by the driver.
 *
 * This file handles the common internal GART management.
 */
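
/*
 * Typical usage (illustrative sketch only; the real ordering, error
 * handling and the 8-byte PTE size live in the per-ASIC GMC code and
 * should be treated as assumptions here):
 *
 *	r = amdgpu_gart_init(adev);                  // dummy page + page counts
 *	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
 *	r = amdgpu_gart_table_vram_alloc(adev);      // BO backing the table
 *	r = amdgpu_gart_table_vram_pin(adev);        // pin + kmap before enable
 *	// ... program the hw, set adev->gart.ready, bind/unbind pages ...
 *	amdgpu_gart_table_vram_unpin(adev);          // on disable/suspend
 *	amdgpu_gart_table_vram_free(adev);
 *	amdgpu_gart_fini(adev);
 */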

/*
 * Common GART table functions.
 */

/**
 * amdgpu_gart_table_vram_alloc - allocate vram for gart page table
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate video memory for GART page table
 * (pcie r4xx, r5xx+). These asics require the
 * gart table to be in video memory.
 * Returns 0 for success, error for failure.
 */
int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.robj == NULL) {
		r = amdgpu_bo_create(adev, adev->gart.table_size,
				     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
				     NULL, NULL, 0, &adev->gart.robj);
		if (r)
			return r;
	}
	return 0;
}

/**
 * amdgpu_gart_table_vram_pin - pin gart page table in vram
 *
 * @adev: amdgpu_device pointer
 *
 * Pin the GART page table in vram so it will not be moved
 * by the memory manager (pcie r4xx, r5xx+). These asics require the
 * gart table to be in video memory.
 * Returns 0 for success, error for failure.
 */
int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev)
{
	uint64_t gpu_addr;
	int r;

	r = amdgpu_bo_reserve(adev->gart.robj, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_pin(adev->gart.robj,
			  AMDGPU_GEM_DOMAIN_VRAM, &gpu_addr);
	if (r) {
		amdgpu_bo_unreserve(adev->gart.robj);
		return r;
	}
	r = amdgpu_bo_kmap(adev->gart.robj, &adev->gart.ptr);
	if (r)
		amdgpu_bo_unpin(adev->gart.robj);
	amdgpu_bo_unreserve(adev->gart.robj);
	adev->gart.table_addr = gpu_addr;
	return r;
}

/**
 * amdgpu_gart_table_vram_unpin - unpin gart page table in vram
 *
 * @adev: amdgpu_device pointer
 *
 * Unpin the GART page table in vram (pcie r4xx, r5xx+).
 * These asics require the gart table to be in video memory.
 */
void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.robj == NULL)
		return;

	r = amdgpu_bo_reserve(adev->gart.robj, true);
	if (likely(r == 0)) {
		amdgpu_bo_kunmap(adev->gart.robj);
		amdgpu_bo_unpin(adev->gart.robj);
		amdgpu_bo_unreserve(adev->gart.robj);
		adev->gart.ptr = NULL;
	}
}

/**
 * amdgpu_gart_table_vram_free - free gart page table vram
 *
 * @adev: amdgpu_device pointer
 *
 * Free the video memory used for the GART page table
 * (pcie r4xx, r5xx+). These asics require the gart table to
 * be in video memory.
 */
void amdgpu_gart_table_vram_free(struct amdgpu_device *adev)
{
	if (adev->gart.robj == NULL)
		return;

	amdgpu_bo_unref(&adev->gart.robj);
}

/*
 * Common gart functions.
 */
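/*
 * Note on the index math used below: the GART is managed in GPU pages
 * (AMDGPU_GPU_PAGE_SIZE, 4 KiB) while system memory is managed in CPU
 * pages (PAGE_SIZE).  For an aperture offset, the GPU page index is
 * t = offset / AMDGPU_GPU_PAGE_SIZE and the CPU page index is
 * p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE).  As a worked example with
 * illustrative numbers: on a 64 KiB CPU page kernel, an offset of 0x20000
 * gives t = 32 and p = 2, i.e. each CPU page is backed by 16 consecutive
 * GART entries; with the common 4 KiB CPU pages the ratio is 1 and t == p.
 */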
/**
 * amdgpu_gart_unbind - unbind pages from the gart page table
 *
 * @adev: amdgpu_device pointer
 * @offset: offset into the GPU's gart aperture
 * @pages: number of pages to unbind
 *
 * Unbinds the requested pages from the gart page table and
 * replaces them with the dummy page (all asics).
 * Returns 0 for success, -EINVAL for failure.
 */
int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
		       int pages)
{
	unsigned t;
	unsigned p;
	int i, j;
	u64 page_base;
	/* Starting from VEGA10, system bit must be 0 to mean invalid. */
	uint64_t flags = 0;

	if (!adev->gart.ready) {
		WARN(1, "trying to unbind memory from uninitialized GART !\n");
		return -EINVAL;
	}

	t = offset / AMDGPU_GPU_PAGE_SIZE;
	p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
	for (i = 0; i < pages; i++, p++) {
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
		adev->gart.pages[p] = NULL;
#endif
		page_base = adev->dummy_page.addr;
		if (!adev->gart.ptr)
			continue;

		for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
			amdgpu_gart_set_pte_pde(adev, adev->gart.ptr,
						t, page_base, flags);
			page_base += AMDGPU_GPU_PAGE_SIZE;
		}
	}
	mb();
	amdgpu_gart_flush_gpu_tlb(adev, 0);
	return 0;
}

/**
 * amdgpu_gart_map - map dma_addresses into GART entries
 *
 * @adev: amdgpu_device pointer
 * @offset: offset into the GPU's gart aperture
 * @pages: number of pages to bind
 * @dma_addr: DMA addresses of pages
 * @flags: page table entry flags
 * @dst: CPU address of the gart table to write into
 *
 * Map the dma_addresses into GART entries (all asics).
 * Returns 0 for success, -EINVAL for failure.
 */
int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
		    int pages, dma_addr_t *dma_addr, uint64_t flags,
		    void *dst)
{
	uint64_t page_base;
	unsigned i, j, t;

	if (!adev->gart.ready) {
		WARN(1, "trying to bind memory to uninitialized GART !\n");
		return -EINVAL;
	}

	t = offset / AMDGPU_GPU_PAGE_SIZE;

	for (i = 0; i < pages; i++) {
		page_base = dma_addr[i];
		for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
			amdgpu_gart_set_pte_pde(adev, dst, t, page_base, flags);
			page_base += AMDGPU_GPU_PAGE_SIZE;
		}
	}
	return 0;
}

/**
 * amdgpu_gart_bind - bind pages into the gart page table
 *
 * @adev: amdgpu_device pointer
 * @offset: offset into the GPU's gart aperture
 * @pages: number of pages to bind
 * @pagelist: pages to bind
 * @dma_addr: DMA addresses of pages
 * @flags: page table entry flags
 *
 * Binds the requested pages to the gart page table
 * (all asics).
 * Returns 0 for success, -EINVAL for failure.
 */
int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
		     int pages, struct page **pagelist, dma_addr_t *dma_addr,
		     uint64_t flags)
{
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
	unsigned i, t, p;
#endif
	int r;

	if (!adev->gart.ready) {
		WARN(1, "trying to bind memory to uninitialized GART !\n");
		return -EINVAL;
	}

#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
	t = offset / AMDGPU_GPU_PAGE_SIZE;
	p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
	for (i = 0; i < pages; i++, p++)
		adev->gart.pages[p] = pagelist[i];
#endif

	if (!adev->gart.ptr)
		return 0;

	r = amdgpu_gart_map(adev, offset, pages, dma_addr, flags,
			    adev->gart.ptr);
	if (r)
		return r;

	mb();
	amdgpu_gart_flush_gpu_tlb(adev, 0);
	return 0;
}
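
/*
 * Illustrative caller sketch (an assumption, not code from this file):
 * a GTT backend that has already DMA-mapped its pages could bind them
 * at a given aperture offset roughly like this, where gtt_offset,
 * num_pages, pages and dma_addrs stand in for the caller's own state:
 *
 *	uint64_t flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM |
 *			 AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE;
 *
 *	r = amdgpu_gart_bind(adev, gtt_offset, num_pages,
 *			     pages, dma_addrs, flags);
 *	if (r)
 *		return r;
 */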

/**
 * amdgpu_gart_init - init the driver info for managing the gart
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the dummy page and init the gart driver info (all asics).
 * Returns 0 for success, error for failure.
 */
int amdgpu_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->dummy_page.page)
		return 0;

	/* We need PAGE_SIZE >= AMDGPU_GPU_PAGE_SIZE */
	if (PAGE_SIZE < AMDGPU_GPU_PAGE_SIZE) {
		DRM_ERROR("Page size is smaller than GPU page size!\n");
		return -EINVAL;
	}
	r = amdgpu_dummy_page_init(adev);
	if (r)
		return r;
	/* Compute table size */
	adev->gart.num_cpu_pages = adev->mc.gart_size / PAGE_SIZE;
	adev->gart.num_gpu_pages = adev->mc.gart_size / AMDGPU_GPU_PAGE_SIZE;
	DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
		 adev->gart.num_cpu_pages, adev->gart.num_gpu_pages);

#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
	/* Allocate pages table */
	adev->gart.pages = vzalloc(sizeof(void *) * adev->gart.num_cpu_pages);
	if (adev->gart.pages == NULL)
		return -ENOMEM;
#endif

	return 0;
}
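
/*
 * Sizing example for the computation above (illustrative numbers): a
 * 256 MiB GART with 4 KiB pages on both the CPU and GPU side yields
 * num_cpu_pages == num_gpu_pages == 65536, so the optional debugfs
 * pages array costs 64K pointers (512 KiB on a 64-bit kernel).
 */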

/**
 * amdgpu_gart_fini - tear down the driver info for managing the gart
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the gart driver info and free the dummy page (all asics).
 */
void amdgpu_gart_fini(struct amdgpu_device *adev)
{
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
	vfree(adev->gart.pages);
	adev->gart.pages = NULL;
#endif
	amdgpu_dummy_page_fini(adev);
}