/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_mm.h"
#include "nouveau_vm.h"

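/*
 * Notes on the address maths used throughout this file: nouveau_mm
 * offsets/lengths are in 4KiB units (hence the ">> 12" conversions),
 * vma->node->type is the page shift of a mapping, "bits" (type - 12)
 * converts 4KiB units into mapping-sized pages, vm->pgt_bits is
 * log2(4KiB pages per page table) and "max" is the PTE count of one
 * table at that page size.  Purely for illustration (values assumed,
 * not taken from this driver): pgt_bits = 17 gives 512MiB per table,
 * so a 64KiB-page mapping (type = 16) has bits = 4 and max = 1 << 13.
 *
 * nouveau_vm_map_at() maps the VRAM regions backing @vram into @vma's
 * page tables, starting @delta bytes into the virtual range; regions
 * are split so no single vm->map() call crosses a page-table boundary.
 */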
void
nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram)
{
	struct nouveau_vm *vm = vma->vm;
	struct nouveau_mm_node *r;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 pde  = (offset >> vm->pgt_bits) - vm->fpde;
	u32 pte  = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
	u32 max  = 1 << (vm->pgt_bits - bits);
	u32 end, len;

	list_for_each_entry(r, &vram->regions, rl_entry) {
		u64 phys = (u64)r->offset << 12;
		u32 num  = r->length >> bits;

		while (num) {
			struct nouveau_gpuobj *pgt = vm->pgt[pde].obj;

			end = (pte + num);
			if (unlikely(end >= max))
				end = max;
			len = end - pte;

			vm->map(vma, pgt, vram, pte, len, phys);

			num -= len;
			pte += len;
			if (unlikely(end >= max)) {
				/* advance the physical address before moving
				 * into the next page table, otherwise a region
				 * crossing a table boundary would remap the
				 * same pages */
				phys += (u64)len << (bits + 12);
				pde++;
				pte = 0;
			}
		}
	}

	vm->flush(vm);
}

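/* Convenience wrapper: map @vram at the very start of @vma. */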
void
nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_vram *vram)
{
	nouveau_vm_map_at(vma, 0, vram);
}

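/*
 * Map a scatter-gather list of per-page DMA addresses (@list) into
 * @vma, @delta bytes in and @length bytes long, splitting the run at
 * page-table boundaries just like nouveau_vm_map_at().
 */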
void
nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
		  dma_addr_t *list)
{
	struct nouveau_vm *vm = vma->vm;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 num  = length >> vma->node->type;
	u32 pde  = (offset >> vm->pgt_bits) - vm->fpde;
	u32 pte  = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
	u32 max  = 1 << (vm->pgt_bits - bits);
	u32 end, len;

	while (num) {
		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj;

		end = (pte + num);
		if (unlikely(end >= max))
			end = max;
		len = end - pte;

		vm->map_sg(vma, pgt, pte, list, len);

		num  -= len;
		pte  += len;
		list += len;
		if (unlikely(end >= max)) {
			pde++;
			pte = 0;
		}
	}

	vm->flush(vm);
}

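/*
 * Invalidate the PTEs covering @length bytes of @vma starting @delta
 * bytes into the range, then flush.  The page tables themselves stay
 * allocated; only nouveau_vm_put() releases them.
 */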
void
nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
{
	struct nouveau_vm *vm = vma->vm;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 num  = length >> vma->node->type;
	u32 pde  = (offset >> vm->pgt_bits) - vm->fpde;
	u32 pte  = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
	u32 max  = 1 << (vm->pgt_bits - bits);
	u32 end, len;

	while (num) {
		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj;

		end = (pte + num);
		if (unlikely(end >= max))
			end = max;
		len = end - pte;

		vm->unmap(pgt, pte, len);

		num -= len;
		pte += len;
		if (unlikely(end >= max)) {
			pde++;
			pte = 0;
		}
	}

	vm->flush(vm);
}

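/* Unmap the entire VMA (node->length is in 4KiB units, hence << 12). */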
void
nouveau_vm_unmap(struct nouveau_vma *vma)
{
	nouveau_vm_unmap_at(vma, 0, (u64)vma->node->length << 12);
}

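/*
 * Drop one reference on each page table in [fpde, lpde].  When a
 * table's refcount hits zero, its PDE is cleared in every page
 * directory on pgd_list and the backing gpuobj is released; the mm
 * mutex is dropped around the final nouveau_gpuobj_ref(NULL, ...) so
 * object teardown happens outside it.
 */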
static void
nouveau_vm_unmap_pgt(struct nouveau_vm *vm, u32 fpde, u32 lpde)
{
	struct nouveau_vm_pgd *vpgd;
	struct nouveau_vm_pgt *vpgt;
	struct nouveau_gpuobj *pgt;
	u32 pde;

	for (pde = fpde; pde <= lpde; pde++) {
		vpgt = &vm->pgt[pde - vm->fpde];
		if (--vpgt->refcount)
			continue;

		list_for_each_entry(vpgd, &vm->pgd_list, head) {
			vm->unmap_pgt(vpgd->obj, pde);
		}

		pgt = vpgt->obj;
		vpgt->obj = NULL;

		mutex_unlock(&vm->mm->mutex);
		nouveau_gpuobj_ref(NULL, &pgt);
		mutex_lock(&vm->mm->mutex);
	}
}

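/*
 * Allocate a page table for @pde at page shift @type and point every
 * page directory on pgd_list at it.  pgt_size is the PTE count for
 * this page size multiplied by 8 bytes per entry.  The allocation is
 * done with the mm mutex dropped, so the refcount is re-checked
 * afterwards in case another thread filled the PDE first.
 */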
static int
nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type)
{
	struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
	struct nouveau_vm_pgd *vpgd;
	struct nouveau_gpuobj *pgt;
	u32 pgt_size;
	int ret;

	pgt_size  = (1 << (vm->pgt_bits + 12)) >> type;
	pgt_size *= 8;

	mutex_unlock(&vm->mm->mutex);
	ret = nouveau_gpuobj_new(vm->dev, NULL, pgt_size, 0x1000,
				 NVOBJ_FLAG_ZERO_ALLOC, &pgt);
	mutex_lock(&vm->mm->mutex);
	if (unlikely(ret))
		return ret;

	/* someone beat us to filling the PDE while we didn't have the lock */
	if (unlikely(vpgt->refcount++)) {
		mutex_unlock(&vm->mm->mutex);
		nouveau_gpuobj_ref(NULL, &pgt);
		mutex_lock(&vm->mm->mutex);
		return 0;
	}

	list_for_each_entry(vpgd, &vm->pgd_list, head) {
		vm->map_pgt(vpgd->obj, type, pde, pgt);
	}

	vpgt->page_shift = type;
	vpgt->obj = pgt;
	return 0;
}

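/*
 * Reserve virtual address space for a mapping of @size bytes with
 * pages of (1 << page_shift) bytes, taking references on (and
 * allocating, where needed) every page table the range touches.
 *
 * A minimal usage sketch, assuming an NV_MEM_ACCESS_RW access flag
 * from nouveau_drv.h and caller-provided vm/vram (hypothetical values,
 * not taken from this file):
 *
 *	struct nouveau_vma vma = {};
 *	int ret = nouveau_vm_get(vm, 1 << 20, 12, NV_MEM_ACCESS_RW, &vma);
 *	if (ret == 0) {
 *		nouveau_vm_map(&vma, vram);
 *		... use the mapping ...
 *		nouveau_vm_unmap(&vma);
 *		nouveau_vm_put(&vma);
 *	}
 */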
int
nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
	       u32 access, struct nouveau_vma *vma)
{
	u32 align = (1 << page_shift) >> 12;
	u32 msize = size >> 12;
	u32 fpde, lpde, pde;
	int ret;

	mutex_lock(&vm->mm->mutex);
	ret = nouveau_mm_get(vm->mm, page_shift, msize, 0, align, &vma->node);
	if (unlikely(ret != 0)) {
		mutex_unlock(&vm->mm->mutex);
		return ret;
	}

	fpde = (vma->node->offset >> vm->pgt_bits);
	lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;
	for (pde = fpde; pde <= lpde; pde++) {
		struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];

		if (likely(vpgt->refcount)) {
			vpgt->refcount++;
			continue;
		}

		ret = nouveau_vm_map_pgt(vm, pde, vma->node->type);
		if (ret) {
			if (pde != fpde)
				nouveau_vm_unmap_pgt(vm, fpde, pde - 1);
			nouveau_mm_put(vm->mm, vma->node);
			mutex_unlock(&vm->mm->mutex);
			vma->node = NULL;
			return ret;
		}
	}
	mutex_unlock(&vm->mm->mutex);

	vma->vm = vm;
	vma->offset = (u64)vma->node->offset << 12;
	vma->access = access;
	return 0;
}

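/*
 * Release the address space held by @vma and drop the page-table
 * references taken by nouveau_vm_get(); tables that reach refcount
 * zero are freed via nouveau_vm_unmap_pgt().
 */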
void
nouveau_vm_put(struct nouveau_vma *vma)
{
	struct nouveau_vm *vm = vma->vm;
	u32 fpde, lpde;

	if (unlikely(vma->node == NULL))
		return;
	fpde = (vma->node->offset >> vm->pgt_bits);
	lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;

	mutex_lock(&vm->mm->mutex);
	nouveau_mm_put(vm->mm, vma->node);
	vma->node = NULL;
	nouveau_vm_unmap_pgt(vm, fpde, lpde);
	mutex_unlock(&vm->mm->mutex);
}

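/*
 * Create a VM covering [offset, offset + length) with one page table
 * per (1 << pgt_bits) bytes; mm_offset marks where allocatable address
 * space begins.  Only NV50-family cards are wired up here.
 */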
int
nouveau_vm_new(struct drm_device *dev, u64 offset, u64 length, u64 mm_offset,
	       u8 pgt_bits, u8 spg_shift, u8 lpg_shift,
	       struct nouveau_vm **pvm)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_vm *vm;
	u64 mm_length = (offset + length) - mm_offset;
	u32 block;
	int ret;

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm)
		return -ENOMEM;

	if (dev_priv->card_type == NV_50) {
		vm->map_pgt = nv50_vm_map_pgt;
		vm->unmap_pgt = nv50_vm_unmap_pgt;
		vm->map = nv50_vm_map;
		vm->map_sg = nv50_vm_map_sg;
		vm->unmap = nv50_vm_unmap;
		vm->flush = nv50_vm_flush;
	} else {
		kfree(vm);
		return -ENOSYS;
	}

	vm->fpde = offset >> pgt_bits;
	vm->lpde = (offset + length - 1) >> pgt_bits;
	vm->pgt  = kcalloc(vm->lpde - vm->fpde + 1, sizeof(*vm->pgt), GFP_KERNEL);
	if (!vm->pgt) {
		kfree(vm);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&vm->pgd_list);
	vm->dev = dev;
	vm->refcount = 1;
	vm->pgt_bits = pgt_bits - 12;
	vm->spg_shift = spg_shift;
	vm->lpg_shift = lpg_shift;

	block = (1 << pgt_bits);
	if (length < block)
		block = length;

	ret = nouveau_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12,
			      block >> 12);
	if (ret) {
		/* don't leak the page-table array on failure */
		kfree(vm->pgt);
		kfree(vm);
		return ret;
	}

	*pvm = vm;
	return 0;
}

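/*
 * Attach a page directory to the VM: existing page tables are written
 * into @pgd, PDEs without a table are explicitly cleared, and the pgd
 * then joins pgd_list so future table changes are mirrored into it.
 */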
static int
nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
{
	struct nouveau_vm_pgd *vpgd;
	int i;

	if (!pgd)
		return 0;

	vpgd = kzalloc(sizeof(*vpgd), GFP_KERNEL);
	if (!vpgd)
		return -ENOMEM;

	nouveau_gpuobj_ref(pgd, &vpgd->obj);

	mutex_lock(&vm->mm->mutex);
	for (i = vm->fpde; i <= vm->lpde; i++) {
		struct nouveau_vm_pgt *vpgt = &vm->pgt[i - vm->fpde];

		if (!vpgt->obj) {
			vm->unmap_pgt(pgd, i);
			continue;
		}

		vm->map_pgt(pgd, vpgt->page_shift, i, vpgt->obj);
	}
	list_add(&vpgd->head, &vm->pgd_list);
	mutex_unlock(&vm->mm->mutex);
	return 0;
}

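/* Detach @pgd from the VM and drop the reference vm_link took on it. */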
static void
nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
{
	struct nouveau_vm_pgd *vpgd, *tmp;

	if (!pgd)
		return;

	mutex_lock(&vm->mm->mutex);
	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
		if (vpgd->obj != pgd)
			continue;

		list_del(&vpgd->head);
		nouveau_gpuobj_ref(NULL, &vpgd->obj);
		kfree(vpgd);
	}
	mutex_unlock(&vm->mm->mutex);
}

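/*
 * Final teardown once the last reference is gone: unlink any remaining
 * page directories and tear down the address-space allocator.
 */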
static void
nouveau_vm_del(struct nouveau_vm *vm)
{
	struct nouveau_vm_pgd *vpgd, *tmp;

	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
		nouveau_vm_unlink(vm, vpgd->obj);
	}
	WARN_ON(nouveau_mm_fini(&vm->mm) != 0);

	kfree(vm->pgt);
	kfree(vm);
}

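/*
 * Reference-count helper: point *@ptr at @ref (linking @pgd into the
 * new VM first), then release the VM previously referenced by *@ptr,
 * destroying it when its refcount drops to zero.
 */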
int
nouveau_vm_ref(struct nouveau_vm *ref, struct nouveau_vm **ptr,
	       struct nouveau_gpuobj *pgd)
{
	struct nouveau_vm *vm;
	int ret;

	vm = ref;
	if (vm) {
		ret = nouveau_vm_link(vm, pgd);
		if (ret)
			return ret;

		vm->refcount++;
	}

	vm = *ptr;
	*ptr = ref;

	if (vm) {
		nouveau_vm_unlink(vm, pgd);

		if (--vm->refcount == 0)
			nouveau_vm_del(vm);
	}

	return 0;
}