drm/nouveau: modify vm to accommodate dual page tables for nvc0

NVC0 needs to be able to keep both a small-page and a large-page page
table around for the same PDE, so turn the per-PDE bookkeeping into a
pair: each vm->pgt[] entry now tracks obj[]/refcount[] indexed by page
size, and the map_pgt() backend hook is handed both objects.  The
separate unmap_pgt() hook goes away; clearing a PDE is now just another
map_pgt() call with the dropped table already set to NULL.  The page
table layout parameters (pgt_bits, spg_shift, lpg_shift) are now chosen
per-chipset inside nouveau_vm_new() instead of being passed in by the
caller.
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
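---
Only nouveau_vm.c is shown here; the matching nouveau_vm.h changes are
implied by the hunks below rather than included.  A rough sketch of the
assumed header layout after this change (field names are taken from the
.c hunks, everything else is illustrative, not the authoritative header):

	struct nouveau_vm_pgt {
		struct nouveau_gpuobj *obj[2];	/* [0] = small pages, [1] = large pages */
		u32 refcount[2];		/* per page-size refcount on the PDE */
	};

	struct nouveau_vm {
		/* ...other members unchanged... */
		u8 spg_shift;	/* small page shift: 12 (4KiB) on nv50 */
		u8 lpg_shift;	/* large page shift: 16 (64KiB) on nv50 */
		/* writes a PDE from both page tables; either may be NULL,
		 * which also covers what the old unmap_pgt() hook did */
		void (*map_pgt)(struct nouveau_gpuobj *pgd, u32 pde,
				struct nouveau_gpuobj *pgt[2]);
	};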
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.c b/drivers/gpu/drm/nouveau/nouveau_vm.c
index 07ab174..b023a64 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.c
@@ -32,6 +32,7 @@
 {
 	struct nouveau_vm *vm = vma->vm;
 	struct nouveau_mm_node *r;
+	int big = vma->node->type != vm->spg_shift;
 	u32 offset = vma->node->offset + (delta >> 12);
 	u32 bits = vma->node->type - 12;
 	u32 pde = (offset >> vm->pgt_bits) - vm->fpde;
@@ -44,7 +45,7 @@
 		u32 num = r->length >> bits;
 
 		while (num) {
-			struct nouveau_gpuobj *pgt = vm->pgt[pde].obj;
+			struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];
 
 			end = (pte + num);
 			if (unlikely(end >= max))
@@ -76,6 +77,7 @@
 		  dma_addr_t *list)
 {
 	struct nouveau_vm *vm = vma->vm;
+	int big = vma->node->type != vm->spg_shift;
 	u32 offset = vma->node->offset + (delta >> 12);
 	u32 bits = vma->node->type - 12;
 	u32 num = length >> vma->node->type;
@@ -85,7 +87,7 @@
 	u32 end, len;
 
 	while (num) {
-		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj;
+		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];
 
 		end = (pte + num);
 		if (unlikely(end >= max))
@@ -110,6 +112,7 @@
 nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
 {
 	struct nouveau_vm *vm = vma->vm;
+	int big = vma->node->type != vm->spg_shift;
 	u32 offset = vma->node->offset + (delta >> 12);
 	u32 bits = vma->node->type - 12;
 	u32 num = length >> vma->node->type;
@@ -119,7 +122,7 @@
 	u32 end, len;
 
 	while (num) {
-		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj;
+		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];
 
 		end = (pte + num);
 		if (unlikely(end >= max))
@@ -146,7 +149,7 @@
 }
 
 static void
-nouveau_vm_unmap_pgt(struct nouveau_vm *vm, u32 fpde, u32 lpde)
+nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde)
 {
 	struct nouveau_vm_pgd *vpgd;
 	struct nouveau_vm_pgt *vpgt;
@@ -155,15 +158,15 @@
 	for (pde = fpde; pde <= lpde; pde++) {
 		vpgt = &vm->pgt[pde - vm->fpde];
-		if (--vpgt->refcount)
+		if (--vpgt->refcount[big])
 			continue;
 
-		list_for_each_entry(vpgd, &vm->pgd_list, head) {
-			vm->unmap_pgt(vpgd->obj, pde);
-		}
+		pgt = vpgt->obj[big];
+		vpgt->obj[big] = NULL;
 
-		pgt = vpgt->obj;
-		vpgt->obj = NULL;
+		list_for_each_entry(vpgd, &vm->pgd_list, head) {
+			vm->map_pgt(vpgd->obj, pde, vpgt->obj);
+		}
 
 		mutex_unlock(&vm->mm->mutex);
 		nouveau_gpuobj_ref(NULL, &pgt);
@@ -177,6 +180,7 @@
 	struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
 	struct nouveau_vm_pgd *vpgd;
 	struct nouveau_gpuobj *pgt;
+	int big = (type != vm->spg_shift);
 	u32 pgt_size;
 	int ret;
@@ -191,19 +195,18 @@
 		return ret;
 
 	/* someone beat us to filling the PDE while we didn't have the lock */
-	if (unlikely(vpgt->refcount++)) {
+	if (unlikely(vpgt->refcount[big]++)) {
 		mutex_unlock(&vm->mm->mutex);
 		nouveau_gpuobj_ref(NULL, &pgt);
 		mutex_lock(&vm->mm->mutex);
 		return 0;
 	}
 
+	vpgt->obj[big] = pgt;
 	list_for_each_entry(vpgd, &vm->pgd_list, head) {
-		vm->map_pgt(vpgd->obj, type, pde, pgt);
+		vm->map_pgt(vpgd->obj, pde, vpgt->obj);
 	}
 
-	vpgt->page_shift = type;
-	vpgt->obj = pgt;
 	return 0;
 }
@@ -227,16 +230,17 @@
 	lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;
 	for (pde = fpde; pde <= lpde; pde++) {
 		struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
+		int big = (vma->node->type != vm->spg_shift);
 
-		if (likely(vpgt->refcount)) {
-			vpgt->refcount++;
+		if (likely(vpgt->refcount[big])) {
+			vpgt->refcount[big]++;
 			continue;
 		}
 
 		ret = nouveau_vm_map_pgt(vm, pde, vma->node->type);
 		if (ret) {
 			if (pde != fpde)
-				nouveau_vm_unmap_pgt(vm, fpde, pde - 1);
+				nouveau_vm_unmap_pgt(vm, big, fpde, pde - 1);
 			nouveau_mm_put(vm->mm, vma->node);
 			mutex_unlock(&vm->mm->mutex);
 			vma->node = NULL;
@@ -263,21 +267,20 @@
 	lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;
 
 	mutex_lock(&vm->mm->mutex);
+	nouveau_vm_unmap_pgt(vm, vma->node->type != vm->spg_shift, fpde, lpde);
 	nouveau_mm_put(vm->mm, vma->node);
 	vma->node = NULL;
-	nouveau_vm_unmap_pgt(vm, fpde, lpde);
 	mutex_unlock(&vm->mm->mutex);
 }
 
 int
 nouveau_vm_new(struct drm_device *dev, u64 offset, u64 length, u64 mm_offset,
-	       u8 pgt_bits, u8 spg_shift, u8 lpg_shift,
 	       struct nouveau_vm **pvm)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_vm *vm;
 	u64 mm_length = (offset + length) - mm_offset;
-	u32 block;
+	u32 block, pgt_bits;
 	int ret;
 
 	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
@@ -286,11 +289,13 @@
 	if (dev_priv->card_type == NV_50) {
 		vm->map_pgt = nv50_vm_map_pgt;
-		vm->unmap_pgt = nv50_vm_unmap_pgt;
 		vm->map = nv50_vm_map;
 		vm->map_sg = nv50_vm_map_sg;
 		vm->unmap = nv50_vm_unmap;
 		vm->flush = nv50_vm_flush;
+		vm->spg_shift = 12;
+		vm->lpg_shift = 16;
+		pgt_bits = 29;
 	} else {
 		kfree(vm);
 		return -ENOSYS;
@@ -308,8 +313,6 @@
 	vm->dev = dev;
 	vm->refcount = 1;
 	vm->pgt_bits = pgt_bits - 12;
-	vm->spg_shift = spg_shift;
-	vm->lpg_shift = lpg_shift;
 
 	block = (1 << pgt_bits);
 	if (length < block)
@@ -342,16 +345,8 @@
 	nouveau_gpuobj_ref(pgd, &vpgd->obj);
 
 	mutex_lock(&vm->mm->mutex);
-	for (i = vm->fpde; i <= vm->lpde; i++) {
-		struct nouveau_vm_pgt *vpgt = &vm->pgt[i - vm->fpde];
-
-		if (!vpgt->obj) {
-			vm->unmap_pgt(pgd, i);
-			continue;
-		}
-
-		vm->map_pgt(pgd, vpgt->page_shift, i, vpgt->obj);
-	}
+	for (i = vm->fpde; i <= vm->lpde; i++)
+		vm->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj);
 	list_add(&vpgd->head, &vm->pgd_list);
 	mutex_unlock(&vm->mm->mutex);
 	return 0;
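
The "big" selector repeated in the hunks above always performs the same
lookup; written out as a helper it would look roughly like the sketch
below (hypothetical, not part of the patch; every field it touches does
exist in the patched code):

	/* index 0 of obj[]/refcount[] is the small-page table, index 1 the
	 * large-page table; an allocation is "big" whenever its page shift
	 * differs from vm->spg_shift */
	static inline struct nouveau_gpuobj *
	nouveau_vm_pgt_obj(struct nouveau_vm *vm, struct nouveau_vma *vma,
			   u32 pde)
	{
		int big = vma->node->type != vm->spg_shift;

		/* pde is absolute here; vm->pgt[] starts at vm->fpde */
		return vm->pgt[pde - vm->fpde].obj[big];
	}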