drm/nouveau: embed nouveau_mm

There's no need for struct nouveau_mm to be allocated separately: embed
it directly in nouveau_vram_engine and nouveau_vm.  This removes the
kzalloc()/kfree() of the mm object (and its error path) from
nouveau_mm_init()/nouveau_mm_fini(), and changes both functions, along
with the VM code that takes the mm mutex, to operate on a plain pointer
to the embedded structure instead of a double pointer.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
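
For reference, a minimal sketch of how a caller of the allocator changes
with this patch.  The helper function and its name are illustrative only
and not part of this series; the nouveau_mm_* calls and the
dev_priv->engine.vram.mm member are as they appear in the diff, and the
snippet is not meant to compile on its own:

	/*
	 * Before this patch the allocator object itself was heap-allocated:
	 *
	 *	struct nouveau_mm *mm;
	 *	ret = nouveau_mm_init(&mm, offset, length, block);
	 *	...
	 *	nouveau_mm_fini(&mm);	// freed mm and NULLed the caller's pointer
	 *
	 * Afterwards it is embedded in its parent (nouveau_vram_engine,
	 * nouveau_vm), so init/fini only set up and tear down its lists:
	 */
	static int example_vram_mm_setup(struct drm_device *dev,	/* illustrative helper */
					 u32 offset, u32 length, u32 block)
	{
		struct drm_nouveau_private *dev_priv = dev->dev_private;
		struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
		int ret;

		ret = nouveau_mm_init(&vram->mm, offset, length, block);
		if (ret)
			return ret;

		/* ... use &vram->mm with nouveau_mm_get()/nouveau_mm_put() ... */

		nouveau_mm_fini(&vram->mm);	/* frees the heap node, not the mm */
		return 0;
	}
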
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index ba258e3..6629f30 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -518,7 +518,7 @@
 };
 
 struct nouveau_vram_engine {
-	struct nouveau_mm *mm;
+	struct nouveau_mm mm;
 
 	int  (*init)(struct drm_device *);
 	void (*takedown)(struct drm_device *dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.c b/drivers/gpu/drm/nouveau/nouveau_mm.c
index 1640dec..75b5dd9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.c
@@ -27,7 +27,7 @@
 #include "nouveau_mm.h"
 
 static inline void
-region_put(struct nouveau_mm *rmm, struct nouveau_mm_node *a)
+region_put(struct nouveau_mm *mm, struct nouveau_mm_node *a)
 {
 	list_del(&a->nl_entry);
 	list_del(&a->fl_entry);
@@ -35,7 +35,7 @@
 }
 
 static struct nouveau_mm_node *
-region_split(struct nouveau_mm *rmm, struct nouveau_mm_node *a, u32 size)
+region_split(struct nouveau_mm *mm, struct nouveau_mm_node *a, u32 size)
 {
 	struct nouveau_mm_node *b;
 
@@ -57,33 +57,33 @@
 	return b;
 }
 
-#define node(root, dir) ((root)->nl_entry.dir == &rmm->nodes) ? NULL : \
+#define node(root, dir) ((root)->nl_entry.dir == &mm->nodes) ? NULL : \
 	list_entry((root)->nl_entry.dir, struct nouveau_mm_node, nl_entry)
 
 void
-nouveau_mm_put(struct nouveau_mm *rmm, struct nouveau_mm_node *this)
+nouveau_mm_put(struct nouveau_mm *mm, struct nouveau_mm_node *this)
 {
 	struct nouveau_mm_node *prev = node(this, prev);
 	struct nouveau_mm_node *next = node(this, next);
 
-	list_add(&this->fl_entry, &rmm->free);
+	list_add(&this->fl_entry, &mm->free);
 	this->type = 0;
 
 	if (prev && prev->type == 0) {
 		prev->length += this->length;
-		region_put(rmm, this);
+		region_put(mm, this);
 		this = prev;
 	}
 
 	if (next && next->type == 0) {
 		next->offset  = this->offset;
 		next->length += this->length;
-		region_put(rmm, this);
+		region_put(mm, this);
 	}
 }
 
 int
-nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc,
+nouveau_mm_get(struct nouveau_mm *mm, int type, u32 size, u32 size_nc,
 	       u32 align, struct nouveau_mm_node **pnode)
 {
 	struct nouveau_mm_node *prev, *this, *next;
@@ -92,17 +92,17 @@
 	u32 splitoff;
 	u32 s, e;
 
-	list_for_each_entry(this, &rmm->free, fl_entry) {
+	list_for_each_entry(this, &mm->free, fl_entry) {
 		e = this->offset + this->length;
 		s = this->offset;
 
 		prev = node(this, prev);
 		if (prev && prev->type != type)
-			s = roundup(s, rmm->block_size);
+			s = roundup(s, mm->block_size);
 
 		next = node(this, next);
 		if (next && next->type != type)
-			e = rounddown(e, rmm->block_size);
+			e = rounddown(e, mm->block_size);
 
 		s  = (s + align_mask) & ~align_mask;
 		e &= ~align_mask;
@@ -110,10 +110,10 @@
 			continue;
 
 		splitoff = s - this->offset;
-		if (splitoff && !region_split(rmm, this, splitoff))
+		if (splitoff && !region_split(mm, this, splitoff))
 			return -ENOMEM;
 
-		this = region_split(rmm, this, min(size, e - s));
+		this = region_split(mm, this, min(size, e - s));
 		if (!this)
 			return -ENOMEM;
 
@@ -127,9 +127,8 @@
 }
 
 int
-nouveau_mm_init(struct nouveau_mm **prmm, u32 offset, u32 length, u32 block)
+nouveau_mm_init(struct nouveau_mm *mm, u32 offset, u32 length, u32 block)
 {
-	struct nouveau_mm *rmm;
 	struct nouveau_mm_node *heap;
 
 	heap = kzalloc(sizeof(*heap), GFP_KERNEL);
@@ -138,32 +137,25 @@
 	heap->offset = roundup(offset, block);
 	heap->length = rounddown(offset + length, block) - heap->offset;
 
-	rmm = kzalloc(sizeof(*rmm), GFP_KERNEL);
-	if (!rmm) {
-		kfree(heap);
-		return -ENOMEM;
-	}
-	rmm->block_size = block;
-	mutex_init(&rmm->mutex);
-	INIT_LIST_HEAD(&rmm->nodes);
-	INIT_LIST_HEAD(&rmm->free);
-	list_add(&heap->nl_entry, &rmm->nodes);
-	list_add(&heap->fl_entry, &rmm->free);
+	mutex_init(&mm->mutex);
+	mm->block_size = block;
+	INIT_LIST_HEAD(&mm->nodes);
+	INIT_LIST_HEAD(&mm->free);
 
-	*prmm = rmm;
+	list_add(&heap->nl_entry, &mm->nodes);
+	list_add(&heap->fl_entry, &mm->free);
 	return 0;
 }
 
 int
-nouveau_mm_fini(struct nouveau_mm **prmm)
+nouveau_mm_fini(struct nouveau_mm *mm)
 {
-	struct nouveau_mm *rmm = *prmm;
 	struct nouveau_mm_node *node, *heap =
-		list_first_entry(&rmm->nodes, struct nouveau_mm_node, nl_entry);
+		list_first_entry(&mm->nodes, struct nouveau_mm_node, nl_entry);
 
-	if (!list_is_singular(&rmm->nodes)) {
+	if (!list_is_singular(&mm->nodes)) {
 		printk(KERN_ERR "nouveau_mm not empty at destroy time!\n");
-		list_for_each_entry(node, &rmm->nodes, nl_entry) {
+		list_for_each_entry(node, &mm->nodes, nl_entry) {
 			printk(KERN_ERR "0x%02x: 0x%08x 0x%08x\n",
 			       node->type, node->offset, node->length);
 		}
@@ -172,7 +164,5 @@
 	}
 
 	kfree(heap);
-	kfree(rmm);
-	*prmm = NULL;
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.h b/drivers/gpu/drm/nouveau/nouveau_mm.h
index b9c016d..b8fe908 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.h
@@ -44,8 +44,8 @@
 	u32 block_size;
 };
 
-int  nouveau_mm_init(struct nouveau_mm **, u32 offset, u32 length, u32 block);
-int  nouveau_mm_fini(struct nouveau_mm **);
+int  nouveau_mm_init(struct nouveau_mm *, u32 offset, u32 length, u32 block);
+int  nouveau_mm_fini(struct nouveau_mm *);
 int  nouveau_mm_pre(struct nouveau_mm *);
 int  nouveau_mm_get(struct nouveau_mm *, int type, u32 size, u32 size_nc,
 		    u32 align, struct nouveau_mm_node **);
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.c b/drivers/gpu/drm/nouveau/nouveau_vm.c
index 244fd38..d432a2a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.c
@@ -172,9 +172,9 @@
 			vm->map_pgt(vpgd->obj, pde, vpgt->obj);
 		}
 
-		mutex_unlock(&vm->mm->mutex);
+		mutex_unlock(&vm->mm.mutex);
 		nouveau_gpuobj_ref(NULL, &pgt);
-		mutex_lock(&vm->mm->mutex);
+		mutex_lock(&vm->mm.mutex);
 	}
 }
 
@@ -191,18 +191,18 @@
 	pgt_size  = (1 << (vm->pgt_bits + 12)) >> type;
 	pgt_size *= 8;
 
-	mutex_unlock(&vm->mm->mutex);
+	mutex_unlock(&vm->mm.mutex);
 	ret = nouveau_gpuobj_new(vm->dev, NULL, pgt_size, 0x1000,
 				 NVOBJ_FLAG_ZERO_ALLOC, &pgt);
-	mutex_lock(&vm->mm->mutex);
+	mutex_lock(&vm->mm.mutex);
 	if (unlikely(ret))
 		return ret;
 
 	/* someone beat us to filling the PDE while we didn't have the lock */
 	if (unlikely(vpgt->refcount[big]++)) {
-		mutex_unlock(&vm->mm->mutex);
+		mutex_unlock(&vm->mm.mutex);
 		nouveau_gpuobj_ref(NULL, &pgt);
-		mutex_lock(&vm->mm->mutex);
+		mutex_lock(&vm->mm.mutex);
 		return 0;
 	}
 
@@ -223,10 +223,10 @@
 	u32 fpde, lpde, pde;
 	int ret;
 
-	mutex_lock(&vm->mm->mutex);
-	ret = nouveau_mm_get(vm->mm, page_shift, msize, 0, align, &vma->node);
+	mutex_lock(&vm->mm.mutex);
+	ret = nouveau_mm_get(&vm->mm, page_shift, msize, 0, align, &vma->node);
 	if (unlikely(ret != 0)) {
-		mutex_unlock(&vm->mm->mutex);
+		mutex_unlock(&vm->mm.mutex);
 		return ret;
 	}
 
@@ -245,13 +245,13 @@
 		if (ret) {
 			if (pde != fpde)
 				nouveau_vm_unmap_pgt(vm, big, fpde, pde - 1);
-			nouveau_mm_put(vm->mm, vma->node);
-			mutex_unlock(&vm->mm->mutex);
+			nouveau_mm_put(&vm->mm, vma->node);
+			mutex_unlock(&vm->mm.mutex);
 			vma->node = NULL;
 			return ret;
 		}
 	}
-	mutex_unlock(&vm->mm->mutex);
+	mutex_unlock(&vm->mm.mutex);
 
 	vma->vm     = vm;
 	vma->offset = (u64)vma->node->offset << 12;
@@ -270,11 +270,11 @@
 	fpde = (vma->node->offset >> vm->pgt_bits);
 	lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;
 
-	mutex_lock(&vm->mm->mutex);
+	mutex_lock(&vm->mm.mutex);
 	nouveau_vm_unmap_pgt(vm, vma->node->type != vm->spg_shift, fpde, lpde);
-	nouveau_mm_put(vm->mm, vma->node);
+	nouveau_mm_put(&vm->mm, vma->node);
 	vma->node = NULL;
-	mutex_unlock(&vm->mm->mutex);
+	mutex_unlock(&vm->mm.mutex);
 }
 
 int
@@ -360,11 +360,11 @@
 
 	nouveau_gpuobj_ref(pgd, &vpgd->obj);
 
-	mutex_lock(&vm->mm->mutex);
+	mutex_lock(&vm->mm.mutex);
 	for (i = vm->fpde; i <= vm->lpde; i++)
 		vm->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj);
 	list_add(&vpgd->head, &vm->pgd_list);
-	mutex_unlock(&vm->mm->mutex);
+	mutex_unlock(&vm->mm.mutex);
 	return 0;
 }
 
@@ -377,7 +377,7 @@
 	if (!mpgd)
 		return;
 
-	mutex_lock(&vm->mm->mutex);
+	mutex_lock(&vm->mm.mutex);
 	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
 		if (vpgd->obj == mpgd) {
 			pgd = vpgd->obj;
@@ -386,7 +386,7 @@
 			break;
 		}
 	}
-	mutex_unlock(&vm->mm->mutex);
+	mutex_unlock(&vm->mm.mutex);
 
 	nouveau_gpuobj_ref(NULL, &pgd);
 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.h b/drivers/gpu/drm/nouveau/nouveau_vm.h
index 579ca8c..6ce995f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.h
@@ -51,7 +51,7 @@
 
 struct nouveau_vm {
 	struct drm_device *dev;
-	struct nouveau_mm *mm;
+	struct nouveau_mm mm;
 	int refcount;
 
 	struct list_head pgd_list;
diff --git a/drivers/gpu/drm/nouveau/nv50_vram.c b/drivers/gpu/drm/nouveau/nv50_vram.c
index af32dae..9da2383 100644
--- a/drivers/gpu/drm/nouveau/nv50_vram.c
+++ b/drivers/gpu/drm/nouveau/nv50_vram.c
@@ -51,7 +51,7 @@
 nv50_vram_del(struct drm_device *dev, struct nouveau_mem **pmem)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_mm *mm = dev_priv->engine.vram.mm;
+	struct nouveau_mm *mm = &dev_priv->engine.vram.mm;
 	struct nouveau_mm_node *this;
 	struct nouveau_mem *mem;
 
@@ -82,7 +82,7 @@
 	      u32 memtype, struct nouveau_mem **pmem)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_mm *mm = dev_priv->engine.vram.mm;
+	struct nouveau_mm *mm = &dev_priv->engine.vram.mm;
 	struct nouveau_mm_node *r;
 	struct nouveau_mem *mem;
 	int comp = (memtype & 0x300) >> 8;
diff --git a/drivers/gpu/drm/nouveau/nvc0_vram.c b/drivers/gpu/drm/nouveau/nvc0_vram.c
index e45a24d..abed0d3 100644
--- a/drivers/gpu/drm/nouveau/nvc0_vram.c
+++ b/drivers/gpu/drm/nouveau/nvc0_vram.c
@@ -61,7 +61,7 @@
 	      u32 type, struct nouveau_mem **pmem)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_mm *mm = dev_priv->engine.vram.mm;
+	struct nouveau_mm *mm = &dev_priv->engine.vram.mm;
 	struct nouveau_mm_node *r;
 	struct nouveau_mem *mem;
 	int ret;