drm/ttm: flip the switch, and convert to dma_fence

Now that the fences a buffer depends on are tracked in its
reservation_object, nouveau no longer needs the driver-private
bo.sync_obj pointer: the exclusive fence is looked up with
reservation_object_get_excl() while the reservation is held.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index d68c965..a28b510 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -98,13 +98,12 @@
 nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
 {
 	const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
-	struct nouveau_fence *fence = NULL;
+	struct fence *fence = NULL;
 
 	list_del(&vma->head);
 
-	if (mapped) {
-		fence = nouveau_fence_ref(nvbo->bo.sync_obj);
-	}
+	if (mapped)
+		fence = reservation_object_get_excl(nvbo->bo.resv);
 
 	if (fence) {
 		nouveau_fence_work(fence, nouveau_gem_object_delete, vma);
@@ -114,7 +113,6 @@
 		nouveau_vm_put(vma);
 		kfree(vma);
 	}
-	nouveau_fence_unref(&fence);
 }
 
 void
@@ -874,8 +872,12 @@
 	ret = ttm_bo_reserve(&nvbo->bo, true, false, false, NULL);
 	if (!ret) {
 		ret = ttm_bo_wait(&nvbo->bo, true, true, true);
-		if (!no_wait && ret)
-			fence = nouveau_fence_ref(nvbo->bo.sync_obj);
+		if (!no_wait && ret) {
+			struct fence *excl;
+
+			excl = reservation_object_get_excl(nvbo->bo.resv);
+			fence = nouveau_fence_ref((struct nouveau_fence *)excl);
+		}
 
 		ttm_bo_unreserve(&nvbo->bo);
 	}