drm/nouveau/fence: make ttm interfaces wrap ours, not the other way around

Until now the core fence routines carried TTM's sync_obj signatures
(__nouveau_fence_* taking void pointers) and the driver-internal
nouveau_fence_*() helpers were just static inline wrappers around them.
Turn that relationship around: nouveau_fence_signalled(), _wait(),
_ref() and _unref() now operate on struct nouveau_fence directly, and
nouveau_bo.c grows thin static nouveau_bo_fence_*() adapters for the
ttm_bo_driver sync_obj_* hooks.  nouveau_bo_fence() moves down beside
those adapters so the TTM-facing fence glue sits in one place.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index c45c7bc..1aa03a8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1068,22 +1068,6 @@
 	return nouveau_bo_validate(nvbo, false, true, false);
 }
 
-void
-nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
-{
-	struct nouveau_fence *old_fence;
-
-	if (likely(fence))
-		nouveau_fence_ref(fence);
-
-	spin_lock(&nvbo->bo.bdev->fence_lock);
-	old_fence = nvbo->bo.sync_obj;
-	nvbo->bo.sync_obj = fence;
-	spin_unlock(&nvbo->bo.bdev->fence_lock);
-
-	nouveau_fence_unref(&old_fence);
-}
-
 static int
 nouveau_ttm_tt_populate(struct ttm_tt *ttm)
 {
@@ -1181,6 +1165,52 @@
 	ttm_pool_unpopulate(ttm);
 }
 
+void
+nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
+{
+	struct nouveau_fence *old_fence = NULL;
+
+	if (likely(fence))
+		nouveau_fence_ref(fence);
+
+	spin_lock(&nvbo->bo.bdev->fence_lock);
+	old_fence = nvbo->bo.sync_obj;
+	nvbo->bo.sync_obj = fence;
+	spin_unlock(&nvbo->bo.bdev->fence_lock);
+
+	nouveau_fence_unref(&old_fence);
+}
+
+static void
+nouveau_bo_fence_unref(void **sync_obj)
+{
+	nouveau_fence_unref((struct nouveau_fence **)sync_obj);
+}
+
+static void *
+nouveau_bo_fence_ref(void *sync_obj)
+{
+	return nouveau_fence_ref(sync_obj);
+}
+
+static bool
+nouveau_bo_fence_signalled(void *sync_obj, void *sync_arg)
+{
+	return nouveau_fence_signalled(sync_obj);
+}
+
+static int
+nouveau_bo_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
+{
+	return nouveau_fence_wait(sync_obj, lazy, intr);
+}
+
+static int
+nouveau_bo_fence_flush(void *sync_obj, void *sync_arg)
+{
+	return 0;
+}
+
 struct ttm_bo_driver nouveau_bo_driver = {
 	.ttm_tt_create = &nouveau_ttm_tt_create,
 	.ttm_tt_populate = &nouveau_ttm_tt_populate,
@@ -1191,11 +1221,11 @@
 	.move_notify = nouveau_bo_move_ntfy,
 	.move = nouveau_bo_move,
 	.verify_access = nouveau_bo_verify_access,
-	.sync_obj_signaled = __nouveau_fence_signalled,
-	.sync_obj_wait = __nouveau_fence_wait,
-	.sync_obj_flush = __nouveau_fence_flush,
-	.sync_obj_unref = __nouveau_fence_unref,
-	.sync_obj_ref = __nouveau_fence_ref,
+	.sync_obj_signaled = nouveau_bo_fence_signalled,
+	.sync_obj_wait = nouveau_bo_fence_wait,
+	.sync_obj_flush = nouveau_bo_fence_flush,
+	.sync_obj_unref = nouveau_bo_fence_unref,
+	.sync_obj_ref = nouveau_bo_fence_ref,
 	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
 	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
 	.io_mem_free = &nouveau_ttm_io_mem_free,
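
For context (not part of the patch): only TTM is expected to go through
the sync_obj_* hooks above; driver code keeps calling the typed helpers
directly.  A minimal sketch of the usual pattern after queuing GPU work,
where emit_copy() is a made-up stand-in for whatever emits and returns a
new fence:

/* Hypothetical caller: attach a fresh fence to a buffer after a copy. */
static int
example_fence_bo_after_copy(struct nouveau_channel *chan,
			    struct nouveau_bo *nvbo)
{
	struct nouveau_fence *fence;
	int ret;

	ret = emit_copy(chan, nvbo, &fence);	/* assumed helper, not real */
	if (ret)
		return ret;

	/* nouveau_bo_fence() grabs its own reference under fence_lock,
	 * so drop the one returned by the emit helper afterwards. */
	nouveau_bo_fence(nvbo, fence);
	nouveau_fence_unref(&fence);
	return 0;
}
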
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index dce6215..b17444a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -1458,34 +1458,11 @@
 			       void *priv);
 struct nouveau_channel *nouveau_fence_channel(struct nouveau_fence *);
 
-extern bool __nouveau_fence_signalled(void *obj, void *arg);
-extern int __nouveau_fence_wait(void *obj, void *arg, bool lazy, bool intr);
-extern int __nouveau_fence_flush(void *obj, void *arg);
-extern void __nouveau_fence_unref(void **obj);
-extern void *__nouveau_fence_ref(void *obj);
-
-static inline bool nouveau_fence_signalled(struct nouveau_fence *obj)
-{
-	return __nouveau_fence_signalled(obj, NULL);
-}
-static inline int
-nouveau_fence_wait(struct nouveau_fence *obj, bool lazy, bool intr)
-{
-	return __nouveau_fence_wait(obj, NULL, lazy, intr);
-}
+extern bool nouveau_fence_signalled(struct nouveau_fence *);
+extern int nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr);
+extern void nouveau_fence_unref(struct nouveau_fence **);
+extern struct nouveau_fence *nouveau_fence_ref(struct nouveau_fence *);
 extern int nouveau_fence_sync(struct nouveau_fence *, struct nouveau_channel *);
-static inline int nouveau_fence_flush(struct nouveau_fence *obj)
-{
-	return __nouveau_fence_flush(obj, NULL);
-}
-static inline void nouveau_fence_unref(struct nouveau_fence **obj)
-{
-	__nouveau_fence_unref((void **)obj);
-}
-static inline struct nouveau_fence *nouveau_fence_ref(struct nouveau_fence *obj)
-{
-	return __nouveau_fence_ref(obj);
-}
 
 /* nouveau_gem.c */
 extern int nouveau_gem_new(struct drm_device *, int size, int align,
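
With the header reduced to the struct-typed prototypes, a caller that
wants to wait on whatever fence a buffer currently carries would take a
reference under bdev->fence_lock and use the typed calls.  A rough
sketch, with the function name invented for illustration (real code may
well go through ttm_bo_wait() instead):

/* Illustrative only: wait for the fence currently attached to a bo. */
static int
example_wait_bo_idle(struct nouveau_bo *nvbo, bool intr)
{
	struct nouveau_fence *fence = NULL;
	int ret = 0;

	spin_lock(&nvbo->bo.bdev->fence_lock);
	if (nvbo->bo.sync_obj)
		fence = nouveau_fence_ref(nvbo->bo.sync_obj);
	spin_unlock(&nvbo->bo.bdev->fence_lock);

	if (fence) {
		ret = nouveau_fence_wait(fence, true, intr);
		nouveau_fence_unref(&fence);
	}
	return ret;
}
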
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index aca4719..f26177a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -199,28 +199,23 @@
 }
 
 void
-__nouveau_fence_unref(void **sync_obj)
+nouveau_fence_unref(struct nouveau_fence **pfence)
 {
-	struct nouveau_fence *fence = nouveau_fence(*sync_obj);
-
-	if (fence)
-		kref_put(&fence->refcount, nouveau_fence_del);
-	*sync_obj = NULL;
+	if (*pfence)
+		kref_put(&(*pfence)->refcount, nouveau_fence_del);
+	*pfence = NULL;
 }
 
-void *
-__nouveau_fence_ref(void *sync_obj)
+struct nouveau_fence *
+nouveau_fence_ref(struct nouveau_fence *fence)
 {
-	struct nouveau_fence *fence = nouveau_fence(sync_obj);
-
 	kref_get(&fence->refcount);
-	return sync_obj;
+	return fence;
 }
 
 bool
-__nouveau_fence_signalled(void *sync_obj, void *sync_arg)
+nouveau_fence_signalled(struct nouveau_fence *fence)
 {
-	struct nouveau_fence *fence = nouveau_fence(sync_obj);
 	struct nouveau_channel *chan = fence->channel;
 
 	if (fence->signalled)
@@ -231,25 +226,20 @@
 }
 
 int
-__nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
+nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
 {
-	struct nouveau_fence *fence = nouveau_fence(sync_obj);
-	unsigned long timeout = fence->timeout;
 	unsigned long sleep_time = NSEC_PER_MSEC / 1000;
 	ktime_t t;
 	int ret = 0;
 
-	while (1) {
-		if (__nouveau_fence_signalled(sync_obj, sync_arg))
-			break;
-
-		if (time_after_eq(jiffies, timeout)) {
+	while (!nouveau_fence_signalled(fence)) {
+		if (time_after_eq(jiffies, fence->timeout)) {
 			ret = -EBUSY;
 			break;
 		}
 
-		__set_current_state(intr ? TASK_INTERRUPTIBLE
-			: TASK_UNINTERRUPTIBLE);
+		__set_current_state(intr ? TASK_INTERRUPTIBLE :
+					   TASK_UNINTERRUPTIBLE);
 		if (lazy) {
 			t = ktime_set(0, sleep_time);
 			schedule_hrtimeout(&t, HRTIMER_MODE_REL);