drm/amdgpu: use kernel fence for last_pt_update

Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Jammy Zhou <jammy.zhou@amd.com>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 5b8e1ae..371ff08 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -539,7 +539,7 @@
 struct amdgpu_bo_va {
 	/* protected by bo being reserved */
 	struct list_head		bo_list;
-	struct amdgpu_fence		*last_pt_update;
+	struct fence		        *last_pt_update;
 	unsigned			ref_count;
 
 	/* protected by vm mutex and spinlock */
@@ -1241,7 +1241,7 @@
 		struct amdgpu_vm *vm;
 		uint64_t start;
 		uint64_t last;
-		struct amdgpu_fence **fence;
+		struct fence **fence;
 
 	} vm_mapping;
 	struct {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index fe81b46..aee5911 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -581,7 +581,7 @@
 			if (r)
 				return r;
 
-			f = &bo_va->last_pt_update->base;
+			f = bo_va->last_pt_update;
 			r = amdgpu_sync_fence(adev, &p->ibs[0].sync, f);
 			if (r)
 				return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 8745d4c..d90254f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -737,7 +737,7 @@
  */
 static void amdgpu_vm_fence_pts(struct amdgpu_vm *vm,
 				uint64_t start, uint64_t end,
-				struct amdgpu_fence *fence)
+				struct fence *fence)
 {
 	unsigned i;
 
@@ -745,20 +745,20 @@
 	end >>= amdgpu_vm_block_size;
 
 	for (i = start; i <= end; ++i)
-		amdgpu_bo_fence(vm->page_tables[i].bo, &fence->base, true);
+		amdgpu_bo_fence(vm->page_tables[i].bo, fence, true);
 }
 
 static int amdgpu_vm_bo_update_mapping_run_job(
 	struct amdgpu_cs_parser *sched_job)
 {
-	struct amdgpu_fence **fence = sched_job->job_param.vm_mapping.fence;
+	struct fence **fence = sched_job->job_param.vm_mapping.fence;
 	amdgpu_vm_fence_pts(sched_job->job_param.vm_mapping.vm,
 			    sched_job->job_param.vm_mapping.start,
 			    sched_job->job_param.vm_mapping.last + 1,
-			    sched_job->ibs[sched_job->num_ibs -1].fence);
+			    &sched_job->ibs[sched_job->num_ibs -1].fence->base);
 	if (fence) {
-		amdgpu_fence_unref(fence);
-		*fence = amdgpu_fence_ref(sched_job->ibs[sched_job->num_ibs -1].fence);
+		fence_put(*fence);
+		*fence = fence_get(&sched_job->ibs[sched_job->num_ibs -1].fence->base);
 	}
 	return 0;
 }
@@ -781,7 +781,7 @@
 				       struct amdgpu_vm *vm,
 				       struct amdgpu_bo_va_mapping *mapping,
 				       uint64_t addr, uint32_t gtt_flags,
-				       struct amdgpu_fence **fence)
+				       struct fence **fence)
 {
 	struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
 	unsigned nptes, ncmds, ndw;
@@ -902,10 +902,10 @@
 		}
 
 		amdgpu_vm_fence_pts(vm, mapping->it.start,
-				    mapping->it.last + 1, ib->fence);
+				    mapping->it.last + 1, &ib->fence->base);
 		if (fence) {
-			amdgpu_fence_unref(fence);
-			*fence = amdgpu_fence_ref(ib->fence);
+			fence_put(*fence);
+			*fence = fence_get(&ib->fence->base);
 		}
 
 		amdgpu_ib_free(adev, ib);
@@ -1038,7 +1038,7 @@
 	spin_unlock(&vm->status_lock);
 
 	if (bo_va)
-		r = amdgpu_sync_fence(adev, sync, &bo_va->last_pt_update->base);
+		r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update);
 
 	return r;
 }
@@ -1318,7 +1318,7 @@
 		kfree(mapping);
 	}
 
-	amdgpu_fence_unref(&bo_va->last_pt_update);
+	fence_put(bo_va->last_pt_update);
 	kfree(bo_va);
 
 	mutex_unlock(&vm->mutex);