drm/amdgpu: add optional ring to amdgpu_sync_is_idle

Check whether the sync object is idle with respect to the ring a submission
will run on. Fences that are already scheduled on that ring no longer need
to be fully signaled for the sync object to count as idle.
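
For illustration only (not part of this patch), a minimal sketch of how a
caller that knows its target ring could use the new parameter; 'ring', 'id'
and reuse_vm_id() stand for hypothetical caller state and helpers:

	/*
	 * Illustrative sketch, not part of this patch: with the ring
	 * passed in, a fence that is merely scheduled on 'ring' no
	 * longer blocks idleness; only fences on other rings still
	 * have to be signaled.
	 */
	if (amdgpu_sync_is_idle(&id->active, ring)) {
		/* every blocking fence is signaled or already queued on 'ring' */
		reuse_vm_id(id);	/* hypothetical helper */
	}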

Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 0c6c6c9..922a20c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -597,7 +597,8 @@
 		     struct amdgpu_sync *sync,
 		     struct reservation_object *resv,
 		     void *owner);
-bool amdgpu_sync_is_idle(struct amdgpu_sync *sync);
+bool amdgpu_sync_is_idle(struct amdgpu_sync *sync,
+			 struct amdgpu_ring *ring);
 int amdgpu_sync_cycle_fences(struct amdgpu_sync *dst, struct amdgpu_sync *src,
 			     struct fence *fence);
 struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 009e905..e395bbe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -166,7 +166,7 @@
 	}
 	job = to_amdgpu_job(sched_job);
 
-	BUG_ON(!amdgpu_sync_is_idle(&job->sync));
+	BUG_ON(!amdgpu_sync_is_idle(&job->sync, NULL));
 
 	trace_amdgpu_sched_run_job(job);
 	r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index c0ed5b9..a2766d72 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -226,10 +226,13 @@
  * amdgpu_sync_is_idle - test if all fences are signaled
  *
  * @sync: the sync object
+ * @ring: optional ring to use for test
  *
- * Returns true if all fences in the sync object are signaled.
+ * Returns true if all fences in the sync object are signaled or scheduled to
+ * the ring (if provided).
  */
-bool amdgpu_sync_is_idle(struct amdgpu_sync *sync)
+bool amdgpu_sync_is_idle(struct amdgpu_sync *sync,
+			 struct amdgpu_ring *ring)
 {
 	struct amdgpu_sync_entry *e;
 	struct hlist_node *tmp;
@@ -237,6 +240,16 @@
 
 	hash_for_each_safe(sync->fences, i, tmp, e, node) {
 		struct fence *f = e->fence;
+		struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
+
+		if (ring && s_fence) {
+			/* For fences from the same ring it is sufficient
+			 * when they are scheduled.
+			 */
+			if (s_fence->sched == &ring->sched &&
+			    fence_is_signaled(&s_fence->scheduled))
+				continue;
+		}
 
 		if (fence_is_signaled(f)) {
 			hash_del(&e->node);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 9f36ed3..711d92b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -240,13 +240,13 @@
 			      struct amdgpu_vm_id,
 			      list);
 
-	if (!amdgpu_sync_is_idle(&id->active)) {
+	if (!amdgpu_sync_is_idle(&id->active, NULL)) {
 		struct list_head *head = &adev->vm_manager.ids_lru;
 		struct amdgpu_vm_id *tmp;
 
 		list_for_each_entry_safe(id, tmp, &adev->vm_manager.ids_lru,
 					 list) {
-			if (amdgpu_sync_is_idle(&id->active)) {
+			if (amdgpu_sync_is_idle(&id->active, NULL)) {
 				list_move(&id->list, head);
 				head = &id->list;
 			}