drm/i915/gvt: properly access enabled intel_engine_cs

Switch to the new for_each_engine() helper to properly access enabled
intel_engine_cs, as the i915 core now manages the engine list
dynamically. GVT-g init still depends on the ring mask to determine
the engine list, as it did before.

Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
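---
For context, the for_each_engine() helper this patch switches to only
visits engines the i915 core has actually set up, so fused-off or
unsupported rings are skipped. A rough usage sketch (illustration only,
not part of the change; dev_priv stands in for the local i915 pointer):

	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, dev_priv, id) {
		/* 'engine' and 'id' are set only for enabled engines,
		 * unlike a plain 0..I915_NUM_ENGINES loop */
	}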
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 278db0c..b605ac6 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -37,9 +37,10 @@
 static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
 {
 	struct intel_vgpu_execlist *execlist;
-	int i;
+	enum intel_engine_id i;
+	struct intel_engine_cs *engine;
 
-	for (i = 0; i < I915_NUM_ENGINES; i++) {
+	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
 		execlist = &vgpu->execlist[i];
 		if (!list_empty(workload_q_head(vgpu, i)))
 			return true;
@@ -51,7 +52,8 @@
 static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
 {
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
-	int i;
+	enum intel_engine_id i;
+	struct intel_engine_cs *engine;
 
 	/* no target to schedule */
 	if (!scheduler->next_vgpu)
@@ -67,7 +69,7 @@
 	scheduler->need_reschedule = true;
 
 	/* still have uncompleted workload? */
-	for (i = 0; i < I915_NUM_ENGINES; i++) {
+	for_each_engine(engine, gvt->dev_priv, i) {
 		if (scheduler->current_workload[i]) {
 			gvt_dbg_sched("still have running workload\n");
 			return;
@@ -84,7 +86,7 @@
 	scheduler->need_reschedule = false;
 
 	/* wake up workload dispatch thread */
-	for (i = 0; i < I915_NUM_ENGINES; i++)
+	for_each_engine(engine, gvt->dev_priv, i)
 		wake_up(&scheduler->waitq[i]);
 }