drm/i915/gvt: fix nested sleeping issue
We cannot use a blocking method such as mutex_lock() inside a
wait loop: here we invoke pick_next_workload(), which needs to
acquire a mutex, in our "condition" expression. That starts
another going-to-sleep sequence and changes the task state
while we are already inside one. This is dangerous. Let's
rewrite the wait sequence to avoid nested sleeping.
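
For illustration, wait_event_interruptible() evaluates its
condition after the task state has already been set, roughly
like the sketch below (simplified from the wait_event machinery
in include/linux/wait.h; wq and condition stand in for the real
arguments):

	for (;;) {
		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE);
		if (condition)	/* evaluated while !TASK_RUNNING */
			break;
		schedule();
	}
	finish_wait(&wq, &__wait);

If the condition itself sleeps, as mutex_lock() may, the inner
sleep clobbers the TASK_INTERRUPTIBLE state and can trigger the
"do not call blocking ops when !TASK_RUNNING" warning from
CONFIG_DEBUG_ATOMIC_SLEEP.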
v2: fix do...while loop exit condition (zhenyu)
v3: rebase to gvt-staging branch
Signed-off-by: Du, Changbin <changbin.du@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index e96eaee..f7e320b 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -402,19 +402,24 @@
 	struct intel_vgpu_workload *workload = NULL;
 	int ret;
 	bool need_force_wake = IS_SKYLAKE(gvt->dev_priv);
+	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 
 	kfree(p);
 
 	gvt_dbg_core("workload thread for ring %d started\n", ring_id);
 
 	while (!kthread_should_stop()) {
-		ret = wait_event_interruptible(scheduler->waitq[ring_id],
-				kthread_should_stop() ||
-				(workload = pick_next_workload(gvt, ring_id)));
-
-		WARN_ON_ONCE(ret);
-
-		if (kthread_should_stop())
+		add_wait_queue(&scheduler->waitq[ring_id], &wait);
+		do {
+			workload = pick_next_workload(gvt, ring_id);
+			if (workload)
+				break;
+			wait_woken(&wait, TASK_INTERRUPTIBLE,
+				   MAX_SCHEDULE_TIMEOUT);
+		} while (!kthread_should_stop());
+		remove_wait_queue(&scheduler->waitq[ring_id], &wait);
+
+		if (!workload)
 			break;
 
 		mutex_lock(&scheduler_mutex);
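
For reference, the fix follows the generic woken-wait pattern
(a minimal sketch, assuming a wait_queue_head_t wq and a
condition expression that may sleep):

	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(&wq, &wait);
	for (;;) {
		if (condition)	/* checked in TASK_RUNNING, may sleep */
			break;
		wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&wq, &wait);

wait_woken() sets the task state itself and relies on the
WQ_FLAG_WOKEN bit to close the race with a wakeup arriving
between the condition check and the sleep, so the condition is
free to take a mutex.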