drm/i915: Add locking to psr code
Having locking here isn't really optional ...
The ugly part is how much locking the psr work needs, since it has to
recheck everything - which is way too much. But we need to ditch the
psr work in its current form anyway and implement proper frontbuffer
tracking.
The other nasty bit that had to go was the delayed work cancel in
psr_exit, which means a bunch of races just became a bit more likely.
Mea culpa.
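
For reference, the lock nesting the reworked work handler ends up with
looks roughly like this (condensed from the intel_edp_psr_work hunks
below; the comments on what each lock covers are my reading of the
asserts added to intel_edp_psr_match_conditions, not anything new):

	static void intel_edp_psr_work(struct work_struct *work)
	{
		struct drm_i915_private *dev_priv =
			container_of(work, typeof(*dev_priv), psr.work.work);
		struct drm_device *dev = dev_priv->dev;
		struct intel_dp *intel_dp;

		/* Outermost: all modeset locks, since the condition recheck
		 * looks at crtc and connector state. */
		drm_modeset_lock_all(dev);
		/* Then struct_mutex, for the frontbuffer gem object checks. */
		mutex_lock(&dev->struct_mutex);
		/* Innermost: the new psr.lock protecting dev_priv->psr. */
		mutex_lock(&dev_priv->psr.lock);

		/* Re-read under psr.lock - psr_disable may have cleared it. */
		intel_dp = dev_priv->psr.enabled;
		if (intel_dp && intel_edp_psr_match_conditions(intel_dp))
			intel_edp_psr_do_enable(intel_dp);

		mutex_unlock(&dev_priv->psr.lock);
		mutex_unlock(&dev->struct_mutex);
		drm_modeset_unlock_all(dev);
	}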
v2: Fix up the HAS_PSR checks, which were resulting in uninitialized
mutex issues.
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index d8115c3..faa27d0 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -660,6 +660,7 @@
struct intel_dp;
struct i915_psr {
+ struct mutex lock;
bool sink_support;
bool source_ok;
struct intel_dp *enabled;
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index fd92a81..3a3bb09 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1767,6 +1767,11 @@
struct drm_i915_gem_object *obj = intel_fb_obj(crtc->primary->fb);
struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
+ lockdep_assert_held(&dev_priv->psr.lock);
+ lockdep_assert_held(&dev->struct_mutex);
+ WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+ WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
+
dev_priv->psr.source_ok = false;
if (!HAS_PSR(dev)) {
@@ -1836,6 +1841,7 @@
WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
WARN_ON(dev_priv->psr.active);
+ lockdep_assert_held(&dev_priv->psr.lock);
/* Enable PSR on the panel */
intel_edp_psr_enable_sink(intel_dp);
@@ -1862,8 +1868,10 @@
return;
}
+ mutex_lock(&dev_priv->psr.lock);
if (dev_priv->psr.enabled) {
DRM_DEBUG_KMS("PSR already in use\n");
+ mutex_unlock(&dev_priv->psr.lock);
return;
}
@@ -1872,6 +1880,7 @@
if (intel_edp_psr_match_conditions(intel_dp))
intel_edp_psr_do_enable(intel_dp);
+ mutex_unlock(&dev_priv->psr.lock);
}
void intel_edp_psr_disable(struct intel_dp *intel_dp)
@@ -1879,9 +1888,15 @@
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
- if (!dev_priv->psr.enabled)
+ if (!HAS_PSR(dev))
return;
+ mutex_lock(&dev_priv->psr.lock);
+ if (!dev_priv->psr.enabled) {
+ mutex_unlock(&dev_priv->psr.lock);
+ return;
+ }
+
if (dev_priv->psr.active) {
I915_WRITE(EDP_PSR_CTL(dev),
I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
@@ -1897,19 +1912,30 @@
}
dev_priv->psr.enabled = NULL;
+ mutex_unlock(&dev_priv->psr.lock);
}
static void intel_edp_psr_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv), psr.work.work);
+ struct drm_device *dev = dev_priv->dev;
struct intel_dp *intel_dp = dev_priv->psr.enabled;
+ drm_modeset_lock_all(dev);
+ mutex_lock(&dev->struct_mutex);
+ mutex_lock(&dev_priv->psr.lock);
+ intel_dp = dev_priv->psr.enabled;
+
if (!intel_dp)
- return;
+ goto unlock;
if (intel_edp_psr_match_conditions(intel_dp))
intel_edp_psr_do_enable(intel_dp);
+unlock:
+ mutex_unlock(&dev_priv->psr.lock);
+ mutex_unlock(&dev->struct_mutex);
+ drm_modeset_unlock_all(dev);
}
void intel_edp_psr_exit(struct drm_device *dev)
@@ -1922,8 +1948,7 @@
if (!dev_priv->psr.enabled)
return;
- cancel_delayed_work_sync(&dev_priv->psr.work);
-
+ mutex_lock(&dev_priv->psr.lock);
if (dev_priv->psr.active) {
u32 val = I915_READ(EDP_PSR_CTL(dev));
@@ -1936,16 +1961,15 @@
schedule_delayed_work(&dev_priv->psr.work,
msecs_to_jiffies(100));
+ mutex_unlock(&dev_priv->psr.lock);
}
void intel_edp_psr_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (!HAS_PSR(dev))
- return;
-
INIT_DELAYED_WORK(&dev_priv->psr.work, intel_edp_psr_work);
+ mutex_init(&dev_priv->psr.lock);
}
static void intel_disable_dp(struct intel_encoder *encoder)