drm/i915: Small display interrupt handlers tidy

I have noticed that some of our interrupt handlers use both dev and
dev_priv, while they could get away with only dev_priv in the
vast majority of cases.

Tidying that up had a cascading effect on function prototypes, so
the churn factor is relatively big, but I think it is for the
better.

For example, even where the changes cascade out of i915_irq.c, for
functions prefixed with intel_, genX_ or <plat>_, it makes more
sense to take dev_priv directly anyway.

This allows us to eliminate local variables and the intermixed usage
of dev and dev_priv where only one of the two is needed.
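
For illustration, the conversion boils down to the shape below; this is
a standalone C sketch using hypothetical demo_* names rather than the
real i915 types, but it mirrors e.g. the ironlake_set_drps change in
the diff further down:

  /*
   * Hypothetical illustration only; demo_device/demo_i915_private
   * stand in for drm_device/drm_i915_private.
   */
  struct demo_i915_private {
          unsigned int irq_count;
  };

  struct demo_device {
          void *dev_private;      /* points at demo_i915_private */
  };

  /* Before: handler takes the drm_device and derives dev_priv locally. */
  static void demo_irq_handler_old(struct demo_device *dev)
  {
          struct demo_i915_private *dev_priv = dev->dev_private;

          dev_priv->irq_count++;
  }

  /* After: handler takes dev_priv directly, no local variable needed. */
  static void demo_irq_handler_new(struct demo_i915_private *dev_priv)
  {
          dev_priv->irq_count++;
  }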

The end result is shrinkage of both the source and the resulting binary.

i915.ko:

 - .text         000b0899
 + .text         000b0619

Or if we look at the Gen8 display irq chain:

 -00000000000006ad t gen8_irq_handler
 +0000000000000663 t gen8_irq_handler
   -0000000000000028 T intel_opregion_asle_intr
   +0000000000000024 T intel_opregion_asle_intr
   -000000000000008c t ilk_hpd_irq_handler
   +000000000000007f t ilk_hpd_irq_handler
   -0000000000000116 T intel_check_page_flip
   +0000000000000112 T intel_check_page_flip
   -000000000000011a T intel_prepare_page_flip
   +0000000000000119 T intel_prepare_page_flip
   -0000000000000014 T intel_finish_page_flip_plane
   +0000000000000013 T intel_finish_page_flip_plane
   -0000000000000053 t hsw_pipe_crc_irq_handler
   +000000000000004c t hsw_pipe_crc_irq_handler
   -000000000000022e t cpt_irq_handler
   +0000000000000213 t cpt_irq_handler

The shrinkage is small, but these are all fast paths, so it doesn't hurt.

The situation is similar in the other interrupt handlers as well.

v2: Tidy intel_queue_rps_boost_for_request as well. (Chris Wilson)

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 017c431..999ab8e 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4167,9 +4167,8 @@
  * mchdev_lock. */
 static struct drm_i915_private *i915_mch_dev;
 
-bool ironlake_set_drps(struct drm_device *dev, u8 val)
+bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	u16 rgvswctl;
 
 	assert_spin_locked(&mchdev_lock);
@@ -4191,9 +4190,8 @@
 	return true;
 }
 
-static void ironlake_enable_drps(struct drm_device *dev)
+static void ironlake_enable_drps(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 rgvmodectl;
 	u8 fmax, fmin, fstart, vstart;
 
@@ -4250,7 +4248,7 @@
 		DRM_ERROR("stuck trying to change perf mode\n");
 	mdelay(1);
 
-	ironlake_set_drps(dev, fstart);
+	ironlake_set_drps(dev_priv, fstart);
 
 	dev_priv->ips.last_count1 = I915_READ(DMIEC) +
 		I915_READ(DDREC) + I915_READ(CSIEC);
@@ -4261,9 +4259,8 @@
 	spin_unlock_irq(&mchdev_lock);
 }
 
-static void ironlake_disable_drps(struct drm_device *dev)
+static void ironlake_disable_drps(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	u16 rgvswctl;
 
 	spin_lock_irq(&mchdev_lock);
@@ -4278,7 +4275,7 @@
 	I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
 
 	/* Go back to the starting frequency */
-	ironlake_set_drps(dev, dev_priv->ips.fstart);
+	ironlake_set_drps(dev_priv, dev_priv->ips.fstart);
 	mdelay(1);
 	rgvswctl |= MEMCTL_CMD_STS;
 	I915_WRITE(MEMSWCTL, rgvswctl);
@@ -6095,7 +6092,7 @@
 
 	dev_priv->ips.max_delay = dev_priv->ips.fstart;
 
-	if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart))
+	if (!ironlake_set_drps(dev_priv, dev_priv->ips.fstart))
 		ret = false;
 
 out_unlock:
@@ -6246,13 +6243,11 @@
 		intel_runtime_pm_put(dev_priv);
 }
 
-static void gen6_suspend_rps(struct drm_device *dev)
+static void gen6_suspend_rps(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
 	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
 
-	gen6_disable_rps_interrupts(dev);
+	gen6_disable_rps_interrupts(dev_priv);
 }
 
 /**
@@ -6267,10 +6262,10 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (INTEL_INFO(dev)->gen < 6)
+	if (INTEL_GEN(dev_priv) < 6)
 		return;
 
-	gen6_suspend_rps(dev);
+	gen6_suspend_rps(dev_priv);
 
 	/* Force GPU to min freq during suspend */
 	gen6_rps_idle(dev_priv);
@@ -6281,7 +6276,7 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	if (IS_IRONLAKE_M(dev)) {
-		ironlake_disable_drps(dev);
+		ironlake_disable_drps(dev_priv);
 	} else if (INTEL_INFO(dev)->gen >= 6) {
 		intel_suspend_gt_powersave(dev);
 
@@ -6337,7 +6332,7 @@
 
 	dev_priv->rps.enabled = true;
 
-	gen6_enable_rps_interrupts(dev);
+	gen6_enable_rps_interrupts(dev_priv);
 
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
@@ -6353,7 +6348,7 @@
 		return;
 
 	if (IS_IRONLAKE_M(dev)) {
-		ironlake_enable_drps(dev);
+		ironlake_enable_drps(dev_priv);
 		mutex_lock(&dev->struct_mutex);
 		intel_init_emon(dev);
 		mutex_unlock(&dev->struct_mutex);
@@ -6383,7 +6378,7 @@
 	if (INTEL_INFO(dev)->gen < 6)
 		return;
 
-	gen6_suspend_rps(dev);
+	gen6_suspend_rps(dev_priv);
 	dev_priv->rps.enabled = false;
 }
 
@@ -7412,12 +7407,11 @@
 	kfree(boost);
 }
 
-void intel_queue_rps_boost_for_request(struct drm_device *dev,
-				       struct drm_i915_gem_request *req)
+void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req)
 {
 	struct request_boost *boost;
 
-	if (req == NULL || INTEL_INFO(dev)->gen < 6)
+	if (req == NULL || INTEL_GEN(req->i915) < 6)
 		return;
 
 	if (i915_gem_request_completed(req, true))
@@ -7431,7 +7425,7 @@
 	boost->req = req;
 
 	INIT_WORK(&boost->work, __intel_rps_boost_work);
-	queue_work(to_i915(dev)->wq, &boost->work);
+	queue_work(req->i915->wq, &boost->work);
 }
 
 void intel_pm_setup(struct drm_device *dev)