drm/i915: move all rps state into dev_priv->rps

This way it's easier to see what belongs together, and what is used
by the ilk ips code. Also add some comments that explain the locking.

Note that (cur|min|max)_delay need to be duplicated, because
they're also used by the ips code.
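
For reference, the new grouping in struct drm_i915_private looks roughly
like this (a simplified sketch of the i915_drv.h hunk below, not the
exact code):

	/* gen6+ rps state */
	struct {
		struct work_struct work;
		u32 pm_iir;
		/* irqsave spinlock protecting work and pm_iir */
		spinlock_t lock;

		/* cur/min/max_delay and all the rps hw state are
		 * protected by dev->struct_mutex */
		u8 cur_delay;
		u8 min_delay;
		u8 max_delay;
	} rps;

The old cur/min/max_delay fields stay behind in dev_priv for the ilk
ips code.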

v2: Missed one place that the dev_priv->ips change caught ...

Reviewed-by: Ben Widawsky <ben@bwidawsk.net>
Reviewed-by: Damien Lespiau <damien.lespiau@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 9c9e199..ed4bc98 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1274,7 +1274,8 @@
 
 	seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n");
 
-	for (gpu_freq = dev_priv->min_delay; gpu_freq <= dev_priv->max_delay;
+	for (gpu_freq = dev_priv->rps.min_delay;
+	     gpu_freq <= dev_priv->rps.max_delay;
 	     gpu_freq++) {
 		I915_WRITE(GEN6_PCODE_DATA, gpu_freq);
 		I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
@@ -1712,7 +1713,7 @@
 		return ret;
 
 	len = snprintf(buf, sizeof(buf),
-		       "max freq: %d\n", dev_priv->max_delay * 50);
+		       "max freq: %d\n", dev_priv->rps.max_delay * 50);
 	mutex_unlock(&dev->struct_mutex);
 
 	if (len > sizeof(buf))
@@ -1755,7 +1756,7 @@
 	/*
 	 * Turbo will still be enabled, but won't go above the set value.
 	 */
-	dev_priv->max_delay = val / 50;
+	dev_priv->rps.max_delay = val / 50;
 
 	gen6_set_rps(dev, val / 50);
 	mutex_unlock(&dev->struct_mutex);
@@ -1788,7 +1789,7 @@
 		return ret;
 
 	len = snprintf(buf, sizeof(buf),
-		       "min freq: %d\n", dev_priv->min_delay * 50);
+		       "min freq: %d\n", dev_priv->rps.min_delay * 50);
 	mutex_unlock(&dev->struct_mutex);
 
 	if (len > sizeof(buf))
@@ -1829,7 +1830,7 @@
 	/*
 	 * Turbo will still be enabled, but won't go below the set value.
 	 */
-	dev_priv->min_delay = val / 50;
+	dev_priv->rps.min_delay = val / 50;
 
 	gen6_set_rps(dev, val / 50);
 	mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index a7a213c..0a1b64f 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1605,7 +1605,7 @@
 
 	spin_lock_init(&dev_priv->irq_lock);
 	spin_lock_init(&dev_priv->error_lock);
-	spin_lock_init(&dev_priv->rps_lock);
+	spin_lock_init(&dev_priv->rps.lock);
 
 	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
 		dev_priv->num_pipe = 3;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 26a2cf6..1614097 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -819,9 +819,21 @@
 
 	bool mchbar_need_disable;
 
-	struct work_struct rps_work;
-	spinlock_t rps_lock;
-	u32 pm_iir;
+	/* gen6+ rps state */
+	struct {
+		struct work_struct work;
+		u32 pm_iir;
+	/* lock - irqsave spinlock that protects the work_struct and
+	 * pm_iir. */
+		spinlock_t lock;
+
+	/* The below variables and all the rps hw state are protected by
+	 * dev->struct_mutex. */
+		u8 cur_delay;
+		u8 min_delay;
+		u8 max_delay;
+	} rps;
+
 
 	u8 cur_delay;
 	u8 min_delay;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 440c905..74c9a0e 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -349,16 +349,16 @@
 static void gen6_pm_rps_work(struct work_struct *work)
 {
 	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
-						    rps_work);
+						    rps.work);
 	u32 pm_iir, pm_imr;
 	u8 new_delay;
 
-	spin_lock_irq(&dev_priv->rps_lock);
-	pm_iir = dev_priv->pm_iir;
-	dev_priv->pm_iir = 0;
+	spin_lock_irq(&dev_priv->rps.lock);
+	pm_iir = dev_priv->rps.pm_iir;
+	dev_priv->rps.pm_iir = 0;
 	pm_imr = I915_READ(GEN6_PMIMR);
 	I915_WRITE(GEN6_PMIMR, 0);
-	spin_unlock_irq(&dev_priv->rps_lock);
+	spin_unlock_irq(&dev_priv->rps.lock);
 
 	if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
 		return;
@@ -366,9 +366,9 @@
 	mutex_lock(&dev_priv->dev->struct_mutex);
 
 	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
-		new_delay = dev_priv->cur_delay + 1;
+		new_delay = dev_priv->rps.cur_delay + 1;
 	else
-		new_delay = dev_priv->cur_delay - 1;
+		new_delay = dev_priv->rps.cur_delay - 1;
 
 	gen6_set_rps(dev_priv->dev, new_delay);
 
@@ -488,20 +488,20 @@
 	 * IIR bits should never already be set because IMR should
 	 * prevent an interrupt from being shown in IIR. The warning
 	 * displays a case where we've unsafely cleared
-	 * dev_priv->pm_iir. Although missing an interrupt of the same
+	 * dev_priv->rps.pm_iir. Although missing an interrupt of the same
 	 * type is not a problem, it displays a problem in the logic.
 	 *
-	 * The mask bit in IMR is cleared by rps_work.
+	 * The mask bit in IMR is cleared by dev_priv->rps.work.
 	 */
 
-	spin_lock_irqsave(&dev_priv->rps_lock, flags);
-	WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
-	dev_priv->pm_iir |= pm_iir;
-	I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
+	spin_lock_irqsave(&dev_priv->rps.lock, flags);
+	WARN(dev_priv->rps.pm_iir & pm_iir, "Missed a PM interrupt\n");
+	dev_priv->rps.pm_iir |= pm_iir;
+	I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
 	POSTING_READ(GEN6_PMIMR);
-	spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
+	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
 
-	queue_work(dev_priv->wq, &dev_priv->rps_work);
+	queue_work(dev_priv->wq, &dev_priv->rps.work);
 }
 
 static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
@@ -2649,7 +2649,7 @@
 
 	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
 	INIT_WORK(&dev_priv->error_work, i915_error_work_func);
-	INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
+	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
 	INIT_WORK(&dev_priv->parity_error_work, ivybridge_parity_work);
 
 	dev->driver->get_vblank_counter = i915_get_vblank_counter;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 2e1f28f..bddb290 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -7218,7 +7218,7 @@
 	 * enqueue unpin/hotplug work. */
 	drm_irq_uninstall(dev);
 	cancel_work_sync(&dev_priv->hotplug_work);
-	cancel_work_sync(&dev_priv->rps_work);
+	cancel_work_sync(&dev_priv->rps.work);
 
 	/* flush any delayed tasks or pending work */
 	flush_scheduled_work();
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 4fe8d0a..e1439ae 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2277,9 +2277,10 @@
 	u32 limits;
 
 	limits = 0;
-	if (*val >= dev_priv->max_delay)
-		*val = dev_priv->max_delay;
-	limits |= dev_priv->max_delay << 24;
+
+	if (*val >= dev_priv->rps.max_delay)
+		*val = dev_priv->rps.max_delay;
+	limits |= dev_priv->rps.max_delay << 24;
 
 	/* Only set the down limit when we've reached the lowest level to avoid
 	 * getting more interrupts, otherwise leave this clear. This prevents a
@@ -2287,9 +2288,9 @@
 	 * the hw runs at the minimal clock before selecting the desired
 	 * frequency, if the down threshold expires in that window we will not
 	 * receive a down interrupt. */
-	if (*val <= dev_priv->min_delay) {
-		*val = dev_priv->min_delay;
-		limits |= dev_priv->min_delay << 16;
+	if (*val <= dev_priv->rps.min_delay) {
+		*val = dev_priv->rps.min_delay;
+		limits |= dev_priv->rps.min_delay << 16;
 	}
 
 	return limits;
@@ -2302,7 +2303,7 @@
 
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
-	if (val == dev_priv->cur_delay)
+	if (val == dev_priv->rps.cur_delay)
 		return;
 
 	I915_WRITE(GEN6_RPNSWREQ,
@@ -2315,7 +2316,7 @@
 	 */
 	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
 
-	dev_priv->cur_delay = val;
+	dev_priv->rps.cur_delay = val;
 }
 
 static void gen6_disable_rps(struct drm_device *dev)
@@ -2331,9 +2332,9 @@
 	 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
 	 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
 
-	spin_lock_irq(&dev_priv->rps_lock);
-	dev_priv->pm_iir = 0;
-	spin_unlock_irq(&dev_priv->rps_lock);
+	spin_lock_irq(&dev_priv->rps.lock);
+	dev_priv->rps.pm_iir = 0;
+	spin_unlock_irq(&dev_priv->rps.lock);
 
 	I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
 }
@@ -2402,9 +2403,9 @@
 	gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
 
 	/* In units of 100MHz */
-	dev_priv->max_delay = rp_state_cap & 0xff;
-	dev_priv->min_delay = (rp_state_cap & 0xff0000) >> 16;
-	dev_priv->cur_delay = 0;
+	dev_priv->rps.max_delay = rp_state_cap & 0xff;
+	dev_priv->rps.min_delay = (rp_state_cap & 0xff0000) >> 16;
+	dev_priv->rps.cur_delay = 0;
 
 	/* disable the counters and set deterministic thresholds */
 	I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -2457,8 +2458,8 @@
 
 	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
 	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
-		   dev_priv->max_delay << 24 |
-		   dev_priv->min_delay << 16);
+		   dev_priv->rps.max_delay << 24 |
+		   dev_priv->rps.min_delay << 16);
 
 	if (IS_HASWELL(dev)) {
 		I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
@@ -2503,7 +2504,7 @@
 		     500))
 		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
 	if (pcu_mbox & (1<<31)) { /* OC supported */
-		dev_priv->max_delay = pcu_mbox & 0xff;
+		dev_priv->rps.max_delay = pcu_mbox & 0xff;
 		DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
 	}
 
@@ -2511,10 +2512,10 @@
 
 	/* requires MSI enabled */
 	I915_WRITE(GEN6_PMIER, GEN6_PM_DEFERRED_EVENTS);
-	spin_lock_irq(&dev_priv->rps_lock);
-	WARN_ON(dev_priv->pm_iir != 0);
+	spin_lock_irq(&dev_priv->rps.lock);
+	WARN_ON(dev_priv->rps.pm_iir != 0);
 	I915_WRITE(GEN6_PMIMR, 0);
-	spin_unlock_irq(&dev_priv->rps_lock);
+	spin_unlock_irq(&dev_priv->rps.lock);
 	/* enable all PM interrupts */
 	I915_WRITE(GEN6_PMINTRMSK, 0);
 
@@ -2546,9 +2547,9 @@
 	 * to use for memory access.  We do this by specifying the IA frequency
 	 * the PCU should use as a reference to determine the ring frequency.
 	 */
-	for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
+	for (gpu_freq = dev_priv->rps.max_delay; gpu_freq >= dev_priv->rps.min_delay;
 	     gpu_freq--) {
-		int diff = dev_priv->max_delay - gpu_freq;
+		int diff = dev_priv->rps.max_delay - gpu_freq;
 
 		/*
 		 * For GPU frequencies less than 750MHz, just use the lowest
@@ -2991,7 +2992,7 @@
 
 	assert_spin_locked(&mchdev_lock);
 
-	pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4));
+	pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_delay * 4));
 	pxvid = (pxvid >> 24) & 0x7f;
 	ext_v = pvid_to_extvid(dev_priv, pxvid);