drm/i915: add dynamic performance control support for Ironlake

Ironlake (and 965GM, which this patch doesn't support) supports a
hardware performance and power management feature that allows it to
adjust to changes in GPU load over time with software help.  The goal
of this is to maximize performance/power for a given workload.

This patch enables that feature, which is also a requirement for
supporting Intelligent Power Sharing, a feature which allows for
dynamic budgeting of power between the CPU and GPU on Arrandale
platforms.
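
In outline, the stepping policy the new interrupt handler implements
looks like the function below (a stand-alone sketch of the logic, not
the driver code; the busy averages come from the RCPREVBSYT{UP,DN}AVG
registers and the thresholds from RCB{MAX,MIN}AVG).  Note that a lower
delay value means a higher frequency, so max_delay is numerically the
smallest step:

	#include <stdint.h>

	/* One evaluation step of the RPS policy: move one frequency
	 * point up or down depending on which busyness threshold the
	 * hardware reports we crossed, clamping at the end points. */
	static uint8_t rps_new_delay(uint32_t busy_up, uint32_t busy_down,
				     uint32_t max_avg, uint32_t min_avg,
				     uint8_t cur_delay,
				     uint8_t max_delay, uint8_t min_delay)
	{
		uint8_t new_delay = cur_delay;

		if (busy_up > max_avg) {
			/* Busier than the up threshold: step toward the
			 * highest frequency (smallest delay). */
			if (cur_delay != max_delay)
				new_delay = cur_delay - 1;
			if (new_delay < max_delay)
				new_delay = max_delay;
		} else if (busy_down < min_avg) {
			/* Idler than the down threshold: step toward the
			 * lowest frequency (largest delay). */
			if (cur_delay != min_delay)
				new_delay = cur_delay + 1;
			if (new_delay > min_delay)
				new_delay = min_delay;
		}

		return new_delay;
	}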

Tested-by: ykzhao <yakui.zhao@intel.com>
[anholt: Resolved against the irq handler loop removal]
Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
Signed-off-by: Eric Anholt <eric@anholt.net>
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index a17d6bdf..8b35f5e 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -269,6 +269,57 @@
 	drm_sysfs_hotplug_event(dev);
 }
 
+static void i915_handle_rps_change(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u32 busy_up, busy_down, max_avg, min_avg;
+	u16 rgvswctl;
+	u8 new_delay = dev_priv->cur_delay;
+
+	I915_WRITE(MEMINTRSTS, I915_READ(MEMINTRSTS) & ~MEMINT_EVAL_CHG);
+	busy_up = I915_READ(RCPREVBSYTUPAVG);
+	busy_down = I915_READ(RCPREVBSYTDNAVG);
+	max_avg = I915_READ(RCBMAXAVG);
+	min_avg = I915_READ(RCBMINAVG);
+
+	/* Handle RPS change request from hw; lower delay == higher frequency */
+	if (busy_up > max_avg) {
+		if (dev_priv->cur_delay != dev_priv->max_delay)
+			new_delay = dev_priv->cur_delay - 1;
+		if (new_delay < dev_priv->max_delay)
+			new_delay = dev_priv->max_delay;
+	} else if (busy_down < min_avg) {
+		if (dev_priv->cur_delay != dev_priv->min_delay)
+			new_delay = dev_priv->cur_delay + 1;
+		if (new_delay > dev_priv->min_delay)
+			new_delay = dev_priv->min_delay;
+	}
+
+	DRM_DEBUG("rps change requested: %d -> %d\n",
+		  dev_priv->cur_delay, new_delay);
+
+	rgvswctl = I915_READ(MEMSWCTL);
+	if (rgvswctl & MEMCTL_CMD_STS) {
+		DRM_ERROR("gpu busy, RPS change rejected\n");
+		return; /* still busy with another command */
+	}
+
+	/* Program the new frequency point and kick off the change */
+	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
+		(new_delay << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
+	I915_WRITE(MEMSWCTL, rgvswctl);
+	POSTING_READ(MEMSWCTL);
+
+	rgvswctl |= MEMCTL_CMD_STS;
+	I915_WRITE(MEMSWCTL, rgvswctl);
+
+	dev_priv->cur_delay = new_delay;
+
+	DRM_DEBUG("rps changed\n");
+
+	return;
+}
+
 irqreturn_t ironlake_irq_handler(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -331,6 +382,11 @@
 		queue_work(dev_priv->wq, &dev_priv->hotplug_work);
 	}
 
+	if (de_iir & DE_PCU_EVENT) {
+		I915_WRITE(MEMINTRSTS, I915_READ(MEMINTRSTS));
+		i915_handle_rps_change(dev);
+	}
+
 	/* should clear PCH hotplug event before clear CPU irq */
 	I915_WRITE(SDEIIR, pch_iir);
 	I915_WRITE(GTIIR, gt_iir);
@@ -1064,6 +1120,13 @@
 	I915_WRITE(SDEIER, dev_priv->pch_irq_enable_reg);
 	(void) I915_READ(SDEIER);
 
+	if (IS_IRONLAKE_M(dev)) {
+		/* Clear & enable PCU event interrupts */
+		I915_WRITE(DEIIR, DE_PCU_EVENT);
+		I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
+		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
+	}
+
 	return 0;
 }
 
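
A note on the MEMSWCTL handshake in i915_handle_rps_change() above:
software programs the command and frequency fields, posts the write
with a read-back, then sets MEMCTL_CMD_STS to kick off the command;
hardware clears that bit when the change completes, which is why the
handler bails out early if it is still set.  A stand-alone model of
the sequence (register access is stubbed with a plain variable, and
the bit encodings below are placeholders for illustration; the real
layout lives in i915_reg.h):

	#include <stdint.h>
	#include <stdbool.h>
	#include <stdio.h>

	/* Placeholder encodings, for illustration only. */
	#define MEMCTL_CMD_CHFREQ	2
	#define MEMCTL_CMD_SHIFT	13
	#define MEMCTL_CMD_STS		(1u << 12)
	#define MEMCTL_FREQ_SHIFT	8
	#define MEMCTL_SFCAVM		(1u << 7)

	static uint32_t memswctl;	/* stands in for the MMIO register */

	static bool issue_freq_change(uint8_t new_delay)
	{
		/* Previous command still in flight: hardware hasn't
		 * cleared the busy bit yet, so reject this request. */
		if (memswctl & MEMCTL_CMD_STS)
			return false;

		/* Program the command and frequency fields first... */
		memswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
			   ((uint32_t)new_delay << MEMCTL_FREQ_SHIFT) |
			   MEMCTL_SFCAVM;

		/* ...then set the busy bit to start the change; real
		 * hardware clears it again when the new frequency is
		 * in effect (this model never clears it). */
		memswctl |= MEMCTL_CMD_STS;
		return true;
	}

	int main(void)
	{
		printf("first change issued: %d\n", issue_freq_change(5));
		printf("second change issued: %d\n", issue_freq_change(4));
		return 0;
	}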