drm/i915: Rename and comment all the RPS *stuff*

The names of the struct members for RPS are stupid. Every time I need to
do anything in this code I have to spend a significant amount of time
remembering what it all means. By renaming the variables (and adding
comments) I hope to clear up the situation. Indeed, doing this makes some
upcoming patches more readable.

I've avoided ILK because it's possible that the naming used for Ironlake
matches what is in the docs. I believe the ILK power docs were never
published, and I am too lazy to dig them up.

v2: Leave rp0 and rp1 in the names; it is useful to have these limits
available at times. min_freq and max_freq (which may be equal to rp0 or
rp1, depending on the platform) represent the actual HW min and max.
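
As an aside, here is a minimal userspace-style sketch of how the renamed
fields are meant to relate. It is illustrative only: the struct, helper and
numbers below are invented for the example; only the field names, the 50MHz
multiplier and the clamping behaviour (mirroring the clamp_t() in the
i915_irq.c hunk below) follow the driver.

/*
 * Illustrative only -- not driver code.
 */
#include <stdint.h>
#include <stdio.h>

#define GT_FREQUENCY_MULTIPLIER 50	/* HW units -> MHz on SNB+ */

struct rps_sketch {
	uint8_t cur_freq;		/* cached; may lag the hardware */
	uint8_t min_freq_softlimit;	/* driver-imposed floor */
	uint8_t max_freq_softlimit;	/* driver-imposed ceiling */
	uint8_t min_freq;		/* RPn: hard HW minimum */
	uint8_t max_freq;		/* RP0, or above it if overclocked */
};

/* A new frequency request must stay within the soft limits. */
static uint8_t rps_clamp(const struct rps_sketch *rps, int req)
{
	if (req < rps->min_freq_softlimit)
		return rps->min_freq_softlimit;
	if (req > rps->max_freq_softlimit)
		return rps->max_freq_softlimit;
	return (uint8_t)req;
}

int main(void)
{
	/* made-up numbers: RPn = 7 (350MHz), RP0 = 22 (1100MHz) */
	struct rps_sketch rps = {
		.cur_freq = 7,
		.min_freq = 7, .max_freq = 22,
		.min_freq_softlimit = 7, .max_freq_softlimit = 22,
	};
	unsigned int next = rps_clamp(&rps, rps.cur_freq + 2);

	printf("next request: %u (%u MHz)\n",
	       next, next * GT_FREQUENCY_MULTIPLIER);
	return 0;
}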

Cc: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 6037913..d1e0a36 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1026,7 +1026,7 @@
 			   max_freq * GT_FREQUENCY_MULTIPLIER);
 
 		seq_printf(m, "Max overclocked frequency: %dMHz\n",
-			   dev_priv->rps.hw_max * GT_FREQUENCY_MULTIPLIER);
+			   dev_priv->rps.max_freq * GT_FREQUENCY_MULTIPLIER);
 	} else if (IS_VALLEYVIEW(dev)) {
 		u32 freq_sts, val;
 
@@ -1498,8 +1498,8 @@
 
 	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
 
-	for (gpu_freq = dev_priv->rps.min_delay;
-	     gpu_freq <= dev_priv->rps.max_delay;
+	for (gpu_freq = dev_priv->rps.min_freq_softlimit;
+	     gpu_freq <= dev_priv->rps.max_freq_softlimit;
 	     gpu_freq++) {
 		ia_freq = gpu_freq;
 		sandybridge_pcode_read(dev_priv,
@@ -3449,9 +3449,9 @@
 		return ret;
 
 	if (IS_VALLEYVIEW(dev))
-		*val = vlv_gpu_freq(dev_priv, dev_priv->rps.max_delay);
+		*val = vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
 	else
-		*val = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
+		*val = dev_priv->rps.max_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
 	return 0;
@@ -3488,16 +3488,16 @@
 		do_div(val, GT_FREQUENCY_MULTIPLIER);
 
 		rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
-		hw_max = dev_priv->rps.hw_max;
+		hw_max = dev_priv->rps.max_freq;
 		hw_min = (rp_state_cap >> 16) & 0xff;
 	}
 
-	if (val < hw_min || val > hw_max || val < dev_priv->rps.min_delay) {
+	if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) {
 		mutex_unlock(&dev_priv->rps.hw_lock);
 		return -EINVAL;
 	}
 
-	dev_priv->rps.max_delay = val;
+	dev_priv->rps.max_freq_softlimit = val;
 
 	if (IS_VALLEYVIEW(dev))
 		valleyview_set_rps(dev, val);
@@ -3530,9 +3530,9 @@
 		return ret;
 
 	if (IS_VALLEYVIEW(dev))
-		*val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_delay);
+		*val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
 	else
-		*val = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
+		*val = dev_priv->rps.min_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
 	return 0;
@@ -3569,16 +3569,16 @@
 		do_div(val, GT_FREQUENCY_MULTIPLIER);
 
 		rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
-		hw_max = dev_priv->rps.hw_max;
+		hw_max = dev_priv->rps.max_freq;
 		hw_min = (rp_state_cap >> 16) & 0xff;
 	}
 
-	if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) {
+	if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
 		mutex_unlock(&dev_priv->rps.hw_lock);
 		return -EINVAL;
 	}
 
-	dev_priv->rps.min_delay = val;
+	dev_priv->rps.min_freq_softlimit = val;
 
 	if (IS_VALLEYVIEW(dev))
 		valleyview_set_rps(dev, val);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 241f5e1..c5c5760 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -980,14 +980,24 @@
 	struct work_struct work;
 	u32 pm_iir;
 
-	u8 cur_delay;
-	u8 min_delay;
-	u8 max_delay;
-	u8 rpe_delay;
-	u8 rp1_delay;
-	u8 rp0_delay;
-	u8 hw_max;
-	u8 min_freq;
+	/* Frequencies are stored in potentially platform dependent multiples.
+	 * In other words, *_freq needs to be multiplied by X to be interesting.
+	 * Soft limits are those which are used for the dynamic reclocking done
+	 * by the driver (raise frequencies under heavy loads, and lower for
+	 * lighter loads). Hard limits are those imposed by the hardware.
+	 *
+	 * A distinction is made for overclocking, which is never enabled by
+	 * default, and is considered to be above the hard limit if it's
+	 * possible at all.
+	 */
+	u8 cur_freq;		/* Current frequency (cached, may not == HW) */
+	u8 min_freq_softlimit;	/* Minimum frequency permitted by the driver */
+	u8 max_freq_softlimit;	/* Max frequency permitted by the driver */
+	u8 max_freq;		/* Maximum frequency, RP0 if not overclocking */
+	u8 min_freq;		/* AKA RPn. Minimum frequency */
+	u8 efficient_freq;	/* AKA RPe. Pre-determined balanced frequency */
+	u8 rp1_freq;		/* "less than" RP0 power/frequency */
+	u8 rp0_freq;		/* Non-overclocked max frequency. */
 
 	bool rp_up_masked;
 	bool rp_down_masked;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 1c00751..acf1ab3 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1075,7 +1075,7 @@
 			     u32 pm_iir, int new_delay)
 {
 	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
-		if (new_delay >= dev_priv->rps.max_delay) {
+		if (new_delay >= dev_priv->rps.max_freq_softlimit) {
 			/* Mask UP THRESHOLD Interrupts */
 			I915_WRITE(GEN6_PMINTRMSK,
 				   I915_READ(GEN6_PMINTRMSK) |
@@ -1090,7 +1090,7 @@
 			dev_priv->rps.rp_down_masked = false;
 		}
 	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
-		if (new_delay <= dev_priv->rps.min_delay) {
+		if (new_delay <= dev_priv->rps.min_freq_softlimit) {
 			/* Mask DOWN THRESHOLD Interrupts */
 			I915_WRITE(GEN6_PMINTRMSK,
 				   I915_READ(GEN6_PMINTRMSK) |
@@ -1136,38 +1136,39 @@
 			adj *= 2;
 		else
 			adj = 1;
-		new_delay = dev_priv->rps.cur_delay + adj;
+		new_delay = dev_priv->rps.cur_freq + adj;
 
 		/*
 		 * For better performance, jump directly
 		 * to RPe if we're below it.
 		 */
-		if (new_delay < dev_priv->rps.rpe_delay)
-			new_delay = dev_priv->rps.rpe_delay;
+		if (new_delay < dev_priv->rps.efficient_freq)
+			new_delay = dev_priv->rps.efficient_freq;
 	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
-		if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
-			new_delay = dev_priv->rps.rpe_delay;
+		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
+			new_delay = dev_priv->rps.efficient_freq;
 		else
-			new_delay = dev_priv->rps.min_delay;
+			new_delay = dev_priv->rps.min_freq_softlimit;
 		adj = 0;
 	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
 		if (adj < 0)
 			adj *= 2;
 		else
 			adj = -1;
-		new_delay = dev_priv->rps.cur_delay + adj;
+		new_delay = dev_priv->rps.cur_freq + adj;
 	} else { /* unknown event */
-		new_delay = dev_priv->rps.cur_delay;
+		new_delay = dev_priv->rps.cur_freq;
 	}
 
 	/* sysfs frequency interfaces may have snuck in while servicing the
 	 * interrupt
 	 */
 	new_delay = clamp_t(int, new_delay,
-			    dev_priv->rps.min_delay, dev_priv->rps.max_delay);
+			    dev_priv->rps.min_freq_softlimit,
+			    dev_priv->rps.max_freq_softlimit);
 
 	gen6_set_pm_mask(dev_priv, pm_iir, new_delay);
-	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay;
+	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;
 
 	if (IS_VALLEYVIEW(dev_priv->dev))
 		valleyview_set_rps(dev_priv->dev, new_delay);
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index e9ffefb..e3fa8cd 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -269,7 +269,7 @@
 		freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
 		ret = vlv_gpu_freq(dev_priv, (freq >> 8) & 0xff);
 	} else {
-		ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER;
+		ret = dev_priv->rps.cur_freq * GT_FREQUENCY_MULTIPLIER;
 	}
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
@@ -284,7 +284,7 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	return snprintf(buf, PAGE_SIZE, "%d\n",
-			vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay));
+			vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
 }
 
 static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
@@ -298,9 +298,9 @@
 
 	mutex_lock(&dev_priv->rps.hw_lock);
 	if (IS_VALLEYVIEW(dev_priv->dev))
-		ret = vlv_gpu_freq(dev_priv, dev_priv->rps.max_delay);
+		ret = vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
 	else
-		ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
+		ret = dev_priv->rps.max_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
 	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
@@ -334,13 +334,13 @@
 		val /= GT_FREQUENCY_MULTIPLIER;
 
 		rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
-		hw_max = dev_priv->rps.hw_max;
+		hw_max = dev_priv->rps.max_freq;
 		non_oc_max = (rp_state_cap & 0xff);
 		hw_min = ((rp_state_cap & 0xff0000) >> 16);
 	}
 
 	if (val < hw_min || val > hw_max ||
-	    val < dev_priv->rps.min_delay) {
+	    val < dev_priv->rps.min_freq_softlimit) {
 		mutex_unlock(&dev_priv->rps.hw_lock);
 		return -EINVAL;
 	}
@@ -349,9 +349,9 @@
 		DRM_DEBUG("User requested overclocking to %d\n",
 			  val * GT_FREQUENCY_MULTIPLIER);
 
-	dev_priv->rps.max_delay = val;
+	dev_priv->rps.max_freq_softlimit = val;
 
-	if (dev_priv->rps.cur_delay > val) {
+	if (dev_priv->rps.cur_freq > val) {
 		if (IS_VALLEYVIEW(dev))
 			valleyview_set_rps(dev, val);
 		else
@@ -360,7 +360,7 @@
 		/* We still need gen6_set_rps to process the new max_delay and
 		 * update the interrupt limits even though frequency request is
 		 * unchanged. */
-		gen6_set_rps(dev, dev_priv->rps.cur_delay);
+		gen6_set_rps(dev, dev_priv->rps.cur_freq);
 	}
 
 	mutex_unlock(&dev_priv->rps.hw_lock);
@@ -379,9 +379,9 @@
 
 	mutex_lock(&dev_priv->rps.hw_lock);
 	if (IS_VALLEYVIEW(dev_priv->dev))
-		ret = vlv_gpu_freq(dev_priv, dev_priv->rps.min_delay);
+		ret = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
 	else
-		ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
+		ret = dev_priv->rps.min_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
 	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
@@ -414,18 +414,18 @@
 		val /= GT_FREQUENCY_MULTIPLIER;
 
 		rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
-		hw_max = dev_priv->rps.hw_max;
+		hw_max = dev_priv->rps.max_freq;
 		hw_min = ((rp_state_cap & 0xff0000) >> 16);
 	}
 
-	if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) {
+	if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
 		mutex_unlock(&dev_priv->rps.hw_lock);
 		return -EINVAL;
 	}
 
-	dev_priv->rps.min_delay = val;
+	dev_priv->rps.min_freq_softlimit = val;
 
-	if (dev_priv->rps.cur_delay < val) {
+	if (dev_priv->rps.cur_freq < val) {
 		if (IS_VALLEYVIEW(dev))
 			valleyview_set_rps(dev, val);
 		else
@@ -434,7 +434,7 @@
 		/* We still need gen6_set_rps to process the new min_delay and
 		 * update the interrupt limits even though frequency request is
 		 * unchanged. */
-		gen6_set_rps(dev, dev_priv->rps.cur_delay);
+		gen6_set_rps(dev, dev_priv->rps.cur_freq);
 	}
 
 	mutex_unlock(&dev_priv->rps.hw_lock);
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index dd631d1..3db7c40 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2907,9 +2907,9 @@
 	 * the hw runs at the minimal clock before selecting the desired
 	 * frequency, if the down threshold expires in that window we will not
 	 * receive a down interrupt. */
-	limits = dev_priv->rps.max_delay << 24;
-	if (val <= dev_priv->rps.min_delay)
-		limits |= dev_priv->rps.min_delay << 16;
+	limits = dev_priv->rps.max_freq_softlimit << 24;
+	if (val <= dev_priv->rps.min_freq_softlimit)
+		limits |= dev_priv->rps.min_freq_softlimit << 16;
 
 	return limits;
 }
@@ -2921,26 +2921,26 @@
 	new_power = dev_priv->rps.power;
 	switch (dev_priv->rps.power) {
 	case LOW_POWER:
-		if (val > dev_priv->rps.rpe_delay + 1 && val > dev_priv->rps.cur_delay)
+		if (val > dev_priv->rps.efficient_freq + 1 && val > dev_priv->rps.cur_freq)
 			new_power = BETWEEN;
 		break;
 
 	case BETWEEN:
-		if (val <= dev_priv->rps.rpe_delay && val < dev_priv->rps.cur_delay)
+		if (val <= dev_priv->rps.efficient_freq && val < dev_priv->rps.cur_freq)
 			new_power = LOW_POWER;
-		else if (val >= dev_priv->rps.rp0_delay && val > dev_priv->rps.cur_delay)
+		else if (val >= dev_priv->rps.rp0_freq && val > dev_priv->rps.cur_freq)
 			new_power = HIGH_POWER;
 		break;
 
 	case HIGH_POWER:
-		if (val < (dev_priv->rps.rp1_delay + dev_priv->rps.rp0_delay) >> 1 && val < dev_priv->rps.cur_delay)
+		if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && val < dev_priv->rps.cur_freq)
 			new_power = BETWEEN;
 		break;
 	}
 	/* Max/min bins are special */
-	if (val == dev_priv->rps.min_delay)
+	if (val == dev_priv->rps.min_freq_softlimit)
 		new_power = LOW_POWER;
-	if (val == dev_priv->rps.max_delay)
+	if (val == dev_priv->rps.max_freq_softlimit)
 		new_power = HIGH_POWER;
 	if (new_power == dev_priv->rps.power)
 		return;
@@ -3014,10 +3014,10 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
-	WARN_ON(val > dev_priv->rps.max_delay);
-	WARN_ON(val < dev_priv->rps.min_delay);
+	WARN_ON(val > dev_priv->rps.max_freq_softlimit);
+	WARN_ON(val < dev_priv->rps.min_freq_softlimit);
 
-	if (val == dev_priv->rps.cur_delay) {
+	if (val == dev_priv->rps.cur_freq) {
 		/* min/max delay may still have been modified so be sure to
 		 * write the limits value */
 		I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
@@ -3045,7 +3045,7 @@
 
 	POSTING_READ(GEN6_RPNSWREQ);
 
-	dev_priv->rps.cur_delay = val;
+	dev_priv->rps.cur_freq = val;
 
 	trace_intel_gpu_freq_change(val * 50);
 }
@@ -3065,7 +3065,7 @@
 	 * When we are idle.  Drop to min voltage state.
 	 */
 
-	if (dev_priv->rps.cur_delay <= dev_priv->rps.min_delay)
+	if (dev_priv->rps.cur_freq <= dev_priv->rps.min_freq_softlimit)
 		return;
 
 	/* Mask turbo interrupt so that they will not come in between */
@@ -3082,10 +3082,10 @@
 		return;
 	}
 
-	dev_priv->rps.cur_delay = dev_priv->rps.min_delay;
+	dev_priv->rps.cur_freq = dev_priv->rps.min_freq_softlimit;
 
 	vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ,
-					dev_priv->rps.min_delay);
+					dev_priv->rps.min_freq_softlimit);
 
 	if (wait_for(((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS))
 				& GENFREQSTATUS) == 0, 5))
@@ -3099,7 +3099,7 @@
 	/* Unmask Up interrupts */
 	dev_priv->rps.rp_up_masked = true;
 	gen6_set_pm_mask(dev_priv, GEN6_PM_RP_DOWN_THRESHOLD,
-						dev_priv->rps.min_delay);
+						dev_priv->rps.min_freq_softlimit);
 }
 
 void gen6_rps_idle(struct drm_i915_private *dev_priv)
@@ -3111,7 +3111,7 @@
 		if (IS_VALLEYVIEW(dev))
 			vlv_set_rps_idle(dev_priv);
 		else
-			gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
+			gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
 		dev_priv->rps.last_adj = 0;
 	}
 	mutex_unlock(&dev_priv->rps.hw_lock);
@@ -3124,9 +3124,9 @@
 	mutex_lock(&dev_priv->rps.hw_lock);
 	if (dev_priv->rps.enabled) {
 		if (IS_VALLEYVIEW(dev))
-			valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
+			valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
 		else
-			gen6_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
+			gen6_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
 		dev_priv->rps.last_adj = 0;
 	}
 	mutex_unlock(&dev_priv->rps.hw_lock);
@@ -3137,20 +3137,20 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
-	WARN_ON(val > dev_priv->rps.max_delay);
-	WARN_ON(val < dev_priv->rps.min_delay);
+	WARN_ON(val > dev_priv->rps.max_freq_softlimit);
+	WARN_ON(val < dev_priv->rps.min_freq_softlimit);
 
 	DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
-			 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_delay),
-			 dev_priv->rps.cur_delay,
+			 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
+			 dev_priv->rps.cur_freq,
 			 vlv_gpu_freq(dev_priv, val), val);
 
-	if (val == dev_priv->rps.cur_delay)
+	if (val == dev_priv->rps.cur_freq)
 		return;
 
 	vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
 
-	dev_priv->rps.cur_delay = val;
+	dev_priv->rps.cur_freq = val;
 
 	trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val));
 }
@@ -3292,8 +3292,8 @@
 
 	/* Docs recommend 900MHz, and 300 MHz respectively */
 	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
-		   dev_priv->rps.max_delay << 24 |
-		   dev_priv->rps.min_delay << 16);
+		   dev_priv->rps.max_freq_softlimit << 24 |
+		   dev_priv->rps.min_freq_softlimit << 16);
 
 	I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
 	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
@@ -3352,20 +3352,22 @@
 	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
 	gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
 
-	/* In units of 50MHz */
-	dev_priv->rps.hw_max = hw_max = rp_state_cap & 0xff;
+	/* All of these values are in units of 50MHz */
+	dev_priv->rps.cur_freq = 0;
+	/* hw_max = RP0 until we check for overclocking */
+	dev_priv->rps.max_freq = hw_max = rp_state_cap & 0xff;
+	/* static values from HW: RP0 >= RPe >= RP1 > RPn (min_freq) */
+	dev_priv->rps.rp1_freq = (rp_state_cap >>  8) & 0xff;
+	dev_priv->rps.rp0_freq = (rp_state_cap >>  0) & 0xff;
+	dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
 	dev_priv->rps.min_freq = hw_min = (rp_state_cap >> 16) & 0xff;
-	dev_priv->rps.rp1_delay = (rp_state_cap >>  8) & 0xff;
-	dev_priv->rps.rp0_delay = (rp_state_cap >>  0) & 0xff;
-	dev_priv->rps.rpe_delay = dev_priv->rps.rp1_delay;
-	dev_priv->rps.cur_delay = 0;
 
 	/* Preserve min/max settings in case of re-init */
-	if (dev_priv->rps.max_delay == 0)
-		dev_priv->rps.max_delay = hw_max;
+	if (dev_priv->rps.max_freq_softlimit == 0)
+		dev_priv->rps.max_freq_softlimit = hw_max;
 
-	if (dev_priv->rps.min_delay == 0)
-		dev_priv->rps.min_delay = hw_min;
+	if (dev_priv->rps.min_freq_softlimit == 0)
+		dev_priv->rps.min_freq_softlimit = hw_min;
 
 	/* disable the counters and set deterministic thresholds */
 	I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -3420,13 +3422,13 @@
 	ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
 	if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
 		DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
-				 (dev_priv->rps.max_delay & 0xff) * 50,
+				 (dev_priv->rps.max_freq_softlimit & 0xff) * 50,
 				 (pcu_mbox & 0xff) * 50);
-		dev_priv->rps.hw_max = pcu_mbox & 0xff;
+		dev_priv->rps.max_freq = pcu_mbox & 0xff;
 	}
 
 	dev_priv->rps.power = HIGH_POWER; /* force a reset */
-	gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
+	gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
 
 	gen6_enable_rps_interrupts(dev);
 
@@ -3482,9 +3484,9 @@
 	 * to use for memory access.  We do this by specifying the IA frequency
 	 * the PCU should use as a reference to determine the ring frequency.
 	 */
-	for (gpu_freq = dev_priv->rps.max_delay; gpu_freq >= dev_priv->rps.min_delay;
+	for (gpu_freq = dev_priv->rps.max_freq_softlimit; gpu_freq >= dev_priv->rps.min_freq_softlimit;
 	     gpu_freq--) {
-		int diff = dev_priv->rps.max_delay - gpu_freq;
+		int diff = dev_priv->rps.max_freq_softlimit - gpu_freq;
 		unsigned int ia_freq = 0, ring_freq = 0;
 
 		if (INTEL_INFO(dev)->gen >= 8) {
@@ -3650,20 +3652,20 @@
 	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
 	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
 
-	dev_priv->rps.cur_delay = (val >> 8) & 0xff;
+	dev_priv->rps.cur_freq = (val >> 8) & 0xff;
 	DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
-			 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_delay),
-			 dev_priv->rps.cur_delay);
+			 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
+			 dev_priv->rps.cur_freq);
 
-	dev_priv->rps.hw_max = hw_max = valleyview_rps_max_freq(dev_priv);
+	dev_priv->rps.max_freq = hw_max = valleyview_rps_max_freq(dev_priv);
 	DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
 			 vlv_gpu_freq(dev_priv, hw_max),
 			 hw_max);
 
-	dev_priv->rps.rpe_delay = valleyview_rps_rpe_freq(dev_priv);
+	dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
 	DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
-			 vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay),
-			 dev_priv->rps.rpe_delay);
+			 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
+			 dev_priv->rps.efficient_freq);
 
 	hw_min = valleyview_rps_min_freq(dev_priv);
 	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
@@ -3671,17 +3673,17 @@
 			 hw_min);
 
 	/* Preserve min/max settings in case of re-init */
-	if (dev_priv->rps.max_delay == 0)
-		dev_priv->rps.max_delay = hw_max;
+	if (dev_priv->rps.max_freq_softlimit == 0)
+		dev_priv->rps.max_freq_softlimit = hw_max;
 
-	if (dev_priv->rps.min_delay == 0)
-		dev_priv->rps.min_delay = hw_min;
+	if (dev_priv->rps.min_freq_softlimit == 0)
+		dev_priv->rps.min_freq_softlimit = hw_min;
 
 	DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
-			 vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay),
-			 dev_priv->rps.rpe_delay);
+			 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
+			 dev_priv->rps.efficient_freq);
 
-	valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
+	valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
 
 	dev_priv->rps.rp_up_masked = false;
 	dev_priv->rps.rp_down_masked = false;
@@ -4122,7 +4124,7 @@
 
 	assert_spin_locked(&mchdev_lock);
 
-	pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_delay * 4));
+	pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_freq * 4));
 	pxvid = (pxvid >> 24) & 0x7f;
 	ext_v = pvid_to_extvid(dev_priv, pxvid);